repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
dbrattli/OSlash
oslash/cont.py
https://github.com/dbrattli/OSlash/blob/ffdc714c5d454f7519f740254de89f70850929eb/oslash/cont.py#L45-L50
def bind(self, fn: Callable[[Any], 'Cont']) -> 'Cont': r"""Chain continuation passing functions. Haskell: m >>= k = Cont $ \c -> runCont m $ \a -> runCont (k a) c """ return Cont(lambda c: self.run(lambda a: fn(a).run(c)))
[ "def", "bind", "(", "self", ",", "fn", ":", "Callable", "[", "[", "Any", "]", ",", "'Cont'", "]", ")", "->", "'Cont'", ":", "return", "Cont", "(", "lambda", "c", ":", "self", ".", "run", "(", "lambda", "a", ":", "fn", "(", "a", ")", ".", "run...
r"""Chain continuation passing functions. Haskell: m >>= k = Cont $ \c -> runCont m $ \a -> runCont (k a) c
[ "r", "Chain", "continuation", "passing", "functions", "." ]
python
train
INM-6/hybridLFPy
hybridLFPy/population.py
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L1379-L1439
def insert_synapses(self, cell, cellindex, synParams, idx = np.array([]), X='EX', SpCell = np.array([]), synDelays = None): """ Insert synapse with `parameters`=`synparams` on cell=cell, with segment indexes given by `idx`. `SpCell` and `SpTimes` picked from Brunel network simulation Parameters ---------- cell : `LFPy.Cell` instance Postsynaptic target cell. cellindex : int Index of cell in population. synParams : dict Parameters passed to `LFPy.Synapse`. idx : numpy.ndarray Postsynaptic compartment indices. X : str presynaptic population name SpCell : numpy.ndarray Presynaptic spiking cells. synDelays : numpy.ndarray Per connection specific delays. Returns ------- None See also -------- Population.insert_all_synapses """ #Insert synapses in an iterative fashion try: spikes = self.networkSim.dbs[X].select(SpCell[:idx.size]) except AttributeError as ae: raise ae, 'could not open CachedNetwork database objects' #apply synaptic delays if synDelays is not None and idx.size > 0: for i, delay in enumerate(synDelays): if spikes[i].size > 0: spikes[i] += delay #create synapse events: for i in range(idx.size): if len(spikes[i]) == 0: pass #print 'no spike times, skipping network cell #%i' % SpCell[i] else: synParams.update({'idx' : idx[i]}) # Create synapse(s) and setting times using class LFPy.Synapse synapse = LFPy.Synapse(cell, **synParams) #SpCell is a vector, or do not exist synapse.set_spike_times(spikes[i] + cell.tstartms)
[ "def", "insert_synapses", "(", "self", ",", "cell", ",", "cellindex", ",", "synParams", ",", "idx", "=", "np", ".", "array", "(", "[", "]", ")", ",", "X", "=", "'EX'", ",", "SpCell", "=", "np", ".", "array", "(", "[", "]", ")", ",", "synDelays", ...
Insert synapse with `parameters`=`synparams` on cell=cell, with segment indexes given by `idx`. `SpCell` and `SpTimes` picked from Brunel network simulation Parameters ---------- cell : `LFPy.Cell` instance Postsynaptic target cell. cellindex : int Index of cell in population. synParams : dict Parameters passed to `LFPy.Synapse`. idx : numpy.ndarray Postsynaptic compartment indices. X : str presynaptic population name SpCell : numpy.ndarray Presynaptic spiking cells. synDelays : numpy.ndarray Per connection specific delays. Returns ------- None See also -------- Population.insert_all_synapses
[ "Insert", "synapse", "with", "parameters", "=", "synparams", "on", "cell", "=", "cell", "with", "segment", "indexes", "given", "by", "idx", ".", "SpCell", "and", "SpTimes", "picked", "from", "Brunel", "network", "simulation" ]
python
train
angr/angr
angr/analyses/girlscout.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/girlscout.py#L559-L589
def _solve_forbase_address(self, function_starts, functions): """ Voting for the most possible base address. :param function_starts: :param functions: :returns: """ pseudo_base_addr = self.project.loader.main_object.min_addr base_addr_ctr = { } for s in function_starts: for f in functions: base_addr = s - f + pseudo_base_addr ctr = 1 for k in function_starts: if k - base_addr + pseudo_base_addr in functions: ctr += 1 if ctr > 5: base_addr_ctr[base_addr] = ctr if len(base_addr_ctr): base_addr, hits = sorted([(k, v) for k, v in base_addr_ctr.items()], key=lambda x: x[1], reverse=True)[0] return base_addr else: return None
[ "def", "_solve_forbase_address", "(", "self", ",", "function_starts", ",", "functions", ")", ":", "pseudo_base_addr", "=", "self", ".", "project", ".", "loader", ".", "main_object", ".", "min_addr", "base_addr_ctr", "=", "{", "}", "for", "s", "in", "function_s...
Voting for the most possible base address. :param function_starts: :param functions: :returns:
[ "Voting", "for", "the", "most", "possible", "base", "address", "." ]
python
train
jonathf/chaospy
chaospy/distributions/baseclass.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/baseclass.py#L189-L224
def pdf(self, x_data, step=1e-7): """ Probability density function. If possible the density will be calculated analytically. If not possible, it will be approximated by approximating the one-dimensional derivative of the forward Rosenblatt transformation and multiplying the component parts. Note that even if the distribution is multivariate, each component of the Rosenblatt is one-dimensional. Args: x_data (numpy.ndarray): Location for the density function. ``x_data.shape`` must be compatible with distribution shape. step (float, numpy.ndarray): If approximation is used, the step length given in the approximation of the derivative. If array provided, elements are used along each axis. Returns: (numpy.ndarray): Evaluated density function values. Shapes are related through the identity ``x_data.shape == dist.shape+out.shape``. """ x_data = numpy.asfarray(x_data) shape = x_data.shape x_data = x_data.reshape(len(self), -1) lower, upper = evaluation.evaluate_bound(self, x_data) f_data = numpy.zeros(x_data.shape) indices = (x_data <= upper) & (x_data >= lower) f_data[indices] = evaluation.evaluate_density(self, x_data)[indices] f_data = f_data.reshape(shape) if len(self) > 1: f_data = numpy.prod(f_data, 0) return f_data
[ "def", "pdf", "(", "self", ",", "x_data", ",", "step", "=", "1e-7", ")", ":", "x_data", "=", "numpy", ".", "asfarray", "(", "x_data", ")", "shape", "=", "x_data", ".", "shape", "x_data", "=", "x_data", ".", "reshape", "(", "len", "(", "self", ")", ...
Probability density function. If possible the density will be calculated analytically. If not possible, it will be approximated by approximating the one-dimensional derivative of the forward Rosenblatt transformation and multiplying the component parts. Note that even if the distribution is multivariate, each component of the Rosenblatt is one-dimensional. Args: x_data (numpy.ndarray): Location for the density function. ``x_data.shape`` must be compatible with distribution shape. step (float, numpy.ndarray): If approximation is used, the step length given in the approximation of the derivative. If array provided, elements are used along each axis. Returns: (numpy.ndarray): Evaluated density function values. Shapes are related through the identity ``x_data.shape == dist.shape+out.shape``.
[ "Probability", "density", "function", "." ]
python
train
BrewBlox/brewblox-service
brewblox_service/service.py
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/service.py#L68-L106
def create_parser(default_name: str) -> argparse.ArgumentParser: """ Creates the default brewblox_service ArgumentParser. Service-agnostic arguments are added. The parser allows calling code to add additional arguments before using it in create_app() Args: default_name (str): default value for the --name commandline argument. Returns: argparse.ArgumentParser: a Python ArgumentParser with defaults set. """ argparser = argparse.ArgumentParser(fromfile_prefix_chars='@') argparser.add_argument('-H', '--host', help='Host to which the app binds. [%(default)s]', default='0.0.0.0') argparser.add_argument('-p', '--port', help='Port to which the app binds. [%(default)s]', default=5000, type=int) argparser.add_argument('-o', '--output', help='Logging output. [%(default)s]') argparser.add_argument('-n', '--name', help='Service name. This will be used as prefix for all endpoints. [%(default)s]', default=default_name) argparser.add_argument('--debug', help='Run the app in debug mode. [%(default)s]', action='store_true') argparser.add_argument('--eventbus-host', help='Hostname at which the eventbus can be reached [%(default)s]', default='eventbus') argparser.add_argument('--eventbus-port', help='Port at which the eventbus can be reached [%(default)s]', default=5672, type=int) return argparser
[ "def", "create_parser", "(", "default_name", ":", "str", ")", "->", "argparse", ".", "ArgumentParser", ":", "argparser", "=", "argparse", ".", "ArgumentParser", "(", "fromfile_prefix_chars", "=", "'@'", ")", "argparser", ".", "add_argument", "(", "'-H'", ",", ...
Creates the default brewblox_service ArgumentParser. Service-agnostic arguments are added. The parser allows calling code to add additional arguments before using it in create_app() Args: default_name (str): default value for the --name commandline argument. Returns: argparse.ArgumentParser: a Python ArgumentParser with defaults set.
[ "Creates", "the", "default", "brewblox_service", "ArgumentParser", ".", "Service", "-", "agnostic", "arguments", "are", "added", "." ]
python
train
taskcluster/taskcluster-client.py
taskcluster/aio/queue.py
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/queue.py#L611-L630
async def listArtifacts(self, *args, **kwargs): """ Get Artifacts from Run Returns a list of artifacts and associated meta-data for a given run. As a task may have many artifacts paging may be necessary. If this end-point returns a `continuationToken`, you should call the end-point again with the `continuationToken` as the query-string option: `continuationToken`. By default this end-point will list up-to 1000 artifacts in a single page you may limit this with the query-string parameter `limit`. This method gives output: ``v1/list-artifacts-response.json#`` This method is ``experimental`` """ return await self._makeApiCall(self.funcinfo["listArtifacts"], *args, **kwargs)
[ "async", "def", "listArtifacts", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "await", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"listArtifacts\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ...
Get Artifacts from Run Returns a list of artifacts and associated meta-data for a given run. As a task may have many artifacts paging may be necessary. If this end-point returns a `continuationToken`, you should call the end-point again with the `continuationToken` as the query-string option: `continuationToken`. By default this end-point will list up-to 1000 artifacts in a single page you may limit this with the query-string parameter `limit`. This method gives output: ``v1/list-artifacts-response.json#`` This method is ``experimental``
[ "Get", "Artifacts", "from", "Run" ]
python
train
JukeboxPipeline/jukebox-core
src/jukeboxcore/reftrack.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/reftrack.py#L2246-L2269
def get_suggestions(self, reftrack): """Return a list with possible children for this reftrack Each Reftrack may want different children. E.g. a Asset wants to suggest a shader for itself and all assets that are linked in to it in the database. Suggestions only apply for enities with status other than None. A suggestion is a tuple of typ and element. It will be used to create a newlen :class:`Reftrack`. The parent will be this instance, root and interface will of course be the same. This will delegate the call to the appropriate :class:`ReftypeInterface`. So suggestions may vary for every typ and might depend on the status of the reftrack. :param reftrack: the reftrack which needs suggestions :type reftrack: :class:`Reftrack` :returns: list of suggestions, tuples of type and element. :rtype: list :raises: None """ inter = self.get_typ_interface(reftrack.get_typ()) return inter.get_suggestions(reftrack)
[ "def", "get_suggestions", "(", "self", ",", "reftrack", ")", ":", "inter", "=", "self", ".", "get_typ_interface", "(", "reftrack", ".", "get_typ", "(", ")", ")", "return", "inter", ".", "get_suggestions", "(", "reftrack", ")" ]
Return a list with possible children for this reftrack Each Reftrack may want different children. E.g. a Asset wants to suggest a shader for itself and all assets that are linked in to it in the database. Suggestions only apply for enities with status other than None. A suggestion is a tuple of typ and element. It will be used to create a newlen :class:`Reftrack`. The parent will be this instance, root and interface will of course be the same. This will delegate the call to the appropriate :class:`ReftypeInterface`. So suggestions may vary for every typ and might depend on the status of the reftrack. :param reftrack: the reftrack which needs suggestions :type reftrack: :class:`Reftrack` :returns: list of suggestions, tuples of type and element. :rtype: list :raises: None
[ "Return", "a", "list", "with", "possible", "children", "for", "this", "reftrack" ]
python
train
pantsbuild/pants
src/python/pants/util/meta.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/util/meta.py#L78-L104
def staticproperty(func): """Use as a decorator on a method definition to make it a class-level attribute (without binding). This decorator can be applied to a method or a staticmethod. This decorator does not bind any arguments. Usage: >>> other_x = 'value' >>> class Foo(object): ... @staticproperty ... def x(): ... return other_x ... >>> Foo.x 'value' Setting or deleting the attribute of this name will overwrite this property. The docstring of the classproperty `x` for a class `C` can be obtained by `C.__dict__['x'].__doc__`. """ doc = func.__doc__ if not isinstance(func, staticmethod): func = staticmethod(func) return ClassPropertyDescriptor(func, doc)
[ "def", "staticproperty", "(", "func", ")", ":", "doc", "=", "func", ".", "__doc__", "if", "not", "isinstance", "(", "func", ",", "staticmethod", ")", ":", "func", "=", "staticmethod", "(", "func", ")", "return", "ClassPropertyDescriptor", "(", "func", ",",...
Use as a decorator on a method definition to make it a class-level attribute (without binding). This decorator can be applied to a method or a staticmethod. This decorator does not bind any arguments. Usage: >>> other_x = 'value' >>> class Foo(object): ... @staticproperty ... def x(): ... return other_x ... >>> Foo.x 'value' Setting or deleting the attribute of this name will overwrite this property. The docstring of the classproperty `x` for a class `C` can be obtained by `C.__dict__['x'].__doc__`.
[ "Use", "as", "a", "decorator", "on", "a", "method", "definition", "to", "make", "it", "a", "class", "-", "level", "attribute", "(", "without", "binding", ")", "." ]
python
train
libChEBI/libChEBIpy
libchebipy/_parsers.py
https://github.com/libChEBI/libChEBIpy/blob/89f223a91f518619d5e3910070d283adcac1626e/libchebipy/_parsers.py#L425-L447
def get_references(chebi_ids): '''Returns references''' references = [] chebi_ids = [str(chebi_id) for chebi_id in chebi_ids] filename = get_file('reference.tsv.gz') with io.open(filename, 'r', encoding='cp1252') as textfile: next(textfile) for line in textfile: tokens = line.strip().split('\t') if tokens[0] in chebi_ids: # Append Reference: if len(tokens) > 3: ref = Reference(tokens[1], tokens[2], tokens[3], tokens[4]) else: ref = Reference(tokens[1], tokens[2]) references.append(ref) return references
[ "def", "get_references", "(", "chebi_ids", ")", ":", "references", "=", "[", "]", "chebi_ids", "=", "[", "str", "(", "chebi_id", ")", "for", "chebi_id", "in", "chebi_ids", "]", "filename", "=", "get_file", "(", "'reference.tsv.gz'", ")", "with", "io", ".",...
Returns references
[ "Returns", "references" ]
python
train
hydpy-dev/hydpy
hydpy/models/llake/llake_derived.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/llake/llake_derived.py#L74-L95
def update(self): """Calulate the auxilary term. >>> from hydpy.models.llake import * >>> parameterstep('1d') >>> simulationstep('12h') >>> n(3) >>> v(0., 1e5, 1e6) >>> q(_1=[0., 1., 2.], _7=[0., 2., 5.]) >>> maxdt('12h') >>> derived.seconds.update() >>> derived.nmbsubsteps.update() >>> derived.vq.update() >>> derived.vq vq(toy_1_1_0_0_0=[0.0, 243200.0, 2086400.0], toy_7_1_0_0_0=[0.0, 286400.0, 2216000.0]) """ con = self.subpars.pars.control der = self.subpars for (toy, qs) in con.q: setattr(self, str(toy), 2.*con.v+der.seconds/der.nmbsubsteps*qs) self.refresh()
[ "def", "update", "(", "self", ")", ":", "con", "=", "self", ".", "subpars", ".", "pars", ".", "control", "der", "=", "self", ".", "subpars", "for", "(", "toy", ",", "qs", ")", "in", "con", ".", "q", ":", "setattr", "(", "self", ",", "str", "(",...
Calulate the auxilary term. >>> from hydpy.models.llake import * >>> parameterstep('1d') >>> simulationstep('12h') >>> n(3) >>> v(0., 1e5, 1e6) >>> q(_1=[0., 1., 2.], _7=[0., 2., 5.]) >>> maxdt('12h') >>> derived.seconds.update() >>> derived.nmbsubsteps.update() >>> derived.vq.update() >>> derived.vq vq(toy_1_1_0_0_0=[0.0, 243200.0, 2086400.0], toy_7_1_0_0_0=[0.0, 286400.0, 2216000.0])
[ "Calulate", "the", "auxilary", "term", "." ]
python
train
crodjer/paster
paster/config.py
https://github.com/crodjer/paster/blob/0cd7230074850ba74e80c740a8bc2502645dd743/paster/config.py#L41-L55
def get_config(section, option, allow_empty_option=True, default=""): ''' Get data from configs ''' try: value = config.get(section, option) if value is None or len(value) == 0: if allow_empty_option: return "" else: return default else: return value except ConfigParser.NoSectionError: return default
[ "def", "get_config", "(", "section", ",", "option", ",", "allow_empty_option", "=", "True", ",", "default", "=", "\"\"", ")", ":", "try", ":", "value", "=", "config", ".", "get", "(", "section", ",", "option", ")", "if", "value", "is", "None", "or", ...
Get data from configs
[ "Get", "data", "from", "configs" ]
python
train
Miserlou/Zappa
example/authmodule.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/example/authmodule.py#L104-L134
def _addMethod(self, effect, verb, resource, conditions): """Adds a method to the internal lists of allowed or denied methods. Each object in the internal list contains a resource ARN and a condition statement. The condition statement can be null.""" if verb != "*" and not hasattr(HttpVerb, verb): raise NameError("Invalid HTTP verb " + verb + ". Allowed verbs in HttpVerb class") resourcePattern = re.compile(self.pathRegex) if not resourcePattern.match(resource): raise NameError("Invalid resource path: " + resource + ". Path should match " + self.pathRegex) if resource[:1] == "/": resource = resource[1:] resourceArn = ("arn:aws:execute-api:" + self.region + ":" + self.awsAccountId + ":" + self.restApiId + "/" + self.stage + "/" + verb + "/" + resource) if effect.lower() == "allow": self.allowMethods.append({ 'resourceArn' : resourceArn, 'conditions' : conditions }) elif effect.lower() == "deny": self.denyMethods.append({ 'resourceArn' : resourceArn, 'conditions' : conditions })
[ "def", "_addMethod", "(", "self", ",", "effect", ",", "verb", ",", "resource", ",", "conditions", ")", ":", "if", "verb", "!=", "\"*\"", "and", "not", "hasattr", "(", "HttpVerb", ",", "verb", ")", ":", "raise", "NameError", "(", "\"Invalid HTTP verb \"", ...
Adds a method to the internal lists of allowed or denied methods. Each object in the internal list contains a resource ARN and a condition statement. The condition statement can be null.
[ "Adds", "a", "method", "to", "the", "internal", "lists", "of", "allowed", "or", "denied", "methods", ".", "Each", "object", "in", "the", "internal", "list", "contains", "a", "resource", "ARN", "and", "a", "condition", "statement", ".", "The", "condition", ...
python
train
aiortc/aiortc
aiortc/utils.py
https://github.com/aiortc/aiortc/blob/60ed036abf4575bd63985724b4493d569e6da29b/aiortc/utils.py#L29-L33
def uint16_gte(a: int, b: int) -> bool: """ Return a >= b. """ return (a == b) or uint16_gt(a, b)
[ "def", "uint16_gte", "(", "a", ":", "int", ",", "b", ":", "int", ")", "->", "bool", ":", "return", "(", "a", "==", "b", ")", "or", "uint16_gt", "(", "a", ",", "b", ")" ]
Return a >= b.
[ "Return", "a", ">", "=", "b", "." ]
python
train
PMEAL/OpenPNM
openpnm/algorithms/GenericTransport.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/algorithms/GenericTransport.py#L318-L341
def _build_A(self, force=False): r""" Builds the coefficient matrix based on conductances between pores. The conductance to use is specified in the algorithm's ``settings`` under ``conductance``. In subclasses (e.g. ``FickianDiffusion``) this is set by default, though it can be overwritten. Parameters ---------- force : Boolean (default is ``False``) If set to ``True`` then the A matrix is built from new. If ``False`` (the default), a cached version of A is returned. The cached version is *clean* in the sense that no boundary conditions or sources terms have been added to it. """ if force: self._pure_A = None if self._pure_A is None: network = self.project.network phase = self.project.phases()[self.settings['phase']] g = phase[self.settings['conductance']] am = network.create_adjacency_matrix(weights=g, fmt='coo') self._pure_A = spgr.laplacian(am) self.A = self._pure_A.copy()
[ "def", "_build_A", "(", "self", ",", "force", "=", "False", ")", ":", "if", "force", ":", "self", ".", "_pure_A", "=", "None", "if", "self", ".", "_pure_A", "is", "None", ":", "network", "=", "self", ".", "project", ".", "network", "phase", "=", "s...
r""" Builds the coefficient matrix based on conductances between pores. The conductance to use is specified in the algorithm's ``settings`` under ``conductance``. In subclasses (e.g. ``FickianDiffusion``) this is set by default, though it can be overwritten. Parameters ---------- force : Boolean (default is ``False``) If set to ``True`` then the A matrix is built from new. If ``False`` (the default), a cached version of A is returned. The cached version is *clean* in the sense that no boundary conditions or sources terms have been added to it.
[ "r", "Builds", "the", "coefficient", "matrix", "based", "on", "conductances", "between", "pores", ".", "The", "conductance", "to", "use", "is", "specified", "in", "the", "algorithm", "s", "settings", "under", "conductance", ".", "In", "subclasses", "(", "e", ...
python
train
horazont/aioxmpp
aioxmpp/xml.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/xml.py#L738-L759
def send(self, xso): """ Send a single XML stream object. :param xso: Object to serialise and send. :type xso: :class:`aioxmpp.xso.XSO` :raises Exception: from any serialisation errors, usually :class:`ValueError`. Serialise the `xso` and send it over the stream. If any serialisation error occurs, no data is sent over the stream and the exception is re-raised; the :meth:`send` method thus provides strong exception safety. .. warning:: The behaviour of :meth:`send` after :meth:`abort` or :meth:`close` and before :meth:`start` is undefined. """ with self._writer.buffer(): xso.unparse_to_sax(self._writer)
[ "def", "send", "(", "self", ",", "xso", ")", ":", "with", "self", ".", "_writer", ".", "buffer", "(", ")", ":", "xso", ".", "unparse_to_sax", "(", "self", ".", "_writer", ")" ]
Send a single XML stream object. :param xso: Object to serialise and send. :type xso: :class:`aioxmpp.xso.XSO` :raises Exception: from any serialisation errors, usually :class:`ValueError`. Serialise the `xso` and send it over the stream. If any serialisation error occurs, no data is sent over the stream and the exception is re-raised; the :meth:`send` method thus provides strong exception safety. .. warning:: The behaviour of :meth:`send` after :meth:`abort` or :meth:`close` and before :meth:`start` is undefined.
[ "Send", "a", "single", "XML", "stream", "object", "." ]
python
train
Yelp/detect-secrets
detect_secrets/core/log.py
https://github.com/Yelp/detect-secrets/blob/473923ea71f1ac2b5ea1eacc49b98f97967e3d05/detect_secrets/core/log.py#L5-L32
def get_logger(name=None, format_string=None): """ :type name: str :param name: used for declaring log channels. :type format_string: str :param format_string: for custom formatting """ logging.captureWarnings(True) log = logging.getLogger(name) # Bind custom method to instance. # Source: https://stackoverflow.com/a/2982 log.set_debug_level = _set_debug_level.__get__(log) log.set_debug_level(0) if not format_string: format_string = '[%(module)s]\t%(levelname)s\t%(message)s' # Setting up log formats log.handlers = [] handler = logging.StreamHandler(sys.stderr) handler.setFormatter( logging.Formatter(format_string), ) log.addHandler(handler) return log
[ "def", "get_logger", "(", "name", "=", "None", ",", "format_string", "=", "None", ")", ":", "logging", ".", "captureWarnings", "(", "True", ")", "log", "=", "logging", ".", "getLogger", "(", "name", ")", "# Bind custom method to instance.", "# Source: https://st...
:type name: str :param name: used for declaring log channels. :type format_string: str :param format_string: for custom formatting
[ ":", "type", "name", ":", "str", ":", "param", "name", ":", "used", "for", "declaring", "log", "channels", "." ]
python
train
qubole/qds-sdk-py
qds_sdk/template.py
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/template.py#L233-L255
def runTemplate(id, data={}): """ Run an existing Template and waits for the Result. Prints result to stdout. Args: `id`: ID of the template to run `data`: json data containing the input_vars Returns: An integer as status (0: success, 1: failure) """ conn = Qubole.agent() path = str(id) + "/run" res = conn.post(Template.element_path(path), data) cmdType = res['command_type'] cmdId = res['id'] cmdClass = eval(cmdType) cmd = cmdClass.find(cmdId) while not Command.is_done(cmd.status): time.sleep(Qubole.poll_interval) cmd = cmdClass.find(cmd.id) return Template.getResult(cmdClass, cmd)
[ "def", "runTemplate", "(", "id", ",", "data", "=", "{", "}", ")", ":", "conn", "=", "Qubole", ".", "agent", "(", ")", "path", "=", "str", "(", "id", ")", "+", "\"/run\"", "res", "=", "conn", ".", "post", "(", "Template", ".", "element_path", "(",...
Run an existing Template and waits for the Result. Prints result to stdout. Args: `id`: ID of the template to run `data`: json data containing the input_vars Returns: An integer as status (0: success, 1: failure)
[ "Run", "an", "existing", "Template", "and", "waits", "for", "the", "Result", ".", "Prints", "result", "to", "stdout", "." ]
python
train
landscapeio/pylint-common
pylint_common/augmentations.py
https://github.com/landscapeio/pylint-common/blob/c4d492ec25dca42e78508bf30dcfdac7ed71898f/pylint_common/augmentations.py#L12-L28
def allow_attribute_comments(chain, node): """ This augmentation is to allow comments on class attributes, for example: class SomeClass(object): some_attribute = 5 ''' This is a docstring for the above attribute ''' """ # TODO: find the relevant citation for why this is the correct way to comment attributes if isinstance(node.previous_sibling(), astroid.Assign) and \ isinstance(node.parent, (astroid.Class, astroid.Module)) and \ isinstance(node.value, astroid.Const) and \ isinstance(node.value.value, BASESTRING): return chain()
[ "def", "allow_attribute_comments", "(", "chain", ",", "node", ")", ":", "# TODO: find the relevant citation for why this is the correct way to comment attributes", "if", "isinstance", "(", "node", ".", "previous_sibling", "(", ")", ",", "astroid", ".", "Assign", ")", "and...
This augmentation is to allow comments on class attributes, for example: class SomeClass(object): some_attribute = 5 ''' This is a docstring for the above attribute '''
[ "This", "augmentation", "is", "to", "allow", "comments", "on", "class", "attributes", "for", "example", ":" ]
python
train
cggh/scikit-allel
allel/stats/sf.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L133-L149
def scale_sfs(s): """Scale a site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes,) Site frequency spectrum. Returns ------- sfs_scaled : ndarray, int, shape (n_chromosomes,) Scaled site frequency spectrum. """ k = np.arange(s.size) out = s * k return out
[ "def", "scale_sfs", "(", "s", ")", ":", "k", "=", "np", ".", "arange", "(", "s", ".", "size", ")", "out", "=", "s", "*", "k", "return", "out" ]
Scale a site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes,) Site frequency spectrum. Returns ------- sfs_scaled : ndarray, int, shape (n_chromosomes,) Scaled site frequency spectrum.
[ "Scale", "a", "site", "frequency", "spectrum", "." ]
python
train
evyatarmeged/Raccoon
raccoon_src/utils/help_utils.py
https://github.com/evyatarmeged/Raccoon/blob/985797f73329976ec9c3fefbe4bbb3c74096ca51/raccoon_src/utils/help_utils.py#L61-L66
def validate_port_range(cls, port_range): """Validate port range for Nmap scan""" ports = port_range.split("-") if all(ports) and int(ports[-1]) <= 65535 and not len(ports) != 2: return True raise ScannerException("Invalid port range {}".format(port_range))
[ "def", "validate_port_range", "(", "cls", ",", "port_range", ")", ":", "ports", "=", "port_range", ".", "split", "(", "\"-\"", ")", "if", "all", "(", "ports", ")", "and", "int", "(", "ports", "[", "-", "1", "]", ")", "<=", "65535", "and", "not", "l...
Validate port range for Nmap scan
[ "Validate", "port", "range", "for", "Nmap", "scan" ]
python
train
Arubacloud/pyArubaCloud
ArubaCloud/SharedStorage/SharedStorage.py
https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/SharedStorage/SharedStorage.py#L13-L20
def get(self): """ Retrieve the current configured SharedStorages entries :return: [list] List containing the current SharedStorages entries """ request = self._call(GetSharedStorages) response = request.commit() return response['Value']
[ "def", "get", "(", "self", ")", ":", "request", "=", "self", ".", "_call", "(", "GetSharedStorages", ")", "response", "=", "request", ".", "commit", "(", ")", "return", "response", "[", "'Value'", "]" ]
Retrieve the current configured SharedStorages entries :return: [list] List containing the current SharedStorages entries
[ "Retrieve", "the", "current", "configured", "SharedStorages", "entries", ":", "return", ":", "[", "list", "]", "List", "containing", "the", "current", "SharedStorages", "entries" ]
python
train
odlgroup/odl
odl/tomo/backends/astra_cuda.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/backends/astra_cuda.py#L122-L169
def create_ids(self): """Create ASTRA objects.""" # Create input and output arrays if self.geometry.motion_partition.ndim == 1: motion_shape = self.geometry.motion_partition.shape else: # Need to flatten 2- or 3-dimensional angles into one axis motion_shape = (np.prod(self.geometry.motion_partition.shape),) proj_shape = motion_shape + self.geometry.det_partition.shape proj_ndim = len(proj_shape) if proj_ndim == 2: astra_proj_shape = proj_shape astra_vol_shape = self.reco_space.shape elif proj_ndim == 3: # The `u` and `v` axes of the projection data are swapped, # see explanation in `astra_*_3d_geom_to_vec`. astra_proj_shape = (proj_shape[1], proj_shape[0], proj_shape[2]) astra_vol_shape = self.reco_space.shape self.in_array = np.empty(astra_vol_shape, dtype='float32', order='C') self.out_array = np.empty(astra_proj_shape, dtype='float32', order='C') # Create ASTRA data structures vol_geom = astra_volume_geometry(self.reco_space) proj_geom = astra_projection_geometry(self.geometry) self.vol_id = astra_data(vol_geom, datatype='volume', ndim=self.reco_space.ndim, data=self.in_array, allow_copy=False) self.proj_id = astra_projector('nearest', vol_geom, proj_geom, ndim=proj_ndim, impl='cuda') self.sino_id = astra_data(proj_geom, datatype='projection', ndim=proj_ndim, data=self.out_array, allow_copy=False) # Create algorithm self.algo_id = astra_algorithm( 'forward', proj_ndim, self.vol_id, self.sino_id, proj_id=self.proj_id, impl='cuda')
[ "def", "create_ids", "(", "self", ")", ":", "# Create input and output arrays", "if", "self", ".", "geometry", ".", "motion_partition", ".", "ndim", "==", "1", ":", "motion_shape", "=", "self", ".", "geometry", ".", "motion_partition", ".", "shape", "else", ":...
Create ASTRA objects.
[ "Create", "ASTRA", "objects", "." ]
python
train
django-danceschool/django-danceschool
danceschool/core/models.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/models.py#L1804-L1821
def getMultiSeriesRegistrations(self, q_filter=Q(), name_series=False, **kwargs):
    '''
    Report the series for which this person holds more than one registration.

    Builds on getSeriesRegistered() and counts duplicate registrations for
    the same series (e.g. for keeping track of dance admissions for couples
    who register under one name). Returns a list of labelled strings when
    name_series is True, a short summary string otherwise, and None when
    there are no duplicate registrations.
    '''
    registered = self.getSeriesRegistered(
        q_filter, distinct=False, counter=False, **kwargs)
    # Keep only series that occur more than once.
    duplicates = [(series, count)
                  for series, count in Counter(registered).items()
                  if count > 1]
    if not duplicates:
        return None
    if name_series:
        if 'year' in kwargs or 'month' in kwargs:
            return ['%sx: %s' % (count, series.classDescription.title)
                    for series, count in duplicates]
        return ['%sx: %s' % (count, series) for series, count in duplicates]
    return '%sx registration' % max(count for series, count in duplicates)
[ "def", "getMultiSeriesRegistrations", "(", "self", ",", "q_filter", "=", "Q", "(", ")", ",", "name_series", "=", "False", ",", "*", "*", "kwargs", ")", ":", "series_registered", "=", "self", ".", "getSeriesRegistered", "(", "q_filter", ",", "distinct", "=", ...
Use the getSeriesRegistered method above to get a list of each series the person has registered for. The return only indicates whether they are registered more than once for the same series (e.g. for keeping track of dance admissions for couples who register under one name).
[ "Use", "the", "getSeriesRegistered", "method", "above", "to", "get", "a", "list", "of", "each", "series", "the", "person", "has", "registered", "for", ".", "The", "return", "only", "indicates", "whether", "they", "are", "registered", "more", "than", "once", ...
python
train
saltstack/pytest-salt
versioneer.py
https://github.com/saltstack/pytest-salt/blob/3e8c379b3636c64707e7a08b8eb6c9af20a1ac4d/versioneer.py#L1928-L1962
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations."""
    found = set()
    setters = False
    errors = 0
    # Each substring of interest maps to the marker we record on sight.
    markers = {
        "import versioneer": "import",
        "versioneer.get_cmdclass(": "cmdclass",
        "versioneer.get_version()": "get_version",
    }
    with open("setup.py", "r") as f:
        for line in f.readlines():
            for needle, marker in markers.items():
                if needle in line:
                    found.add(marker)
            # Old-style module-level configuration is no longer supported.
            if "versioneer.VCS" in line or "versioneer.versionfile_source" in line:
                setters = True
    if len(found) != 3:
        for msg in ("",
                    "Your setup.py appears to be missing some important items",
                    "(but I might be wrong). Please make sure it has something",
                    "roughly like the following:",
                    "",
                    " import versioneer",
                    " setup( version=versioneer.get_version(),",
                    " cmdclass=versioneer.get_cmdclass(), ...)",
                    ""):
            print(msg)
        errors += 1
    if setters:
        for msg in ("You should remove lines like 'versioneer.VCS = ' and",
                    "'versioneer.versionfile_source = ' . This configuration",
                    "now lives in setup.cfg, and should be removed from setup.py",
                    ""):
            print(msg)
        errors += 1
    return errors
[ "def", "scan_setup_py", "(", ")", ":", "found", "=", "set", "(", ")", "setters", "=", "False", "errors", "=", "0", "with", "open", "(", "\"setup.py\"", ",", "\"r\"", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", ...
Validate the contents of setup.py against Versioneer's expectations.
[ "Validate", "the", "contents", "of", "setup", ".", "py", "against", "Versioneer", "s", "expectations", "." ]
python
train
Netflix-Skunkworks/swag-client
swag_client/migrations/versions/v2.py
https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/migrations/versions/v2.py#L7-L173
def upgrade(account):
    """Transforms data from a v1 format to a v2 format.

    Takes a v1 swag account record (dict) and returns the equivalent v2
    record. Raises a generic Exception when neither a project_id nor an
    account_number is present in the v1 metadata.
    """
    environ = 'prod' if 'prod' in account['tags'] else 'test'
    owner = 'netflix' if account['ours'] else 'third-party'

    services = []

    # Well-known v1 metadata entries become dedicated service records.
    if account['metadata'].get('s3_name'):
        services.append(dict(
            name='s3',
            metadata=dict(name=account['metadata']['s3_name']),
            status=[dict(region='all', enabled=True)],
        ))

    if account['metadata'].get('cloudtrail_index'):
        services.append(dict(
            name='cloudtrail',
            metadata=dict(
                esIndex=account['metadata']['cloudtrail_index'],
                kibanaUrl=account['metadata']['cloudtrail_kibana_url'],
            ),
            status=[dict(region='all', enabled=True)],
        ))

    if account.get('bastion'):
        services.append(dict(
            name='bastion',
            metadata=dict(hostname=account['bastion']),
            status=[dict(region='all', enabled=True)],
        ))

    # Remaining v1 services carry over, with per-service metadata quirks.
    for service in account['services'].keys():
        entry = dict(
            name=service,
            status=[dict(
                region='all',
                enabled=account['services'][service].get('enabled', True),
            )],
        )
        if service == 'spinnaker':
            entry['metadata'] = {'name': account['services'][service]['name']}
        if service == 'lazyfalcon':
            if account['services'][service].get('owner'):
                entry['metadata'] = {'owner': account['services'][service]['owner']}
        if service == 'titus':
            entry['metadata'] = {'stacks': account['services'][service]['stacks']}
        services.append(entry)

    if account['metadata'].get('project_id'):
        item_id = account['metadata']['project_id']
    elif account['metadata'].get('account_number'):
        item_id = account['metadata']['account_number']
    else:
        raise Exception('No id found, are you sure this is in v1 swag format.')

    status = []
    if account['type'] == 'aws':
        # v2 tracks per-region readiness; only these three start 'ready'.
        ready_regions = ['us-east-1', 'us-west-2', 'eu-west-1']
        inactive_regions = [
            'us-east-2', 'us-west-1', 'ca-central-1', 'ap-south-1',
            'ap-northeast-2', 'ap-northeast-1', 'ap-southeast-1',
            'ap-southeast-2', 'eu-west-2', 'eu-central-1', 'sa-east-1',
        ]
        status = ([{'region': r, 'status': 'ready'} for r in ready_regions]
                  + [{'region': r, 'status': 'in-active'} for r in inactive_regions])

    return dict(
        id=item_id,
        email=account['metadata'].get('email'),
        name=account['name'],
        contacts=account['owners'],
        provider=account['type'],
        status=status,
        tags=list(set(account['tags'])),
        environment=environ,
        description=account['description'],
        sensitive=account['cmc_required'],
        owner=owner,
        aliases=account['alias'],
        services=services,
        account_status=account['account_status']
    )
[ "def", "upgrade", "(", "account", ")", ":", "environ", "=", "'test'", "if", "'prod'", "in", "account", "[", "'tags'", "]", ":", "environ", "=", "'prod'", "owner", "=", "'netflix'", "if", "not", "account", "[", "'ours'", "]", ":", "owner", "=", "'third-...
Transforms data from a v1 format to a v2 format
[ "Transforms", "data", "from", "a", "v1", "format", "to", "a", "v2", "format" ]
python
train
deschler/django-modeltranslation
modeltranslation/manager.py
https://github.com/deschler/django-modeltranslation/blob/18fec04a5105cbd83fc3759f4fda20135b3a848c/modeltranslation/manager.py#L365-L370
def get_or_create(self, **kwargs):
    """
    Allows to override population mode with a ``populate`` method.

    Delegates to the inherited ``get_or_create`` inside this queryset's
    configured auto-population context, so translated fields are filled
    according to ``self._populate_mode``.
    """
    # Wrap the parent call so creation happens under the chosen
    # population mode rather than the global default.
    with auto_populate(self._populate_mode):
        return super(MultilingualQuerySet, self).get_or_create(**kwargs)
[ "def", "get_or_create", "(", "self", ",", "*", "*", "kwargs", ")", ":", "with", "auto_populate", "(", "self", ".", "_populate_mode", ")", ":", "return", "super", "(", "MultilingualQuerySet", ",", "self", ")", ".", "get_or_create", "(", "*", "*", "kwargs", ...
Allows to override population mode with a ``populate`` method.
[ "Allows", "to", "override", "population", "mode", "with", "a", "populate", "method", "." ]
python
train
graphistry/pygraphistry
graphistry/plotter.py
https://github.com/graphistry/pygraphistry/blob/3dfc50e60232c6f5fedd6e5fa9d3048b606944b8/graphistry/plotter.py#L249-L269
def settings(self, height=None, url_params=None, render=None):
    """Specify iframe height and add URL parameter dictionary.

    The library takes care of URI component encoding for the dictionary.
    Returns a shallow copy of this plotter with the new settings applied;
    the original object is left untouched.

    :param height: Height in pixels.
    :type height: Integer.
    :param url_params: Dictionary of querystring parameters to append
        to the URL (default None, treated as empty).
    :type url_params: Dictionary
    :param render: Whether to render the visualization using the native
        notebook environment (default True), or return the visualization URL
    :type render: Boolean
    """
    res = copy.copy(self)
    res._height = height or self._height
    # `url_params` defaulted to a shared mutable `{}` before; use None as
    # the sentinel instead (classic mutable-default pitfall).
    res._url_params = dict(self._url_params, **(url_params or {}))
    # `is None` rather than `== None` for the identity test.
    res._render = self._render if render is None else render
    return res
[ "def", "settings", "(", "self", ",", "height", "=", "None", ",", "url_params", "=", "{", "}", ",", "render", "=", "None", ")", ":", "res", "=", "copy", ".", "copy", "(", "self", ")", "res", ".", "_height", "=", "height", "or", "self", ".", "_heig...
Specify iframe height and add URL parameter dictionary. The library takes care of URI component encoding for the dictionary. :param height: Height in pixels. :type height: Integer. :param url_params: Dictionary of querystring parameters to append to the URL. :type url_params: Dictionary :param render: Whether to render the visualization using the native notebook environment (default True), or return the visualization URL :type render: Boolean
[ "Specify", "iframe", "height", "and", "add", "URL", "parameter", "dictionary", "." ]
python
train
PGower/PyCanvas
pycanvas/apis/external_tools.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/external_tools.py#L254-L575
def create_external_tool_courses(self, name, course_id, consumer_key, privacy_level, shared_secret,
                                 account_navigation_enabled=None, account_navigation_selection_height=None,
                                 account_navigation_selection_width=None, account_navigation_text=None,
                                 account_navigation_url=None, config_type=None, config_url=None, config_xml=None,
                                 course_home_sub_navigation_enabled=None, course_home_sub_navigation_icon_url=None,
                                 course_home_sub_navigation_text=None, course_home_sub_navigation_url=None,
                                 course_navigation_default=None, course_navigation_enabled=None,
                                 course_navigation_text=None, course_navigation_visibility=None,
                                 course_navigation_windowTarget=None, custom_fields_field_name=None,
                                 description=None, domain=None, editor_button_enabled=None,
                                 editor_button_icon_url=None, editor_button_message_type=None,
                                 editor_button_selection_height=None, editor_button_selection_width=None,
                                 editor_button_url=None, homework_submission_enabled=None,
                                 homework_submission_message_type=None, homework_submission_text=None,
                                 homework_submission_url=None, icon_url=None, link_selection_enabled=None,
                                 link_selection_message_type=None, link_selection_text=None,
                                 link_selection_url=None, migration_selection_enabled=None,
                                 migration_selection_message_type=None, migration_selection_url=None,
                                 not_selectable=None, oauth_compliant=None, resource_selection_enabled=None,
                                 resource_selection_icon_url=None, resource_selection_selection_height=None,
                                 resource_selection_selection_width=None, resource_selection_url=None,
                                 text=None, tool_configuration_enabled=None, tool_configuration_message_type=None,
                                 tool_configuration_url=None, url=None, user_navigation_enabled=None,
                                 user_navigation_text=None, user_navigation_url=None):
    """
    Create an external tool.

    Create an external tool in the specified course/account. The created
    tool will be returned, see the "show" endpoint for an example.

    Optional parameters that are None are omitted from the request body;
    every non-None value is flattened into the Canvas form-encoded key
    shown next to it below (e.g. ``account_navigation[url]``).
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - course_id
    """ID"""
    path["course_id"] = course_id

    # REQUIRED - name
    """The name of the tool"""
    data["name"] = name

    # REQUIRED - privacy_level
    """What information to send to the external tool."""
    self._validate_enum(privacy_level, ["anonymous", "name_only", "public"])
    data["privacy_level"] = privacy_level

    # REQUIRED - consumer_key
    """The consumer key for the external tool"""
    data["consumer_key"] = consumer_key

    # REQUIRED - shared_secret
    """The shared secret with the external tool"""
    data["shared_secret"] = shared_secret

    # OPTIONAL - description
    """A description of the tool"""
    if description is not None:
        data["description"] = description

    # OPTIONAL - url
    """The url to match links against. Either "url" or "domain" should be set, not both."""
    if url is not None:
        data["url"] = url

    # OPTIONAL - domain
    """The domain to match links against. Either "url" or "domain" should be set, not both."""
    if domain is not None:
        data["domain"] = domain

    # OPTIONAL - icon_url
    """The url of the icon to show for this tool"""
    if icon_url is not None:
        data["icon_url"] = icon_url

    # OPTIONAL - text
    """The default text to show for this tool"""
    if text is not None:
        data["text"] = text

    # OPTIONAL - custom_fields[field_name]
    """Custom fields that will be sent to the tool consumer; can be used multiple times"""
    if custom_fields_field_name is not None:
        data["custom_fields[field_name]"] = custom_fields_field_name

    # OPTIONAL - account_navigation[url]
    """The url of the external tool for account navigation"""
    if account_navigation_url is not None:
        data["account_navigation[url]"] = account_navigation_url

    # OPTIONAL - account_navigation[enabled]
    """Set this to enable this feature"""
    if account_navigation_enabled is not None:
        data["account_navigation[enabled]"] = account_navigation_enabled

    # OPTIONAL - account_navigation[text]
    """The text that will show on the left-tab in the account navigation"""
    if account_navigation_text is not None:
        data["account_navigation[text]"] = account_navigation_text

    # OPTIONAL - account_navigation[selection_width]
    """The width of the dialog the tool is launched in"""
    if account_navigation_selection_width is not None:
        data["account_navigation[selection_width]"] = account_navigation_selection_width

    # OPTIONAL - account_navigation[selection_height]
    """The height of the dialog the tool is launched in"""
    if account_navigation_selection_height is not None:
        data["account_navigation[selection_height]"] = account_navigation_selection_height

    # OPTIONAL - user_navigation[url]
    """The url of the external tool for user navigation"""
    if user_navigation_url is not None:
        data["user_navigation[url]"] = user_navigation_url

    # OPTIONAL - user_navigation[enabled]
    """Set this to enable this feature"""
    if user_navigation_enabled is not None:
        data["user_navigation[enabled]"] = user_navigation_enabled

    # OPTIONAL - user_navigation[text]
    """The text that will show on the left-tab in the user navigation"""
    if user_navigation_text is not None:
        data["user_navigation[text]"] = user_navigation_text

    # OPTIONAL - course_home_sub_navigation[url]
    """The url of the external tool for right-side course home navigation menu"""
    if course_home_sub_navigation_url is not None:
        data["course_home_sub_navigation[url]"] = course_home_sub_navigation_url

    # OPTIONAL - course_home_sub_navigation[enabled]
    """Set this to enable this feature"""
    if course_home_sub_navigation_enabled is not None:
        data["course_home_sub_navigation[enabled]"] = course_home_sub_navigation_enabled

    # OPTIONAL - course_home_sub_navigation[text]
    """The text that will show on the right-side course home navigation menu"""
    if course_home_sub_navigation_text is not None:
        data["course_home_sub_navigation[text]"] = course_home_sub_navigation_text

    # OPTIONAL - course_home_sub_navigation[icon_url]
    """The url of the icon to show in the right-side course home navigation menu"""
    if course_home_sub_navigation_icon_url is not None:
        data["course_home_sub_navigation[icon_url]"] = course_home_sub_navigation_icon_url

    # OPTIONAL - course_navigation[enabled]
    """Set this to enable this feature"""
    if course_navigation_enabled is not None:
        data["course_navigation[enabled]"] = course_navigation_enabled

    # OPTIONAL - course_navigation[text]
    """The text that will show on the left-tab in the course navigation"""
    if course_navigation_text is not None:
        data["course_navigation[text]"] = course_navigation_text

    # OPTIONAL - course_navigation[visibility]
    """Who will see the navigation tab. "admins" for course admins, "members" for students, null for everyone"""
    if course_navigation_visibility is not None:
        self._validate_enum(course_navigation_visibility, ["admins", "members"])
        data["course_navigation[visibility]"] = course_navigation_visibility

    # OPTIONAL - course_navigation[windowTarget]
    """Determines how the navigation tab will be opened. "_blank" Launches the external tool in a new window or tab. "_self" (Default) Launches the external tool in an iframe inside of Canvas."""
    if course_navigation_windowTarget is not None:
        self._validate_enum(course_navigation_windowTarget, ["_blank", "_self"])
        data["course_navigation[windowTarget]"] = course_navigation_windowTarget

    # OPTIONAL - course_navigation[default]
    """Whether the navigation option will show in the course by default or whether the teacher will have to explicitly enable it"""
    if course_navigation_default is not None:
        data["course_navigation[default]"] = course_navigation_default

    # OPTIONAL - editor_button[url]
    """The url of the external tool"""
    if editor_button_url is not None:
        data["editor_button[url]"] = editor_button_url

    # OPTIONAL - editor_button[enabled]
    """Set this to enable this feature"""
    if editor_button_enabled is not None:
        data["editor_button[enabled]"] = editor_button_enabled

    # OPTIONAL - editor_button[icon_url]
    """The url of the icon to show in the WYSIWYG editor"""
    if editor_button_icon_url is not None:
        data["editor_button[icon_url]"] = editor_button_icon_url

    # OPTIONAL - editor_button[selection_width]
    """The width of the dialog the tool is launched in"""
    if editor_button_selection_width is not None:
        data["editor_button[selection_width]"] = editor_button_selection_width

    # OPTIONAL - editor_button[selection_height]
    """The height of the dialog the tool is launched in"""
    if editor_button_selection_height is not None:
        data["editor_button[selection_height]"] = editor_button_selection_height

    # OPTIONAL - editor_button[message_type]
    """Set this to ContentItemSelectionRequest to tell the tool to use content-item; otherwise, omit"""
    if editor_button_message_type is not None:
        data["editor_button[message_type]"] = editor_button_message_type

    # OPTIONAL - homework_submission[url]
    """The url of the external tool"""
    if homework_submission_url is not None:
        data["homework_submission[url]"] = homework_submission_url

    # OPTIONAL - homework_submission[enabled]
    """Set this to enable this feature"""
    if homework_submission_enabled is not None:
        data["homework_submission[enabled]"] = homework_submission_enabled

    # OPTIONAL - homework_submission[text]
    """The text that will show on the homework submission tab"""
    if homework_submission_text is not None:
        data["homework_submission[text]"] = homework_submission_text

    # OPTIONAL - homework_submission[message_type]
    """Set this to ContentItemSelectionRequest to tell the tool to use content-item; otherwise, omit"""
    if homework_submission_message_type is not None:
        data["homework_submission[message_type]"] = homework_submission_message_type

    # OPTIONAL - link_selection[url]
    """The url of the external tool"""
    if link_selection_url is not None:
        data["link_selection[url]"] = link_selection_url

    # OPTIONAL - link_selection[enabled]
    """Set this to enable this feature"""
    if link_selection_enabled is not None:
        data["link_selection[enabled]"] = link_selection_enabled

    # OPTIONAL - link_selection[text]
    """The text that will show for the link selection text"""
    if link_selection_text is not None:
        data["link_selection[text]"] = link_selection_text

    # OPTIONAL - link_selection[message_type]
    """Set this to ContentItemSelectionRequest to tell the tool to use content-item; otherwise, omit"""
    if link_selection_message_type is not None:
        data["link_selection[message_type]"] = link_selection_message_type

    # OPTIONAL - migration_selection[url]
    """The url of the external tool"""
    if migration_selection_url is not None:
        data["migration_selection[url]"] = migration_selection_url

    # OPTIONAL - migration_selection[enabled]
    """Set this to enable this feature"""
    if migration_selection_enabled is not None:
        data["migration_selection[enabled]"] = migration_selection_enabled

    # OPTIONAL - migration_selection[message_type]
    """Set this to ContentItemSelectionRequest to tell the tool to use content-item; otherwise, omit"""
    if migration_selection_message_type is not None:
        data["migration_selection[message_type]"] = migration_selection_message_type

    # OPTIONAL - tool_configuration[url]
    """The url of the external tool"""
    if tool_configuration_url is not None:
        data["tool_configuration[url]"] = tool_configuration_url

    # OPTIONAL - tool_configuration[enabled]
    """Set this to enable this feature"""
    if tool_configuration_enabled is not None:
        data["tool_configuration[enabled]"] = tool_configuration_enabled

    # OPTIONAL - tool_configuration[message_type]
    """Set this to ContentItemSelectionRequest to tell the tool to use content-item; otherwise, omit"""
    if tool_configuration_message_type is not None:
        data["tool_configuration[message_type]"] = tool_configuration_message_type

    # OPTIONAL - resource_selection[url]
    """The url of the external tool"""
    if resource_selection_url is not None:
        data["resource_selection[url]"] = resource_selection_url

    # OPTIONAL - resource_selection[enabled]
    """Set this to enable this feature"""
    if resource_selection_enabled is not None:
        data["resource_selection[enabled]"] = resource_selection_enabled

    # OPTIONAL - resource_selection[icon_url]
    """The url of the icon to show in the module external tool list"""
    if resource_selection_icon_url is not None:
        data["resource_selection[icon_url]"] = resource_selection_icon_url

    # OPTIONAL - resource_selection[selection_width]
    """The width of the dialog the tool is launched in"""
    if resource_selection_selection_width is not None:
        data["resource_selection[selection_width]"] = resource_selection_selection_width

    # OPTIONAL - resource_selection[selection_height]
    """The height of the dialog the tool is launched in"""
    if resource_selection_selection_height is not None:
        data["resource_selection[selection_height]"] = resource_selection_selection_height

    # OPTIONAL - config_type
    """Configuration can be passed in as CC xml instead of using query parameters. If this value is "by_url" or "by_xml" then an xml configuration will be expected in either the "config_xml" or "config_url" parameter. Note that the name parameter overrides the tool name provided in the xml"""
    if config_type is not None:
        data["config_type"] = config_type

    # OPTIONAL - config_xml
    """XML tool configuration, as specified in the CC xml specification. This is required if "config_type" is set to "by_xml"."""
    if config_xml is not None:
        data["config_xml"] = config_xml

    # OPTIONAL - config_url
    """URL where the server can retrieve an XML tool configuration, as specified in the CC xml specification. This is required if "config_type" is set to "by_url"."""
    if config_url is not None:
        data["config_url"] = config_url

    # OPTIONAL - not_selectable
    """Default: false, if set to true the tool won't show up in the external tool selection UI in modules and assignments"""
    if not_selectable is not None:
        data["not_selectable"] = not_selectable

    # OPTIONAL - oauth_compliant
    """Default: false, if set to true LTI query params will not be copied to the post body."""
    if oauth_compliant is not None:
        data["oauth_compliant"] = oauth_compliant

    self.logger.debug("POST /api/v1/courses/{course_id}/external_tools with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/external_tools".format(**path), data=data, params=params, no_data=True)
[ "def", "create_external_tool_courses", "(", "self", ",", "name", ",", "course_id", ",", "consumer_key", ",", "privacy_level", ",", "shared_secret", ",", "account_navigation_enabled", "=", "None", ",", "account_navigation_selection_height", "=", "None", ",", "account_nav...
Create an external tool. Create an external tool in the specified course/account. The created tool will be returned, see the "show" endpoint for an example.
[ "Create", "an", "external", "tool", ".", "Create", "an", "external", "tool", "in", "the", "specified", "course", "/", "account", ".", "The", "created", "tool", "will", "be", "returned", "see", "the", "show", "endpoint", "for", "an", "example", "." ]
python
train
rootpy/rootpy
rootpy/context.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/context.py#L81-L91
def preserve_batch_state():
    """
    Context manager which ensures the batch state is the same on exit
    as it was on entry.
    """
    # Hold the module lock so concurrent users cannot interleave the
    # save/restore of the global ROOT batch flag.
    with LOCK:
        old = ROOT.gROOT.IsBatch()
        try:
            yield
        finally:
            # Restore the saved batch mode even if the body raised.
            ROOT.gROOT.SetBatch(old)
[ "def", "preserve_batch_state", "(", ")", ":", "with", "LOCK", ":", "old", "=", "ROOT", ".", "gROOT", ".", "IsBatch", "(", ")", "try", ":", "yield", "finally", ":", "ROOT", ".", "gROOT", ".", "SetBatch", "(", "old", ")" ]
Context manager which ensures the batch state is the same on exit as it was on entry.
[ "Context", "manager", "which", "ensures", "the", "batch", "state", "is", "the", "same", "on", "exit", "as", "it", "was", "on", "entry", "." ]
python
train
PiotrDabkowski/Js2Py
js2py/legecy_translators/translator.py
https://github.com/PiotrDabkowski/Js2Py/blob/c0fa43f5679cf91ca8986c5747fcb07a433dc584/js2py/legecy_translators/translator.py#L73-L117
def translate_func(name, block, args):
    """Translates functions and all nested functions to Python code.

    name  - name of that function (global functions will be available under
            var while inline will be available directly under this name)
    block - code of the function (*with* brackets {})
    args  - arguments that this function takes
    """
    # Inline function names carry the real JS name after an '@' separator.
    inline = name.startswith('PyJsLvalInline')
    real_name = ''
    if inline:
        name, real_name = name.split('@')
    arglist = ', '.join(args) + ', ' if args else ''
    code = '@Js\ndef %s(%sthis, arguments, var=var):\n' % (name, arglist)
    # register local variables
    scope = "'this':this, 'arguments':arguments"  # it will be a simple dictionary
    for arg in args:
        scope += ', %s:%s' % (repr(arg), arg)
    if real_name:
        # An inline function must be able to reference itself by its JS name.
        scope += ', %s:%s' % (repr(real_name), name)
    code += indent('var = Scope({%s}, var)\n' % scope)
    # Strip nested function definitions out of the body before translating it.
    block, nested_hoisted, nested_inline = remove_functions(block)
    py_code, to_register = translate_flow(block)
    # register variables declared with var and names of hoisted functions.
    to_register += nested_hoisted.keys()
    if to_register:
        code += indent('var.registers(%s)\n' % str(to_register))
    for nested_name, info in nested_hoisted.iteritems():
        nested_block, nested_args = info
        new_code = translate_func('PyJsLvalTempHoisted', nested_block, nested_args)
        # Now put definition of hoisted function on the top
        code += indent(new_code)
        code += indent(
            'PyJsLvalTempHoisted.func_name = %s\n' % repr(nested_name))
        code += indent(
            'var.put(%s, PyJsLvalTempHoisted)\n' % repr(nested_name))
    for nested_name, info in nested_inline.iteritems():
        nested_block, nested_args = info
        new_code = translate_func(nested_name, nested_block, nested_args)
        # Inject definitions of inline functions just before usage
        # nested inline names have this format : LVAL_NAME@REAL_NAME
        py_code = inject_before_lval(py_code,
                                     nested_name.split('@')[0], new_code)
    if py_code.strip():
        code += indent(py_code)
    return code
[ "def", "translate_func", "(", "name", ",", "block", ",", "args", ")", ":", "inline", "=", "name", ".", "startswith", "(", "'PyJsLvalInline'", ")", "real_name", "=", "''", "if", "inline", ":", "name", ",", "real_name", "=", "name", ".", "split", "(", "'...
Translates functions and all nested functions to Python code. name - name of that function (global functions will be available under var while inline will be available directly under this name ) block - code of the function (*with* brackets {} ) args - arguments that this function takes
[ "Translates", "functions", "and", "all", "nested", "functions", "to", "Python", "code", ".", "name", "-", "name", "of", "that", "function", "(", "global", "functions", "will", "be", "available", "under", "var", "while", "inline", "will", "be", "available", "...
python
valid
liminspace/dju-image
dju_image/image.py
https://github.com/liminspace/dju-image/blob/b06eb3be2069cd6cb52cf1e26c2c761883142d4e/dju_image/image.py#L42-L58
def is_image(f, types=('png', 'jpeg', 'gif'), set_content_type=True):
    """
    Return True if file f is image (types type) and set its
    correct content_type and filename extension.
    Example:
        if is_image(request.FILES['file']):
            print 'File is image'
        if is_image(open('/tmp/image.jpeg', 'rb')):
            print 'File is image'
    """
    assert isinstance(types, (list, tuple))
    detected = image_get_format(f)
    # Fix: the original comprehension reused the name `t` for its loop
    # variable; under Python 2 list-comprehension variables leak into the
    # enclosing scope, so `t` was clobbered with the last entry of `types`
    # before being passed to set_uploaded_file_content_type_and_file_ext.
    if detected not in [allowed.lower() for allowed in types]:
        return False
    if set_content_type:
        set_uploaded_file_content_type_and_file_ext(f, detected)
    return True
[ "def", "is_image", "(", "f", ",", "types", "=", "(", "'png'", ",", "'jpeg'", ",", "'gif'", ")", ",", "set_content_type", "=", "True", ")", ":", "assert", "isinstance", "(", "types", ",", "(", "list", ",", "tuple", ")", ")", "t", "=", "image_get_forma...
Return True if file f is image (types type) and set its correct content_type and filename extension. Example: if is_image(request.FILES['file']): print 'File is image' if is_image(open('/tmp/image.jpeg', 'rb')): print 'File is image'
[ "Return", "True", "if", "file", "f", "is", "image", "(", "types", "type", ")", "and", "set", "its", "correct", "content_type", "and", "filename", "extension", ".", "Example", ":", "if", "is_image", "(", "request", ".", "FILES", "[", "file", "]", ")", "...
python
train
nugget/python-anthemav
anthemav/connection.py
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/connection.py#L124-L129
def halt(self): """Close the AVR device connection and wait for a resume() request.""" self.log.warning('Halting connection to AVR') self._halted = True if self.protocol.transport: self.protocol.transport.close()
[ "def", "halt", "(", "self", ")", ":", "self", ".", "log", ".", "warning", "(", "'Halting connection to AVR'", ")", "self", ".", "_halted", "=", "True", "if", "self", ".", "protocol", ".", "transport", ":", "self", ".", "protocol", ".", "transport", ".", ...
Close the AVR device connection and wait for a resume() request.
[ "Close", "the", "AVR", "device", "connection", "and", "wait", "for", "a", "resume", "()", "request", "." ]
python
train
spyder-ide/spyder
spyder/utils/vcs.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/vcs.py#L52-L57
def get_vcs_info(path): """Return support status dict if path is under VCS root""" for info in SUPPORTED: vcs_path = osp.join(path, info['rootdir']) if osp.isdir(vcs_path): return info
[ "def", "get_vcs_info", "(", "path", ")", ":", "for", "info", "in", "SUPPORTED", ":", "vcs_path", "=", "osp", ".", "join", "(", "path", ",", "info", "[", "'rootdir'", "]", ")", "if", "osp", ".", "isdir", "(", "vcs_path", ")", ":", "return", "info" ]
Return support status dict if path is under VCS root
[ "Return", "support", "status", "dict", "if", "path", "is", "under", "VCS", "root" ]
python
train
ynop/audiomate
audiomate/corpus/io/speech_commands.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/corpus/io/speech_commands.py#L41-L61
def _load_folder(folder_entry, corpus): """ Load the given subfolder into the corpus (e.g. bed, one, ...) """ for wav_path in glob.glob(os.path.join(folder_entry.path, '*.wav')): wav_name = os.path.basename(wav_path) basename, __ = os.path.splitext(wav_name) command = folder_entry.name file_idx = '{}_{}'.format(basename, command) issuer_idx = str(basename).split('_', maxsplit=1)[0] corpus.new_file(wav_path, file_idx) if issuer_idx not in corpus.issuers.keys(): corpus.import_issuers(issuers.Speaker( issuer_idx )) utt = corpus.new_utterance(file_idx, file_idx, issuer_idx) labels = annotations.LabelList.create_single(command, idx=audiomate.corpus.LL_WORD_TRANSCRIPT) utt.set_label_list(labels)
[ "def", "_load_folder", "(", "folder_entry", ",", "corpus", ")", ":", "for", "wav_path", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "folder_entry", ".", "path", ",", "'*.wav'", ")", ")", ":", "wav_name", "=", "os", ".", "path...
Load the given subfolder into the corpus (e.g. bed, one, ...)
[ "Load", "the", "given", "subfolder", "into", "the", "corpus", "(", "e", ".", "g", ".", "bed", "one", "...", ")" ]
python
train
hkff/FodtlMon
fodtlmon/ltl/ltl.py
https://github.com/hkff/FodtlMon/blob/0c9015a1a1f0a4a64d52945c86b45441d5871c56/fodtlmon/ltl/ltl.py#L73-L113
def walk(self, filters: str=None, filter_type: type=None, pprint=False, depth=-1): """ Iterate tree in pre-order wide-first search order :param filters: filter by python expression :param filter_type: Filter by class :return: """ children = self.children() if children is None: children = [] res = [] if depth == 0: return res elif depth != -1: depth -= 1 for child in children: if isinstance(child, Formula): tmp = child.walk(filters=filters, filter_type=filter_type, pprint=pprint, depth=depth) if tmp: res.extend(tmp) if filter_type is None: if filters is not None: if eval(filters) is True: res.append(self) else: res.append(self) elif isinstance(self, filter_type): if filters is not None: if eval(filters) is True: res.append(self) else: res.append(self) if pprint: res = [str(x) + " " for x in res] res = "\n".join(res) return res
[ "def", "walk", "(", "self", ",", "filters", ":", "str", "=", "None", ",", "filter_type", ":", "type", "=", "None", ",", "pprint", "=", "False", ",", "depth", "=", "-", "1", ")", ":", "children", "=", "self", ".", "children", "(", ")", "if", "chil...
Iterate tree in pre-order wide-first search order :param filters: filter by python expression :param filter_type: Filter by class :return:
[ "Iterate", "tree", "in", "pre", "-", "order", "wide", "-", "first", "search", "order" ]
python
train
CivicSpleen/ambry
ambry/library/__init__.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/__init__.py#L655-L712
def _checkin_remote_bundle(self, remote, ref): """ Checkin a remote bundle from a remote :param remote: a Remote object :param ref: Any bundle reference :return: The vid of the loaded bundle """ from ambry.bundle.process import call_interval from ambry.orm.exc import NotFoundError from ambry.orm import Remote from ambry.util.flo import copy_file_or_flo from tempfile import NamedTemporaryFile assert isinstance(remote, Remote) @call_interval(5) def cb(r, total): self.logger.info("{}: Downloaded {} bytes".format(ref, total)) b = None try: b = self.bundle(ref) self.logger.info("{}: Already installed".format(ref)) vid = b.identity.vid except NotFoundError: self.logger.info("{}: Syncing".format(ref)) db_dir = self.filesystem.downloads('bundles') db_f = os.path.join(db_dir, ref) #FIXME. Could get multiple versions of same file. ie vid and vname if not os.path.exists(os.path.join(db_dir, db_f)): self.logger.info("Downloading bundle '{}' to '{}".format(ref, db_f)) with open(db_f, 'wb') as f_out: with remote.checkout(ref) as f: copy_file_or_flo(f, f_out, cb=cb) f_out.flush() self.checkin_bundle(db_f) b = self.bundle(ref) # Should exist now. b.dataset.data['remote_name'] = remote.short_name b.dataset.upstream = remote.url b.dstate = b.STATES.CHECKEDOUT b.commit() finally: if b: b.progress.close() vid = b.identity.vid return vid
[ "def", "_checkin_remote_bundle", "(", "self", ",", "remote", ",", "ref", ")", ":", "from", "ambry", ".", "bundle", ".", "process", "import", "call_interval", "from", "ambry", ".", "orm", ".", "exc", "import", "NotFoundError", "from", "ambry", ".", "orm", "...
Checkin a remote bundle from a remote :param remote: a Remote object :param ref: Any bundle reference :return: The vid of the loaded bundle
[ "Checkin", "a", "remote", "bundle", "from", "a", "remote", ":", "param", "remote", ":", "a", "Remote", "object", ":", "param", "ref", ":", "Any", "bundle", "reference", ":", "return", ":", "The", "vid", "of", "the", "loaded", "bundle" ]
python
train
srevenant/dictlib
dictlib/__init__.py
https://github.com/srevenant/dictlib/blob/88d743aa897d9c2c6de3c405522f9de3ba2aa869/dictlib/__init__.py#L116-L127
def dug(obj, key, value): """ Inverse of dig: recursively set a value in a dictionary, using dot notation. >>> test = {"a":{"b":{"c":1}}} >>> dug(test, "a.b.c", 10) >>> test {'a': {'b': {'c': 10}}} """ array = key.split(".") return _dug(obj, value, *array)
[ "def", "dug", "(", "obj", ",", "key", ",", "value", ")", ":", "array", "=", "key", ".", "split", "(", "\".\"", ")", "return", "_dug", "(", "obj", ",", "value", ",", "*", "array", ")" ]
Inverse of dig: recursively set a value in a dictionary, using dot notation. >>> test = {"a":{"b":{"c":1}}} >>> dug(test, "a.b.c", 10) >>> test {'a': {'b': {'c': 10}}}
[ "Inverse", "of", "dig", ":", "recursively", "set", "a", "value", "in", "a", "dictionary", "using", "dot", "notation", "." ]
python
train
galaxy-genome-annotation/python-apollo
apollo/annotations/__init__.py
https://github.com/galaxy-genome-annotation/python-apollo/blob/2bc9991302abe4402ec2885dcaac35915475b387/apollo/annotations/__init__.py#L130-L158
def set_symbol(self, feature_id, symbol, organism=None, sequence=None): """ Set a feature's description :type feature_id: str :param feature_id: Feature UUID :type symbol: str :param symbol: Feature symbol :type organism: str :param organism: Organism Common Name :type sequence: str :param sequence: Sequence Name :rtype: dict :return: A standard apollo feature dictionary ({"features": [{...}]}) """ data = { 'features': [ { 'uniquename': feature_id, 'symbol': symbol, } ], } data = self._update_data(data, organism, sequence) return self.post('setSymbol', data)
[ "def", "set_symbol", "(", "self", ",", "feature_id", ",", "symbol", ",", "organism", "=", "None", ",", "sequence", "=", "None", ")", ":", "data", "=", "{", "'features'", ":", "[", "{", "'uniquename'", ":", "feature_id", ",", "'symbol'", ":", "symbol", ...
Set a feature's description :type feature_id: str :param feature_id: Feature UUID :type symbol: str :param symbol: Feature symbol :type organism: str :param organism: Organism Common Name :type sequence: str :param sequence: Sequence Name :rtype: dict :return: A standard apollo feature dictionary ({"features": [{...}]})
[ "Set", "a", "feature", "s", "description" ]
python
train
numenta/htmresearch
projects/nik/nik_htm.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/nik/nik_htm.py#L276-L281
def save(self, filename="temp.pkl"): """ Save TM in the filename specified above """ output = open(filename, 'wb') cPickle.dump(self.tm, output, protocol=cPickle.HIGHEST_PROTOCOL)
[ "def", "save", "(", "self", ",", "filename", "=", "\"temp.pkl\"", ")", ":", "output", "=", "open", "(", "filename", ",", "'wb'", ")", "cPickle", ".", "dump", "(", "self", ".", "tm", ",", "output", ",", "protocol", "=", "cPickle", ".", "HIGHEST_PROTOCOL...
Save TM in the filename specified above
[ "Save", "TM", "in", "the", "filename", "specified", "above" ]
python
train
haikuginger/beekeeper
beekeeper/hive.py
https://github.com/haikuginger/beekeeper/blob/b647d3add0b407ec5dc3a2a39c4f6dac31243b18/beekeeper/hive.py#L74-L82
def from_version(self, version, require_https=False): """ Create a Hive object based on the information in the object and the version passed into the method. """ if version is None or self.version() == version: return self else: return Hive.from_url(self.get_version_url(version), require_https=require_https)
[ "def", "from_version", "(", "self", ",", "version", ",", "require_https", "=", "False", ")", ":", "if", "version", "is", "None", "or", "self", ".", "version", "(", ")", "==", "version", ":", "return", "self", "else", ":", "return", "Hive", ".", "from_u...
Create a Hive object based on the information in the object and the version passed into the method.
[ "Create", "a", "Hive", "object", "based", "on", "the", "information", "in", "the", "object", "and", "the", "version", "passed", "into", "the", "method", "." ]
python
train
manns/pyspread
pyspread/src/lib/charts.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/charts.py#L56-L68
def object2code(key, code): """Returns code for widget from dict object""" if key in ["xscale", "yscale"]: if code == "log": code = True else: code = False else: code = unicode(code) return code
[ "def", "object2code", "(", "key", ",", "code", ")", ":", "if", "key", "in", "[", "\"xscale\"", ",", "\"yscale\"", "]", ":", "if", "code", "==", "\"log\"", ":", "code", "=", "True", "else", ":", "code", "=", "False", "else", ":", "code", "=", "unico...
Returns code for widget from dict object
[ "Returns", "code", "for", "widget", "from", "dict", "object" ]
python
train
ralphje/imagemounter
imagemounter/volume_system.py
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L68-L76
def _make_subvolume(self, **args): """Creates a subvolume, adds it to this class and returns it.""" from imagemounter.volume import Volume v = Volume(disk=self.disk, parent=self.parent, volume_detector=self.volume_detector, **args) # vstype is not passed down, let it decide for itself. self.volumes.append(v) return v
[ "def", "_make_subvolume", "(", "self", ",", "*", "*", "args", ")", ":", "from", "imagemounter", ".", "volume", "import", "Volume", "v", "=", "Volume", "(", "disk", "=", "self", ".", "disk", ",", "parent", "=", "self", ".", "parent", ",", "volume_detect...
Creates a subvolume, adds it to this class and returns it.
[ "Creates", "a", "subvolume", "adds", "it", "to", "this", "class", "and", "returns", "it", "." ]
python
train
vbwagner/ctypescrypto
ctypescrypto/cms.py
https://github.com/vbwagner/ctypescrypto/blob/33c32904cf5e04901f87f90e2499634b8feecd3e/ctypescrypto/cms.py#L131-L155
def create(data, cert, pkey, flags=Flags.BINARY, certs=None): """ Creates SignedData message by signing data with pkey and certificate. @param data - data to sign @param cert - signer's certificate @param pkey - pkey object with private key to sign @param flags - OReed combination of Flags constants @param certs - list of X509 objects to include into CMS """ if not pkey.cansign: raise ValueError("Specified keypair has no private part") if cert.pubkey != pkey: raise ValueError("Certificate doesn't match public key") bio = Membio(data) if certs is not None and len(certs) > 0: certstack_obj = StackOfX509(certs) # keep reference to prevent immediate __del__ call certstack = certstack_obj.ptr else: certstack = None ptr = libcrypto.CMS_sign(cert.cert, pkey.key, certstack, bio.bio, flags) if ptr is None: raise CMSError("signing message") return SignedData(ptr)
[ "def", "create", "(", "data", ",", "cert", ",", "pkey", ",", "flags", "=", "Flags", ".", "BINARY", ",", "certs", "=", "None", ")", ":", "if", "not", "pkey", ".", "cansign", ":", "raise", "ValueError", "(", "\"Specified keypair has no private part\"", ")", ...
Creates SignedData message by signing data with pkey and certificate. @param data - data to sign @param cert - signer's certificate @param pkey - pkey object with private key to sign @param flags - OReed combination of Flags constants @param certs - list of X509 objects to include into CMS
[ "Creates", "SignedData", "message", "by", "signing", "data", "with", "pkey", "and", "certificate", "." ]
python
train
ethereum/web3.py
ens/main.py
https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/ens/main.py#L96-L130
def setup_address(self, name, address=default, transact={}): """ Set up the name to point to the supplied address. The sender of the transaction must own the name, or its parent name. Example: If the caller owns ``parentname.eth`` with no subdomains and calls this method with ``sub.parentname.eth``, then ``sub`` will be created as part of this call. :param str name: ENS name to set up :param str address: name will point to this address, in checksum format. If ``None``, erase the record. If not specified, name will point to the owner's address. :param dict transact: the transaction configuration, like in :meth:`~web3.eth.Eth.sendTransaction` :raises InvalidName: if ``name`` has invalid syntax :raises UnauthorizedError: if ``'from'`` in `transact` does not own `name` """ owner = self.setup_owner(name, transact=transact) self._assert_control(owner, name) if is_none_or_zero_address(address): address = None elif address is default: address = owner elif is_binary_address(address): address = to_checksum_address(address) elif not is_checksum_address(address): raise ValueError("You must supply the address in checksum format") if self.address(name) == address: return None if address is None: address = EMPTY_ADDR_HEX transact['from'] = owner resolver = self._set_resolver(name, transact=transact) return resolver.functions.setAddr(raw_name_to_hash(name), address).transact(transact)
[ "def", "setup_address", "(", "self", ",", "name", ",", "address", "=", "default", ",", "transact", "=", "{", "}", ")", ":", "owner", "=", "self", ".", "setup_owner", "(", "name", ",", "transact", "=", "transact", ")", "self", ".", "_assert_control", "(...
Set up the name to point to the supplied address. The sender of the transaction must own the name, or its parent name. Example: If the caller owns ``parentname.eth`` with no subdomains and calls this method with ``sub.parentname.eth``, then ``sub`` will be created as part of this call. :param str name: ENS name to set up :param str address: name will point to this address, in checksum format. If ``None``, erase the record. If not specified, name will point to the owner's address. :param dict transact: the transaction configuration, like in :meth:`~web3.eth.Eth.sendTransaction` :raises InvalidName: if ``name`` has invalid syntax :raises UnauthorizedError: if ``'from'`` in `transact` does not own `name`
[ "Set", "up", "the", "name", "to", "point", "to", "the", "supplied", "address", ".", "The", "sender", "of", "the", "transaction", "must", "own", "the", "name", "or", "its", "parent", "name", "." ]
python
train
zarr-developers/zarr
zarr/indexing.py
https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/indexing.py#L477-L493
def ix_(selection, shape): """Convert an orthogonal selection to a numpy advanced (fancy) selection, like numpy.ix_ but with support for slices and single ints.""" # normalisation selection = replace_ellipsis(selection, shape) # replace slice and int as these are not supported by numpy.ix_ selection = [slice_to_range(dim_sel, dim_len) if isinstance(dim_sel, slice) else [dim_sel] if is_integer(dim_sel) else dim_sel for dim_sel, dim_len in zip(selection, shape)] # now get numpy to convert to a coordinate selection selection = np.ix_(*selection) return selection
[ "def", "ix_", "(", "selection", ",", "shape", ")", ":", "# normalisation", "selection", "=", "replace_ellipsis", "(", "selection", ",", "shape", ")", "# replace slice and int as these are not supported by numpy.ix_", "selection", "=", "[", "slice_to_range", "(", "dim_se...
Convert an orthogonal selection to a numpy advanced (fancy) selection, like numpy.ix_ but with support for slices and single ints.
[ "Convert", "an", "orthogonal", "selection", "to", "a", "numpy", "advanced", "(", "fancy", ")", "selection", "like", "numpy", ".", "ix_", "but", "with", "support", "for", "slices", "and", "single", "ints", "." ]
python
train
acrisci/i3ipc-python
i3ipc/i3ipc.py
https://github.com/acrisci/i3ipc-python/blob/243d353434cdd2a93a9ca917c6bbf07b865c39af/i3ipc/i3ipc.py#L453-L460
def _ipc_send(self, sock, message_type, payload): ''' Send and receive a message from the ipc. NOTE: this is not thread safe ''' sock.sendall(self._pack(message_type, payload)) data, msg_type = self._ipc_recv(sock) return data
[ "def", "_ipc_send", "(", "self", ",", "sock", ",", "message_type", ",", "payload", ")", ":", "sock", ".", "sendall", "(", "self", ".", "_pack", "(", "message_type", ",", "payload", ")", ")", "data", ",", "msg_type", "=", "self", ".", "_ipc_recv", "(", ...
Send and receive a message from the ipc. NOTE: this is not thread safe
[ "Send", "and", "receive", "a", "message", "from", "the", "ipc", ".", "NOTE", ":", "this", "is", "not", "thread", "safe" ]
python
train
loli/medpy
bin/medpy_grid.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/bin/medpy_grid.py#L122-L144
def getArguments(parser): "Provides additional validation of the arguments collected by argparse." args = parser.parse_args() # get the number of dimensions in the image if args.example: args.example_image, args.example_header = load(args.example) dimensions = args.example_image.ndim else: dimensions = len(args.shape) # check and, if required, modify the spacing argument if isinstance(args.spacing, int): args.spacing = [args.spacing] * dimensions elif len(args.spacing) != dimensions: raise argparse.ArgumentTypeError('the grid spacing ({}) must contain the same number of elements as the output image has dimensions ({})'.format(','.join(map(str, args.spacing)), dimensions)) # check further arguments if args.offset and len(args.offset) != dimensions: raise argparse.ArgumentTypeError('the offset ({}) must contain the same number of elements as the output image has dimensions ({})'.format(','.join(map(str, args.offset)), dimensions)) if args.pixelspacing and len(args.pixelspacing) != dimensions: raise argparse.ArgumentTypeError('the supplied pixel spacing ({}) must contain the same number of elements as the output image has dimensions ({})'.format(','.join(map(str, args.pixelspacing)), dimensions)) return args
[ "def", "getArguments", "(", "parser", ")", ":", "args", "=", "parser", ".", "parse_args", "(", ")", "# get the number of dimensions in the image", "if", "args", ".", "example", ":", "args", ".", "example_image", ",", "args", ".", "example_header", "=", "load", ...
Provides additional validation of the arguments collected by argparse.
[ "Provides", "additional", "validation", "of", "the", "arguments", "collected", "by", "argparse", "." ]
python
train
SuperCowPowers/workbench
workbench/workers/pcap_graph.py
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/workers/pcap_graph.py#L33-L37
def add_node(self, node_id, name, labels): ''' Cache aware add_node ''' if node_id not in self.node_cache: self.workbench.add_node(node_id, name, labels) self.node_cache.add(node_id)
[ "def", "add_node", "(", "self", ",", "node_id", ",", "name", ",", "labels", ")", ":", "if", "node_id", "not", "in", "self", ".", "node_cache", ":", "self", ".", "workbench", ".", "add_node", "(", "node_id", ",", "name", ",", "labels", ")", "self", "....
Cache aware add_node
[ "Cache", "aware", "add_node" ]
python
train
scanny/python-pptx
pptx/oxml/table.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/table.py#L198-L205
def anchor(self, anchor_enum_idx): """ Set value of anchor attribute on ``<a:tcPr>`` child element """ if anchor_enum_idx is None and self.tcPr is None: return tcPr = self.get_or_add_tcPr() tcPr.anchor = anchor_enum_idx
[ "def", "anchor", "(", "self", ",", "anchor_enum_idx", ")", ":", "if", "anchor_enum_idx", "is", "None", "and", "self", ".", "tcPr", "is", "None", ":", "return", "tcPr", "=", "self", ".", "get_or_add_tcPr", "(", ")", "tcPr", ".", "anchor", "=", "anchor_enu...
Set value of anchor attribute on ``<a:tcPr>`` child element
[ "Set", "value", "of", "anchor", "attribute", "on", "<a", ":", "tcPr", ">", "child", "element" ]
python
train
ANTsX/ANTsPy
ants/core/ants_transform.py
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/core/ants_transform.py#L79-L85
def set_fixed_parameters(self, parameters): """ Set parameters of transform """ if not isinstance(parameters, np.ndarray): parameters = np.asarray(parameters) libfn = utils.get_lib_fn('setTransformFixedParameters%s'%self._libsuffix) libfn(self.pointer, parameters.tolist())
[ "def", "set_fixed_parameters", "(", "self", ",", "parameters", ")", ":", "if", "not", "isinstance", "(", "parameters", ",", "np", ".", "ndarray", ")", ":", "parameters", "=", "np", ".", "asarray", "(", "parameters", ")", "libfn", "=", "utils", ".", "get_...
Set parameters of transform
[ "Set", "parameters", "of", "transform" ]
python
train
pandas-dev/pandas
pandas/core/generic.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L10299-L10306
def _doc_parms(cls): """Return a tuple of the doc parms.""" axis_descr = "{%s}" % ', '.join("{0} ({1})".format(a, i) for i, a in enumerate(cls._AXIS_ORDERS)) name = (cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else 'scalar') name2 = cls.__name__ return axis_descr, name, name2
[ "def", "_doc_parms", "(", "cls", ")", ":", "axis_descr", "=", "\"{%s}\"", "%", "', '", ".", "join", "(", "\"{0} ({1})\"", ".", "format", "(", "a", ",", "i", ")", "for", "i", ",", "a", "in", "enumerate", "(", "cls", ".", "_AXIS_ORDERS", ")", ")", "n...
Return a tuple of the doc parms.
[ "Return", "a", "tuple", "of", "the", "doc", "parms", "." ]
python
train
apache/spark
python/pyspark/streaming/dstream.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L409-L416
def slice(self, begin, end): """ Return all the RDDs between 'begin' to 'end' (both included) `begin`, `end` could be datetime.datetime() or unix_timestamp """ jrdds = self._jdstream.slice(self._jtime(begin), self._jtime(end)) return [RDD(jrdd, self._sc, self._jrdd_deserializer) for jrdd in jrdds]
[ "def", "slice", "(", "self", ",", "begin", ",", "end", ")", ":", "jrdds", "=", "self", ".", "_jdstream", ".", "slice", "(", "self", ".", "_jtime", "(", "begin", ")", ",", "self", ".", "_jtime", "(", "end", ")", ")", "return", "[", "RDD", "(", "...
Return all the RDDs between 'begin' to 'end' (both included) `begin`, `end` could be datetime.datetime() or unix_timestamp
[ "Return", "all", "the", "RDDs", "between", "begin", "to", "end", "(", "both", "included", ")" ]
python
train
wummel/linkchecker
linkcheck/plugins/markdowncheck.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/plugins/markdowncheck.py#L84-L89
def read_config(cls, configparser): """Read configuration file options.""" config = dict() config[cls._filename_re_key] = configparser.get(cls.__name__, cls._filename_re_key) \ if configparser.has_option(cls.__name__, cls._filename_re_key) else None return config
[ "def", "read_config", "(", "cls", ",", "configparser", ")", ":", "config", "=", "dict", "(", ")", "config", "[", "cls", ".", "_filename_re_key", "]", "=", "configparser", ".", "get", "(", "cls", ".", "__name__", ",", "cls", ".", "_filename_re_key", ")", ...
Read configuration file options.
[ "Read", "configuration", "file", "options", "." ]
python
train
inveniosoftware/invenio-webhooks
invenio_webhooks/signatures.py
https://github.com/inveniosoftware/invenio-webhooks/blob/f407cb2245464543ee474a81189fb9d3978bdde5/invenio_webhooks/signatures.py#L33-L44
def get_hmac(message): """Calculate HMAC value of message using ``WEBHOOKS_SECRET_KEY``. :param message: String to calculate HMAC for. """ key = current_app.config['WEBHOOKS_SECRET_KEY'] hmac_value = hmac.new( key.encode('utf-8') if hasattr(key, 'encode') else key, message.encode('utf-8') if hasattr(message, 'encode') else message, sha1 ).hexdigest() return hmac_value
[ "def", "get_hmac", "(", "message", ")", ":", "key", "=", "current_app", ".", "config", "[", "'WEBHOOKS_SECRET_KEY'", "]", "hmac_value", "=", "hmac", ".", "new", "(", "key", ".", "encode", "(", "'utf-8'", ")", "if", "hasattr", "(", "key", ",", "'encode'",...
Calculate HMAC value of message using ``WEBHOOKS_SECRET_KEY``. :param message: String to calculate HMAC for.
[ "Calculate", "HMAC", "value", "of", "message", "using", "WEBHOOKS_SECRET_KEY", "." ]
python
train
apache/incubator-mxnet
python/mxnet/context.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/context.py#L244-L259
def num_gpus(): """Query CUDA for the number of GPUs present. Raises ------ Will raise an exception on any CUDA error. Returns ------- count : int The number of GPUs. """ count = ctypes.c_int() check_call(_LIB.MXGetGPUCount(ctypes.byref(count))) return count.value
[ "def", "num_gpus", "(", ")", ":", "count", "=", "ctypes", ".", "c_int", "(", ")", "check_call", "(", "_LIB", ".", "MXGetGPUCount", "(", "ctypes", ".", "byref", "(", "count", ")", ")", ")", "return", "count", ".", "value" ]
Query CUDA for the number of GPUs present. Raises ------ Will raise an exception on any CUDA error. Returns ------- count : int The number of GPUs.
[ "Query", "CUDA", "for", "the", "number", "of", "GPUs", "present", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/gallery/gallery_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/gallery/gallery_client.py#L434-L450
def create_draft_for_edit_extension(self, publisher_name, extension_name): """CreateDraftForEditExtension. [Preview API] :param str publisher_name: :param str extension_name: :rtype: :class:`<ExtensionDraft> <azure.devops.v5_0.gallery.models.ExtensionDraft>` """ route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if extension_name is not None: route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str') response = self._send(http_method='POST', location_id='02b33873-4e61-496e-83a2-59d1df46b7d8', version='5.0-preview.1', route_values=route_values) return self._deserialize('ExtensionDraft', response)
[ "def", "create_draft_for_edit_extension", "(", "self", ",", "publisher_name", ",", "extension_name", ")", ":", "route_values", "=", "{", "}", "if", "publisher_name", "is", "not", "None", ":", "route_values", "[", "'publisherName'", "]", "=", "self", ".", "_seria...
CreateDraftForEditExtension. [Preview API] :param str publisher_name: :param str extension_name: :rtype: :class:`<ExtensionDraft> <azure.devops.v5_0.gallery.models.ExtensionDraft>`
[ "CreateDraftForEditExtension", ".", "[", "Preview", "API", "]", ":", "param", "str", "publisher_name", ":", ":", "param", "str", "extension_name", ":", ":", "rtype", ":", ":", "class", ":", "<ExtensionDraft", ">", "<azure", ".", "devops", ".", "v5_0", ".", ...
python
train
Nike-Inc/cerberus-python-client
cerberus/client.py
https://github.com/Nike-Inc/cerberus-python-client/blob/ef38356822e722fcb6a6ed4a1b38a5b493e753ae/cerberus/client.py#L371-L377
def _parse_metadata_filename(self, metadata): """ Parse the header metadata to pull out the filename and then store it under the key 'filename' """ index = metadata['Content-Disposition'].index('=')+1 metadata['filename'] = metadata['Content-Disposition'][index:].replace('"', '') return metadata
[ "def", "_parse_metadata_filename", "(", "self", ",", "metadata", ")", ":", "index", "=", "metadata", "[", "'Content-Disposition'", "]", ".", "index", "(", "'='", ")", "+", "1", "metadata", "[", "'filename'", "]", "=", "metadata", "[", "'Content-Disposition'", ...
Parse the header metadata to pull out the filename and then store it under the key 'filename'
[ "Parse", "the", "header", "metadata", "to", "pull", "out", "the", "filename", "and", "then", "store", "it", "under", "the", "key", "filename" ]
python
train
IdentityPython/pysaml2
src/saml2/mdbcache.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/mdbcache.py#L158-L172
def active(self, subject_id, entity_id): """ Returns the status of assertions from a specific entity_id. :param subject_id: The ID of the subject :param entity_id: The entity ID of the entity_id of the assertion :return: True or False depending on if the assertion is still valid or not. """ item = self._cache.find_one({"subject_id": subject_id, "entity_id": entity_id}) try: return time_util.not_on_or_after(item["timestamp"]) except ToOld: return False
[ "def", "active", "(", "self", ",", "subject_id", ",", "entity_id", ")", ":", "item", "=", "self", ".", "_cache", ".", "find_one", "(", "{", "\"subject_id\"", ":", "subject_id", ",", "\"entity_id\"", ":", "entity_id", "}", ")", "try", ":", "return", "time...
Returns the status of assertions from a specific entity_id. :param subject_id: The ID of the subject :param entity_id: The entity ID of the entity_id of the assertion :return: True or False depending on if the assertion is still valid or not.
[ "Returns", "the", "status", "of", "assertions", "from", "a", "specific", "entity_id", "." ]
python
train
fred49/linshare-api
linshareapi/user/documents.py
https://github.com/fred49/linshare-api/blob/be646c25aa8ba3718abb6869c620b157d53d6e41/linshareapi/user/documents.py#L63-L70
def get(self, uuid): """ Get one document store into LinShare.""" #return self.core.get("documents/" + uuid) documents = (v for v in self.list() if v.get('uuid') == uuid) for i in documents: self.log.debug(i) return i return None
[ "def", "get", "(", "self", ",", "uuid", ")", ":", "#return self.core.get(\"documents/\" + uuid)", "documents", "=", "(", "v", "for", "v", "in", "self", ".", "list", "(", ")", "if", "v", ".", "get", "(", "'uuid'", ")", "==", "uuid", ")", "for", "i", "...
Get one document store into LinShare.
[ "Get", "one", "document", "store", "into", "LinShare", "." ]
python
train
hellosign/hellosign-python-sdk
hellosign_sdk/utils/request.py
https://github.com/hellosign/hellosign-python-sdk/blob/4325a29ad5766380a214eac3914511f62f7ecba4/hellosign_sdk/utils/request.py#L214-L242
def _check_error(self, response, json_response=None): ''' Check for HTTP error code from the response, raise exception if there's any Args: response (object): Object returned by requests' `get` and `post` methods json_response (dict): JSON response, if applicable Raises: HTTPError: If the status code of response is either 4xx or 5xx Returns: True if status code is not error code ''' # If status code is 4xx or 5xx, that should be an error if response.status_code >= 400: json_response = json_response or self._get_json_response(response) err_cls = self._check_http_error_code(response.status_code) try: raise err_cls("%s error: %s" % (response.status_code, json_response["error"]["error_msg"]), response.status_code) # This is to catch error when we post get oauth data except TypeError: raise err_cls("%s error: %s" % (response.status_code, json_response["error_description"]), response.status_code) # Return True if everything is OK return True
[ "def", "_check_error", "(", "self", ",", "response", ",", "json_response", "=", "None", ")", ":", "# If status code is 4xx or 5xx, that should be an error", "if", "response", ".", "status_code", ">=", "400", ":", "json_response", "=", "json_response", "or", "self", ...
Check for HTTP error code from the response, raise exception if there's any Args: response (object): Object returned by requests' `get` and `post` methods json_response (dict): JSON response, if applicable Raises: HTTPError: If the status code of response is either 4xx or 5xx Returns: True if status code is not error code
[ "Check", "for", "HTTP", "error", "code", "from", "the", "response", "raise", "exception", "if", "there", "s", "any" ]
python
train
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/cli.py
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/cli.py#L151-L178
def check(): """Command for checking upgrades.""" upgrader = InvenioUpgrader() logger = upgrader.get_logger() try: # Run upgrade pre-checks upgrades = upgrader.get_upgrades() # Check if there's anything to upgrade if not upgrades: logger.info("All upgrades have been applied.") return logger.info("Following upgrade(s) have not been applied yet:") for u in upgrades: logger.info( " * {0} {1}".format(u.name, u.info)) logger.info("Running pre-upgrade checks...") upgrader.pre_upgrade_checks(upgrades) logger.info("Upgrade check successful - estimated time for upgrading" " Invenio is %s..." % upgrader.human_estimate(upgrades)) except RuntimeError as e: for msg in e.args: logger.error(unicode(msg)) logger.error("Upgrade check failed. Aborting.") raise
[ "def", "check", "(", ")", ":", "upgrader", "=", "InvenioUpgrader", "(", ")", "logger", "=", "upgrader", ".", "get_logger", "(", ")", "try", ":", "# Run upgrade pre-checks", "upgrades", "=", "upgrader", ".", "get_upgrades", "(", ")", "# Check if there's anything ...
Command for checking upgrades.
[ "Command", "for", "checking", "upgrades", "." ]
python
train
maas/python-libmaas
maas/client/utils/profiles.py
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/utils/profiles.py#L82-L90
def replace(self, **updates): """Return a new profile with the given updates. Unspecified fields will be the same as this instance. See `__new__` for details on the arguments. """ state = self.dump() state.update(updates) return self.__class__(**state)
[ "def", "replace", "(", "self", ",", "*", "*", "updates", ")", ":", "state", "=", "self", ".", "dump", "(", ")", "state", ".", "update", "(", "updates", ")", "return", "self", ".", "__class__", "(", "*", "*", "state", ")" ]
Return a new profile with the given updates. Unspecified fields will be the same as this instance. See `__new__` for details on the arguments.
[ "Return", "a", "new", "profile", "with", "the", "given", "updates", "." ]
python
train
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/model/network.py
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/network.py#L91-L98
def _set_default_vertex_attributes(self) -> None: """Assign default values on attributes to all vertices.""" self.graph.vs["l2fc"] = 0 self.graph.vs["padj"] = 0.5 self.graph.vs["symbol"] = self.graph.vs["name"] self.graph.vs["diff_expressed"] = False self.graph.vs["up_regulated"] = False self.graph.vs["down_regulated"] = False
[ "def", "_set_default_vertex_attributes", "(", "self", ")", "->", "None", ":", "self", ".", "graph", ".", "vs", "[", "\"l2fc\"", "]", "=", "0", "self", ".", "graph", ".", "vs", "[", "\"padj\"", "]", "=", "0.5", "self", ".", "graph", ".", "vs", "[", ...
Assign default values on attributes to all vertices.
[ "Assign", "default", "values", "on", "attributes", "to", "all", "vertices", "." ]
python
train
inveniosoftware/invenio-celery
invenio_celery/ext.py
https://github.com/inveniosoftware/invenio-celery/blob/4d075d5dbdb7ee849abdb0c8d7e7a49cb7973474/invenio_celery/ext.py#L91-L95
def get_active_tasks(self): """Return a list of UUIDs of active tasks.""" current_tasks = self.celery.control.inspect().active() or dict() return [ task.get('id') for host in current_tasks.values() for task in host]
[ "def", "get_active_tasks", "(", "self", ")", ":", "current_tasks", "=", "self", ".", "celery", ".", "control", ".", "inspect", "(", ")", ".", "active", "(", ")", "or", "dict", "(", ")", "return", "[", "task", ".", "get", "(", "'id'", ")", "for", "h...
Return a list of UUIDs of active tasks.
[ "Return", "a", "list", "of", "UUIDs", "of", "active", "tasks", "." ]
python
train
royi1000/py-libhdate
hdate/date.py
https://github.com/royi1000/py-libhdate/blob/12af759fb69f1d6403abed3762beaf5ace16a34b/hdate/date.py#L117-L121
def gdate(self): """Return the Gregorian date for the given Hebrew date object.""" if self._last_updated == "gdate": return self._gdate return conv.jdn_to_gdate(self._jdn)
[ "def", "gdate", "(", "self", ")", ":", "if", "self", ".", "_last_updated", "==", "\"gdate\"", ":", "return", "self", ".", "_gdate", "return", "conv", ".", "jdn_to_gdate", "(", "self", ".", "_jdn", ")" ]
Return the Gregorian date for the given Hebrew date object.
[ "Return", "the", "Gregorian", "date", "for", "the", "given", "Hebrew", "date", "object", "." ]
python
train
cdgriffith/Reusables
reusables/cli.py
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/cli.py#L59-L69
def popd(): """Go back to where you once were. :return: saved directory stack """ try: directory = _saved_paths.pop(0) except IndexError: return [os.getcwd()] os.chdir(directory) return [directory] + _saved_paths
[ "def", "popd", "(", ")", ":", "try", ":", "directory", "=", "_saved_paths", ".", "pop", "(", "0", ")", "except", "IndexError", ":", "return", "[", "os", ".", "getcwd", "(", ")", "]", "os", ".", "chdir", "(", "directory", ")", "return", "[", "direct...
Go back to where you once were. :return: saved directory stack
[ "Go", "back", "to", "where", "you", "once", "were", "." ]
python
train
bitprophet/ssh
ssh/transport.py
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L847-L863
def send_ignore(self, bytes=None): """ Send a junk packet across the encrypted link. This is sometimes used to add "noise" to a connection to confuse would-be attackers. It can also be used as a keep-alive for long lived connections traversing firewalls. @param bytes: the number of random bytes to send in the payload of the ignored packet -- defaults to a random number from 10 to 41. @type bytes: int """ m = Message() m.add_byte(chr(MSG_IGNORE)) if bytes is None: bytes = (ord(rng.read(1)) % 32) + 10 m.add_bytes(rng.read(bytes)) self._send_user_message(m)
[ "def", "send_ignore", "(", "self", ",", "bytes", "=", "None", ")", ":", "m", "=", "Message", "(", ")", "m", ".", "add_byte", "(", "chr", "(", "MSG_IGNORE", ")", ")", "if", "bytes", "is", "None", ":", "bytes", "=", "(", "ord", "(", "rng", ".", "...
Send a junk packet across the encrypted link. This is sometimes used to add "noise" to a connection to confuse would-be attackers. It can also be used as a keep-alive for long lived connections traversing firewalls. @param bytes: the number of random bytes to send in the payload of the ignored packet -- defaults to a random number from 10 to 41. @type bytes: int
[ "Send", "a", "junk", "packet", "across", "the", "encrypted", "link", ".", "This", "is", "sometimes", "used", "to", "add", "noise", "to", "a", "connection", "to", "confuse", "would", "-", "be", "attackers", ".", "It", "can", "also", "be", "used", "as", ...
python
train
LudovicRousseau/pyscard
smartcard/pcsc/PCSCCardConnection.py
https://github.com/LudovicRousseau/pyscard/blob/62e675028086c75656444cc21d563d9f08ebf8e7/smartcard/pcsc/PCSCCardConnection.py#L33-L45
def translateprotocolmask(protocol): """Translate CardConnection protocol mask into PCSC protocol mask.""" pcscprotocol = 0 if None != protocol: if CardConnection.T0_protocol & protocol: pcscprotocol |= SCARD_PROTOCOL_T0 if CardConnection.T1_protocol & protocol: pcscprotocol |= SCARD_PROTOCOL_T1 if CardConnection.RAW_protocol & protocol: pcscprotocol |= SCARD_PROTOCOL_RAW if CardConnection.T15_protocol & protocol: pcscprotocol |= SCARD_PROTOCOL_T15 return pcscprotocol
[ "def", "translateprotocolmask", "(", "protocol", ")", ":", "pcscprotocol", "=", "0", "if", "None", "!=", "protocol", ":", "if", "CardConnection", ".", "T0_protocol", "&", "protocol", ":", "pcscprotocol", "|=", "SCARD_PROTOCOL_T0", "if", "CardConnection", ".", "T...
Translate CardConnection protocol mask into PCSC protocol mask.
[ "Translate", "CardConnection", "protocol", "mask", "into", "PCSC", "protocol", "mask", "." ]
python
train
Miserlou/Zappa
example/authmodule.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/example/authmodule.py#L15-L61
def lambda_handler(event, context): print("Client token: " + event['authorizationToken']) print("Method ARN: " + event['methodArn']) """validate the incoming token""" """and produce the principal user identifier associated with the token""" """this could be accomplished in a number of ways:""" """1. Call out to OAuth provider""" """2. Decode a JWT token inline""" """3. Lookup in a self-managed DB""" principalId = "user|a1b2c3d4" """you can send a 401 Unauthorized response to the client by failing like so:""" """raise Exception('Unauthorized')""" """if the token is valid, a policy must be generated which will allow or deny access to the client""" """if access is denied, the client will receive a 403 Access Denied response""" """if access is allowed, API Gateway will proceed with the backend integration configured on the method that was called""" """this function must generate a policy that is associated with the recognized principal user identifier.""" """depending on your use case, you might store policies in a DB, or generate them on the fly""" """keep in mind, the policy is cached for 5 minutes by default (TTL is configurable in the authorizer)""" """and will apply to subsequent calls to any method/resource in the RestApi""" """made with the same token""" """the example policy below denies access to all resources in the RestApi""" tmp = event['methodArn'].split(':') apiGatewayArnTmp = tmp[5].split('/') awsAccountId = tmp[4] policy = AuthPolicy(principalId, awsAccountId) policy.restApiId = apiGatewayArnTmp[0] policy.region = tmp[3] policy.stage = apiGatewayArnTmp[1] # Blueprint denies all methods by default # policy.denyAllMethods() # Example allows all methods policy.allowAllMethods() """policy.allowMethod(HttpVerb.GET, "/pets/*")""" """finally, build the policy and exit the function using return""" return policy.build()
[ "def", "lambda_handler", "(", "event", ",", "context", ")", ":", "print", "(", "\"Client token: \"", "+", "event", "[", "'authorizationToken'", "]", ")", "print", "(", "\"Method ARN: \"", "+", "event", "[", "'methodArn'", "]", ")", "\"\"\"and produce the principal...
validate the incoming token
[ "validate", "the", "incoming", "token" ]
python
train
quantopian/pyfolio
pyfolio/risk.py
https://github.com/quantopian/pyfolio/blob/712716ab0cdebbec9fabb25eea3bf40e4354749d/pyfolio/risk.py#L119-L171
def compute_sector_exposures(positions, sectors, sector_dict=SECTORS): """ Returns arrays of long, short and gross sector exposures of an algorithm's positions Parameters ---------- positions : pd.DataFrame Daily equity positions of algorithm, in dollars. - See full explanation in compute_style_factor_exposures. sectors : pd.DataFrame Daily Morningstar sector code per asset - See full explanation in create_risk_tear_sheet sector_dict : dict or OrderedDict Dictionary of all sectors - Keys are sector codes (e.g. ints or strings) and values are sector names (which must be strings) - Defaults to Morningstar sectors """ sector_ids = sector_dict.keys() long_exposures = [] short_exposures = [] gross_exposures = [] net_exposures = [] positions_wo_cash = positions.drop('cash', axis='columns') long_exposure = positions_wo_cash[positions_wo_cash > 0] \ .sum(axis='columns') short_exposure = positions_wo_cash[positions_wo_cash < 0] \ .abs().sum(axis='columns') gross_exposure = positions_wo_cash.abs().sum(axis='columns') for sector_id in sector_ids: in_sector = positions_wo_cash[sectors == sector_id] long_sector = in_sector[in_sector > 0] \ .sum(axis='columns').divide(long_exposure) short_sector = in_sector[in_sector < 0] \ .sum(axis='columns').divide(short_exposure) gross_sector = in_sector.abs().sum(axis='columns') \ .divide(gross_exposure) net_sector = long_sector.subtract(short_sector) long_exposures.append(long_sector) short_exposures.append(short_sector) gross_exposures.append(gross_sector) net_exposures.append(net_sector) return long_exposures, short_exposures, gross_exposures, net_exposures
[ "def", "compute_sector_exposures", "(", "positions", ",", "sectors", ",", "sector_dict", "=", "SECTORS", ")", ":", "sector_ids", "=", "sector_dict", ".", "keys", "(", ")", "long_exposures", "=", "[", "]", "short_exposures", "=", "[", "]", "gross_exposures", "=...
Returns arrays of long, short and gross sector exposures of an algorithm's positions Parameters ---------- positions : pd.DataFrame Daily equity positions of algorithm, in dollars. - See full explanation in compute_style_factor_exposures. sectors : pd.DataFrame Daily Morningstar sector code per asset - See full explanation in create_risk_tear_sheet sector_dict : dict or OrderedDict Dictionary of all sectors - Keys are sector codes (e.g. ints or strings) and values are sector names (which must be strings) - Defaults to Morningstar sectors
[ "Returns", "arrays", "of", "long", "short", "and", "gross", "sector", "exposures", "of", "an", "algorithm", "s", "positions" ]
python
valid
tommyod/streprogen
streprogen/program.py
https://github.com/tommyod/streprogen/blob/21b903618e8b2d398bceb394d18d7c74ca984def/streprogen/program.py#L768-L802
def to_tex(self, text_size='large', table_width=5, clear_pages = False): """ Write the program information to a .tex file, which can be rendered to .pdf running pdflatex. The program can then be printed and brought to the gym. Parameters ---------- text_size The tex text size, e.g. '\small', 'normalsize', 'large', 'Large' or 'LARGE'. table_width The table with of the .tex code. Returns ------- string Program as tex. """ # If rendered, find the length of the longest '6 x 75kg'-type string max_ex_scheme = 0 if self._rendered: for (week, day, dynamic_ex) in self._yield_week_day_dynamic(): lengths = [len(s) for s in self._rendered[week][day][dynamic_ex]['strings']] max_ex_scheme = max(max_ex_scheme, max(lengths)) env = self.jinja2_environment template = env.get_template(self.TEMPLATE_NAMES['tex']) return template.render(program=self, text_size=text_size, table_width=table_width, clear_pages = clear_pages)
[ "def", "to_tex", "(", "self", ",", "text_size", "=", "'large'", ",", "table_width", "=", "5", ",", "clear_pages", "=", "False", ")", ":", "# If rendered, find the length of the longest '6 x 75kg'-type string", "max_ex_scheme", "=", "0", "if", "self", ".", "_rendered...
Write the program information to a .tex file, which can be rendered to .pdf running pdflatex. The program can then be printed and brought to the gym. Parameters ---------- text_size The tex text size, e.g. '\small', 'normalsize', 'large', 'Large' or 'LARGE'. table_width The table with of the .tex code. Returns ------- string Program as tex.
[ "Write", "the", "program", "information", "to", "a", ".", "tex", "file", "which", "can", "be", "rendered", "to", ".", "pdf", "running", "pdflatex", ".", "The", "program", "can", "then", "be", "printed", "and", "brought", "to", "the", "gym", "." ]
python
train
hyperledger/sawtooth-core
validator/sawtooth_validator/state/client_handlers.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/state/client_handlers.py#L550-L579
def _format_batch_statuses(statuses, batch_ids, tracker): """Takes a statuses dict and formats it for transmission with Protobuf and ZMQ. Args: statuses (dict of int): Dict with batch ids as the key, status as value batch_ids (list of str): The batch ids in their original order tracker (BatchTracker): A batch tracker with access to invalid info """ proto_statuses = [] for batch_id in batch_ids: if statuses[batch_id] == \ client_batch_submit_pb2.ClientBatchStatus.INVALID: invalid_txns = tracker.get_invalid_txn_info(batch_id) for txn_info in invalid_txns: try: txn_info['transaction_id'] = txn_info.pop('id') except KeyError as e: LOGGER.debug(e) else: invalid_txns = None proto_statuses.append( client_batch_submit_pb2.ClientBatchStatus( batch_id=batch_id, status=statuses[batch_id], invalid_transactions=invalid_txns)) return proto_statuses
[ "def", "_format_batch_statuses", "(", "statuses", ",", "batch_ids", ",", "tracker", ")", ":", "proto_statuses", "=", "[", "]", "for", "batch_id", "in", "batch_ids", ":", "if", "statuses", "[", "batch_id", "]", "==", "client_batch_submit_pb2", ".", "ClientBatchSt...
Takes a statuses dict and formats it for transmission with Protobuf and ZMQ. Args: statuses (dict of int): Dict with batch ids as the key, status as value batch_ids (list of str): The batch ids in their original order tracker (BatchTracker): A batch tracker with access to invalid info
[ "Takes", "a", "statuses", "dict", "and", "formats", "it", "for", "transmission", "with", "Protobuf", "and", "ZMQ", "." ]
python
train
DataBiosphere/toil
src/toil/fileStore.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L994-L1075
def addToCache(self, localFilePath, jobStoreFileID, callingFunc, mutable=False): """ Used to process the caching of a file. This depends on whether a file is being written to file store, or read from it. WRITING The file is in localTempDir. It needs to be linked into cache if possible. READING The file is already in the cache dir. Depending on whether it is modifiable or not, does it need to be linked to the required location, or copied. If it is copied, can the file still be retained in cache? :param str localFilePath: Path to the Source file :param jobStoreFileID: jobStoreID for the file :param str callingFunc: Who called this function, 'write' or 'read' :param bool mutable: See modifiable in readGlobalFile """ assert callingFunc in ('read', 'write') with self.cacheLock() as lockFileHandle: cachedFile = self.encodedFileID(jobStoreFileID) # The file to be cached MUST originate in the environment of the TOIL temp directory if (os.stat(self.localCacheDir).st_dev != os.stat(os.path.dirname(localFilePath)).st_dev): raise InvalidSourceCacheError('Attempting to cache a file across file systems ' 'cachedir = %s, file = %s.' % (self.localCacheDir, localFilePath)) if not localFilePath.startswith(self.localTempDir): raise InvalidSourceCacheError('Attempting a cache operation on a non-local file ' '%s.' % localFilePath) if callingFunc == 'read' and mutable: shutil.copyfile(cachedFile, localFilePath) fileSize = os.stat(cachedFile).st_size cacheInfo = self._CacheState._load(self.cacheStateFile) cacheInfo.cached += fileSize if cacheInfo.nlink != 2 else 0 if not cacheInfo.isBalanced(): os.remove(cachedFile) cacheInfo.cached -= fileSize if cacheInfo.nlink != 2 else 0 logger.debug('Could not download both download ' + '%s as mutable and add to ' % os.path.basename(localFilePath) + 'cache. Hence only mutable copy retained.') else: logger.debug('CACHE: Added file with ID \'%s\' to the cache.' 
% jobStoreFileID) jobState = self._JobState(cacheInfo.jobState[self.jobID]) jobState.addToJobSpecFiles(jobStoreFileID, localFilePath, -1, False) cacheInfo.jobState[self.jobID] = jobState.__dict__ cacheInfo.write(self.cacheStateFile) else: # There are two possibilities, read and immutable, and write. both cases do # almost the same thing except for the direction of the os.link hence we're # writing them together. if callingFunc == 'read': # and mutable is inherently False src = cachedFile dest = localFilePath # To mirror behaviour of shutil.copyfile if os.path.exists(dest): os.remove(dest) else: # write src = localFilePath dest = cachedFile try: os.link(src, dest) except OSError as err: if err.errno != errno.EEXIST: raise # If we get the EEXIST error, it can only be from write since in read we are # explicitly deleting the file. This shouldn't happen with the .partial # logic hence we raise a cache error. raise CacheError('Attempting to recache a file %s.' % src) else: # Chmod the cached file. Cached files can never be modified. os.chmod(cachedFile, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH) # Return the filesize of cachedFile to the job and increase the cached size # The values passed here don't matter since rFS looks at the file only for # the stat self.returnFileSize(jobStoreFileID, localFilePath, lockFileHandle, fileAlreadyCached=False) if callingFunc == 'read': logger.debug('CACHE: Read file with ID \'%s\' from the cache.' % jobStoreFileID) else: logger.debug('CACHE: Added file with ID \'%s\' to the cache.' % jobStoreFileID)
[ "def", "addToCache", "(", "self", ",", "localFilePath", ",", "jobStoreFileID", ",", "callingFunc", ",", "mutable", "=", "False", ")", ":", "assert", "callingFunc", "in", "(", "'read'", ",", "'write'", ")", "with", "self", ".", "cacheLock", "(", ")", "as", ...
Used to process the caching of a file. This depends on whether a file is being written to file store, or read from it. WRITING The file is in localTempDir. It needs to be linked into cache if possible. READING The file is already in the cache dir. Depending on whether it is modifiable or not, does it need to be linked to the required location, or copied. If it is copied, can the file still be retained in cache? :param str localFilePath: Path to the Source file :param jobStoreFileID: jobStoreID for the file :param str callingFunc: Who called this function, 'write' or 'read' :param bool mutable: See modifiable in readGlobalFile
[ "Used", "to", "process", "the", "caching", "of", "a", "file", ".", "This", "depends", "on", "whether", "a", "file", "is", "being", "written", "to", "file", "store", "or", "read", "from", "it", ".", "WRITING", "The", "file", "is", "in", "localTempDir", ...
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/billing/models/service_package_quota_history_reservation.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/billing/models/service_package_quota_history_reservation.py#L67-L82
def account_id(self, account_id): """ Sets the account_id of this ServicePackageQuotaHistoryReservation. Account ID. :param account_id: The account_id of this ServicePackageQuotaHistoryReservation. :type: str """ if account_id is None: raise ValueError("Invalid value for `account_id`, must not be `None`") if account_id is not None and len(account_id) > 250: raise ValueError("Invalid value for `account_id`, length must be less than or equal to `250`") if account_id is not None and len(account_id) < 1: raise ValueError("Invalid value for `account_id`, length must be greater than or equal to `1`") self._account_id = account_id
[ "def", "account_id", "(", "self", ",", "account_id", ")", ":", "if", "account_id", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `account_id`, must not be `None`\"", ")", "if", "account_id", "is", "not", "None", "and", "len", "(", "account_id...
Sets the account_id of this ServicePackageQuotaHistoryReservation. Account ID. :param account_id: The account_id of this ServicePackageQuotaHistoryReservation. :type: str
[ "Sets", "the", "account_id", "of", "this", "ServicePackageQuotaHistoryReservation", ".", "Account", "ID", "." ]
python
train
jupyter-widgets/ipywidgets
ipywidgets/widgets/widget.py
https://github.com/jupyter-widgets/ipywidgets/blob/36fe37594cd5a268def228709ca27e37b99ac606/ipywidgets/widgets/widget.py#L212-L223
def _show_traceback(method): """decorator for showing tracebacks in IPython""" def m(self, *args, **kwargs): try: return(method(self, *args, **kwargs)) except Exception as e: ip = get_ipython() if ip is None: self.log.warning("Exception in widget method %s: %s", method, e, exc_info=True) else: ip.showtraceback() return m
[ "def", "_show_traceback", "(", "method", ")", ":", "def", "m", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "(", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")", "except", "E...
decorator for showing tracebacks in IPython
[ "decorator", "for", "showing", "tracebacks", "in", "IPython" ]
python
train
facelessuser/bracex
bracex/__init__.py
https://github.com/facelessuser/bracex/blob/1fdf83e2bdfb939e78ba9966bcef80cd7a5c8534/bracex/__init__.py#L140-L146
def set_expanding(self): """Set that we are expanding a sequence, and return whether a release is required by the caller.""" status = not self.expanding if status: self.expanding = True return status
[ "def", "set_expanding", "(", "self", ")", ":", "status", "=", "not", "self", ".", "expanding", "if", "status", ":", "self", ".", "expanding", "=", "True", "return", "status" ]
Set that we are expanding a sequence, and return whether a release is required by the caller.
[ "Set", "that", "we", "are", "expanding", "a", "sequence", "and", "return", "whether", "a", "release", "is", "required", "by", "the", "caller", "." ]
python
train
user-cont/conu
conu/backend/nspawn/container.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/nspawn/container.py#L306-L374
def run_systemdrun( self, command, internal_background=False, return_full_dict=False, **kwargs): """ execute command via systemd-run inside container :param command: list of command params :param internal_background: not used now :param kwargs: pass params to subprocess :return: dict with result """ internalkw = deepcopy(kwargs) or {} original_ignore_st = internalkw.get("ignore_status", False) original_return_st = internalkw.get("return_output", False) internalkw["ignore_status"] = True internalkw["return_output"] = False unit_name = constants.CONU_ARTIFACT_TAG + "unit_" + random_str() opts = ["-M", self.name, "--unit", unit_name] lpath = "/var/tmp/{}".format(unit_name) comout = {} if self._run_systemdrun_decide(): add_wait_var = "--wait" else: # keep service exist after it finish, to be able to read exit code add_wait_var = "-r" if internal_background: add_wait_var = "" if add_wait_var: opts.append(add_wait_var) # TODO: behave move similar to run_cmd function, unable to work with clean subprocess objects because systemd-run # does not support return stderr, stdout, and return code directly # find way how to do this in better way, machinectl shell is not possible # https://github.com/systemd/systemd/issues/5879 # https://github.com/systemd/systemd/issues/5878 bashworkaround = [ "/bin/bash", "-c", "({comm})>{path}.stdout 2>{path}.stderr".format( comm=" ".join(command), path=lpath)] whole_cmd = ["systemd-run"] + opts + bashworkaround comout['command'] = command comout['return_code'] = run_cmd(whole_cmd, **internalkw) or 0 if not internal_background: if not self._run_systemdrun_decide(): comout['return_code'] = self._systemctl_wait_until_finish( self.name, unit_name) if self.is_running(): self.copy_from( "{pin}.stdout".format( pin=lpath), "{pin}.stdout".format( pin=lpath)) with open("{pin}.stdout".format(pin=lpath)) as f: comout['stdout'] = f.read() self.copy_from( "{pin}.stderr".format( pin=lpath), "{pin}.stderr".format( pin=lpath)) with 
open("{pin}.stderr".format(pin=lpath)) as f: comout['stderr'] = f.read() logger.debug(comout) if not original_ignore_st and comout['return_code'] != 0: raise subprocess.CalledProcessError(comout['command'], comout) if return_full_dict: return comout if original_return_st: return comout['stdout'] else: return comout['return_code']
[ "def", "run_systemdrun", "(", "self", ",", "command", ",", "internal_background", "=", "False", ",", "return_full_dict", "=", "False", ",", "*", "*", "kwargs", ")", ":", "internalkw", "=", "deepcopy", "(", "kwargs", ")", "or", "{", "}", "original_ignore_st",...
execute command via systemd-run inside container :param command: list of command params :param internal_background: not used now :param kwargs: pass params to subprocess :return: dict with result
[ "execute", "command", "via", "systemd", "-", "run", "inside", "container" ]
python
train
Knoema/knoema-python-driver
knoema/__init__.py
https://github.com/Knoema/knoema-python-driver/blob/e98b13db3e4df51c208c272e2977bfbe4c6e5532/knoema/__init__.py#L7-L25
def get(dataset = None, include_metadata = False, mnemonics = None, **dim_values): """Use this function to get data from Knoema dataset.""" if not dataset and not mnemonics: raise ValueError('Dataset id is not specified') if mnemonics and dim_values: raise ValueError('The function does not support specifying mnemonics and selection in a single call') config = ApiConfig() client = ApiClient(config.host, config.app_id, config.app_secret) client.check_correct_host() ds = client.get_dataset(dataset) if dataset else None reader = MnemonicsDataReader(client, mnemonics) if mnemonics else StreamingDataReader(client, dim_values) if ds.type == 'Regular' else PivotDataReader(client, dim_values) reader.include_metadata = include_metadata reader.dataset = ds return reader.get_pandasframe()
[ "def", "get", "(", "dataset", "=", "None", ",", "include_metadata", "=", "False", ",", "mnemonics", "=", "None", ",", "*", "*", "dim_values", ")", ":", "if", "not", "dataset", "and", "not", "mnemonics", ":", "raise", "ValueError", "(", "'Dataset id is not ...
Use this function to get data from Knoema dataset.
[ "Use", "this", "function", "to", "get", "data", "from", "Knoema", "dataset", "." ]
python
train
geometalab/pyGeoTile
pygeotile/tile.py
https://github.com/geometalab/pyGeoTile/blob/b1f44271698f5fc4d18c2add935797ed43254aa6/pygeotile/tile.py#L63-L67
def for_latitude_longitude(cls, latitude, longitude, zoom): """Creates a tile from lat/lon in WGS84""" point = Point.from_latitude_longitude(latitude=latitude, longitude=longitude) pixel_x, pixel_y = point.pixels(zoom=zoom) return cls.for_pixels(pixel_x=pixel_x, pixel_y=pixel_y, zoom=zoom)
[ "def", "for_latitude_longitude", "(", "cls", ",", "latitude", ",", "longitude", ",", "zoom", ")", ":", "point", "=", "Point", ".", "from_latitude_longitude", "(", "latitude", "=", "latitude", ",", "longitude", "=", "longitude", ")", "pixel_x", ",", "pixel_y", ...
Creates a tile from lat/lon in WGS84
[ "Creates", "a", "tile", "from", "lat", "/", "lon", "in", "WGS84" ]
python
train
developersociety/django-glitter
glitter/page.py
https://github.com/developersociety/django-glitter/blob/2c0280ec83afee80deee94ee3934fc54239c2e87/glitter/page.py#L324-L352
def default_blocks(self): """ Return a list of default block tuples (appname.ModelName, verbose name). Next to the dropdown list of block types, a small number of common blocks which are frequently used can be added immediately to a column with one click. This method defines the list of default blocks. """ # Use the block list provided by settings if it's defined block_list = getattr(settings, 'GLITTER_DEFAULT_BLOCKS', None) if block_list is not None: return block_list # Try and auto fill in default blocks if the apps are installed block_list = [] for block in GLITTER_FALLBACK_BLOCKS: app_name, model_name = block.split('.') try: model_class = apps.get_model(app_name, model_name) verbose_name = capfirst(model_class._meta.verbose_name) block_list.append((block, verbose_name)) except LookupError: # Block isn't installed - don't add it as a quick add default pass return block_list
[ "def", "default_blocks", "(", "self", ")", ":", "# Use the block list provided by settings if it's defined", "block_list", "=", "getattr", "(", "settings", ",", "'GLITTER_DEFAULT_BLOCKS'", ",", "None", ")", "if", "block_list", "is", "not", "None", ":", "return", "bloc...
Return a list of default block tuples (appname.ModelName, verbose name). Next to the dropdown list of block types, a small number of common blocks which are frequently used can be added immediately to a column with one click. This method defines the list of default blocks.
[ "Return", "a", "list", "of", "default", "block", "tuples", "(", "appname", ".", "ModelName", "verbose", "name", ")", "." ]
python
train
Kopachris/seshet
seshet/bot.py
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/bot.py#L41-L48
def quit(self): """Remove this user from all channels and reinitialize the user's list of joined channels. """ for c in self.channels: c.users.remove(self.nick) self.channels = []
[ "def", "quit", "(", "self", ")", ":", "for", "c", "in", "self", ".", "channels", ":", "c", ".", "users", ".", "remove", "(", "self", ".", "nick", ")", "self", ".", "channels", "=", "[", "]" ]
Remove this user from all channels and reinitialize the user's list of joined channels.
[ "Remove", "this", "user", "from", "all", "channels", "and", "reinitialize", "the", "user", "s", "list", "of", "joined", "channels", "." ]
python
train
welchbj/sublemon
sublemon/utils.py
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/utils.py#L22-L32
def crossplat_loop_run(coro) -> Any: """Cross-platform method for running a subprocess-spawning coroutine.""" if sys.platform == 'win32': signal.signal(signal.SIGINT, signal.SIG_DFL) loop = asyncio.ProactorEventLoop() else: loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) with contextlib.closing(loop): return loop.run_until_complete(coro)
[ "def", "crossplat_loop_run", "(", "coro", ")", "->", "Any", ":", "if", "sys", ".", "platform", "==", "'win32'", ":", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "signal", ".", "SIG_DFL", ")", "loop", "=", "asyncio", ".", "ProactorEventLoo...
Cross-platform method for running a subprocess-spawning coroutine.
[ "Cross", "-", "platform", "method", "for", "running", "a", "subprocess", "-", "spawning", "coroutine", "." ]
python
train
7sDream/zhihu-py3
zhihu/answer.py
https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/answer.py#L145-L154
def content(self): """以处理过的Html代码形式返回答案内容. :return: 答案内容 :rtype: str """ answer_wrap = self.soup.find('div', id='zh-question-answer-wrap') content = answer_wrap.find('div', class_='zm-editable-content') content = answer_content_process(content) return content
[ "def", "content", "(", "self", ")", ":", "answer_wrap", "=", "self", ".", "soup", ".", "find", "(", "'div'", ",", "id", "=", "'zh-question-answer-wrap'", ")", "content", "=", "answer_wrap", ".", "find", "(", "'div'", ",", "class_", "=", "'zm-editable-conte...
以处理过的Html代码形式返回答案内容. :return: 答案内容 :rtype: str
[ "以处理过的Html代码形式返回答案内容", "." ]
python
train
daler/trackhub
trackhub/track.py
https://github.com/daler/trackhub/blob/e4655f79177822529f80b923df117e38e28df702/trackhub/track.py#L300-L321
def add_subgroups(self, subgroups): """ Update the subgroups for this track. Note that in contrast to :meth:`CompositeTrack`, which takes a list of :class:`SubGroupDefinition` objects representing the allowed subgroups, this method takes a single dictionary indicating the particular subgroups for this track. Parameters ---------- subgroups : dict Dictionary of subgroups, e.g., {'celltype': 'K562', 'treatment': 'a'}. Each key must match a SubGroupDefinition name in the composite's subgroups list. Each value must match a key in that SubGroupDefinition.mapping dictionary. """ if subgroups is None: subgroups = {} assert isinstance(subgroups, dict) self.subgroups.update(subgroups)
[ "def", "add_subgroups", "(", "self", ",", "subgroups", ")", ":", "if", "subgroups", "is", "None", ":", "subgroups", "=", "{", "}", "assert", "isinstance", "(", "subgroups", ",", "dict", ")", "self", ".", "subgroups", ".", "update", "(", "subgroups", ")" ...
Update the subgroups for this track. Note that in contrast to :meth:`CompositeTrack`, which takes a list of :class:`SubGroupDefinition` objects representing the allowed subgroups, this method takes a single dictionary indicating the particular subgroups for this track. Parameters ---------- subgroups : dict Dictionary of subgroups, e.g., {'celltype': 'K562', 'treatment': 'a'}. Each key must match a SubGroupDefinition name in the composite's subgroups list. Each value must match a key in that SubGroupDefinition.mapping dictionary.
[ "Update", "the", "subgroups", "for", "this", "track", "." ]
python
train
marcomusy/vtkplotter
vtkplotter/addons.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/addons.py#L608-L642
def addIcon(iconActor, pos=3, size=0.08): """Add an inset icon mesh into the same renderer. :param pos: icon position in the range [1-4] indicating one of the 4 corners, or it can be a tuple (x,y) as a fraction of the renderer size. :param float size: size of the square inset. .. hint:: |icon| |icon.py|_ """ vp = settings.plotter_instance if not vp.renderer: colors.printc("~lightningWarning: Use addIcon() after first rendering the scene.", c=3) save_int = vp.interactive vp.show(interactive=0) vp.interactive = save_int widget = vtk.vtkOrientationMarkerWidget() widget.SetOrientationMarker(iconActor) widget.SetInteractor(vp.interactor) if utils.isSequence(pos): widget.SetViewport(pos[0] - size, pos[1] - size, pos[0] + size, pos[1] + size) else: if pos < 2: widget.SetViewport(0, 1 - 2 * size, size * 2, 1) elif pos == 2: widget.SetViewport(1 - 2 * size, 1 - 2 * size, 1, 1) elif pos == 3: widget.SetViewport(0, 0, size * 2, size * 2) elif pos == 4: widget.SetViewport(1 - 2 * size, 0, 1, size * 2) widget.EnabledOn() widget.InteractiveOff() vp.widgets.append(widget) if iconActor in vp.actors: vp.actors.remove(iconActor) return widget
[ "def", "addIcon", "(", "iconActor", ",", "pos", "=", "3", ",", "size", "=", "0.08", ")", ":", "vp", "=", "settings", ".", "plotter_instance", "if", "not", "vp", ".", "renderer", ":", "colors", ".", "printc", "(", "\"~lightningWarning: Use addIcon() after fir...
Add an inset icon mesh into the same renderer. :param pos: icon position in the range [1-4] indicating one of the 4 corners, or it can be a tuple (x,y) as a fraction of the renderer size. :param float size: size of the square inset. .. hint:: |icon| |icon.py|_
[ "Add", "an", "inset", "icon", "mesh", "into", "the", "same", "renderer", "." ]
python
train
etobella/python-xmlsig
src/xmlsig/signature_context.py
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/signature_context.py#L226-L239
def digest(self, method, node): """ Returns the digest of an object from a method name :param method: hash method :type method: str :param node: Object to hash :type node: str :return: hash result """ if method not in constants.TransformUsageDigestMethod: raise Exception('Method not allowed') lib = hashlib.new(constants.TransformUsageDigestMethod[method]) lib.update(node) return base64.b64encode(lib.digest())
[ "def", "digest", "(", "self", ",", "method", ",", "node", ")", ":", "if", "method", "not", "in", "constants", ".", "TransformUsageDigestMethod", ":", "raise", "Exception", "(", "'Method not allowed'", ")", "lib", "=", "hashlib", ".", "new", "(", "constants",...
Returns the digest of an object from a method name :param method: hash method :type method: str :param node: Object to hash :type node: str :return: hash result
[ "Returns", "the", "digest", "of", "an", "object", "from", "a", "method", "name", ":", "param", "method", ":", "hash", "method", ":", "type", "method", ":", "str", ":", "param", "node", ":", "Object", "to", "hash", ":", "type", "node", ":", "str", ":"...
python
train
pmorissette/bt
bt/core.py
https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L831-L838
def price(self): """ Current price. """ # if accessing and stale - update first if self._needupdate or self.now != self.parent.now: self.update(self.root.now) return self._price
[ "def", "price", "(", "self", ")", ":", "# if accessing and stale - update first", "if", "self", ".", "_needupdate", "or", "self", ".", "now", "!=", "self", ".", "parent", ".", "now", ":", "self", ".", "update", "(", "self", ".", "root", ".", "now", ")", ...
Current price.
[ "Current", "price", "." ]
python
train
dwavesystems/dwave-cloud-client
dwave/cloud/utils.py
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/utils.py#L123-L130
def uniform_get(sequence, index, default=None): """Uniform `dict`/`list` item getter, where `index` is interpreted as a key for maps and as numeric index for lists.""" if isinstance(sequence, abc.Mapping): return sequence.get(index, default) else: return sequence[index] if index < len(sequence) else default
[ "def", "uniform_get", "(", "sequence", ",", "index", ",", "default", "=", "None", ")", ":", "if", "isinstance", "(", "sequence", ",", "abc", ".", "Mapping", ")", ":", "return", "sequence", ".", "get", "(", "index", ",", "default", ")", "else", ":", "...
Uniform `dict`/`list` item getter, where `index` is interpreted as a key for maps and as numeric index for lists.
[ "Uniform", "dict", "/", "list", "item", "getter", "where", "index", "is", "interpreted", "as", "a", "key", "for", "maps", "and", "as", "numeric", "index", "for", "lists", "." ]
python
train
h2oai/h2o-3
h2o-docs/src/product/sphinxext/apigen.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-docs/src/product/sphinxext/apigen.py#L371-L392
def write_api_docs(self, outdir): """Generate API reST files. Parameters ---------- outdir : string Directory name in which to store files We create automatic filenames for each module Returns ------- None Notes ----- Sets self.written_modules to list of written modules """ if not os.path.exists(outdir): os.mkdir(outdir) # compose list of modules modules = self.discover_modules() self.write_modules_api(modules,outdir)
[ "def", "write_api_docs", "(", "self", ",", "outdir", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "outdir", ")", ":", "os", ".", "mkdir", "(", "outdir", ")", "# compose list of modules", "modules", "=", "self", ".", "discover_modules", "...
Generate API reST files. Parameters ---------- outdir : string Directory name in which to store files We create automatic filenames for each module Returns ------- None Notes ----- Sets self.written_modules to list of written modules
[ "Generate", "API", "reST", "files", "." ]
python
test
pecan/pecan
pecan/core.py
https://github.com/pecan/pecan/blob/833d0653fa0e6bbfb52545b091c30182105f4a82/pecan/core.py#L202-L223
def load_app(config, **kwargs): ''' Used to load a ``Pecan`` application and its environment based on passed configuration. :param config: Can be a dictionary containing configuration, a string which represents a (relative) configuration filename returns a pecan.Pecan object ''' from .configuration import _runtime_conf, set_config set_config(config, overwrite=True) for package_name in getattr(_runtime_conf.app, 'modules', []): module = __import__(package_name, fromlist=['app']) if hasattr(module, 'app') and hasattr(module.app, 'setup_app'): app = module.app.setup_app(_runtime_conf, **kwargs) app.config = _runtime_conf return app raise RuntimeError( 'No app.setup_app found in any of the configured app.modules' )
[ "def", "load_app", "(", "config", ",", "*", "*", "kwargs", ")", ":", "from", ".", "configuration", "import", "_runtime_conf", ",", "set_config", "set_config", "(", "config", ",", "overwrite", "=", "True", ")", "for", "package_name", "in", "getattr", "(", "...
Used to load a ``Pecan`` application and its environment based on passed configuration. :param config: Can be a dictionary containing configuration, a string which represents a (relative) configuration filename returns a pecan.Pecan object
[ "Used", "to", "load", "a", "Pecan", "application", "and", "its", "environment", "based", "on", "passed", "configuration", "." ]
python
train
Erotemic/utool
utool/util_str.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L3492-L3510
def find_block_end(row, line_list, sentinal, direction=1): """ Searches up and down until it finds the endpoints of a block Rectify with find_paragraph_end in pyvim_funcs """ import re row_ = row line_ = line_list[row_] flag1 = row_ == 0 or row_ == len(line_list) - 1 flag2 = re.match(sentinal, line_) if not (flag1 or flag2): while True: if (row_ == 0 or row_ == len(line_list) - 1): break line_ = line_list[row_] if re.match(sentinal, line_): break row_ += direction return row_
[ "def", "find_block_end", "(", "row", ",", "line_list", ",", "sentinal", ",", "direction", "=", "1", ")", ":", "import", "re", "row_", "=", "row", "line_", "=", "line_list", "[", "row_", "]", "flag1", "=", "row_", "==", "0", "or", "row_", "==", "len",...
Searches up and down until it finds the endpoints of a block Rectify with find_paragraph_end in pyvim_funcs
[ "Searches", "up", "and", "down", "until", "it", "finds", "the", "endpoints", "of", "a", "block", "Rectify", "with", "find_paragraph_end", "in", "pyvim_funcs" ]
python
train
fastai/fastai
fastai/widgets/image_cleaner.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/widgets/image_cleaner.py#L36-L39
def from_similars(cls, learn, layer_ls:list=[0, 7, 2], **kwargs): "Gets the indices for the most similar images." train_ds, train_idxs = cls.get_similars_idxs(learn, layer_ls, **kwargs) return train_ds, train_idxs
[ "def", "from_similars", "(", "cls", ",", "learn", ",", "layer_ls", ":", "list", "=", "[", "0", ",", "7", ",", "2", "]", ",", "*", "*", "kwargs", ")", ":", "train_ds", ",", "train_idxs", "=", "cls", ".", "get_similars_idxs", "(", "learn", ",", "laye...
Gets the indices for the most similar images.
[ "Gets", "the", "indices", "for", "the", "most", "similar", "images", "." ]
python
train
jxtech/wechatpy
wechatpy/client/api/user.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/user.py#L39-L63
def get_followers(self, first_user_id=None): """ 获取一页用户列表(当关注用户过多的情况下,这个接口只会返回一部分用户) 详情请参考 https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421140840 :param first_user_id: 可选。第一个拉取的 OPENID,不填默认从头开始拉取 :return: 返回的 JSON 数据包 使用示例:: from wechatpy import WeChatClient client = WeChatClient('appid', 'secret') followers = client.user.get_followers() """ params = {} if first_user_id: params['next_openid'] = first_user_id return self._get( 'user/get', params=params )
[ "def", "get_followers", "(", "self", ",", "first_user_id", "=", "None", ")", ":", "params", "=", "{", "}", "if", "first_user_id", ":", "params", "[", "'next_openid'", "]", "=", "first_user_id", "return", "self", ".", "_get", "(", "'user/get'", ",", "params...
获取一页用户列表(当关注用户过多的情况下,这个接口只会返回一部分用户) 详情请参考 https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421140840 :param first_user_id: 可选。第一个拉取的 OPENID,不填默认从头开始拉取 :return: 返回的 JSON 数据包 使用示例:: from wechatpy import WeChatClient client = WeChatClient('appid', 'secret') followers = client.user.get_followers()
[ "获取一页用户列表", "(", "当关注用户过多的情况下,这个接口只会返回一部分用户", ")" ]
python
train
tcalmant/ipopo
pelix/shell/parser.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/shell/parser.py#L576-L601
def __print_namespace_help(self, session, namespace, cmd_name=None): """ Prints the documentation of all the commands in the given name space, or only of the given command :param session: Session Handler :param namespace: Name space of the command :param cmd_name: Name of the command to show, None to show them all """ session.write_line("=== Name space '{0}' ===", namespace) # Get all commands in this name space if cmd_name is None: names = [command for command in self._commands[namespace]] names.sort() else: names = [cmd_name] first_cmd = True for command in names: if not first_cmd: # Print an empty line session.write_line("\n") self.__print_command_help(session, namespace, command) first_cmd = False
[ "def", "__print_namespace_help", "(", "self", ",", "session", ",", "namespace", ",", "cmd_name", "=", "None", ")", ":", "session", ".", "write_line", "(", "\"=== Name space '{0}' ===\"", ",", "namespace", ")", "# Get all commands in this name space", "if", "cmd_name",...
Prints the documentation of all the commands in the given name space, or only of the given command :param session: Session Handler :param namespace: Name space of the command :param cmd_name: Name of the command to show, None to show them all
[ "Prints", "the", "documentation", "of", "all", "the", "commands", "in", "the", "given", "name", "space", "or", "only", "of", "the", "given", "command" ]
python
train
tipsi/aiozk
aiozk/protocol/primitives.py
https://github.com/tipsi/aiozk/blob/96d2f543de248c6d993b5bfe6621167dd1eb8223/aiozk/protocol/primitives.py#L28-L40
def parse(cls, buff, offset): """ Given a buffer and offset, returns the parsed value and new offset. Uses the ``format`` class attribute to unpack the data from the buffer and determine the used up number of bytes. """ primitive_struct = struct.Struct("!" + cls.fmt) value = primitive_struct.unpack_from(buff, offset)[0] offset += primitive_struct.size return value, offset
[ "def", "parse", "(", "cls", ",", "buff", ",", "offset", ")", ":", "primitive_struct", "=", "struct", ".", "Struct", "(", "\"!\"", "+", "cls", ".", "fmt", ")", "value", "=", "primitive_struct", ".", "unpack_from", "(", "buff", ",", "offset", ")", "[", ...
Given a buffer and offset, returns the parsed value and new offset. Uses the ``format`` class attribute to unpack the data from the buffer and determine the used up number of bytes.
[ "Given", "a", "buffer", "and", "offset", "returns", "the", "parsed", "value", "and", "new", "offset", "." ]
python
train
tornadoweb/tornado
tornado/httpserver.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/httpserver.py#L332-L352
def _apply_xheaders(self, headers: httputil.HTTPHeaders) -> None: """Rewrite the ``remote_ip`` and ``protocol`` fields.""" # Squid uses X-Forwarded-For, others use X-Real-Ip ip = headers.get("X-Forwarded-For", self.remote_ip) # Skip trusted downstream hosts in X-Forwarded-For list for ip in (cand.strip() for cand in reversed(ip.split(","))): if ip not in self.trusted_downstream: break ip = headers.get("X-Real-Ip", ip) if netutil.is_valid_ip(ip): self.remote_ip = ip # AWS uses X-Forwarded-Proto proto_header = headers.get( "X-Scheme", headers.get("X-Forwarded-Proto", self.protocol) ) if proto_header: # use only the last proto entry if there is more than one # TODO: support trusting mutiple layers of proxied protocol proto_header = proto_header.split(",")[-1].strip() if proto_header in ("http", "https"): self.protocol = proto_header
[ "def", "_apply_xheaders", "(", "self", ",", "headers", ":", "httputil", ".", "HTTPHeaders", ")", "->", "None", ":", "# Squid uses X-Forwarded-For, others use X-Real-Ip", "ip", "=", "headers", ".", "get", "(", "\"X-Forwarded-For\"", ",", "self", ".", "remote_ip", "...
Rewrite the ``remote_ip`` and ``protocol`` fields.
[ "Rewrite", "the", "remote_ip", "and", "protocol", "fields", "." ]
python
train
taizilongxu/douban.fm
doubanfm/colorset/colors.py
https://github.com/taizilongxu/douban.fm/blob/d65126d3bd3e12d8a7109137caff8da0efc22b2f/doubanfm/colorset/colors.py#L38-L44
def color_func(func_name): """ Call color function base on name """ if str(func_name).isdigit(): return term_color(int(func_name)) return globals()[func_name]
[ "def", "color_func", "(", "func_name", ")", ":", "if", "str", "(", "func_name", ")", ".", "isdigit", "(", ")", ":", "return", "term_color", "(", "int", "(", "func_name", ")", ")", "return", "globals", "(", ")", "[", "func_name", "]" ]
Call color function base on name
[ "Call", "color", "function", "base", "on", "name" ]
python
train
CivicSpleen/ambry
ambry/bundle/bundle.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/bundle.py#L1347-L1352
def record_stage_state(self, phase, stage): """Record the completion times of phases and stages""" key = '{}-{}'.format(phase, stage if stage else 1) self.buildstate.state[key] = time()
[ "def", "record_stage_state", "(", "self", ",", "phase", ",", "stage", ")", ":", "key", "=", "'{}-{}'", ".", "format", "(", "phase", ",", "stage", "if", "stage", "else", "1", ")", "self", ".", "buildstate", ".", "state", "[", "key", "]", "=", "time", ...
Record the completion times of phases and stages
[ "Record", "the", "completion", "times", "of", "phases", "and", "stages" ]
python
train
UDST/orca
orca/orca.py
https://github.com/UDST/orca/blob/07b34aeef13cc87c966b2e30cbe7e76cc9d3622c/orca/orca.py#L48-L64
def clear_all(): """ Clear any and all stored state from Orca. """ _TABLES.clear() _COLUMNS.clear() _STEPS.clear() _BROADCASTS.clear() _INJECTABLES.clear() _TABLE_CACHE.clear() _COLUMN_CACHE.clear() _INJECTABLE_CACHE.clear() for m in _MEMOIZED.values(): m.value.clear_cached() _MEMOIZED.clear() logger.debug('pipeline state cleared')
[ "def", "clear_all", "(", ")", ":", "_TABLES", ".", "clear", "(", ")", "_COLUMNS", ".", "clear", "(", ")", "_STEPS", ".", "clear", "(", ")", "_BROADCASTS", ".", "clear", "(", ")", "_INJECTABLES", ".", "clear", "(", ")", "_TABLE_CACHE", ".", "clear", "...
Clear any and all stored state from Orca.
[ "Clear", "any", "and", "all", "stored", "state", "from", "Orca", "." ]
python
train
HazardDede/dictmentor
dictmentor/validator.py
https://github.com/HazardDede/dictmentor/blob/f50ca26ed04f7a924cde6e4d464c4f6ccba4e320/dictmentor/validator.py#L212-L275
def is_file(raise_ex: bool = False, summary: bool = True, **items: Any) -> ValidationReturn: """ Tests if the given key-value pairs (items) are physical existent files or links to regular files. Per default this function yields whether ``True`` or ``False`` depending on the fact if all items withstand the validation or not. Per default the validation / evaluation is short-circuit and will return as soon an item evaluates to ``False``. When ``raise_ex`` is set to ``True`` the function will raise a meaningful error message after the first item evaluates to ``False`` (short-circuit). When ``summary`` is set to ``False`` a dictionary is returned containing the individual evaluation result ofeach item (non short-circuit). Examples: >>> import tempfile >>> tmp = tempfile.NamedTemporaryFile() >>> Validator.is_file(file=tmp.name) True >>> Validator.is_file(nonfile="iamsurethatthisdontexist") False >>> Validator.is_file(file=tmp.name, nonfile="iamsurethatthisdontexist") False >>> (Validator.is_file(file=tmp.name, nonfile="iamsurethatthisdontexist", summary=False) ... == {'file': True, 'nonfile': False}) True >>> Validator.is_file(file=tmp.name, nonfile="iamsurethatthisdontexist", raise_ex=True) Traceback (most recent call last): ... ValueError: 'nonfile' is not a file >>> with open(tmp.name) as fp: ... Validator.is_file(nonfile=fp) False Args: raise_ex (bool, optional): If set to ``True`` an exception is raised if at least one i tem is validated to ``False`` (works short-circuit and will abort the validation when the first item is evaluated to ``False``). summary (bool, optional): If set to ``False`` instead of returning just a single ``bool`` the validation will return a dictionary containing the individual evaluation result of each item. Returns: (boolean or dictionary): ``True`` when the value was successfully validated; ``False`` otherwise. If ``summary`` is set to ``False`` a dictionary containing the individual evaluation result of each item will be returned. 
If ``raise_ex`` is set to True, instead of returning False a meaningful error will be raised. """ return Validator.__test_all( condition=lambda _, val: Path(val).is_file(), formatter=lambda name, _: "'{varname}' is not a file".format(varname=name), validators=[partial(Validator.instance_of, target_type=str)], raise_ex=raise_ex, summary=summary, **items )
[ "def", "is_file", "(", "raise_ex", ":", "bool", "=", "False", ",", "summary", ":", "bool", "=", "True", ",", "*", "*", "items", ":", "Any", ")", "->", "ValidationReturn", ":", "return", "Validator", ".", "__test_all", "(", "condition", "=", "lambda", "...
Tests if the given key-value pairs (items) are physical existent files or links to regular files. Per default this function yields whether ``True`` or ``False`` depending on the fact if all items withstand the validation or not. Per default the validation / evaluation is short-circuit and will return as soon an item evaluates to ``False``. When ``raise_ex`` is set to ``True`` the function will raise a meaningful error message after the first item evaluates to ``False`` (short-circuit). When ``summary`` is set to ``False`` a dictionary is returned containing the individual evaluation result ofeach item (non short-circuit). Examples: >>> import tempfile >>> tmp = tempfile.NamedTemporaryFile() >>> Validator.is_file(file=tmp.name) True >>> Validator.is_file(nonfile="iamsurethatthisdontexist") False >>> Validator.is_file(file=tmp.name, nonfile="iamsurethatthisdontexist") False >>> (Validator.is_file(file=tmp.name, nonfile="iamsurethatthisdontexist", summary=False) ... == {'file': True, 'nonfile': False}) True >>> Validator.is_file(file=tmp.name, nonfile="iamsurethatthisdontexist", raise_ex=True) Traceback (most recent call last): ... ValueError: 'nonfile' is not a file >>> with open(tmp.name) as fp: ... Validator.is_file(nonfile=fp) False Args: raise_ex (bool, optional): If set to ``True`` an exception is raised if at least one i tem is validated to ``False`` (works short-circuit and will abort the validation when the first item is evaluated to ``False``). summary (bool, optional): If set to ``False`` instead of returning just a single ``bool`` the validation will return a dictionary containing the individual evaluation result of each item. Returns: (boolean or dictionary): ``True`` when the value was successfully validated; ``False`` otherwise. If ``summary`` is set to ``False`` a dictionary containing the individual evaluation result of each item will be returned. If ``raise_ex`` is set to True, instead of returning False a meaningful error will be raised.
[ "Tests", "if", "the", "given", "key", "-", "value", "pairs", "(", "items", ")", "are", "physical", "existent", "files", "or", "links", "to", "regular", "files", ".", "Per", "default", "this", "function", "yields", "whether", "True", "or", "False", "dependi...
python
train