repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
radujica/baloo
baloo/core/indexes/multi.py
https://github.com/radujica/baloo/blob/f6e05e35b73a75e8a300754c6bdc575e5f2d53b9/baloo/core/indexes/multi.py#L120-L133
def evaluate(self, verbose=False, decode=True, passes=None, num_threads=1, apply_experimental=True): """Evaluates by creating a MultiIndex containing evaluated data and index. See `LazyResult` Returns ------- MultiIndex MultiIndex with evaluated data. """ evaluated_data = [v.evaluate(verbose, decode, passes, num_threads, apply_experimental) for v in self.values] return MultiIndex(evaluated_data, self.names)
[ "def", "evaluate", "(", "self", ",", "verbose", "=", "False", ",", "decode", "=", "True", ",", "passes", "=", "None", ",", "num_threads", "=", "1", ",", "apply_experimental", "=", "True", ")", ":", "evaluated_data", "=", "[", "v", ".", "evaluate", "(",...
Evaluates by creating a MultiIndex containing evaluated data and index. See `LazyResult` Returns ------- MultiIndex MultiIndex with evaluated data.
[ "Evaluates", "by", "creating", "a", "MultiIndex", "containing", "evaluated", "data", "and", "index", "." ]
python
train
RedFantom/ttkthemes
ci.py
https://github.com/RedFantom/ttkthemes/blob/e7fc354c02faf0e3eb4842d7f44131a1c43dd299/ci.py#L38-L48
def run_command(command): """ :param command: command to run on os.system :return: exit code """ print("Running system command: ", command) return_info = os.system(command) if sys.platform == "win32": return return_info else: return os.WEXITSTATUS(return_info)
[ "def", "run_command", "(", "command", ")", ":", "print", "(", "\"Running system command: \"", ",", "command", ")", "return_info", "=", "os", ".", "system", "(", "command", ")", "if", "sys", ".", "platform", "==", "\"win32\"", ":", "return", "return_info", "e...
:param command: command to run on os.system :return: exit code
[ ":", "param", "command", ":", "command", "to", "run", "on", "os", ".", "system", ":", "return", ":", "exit", "code" ]
python
train
Jokymon/binstruct
binstruct.py
https://github.com/Jokymon/binstruct/blob/d42248294f819b39e93d8985041387db3ee7c1af/binstruct.py#L19-L34
def big_endian(original_class): """The big_endian function is a class decorator for classes derived from :class:`.StructTemplate`. By default a StructTemplate class interpretes its fields in little endian format. Using this decorator you change this behavior. :param original_class: The class you want to turn into a big endian structure.""" orig_init = original_class.__init__ def __init__(self, *args, **kwargs): orig_init(self, *args, **kwargs) self.endian = "big" original_class.__init__ = __init__ return original_class
[ "def", "big_endian", "(", "original_class", ")", ":", "orig_init", "=", "original_class", ".", "__init__", "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "orig_init", "(", "self", ",", "*", "args", ",", "*", "*", ...
The big_endian function is a class decorator for classes derived from :class:`.StructTemplate`. By default a StructTemplate class interpretes its fields in little endian format. Using this decorator you change this behavior. :param original_class: The class you want to turn into a big endian structure.
[ "The", "big_endian", "function", "is", "a", "class", "decorator", "for", "classes", "derived", "from", ":", "class", ":", ".", "StructTemplate", ".", "By", "default", "a", "StructTemplate", "class", "interpretes", "its", "fields", "in", "little", "endian", "fo...
python
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/algorithmic_math.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/algorithmic_math.py#L480-L517
def algebra_simplify(alphabet_size=26, min_depth=0, max_depth=2, nbr_cases=10000): """Generate the algebra simplify dataset. Each sample is a symbolic math expression involving unknown variables. The task is to simplify the expression. The target is the resulting expression. Args: alphabet_size: How many possible variables there are. Max 52. min_depth: Minimum depth of the expression trees on both sides of the equals sign in the equation. max_depth: Maximum depth of the expression trees on both sides of the equals sign in the equation. nbr_cases: The number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where input-list are the tokens encoding the expression to simplify, and target-list is a list of tokens encoding the resulting math expression after simplifying. Raises: ValueError: If `max_depth` < `min_depth`. """ if max_depth < min_depth: raise ValueError("max_depth must be greater than or equal to min_depth. " "Got max_depth=%s, min_depth=%s" % (max_depth, min_depth)) alg_cfg = math_dataset_init(alphabet_size, digits=5) for _ in range(nbr_cases): sample, target = generate_algebra_simplify_sample( alg_cfg.vlist, list(alg_cfg.ops.values()), min_depth, max_depth) yield { "inputs": alg_cfg.int_encoder(sample), "targets": alg_cfg.int_encoder(target) }
[ "def", "algebra_simplify", "(", "alphabet_size", "=", "26", ",", "min_depth", "=", "0", ",", "max_depth", "=", "2", ",", "nbr_cases", "=", "10000", ")", ":", "if", "max_depth", "<", "min_depth", ":", "raise", "ValueError", "(", "\"max_depth must be greater tha...
Generate the algebra simplify dataset. Each sample is a symbolic math expression involving unknown variables. The task is to simplify the expression. The target is the resulting expression. Args: alphabet_size: How many possible variables there are. Max 52. min_depth: Minimum depth of the expression trees on both sides of the equals sign in the equation. max_depth: Maximum depth of the expression trees on both sides of the equals sign in the equation. nbr_cases: The number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where input-list are the tokens encoding the expression to simplify, and target-list is a list of tokens encoding the resulting math expression after simplifying. Raises: ValueError: If `max_depth` < `min_depth`.
[ "Generate", "the", "algebra", "simplify", "dataset", "." ]
python
train
victorlei/smop
smop/rewrite.py
https://github.com/victorlei/smop/blob/bdad96b715d1dd75ce8ab4724f76b9b1bb1f61cd/smop/rewrite.py#L63-L73
def let_statement(u): """ If LHS is a plain variable, and RHS is a matrix enclosed in square brackets, replace the matrix expr with a funcall. """ if u.__class__ is node.let: if (u.ret.__class__ is node.ident and u.args.__class__ is node.matrix): u.args = node.funcall(func_expr=node.ident("matlabarray"), args=node.expr_list([u.args]))
[ "def", "let_statement", "(", "u", ")", ":", "if", "u", ".", "__class__", "is", "node", ".", "let", ":", "if", "(", "u", ".", "ret", ".", "__class__", "is", "node", ".", "ident", "and", "u", ".", "args", ".", "__class__", "is", "node", ".", "matri...
If LHS is a plain variable, and RHS is a matrix enclosed in square brackets, replace the matrix expr with a funcall.
[ "If", "LHS", "is", "a", "plain", "variable", "and", "RHS", "is", "a", "matrix", "enclosed", "in", "square", "brackets", "replace", "the", "matrix", "expr", "with", "a", "funcall", "." ]
python
train
Rapptz/discord.py
discord/ext/commands/bot.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/ext/commands/bot.py#L399-L417
def remove_listener(self, func, name=None): """Removes a listener from the pool of listeners. Parameters ----------- func The function that was used as a listener to remove. name: :class:`str` The name of the event we want to remove. Defaults to ``func.__name__``. """ name = func.__name__ if name is None else name if name in self.extra_events: try: self.extra_events[name].remove(func) except ValueError: pass
[ "def", "remove_listener", "(", "self", ",", "func", ",", "name", "=", "None", ")", ":", "name", "=", "func", ".", "__name__", "if", "name", "is", "None", "else", "name", "if", "name", "in", "self", ".", "extra_events", ":", "try", ":", "self", ".", ...
Removes a listener from the pool of listeners. Parameters ----------- func The function that was used as a listener to remove. name: :class:`str` The name of the event we want to remove. Defaults to ``func.__name__``.
[ "Removes", "a", "listener", "from", "the", "pool", "of", "listeners", "." ]
python
train
dwavesystems/dwave-system
dwave/system/composites/cutoffcomposite.py
https://github.com/dwavesystems/dwave-system/blob/86a1698f15ccd8b0ece0ed868ee49292d3f67f5b/dwave/system/composites/cutoffcomposite.py#L147-L177
def _restore_isolated(sampleset, bqm, isolated): """Return samples-like by adding isolated variables into sampleset in a way that minimizes the energy (relative to the other non-isolated variables). """ samples = sampleset.record.sample variables = sampleset.variables new_samples = np.empty((len(sampleset), len(isolated)), dtype=samples.dtype) # we don't let the isolated variables interact with each other for now because # it will slow this down substantially for col, v in enumerate(isolated): try: neighbours, biases = zip(*((u, bias) for u, bias in bqm.adj[v].items() if u in variables)) # ignore other isolates except ValueError: # happens when only neighbors are other isolated variables new_samples[:, col] = bqm.linear[v] <= 0 continue idxs = [variables.index[u] for u in neighbours] # figure out which value for v would minimize the energy # v(h_v + \sum_u J_uv * u) new_samples[:, col] = samples[:, idxs].dot(biases) < -bqm.linear[v] if bqm.vartype is dimod.SPIN: new_samples = 2*new_samples - 1 return np.concatenate((samples, new_samples), axis=1), list(variables) + isolated
[ "def", "_restore_isolated", "(", "sampleset", ",", "bqm", ",", "isolated", ")", ":", "samples", "=", "sampleset", ".", "record", ".", "sample", "variables", "=", "sampleset", ".", "variables", "new_samples", "=", "np", ".", "empty", "(", "(", "len", "(", ...
Return samples-like by adding isolated variables into sampleset in a way that minimizes the energy (relative to the other non-isolated variables).
[ "Return", "samples", "-", "like", "by", "adding", "isolated", "variables", "into", "sampleset", "in", "a", "way", "that", "minimizes", "the", "energy", "(", "relative", "to", "the", "other", "non", "-", "isolated", "variables", ")", "." ]
python
train
nsqio/pynsq
nsq/__init__.py
https://github.com/nsqio/pynsq/blob/48bf62d65ea63cddaa401efb23187b95511dbc84/nsq/__init__.py#L42-L48
def run(): """ Starts any instantiated :class:`nsq.Reader` or :class:`nsq.Writer` """ signal.signal(signal.SIGTERM, _handle_term_signal) signal.signal(signal.SIGINT, _handle_term_signal) tornado.ioloop.IOLoop.instance().start()
[ "def", "run", "(", ")", ":", "signal", ".", "signal", "(", "signal", ".", "SIGTERM", ",", "_handle_term_signal", ")", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "_handle_term_signal", ")", "tornado", ".", "ioloop", ".", "IOLoop", ".", "i...
Starts any instantiated :class:`nsq.Reader` or :class:`nsq.Writer`
[ "Starts", "any", "instantiated", ":", "class", ":", "nsq", ".", "Reader", "or", ":", "class", ":", "nsq", ".", "Writer" ]
python
test
houtianze/bypy
bypy/bypy.py
https://github.com/houtianze/bypy/blob/c59b6183e2fca45f11138bbcdec6247449b2eaad/bypy/bypy.py#L2633-L2672
def compare(self, remotedir = None, localdir = None, skip_remote_only_dirs = False): ''' Usage: compare [remotedir] [localdir] - \ compare the remote directory with the local directory remotedir - the remote directory at Baidu Yun (after app's directory). \ if not specified, it defaults to the root directory. localdir - the local directory, if not specified, it defaults to the current directory. skip_remote_only_dirs - skip remote-only sub-directories (faster if the remote \ directory is much larger than the local one). it defaults to False. ''' same, diff, local, remote = self.__compare(get_pcs_path(remotedir), localdir, str2bool(skip_remote_only_dirs)) pr("==== Same files ===") for c in same: pr("{} - {}".format(c[0], c[1])) pr("==== Different files ===") for d in diff: pr("{} - {}".format(d[0], d[1])) pr("==== Local only ====") for l in local: pr("{} - {}".format(l[0], l[1])) pr("==== Remote only ====") for r in remote: pr("{} - {}".format(r[0], r[1])) pr("\nStatistics:") pr("--------------------------------") pr("Same: {}".format(len(same))) pr("Different: {}".format(len(diff))) pr("Local only: {}".format(len(local))) pr("Remote only: {}".format(len(remote))) self.result['same'] = same self.result['diff'] = diff self.result['local'] = local self.result['remote'] = remote return const.ENoError
[ "def", "compare", "(", "self", ",", "remotedir", "=", "None", ",", "localdir", "=", "None", ",", "skip_remote_only_dirs", "=", "False", ")", ":", "same", ",", "diff", ",", "local", ",", "remote", "=", "self", ".", "__compare", "(", "get_pcs_path", "(", ...
Usage: compare [remotedir] [localdir] - \ compare the remote directory with the local directory remotedir - the remote directory at Baidu Yun (after app's directory). \ if not specified, it defaults to the root directory. localdir - the local directory, if not specified, it defaults to the current directory. skip_remote_only_dirs - skip remote-only sub-directories (faster if the remote \ directory is much larger than the local one). it defaults to False.
[ "Usage", ":", "compare", "[", "remotedir", "]", "[", "localdir", "]", "-", "\\", "compare", "the", "remote", "directory", "with", "the", "local", "directory", "remotedir", "-", "the", "remote", "directory", "at", "Baidu", "Yun", "(", "after", "app", "s", ...
python
train
wavefrontHQ/python-client
wavefront_api_client/api/settings_api.py
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/settings_api.py#L36-L55
def get_all_permissions(self, **kwargs): # noqa: E501 """Get all permissions # noqa: E501 Returns all permissions' info data # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_all_permissions(async_req=True) >>> result = thread.get() :param async_req bool :return: list[BusinessActionGroupBasicDTO] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_all_permissions_with_http_info(**kwargs) # noqa: E501 else: (data) = self.get_all_permissions_with_http_info(**kwargs) # noqa: E501 return data
[ "def", "get_all_permissions", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "get_all_permissions_wit...
Get all permissions # noqa: E501 Returns all permissions' info data # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_all_permissions(async_req=True) >>> result = thread.get() :param async_req bool :return: list[BusinessActionGroupBasicDTO] If the method is called asynchronously, returns the request thread.
[ "Get", "all", "permissions", "#", "noqa", ":", "E501" ]
python
train
tariqdaouda/pyGeno
pyGeno/tools/UsefulFunctions.py
https://github.com/tariqdaouda/pyGeno/blob/474b1250bf78ce5c7e7c3bbbfdbad9635d5a7d14/pyGeno/tools/UsefulFunctions.py#L210-L250
def translateDNA(sequence, frame = 'f1', translTable_id='default') : """Translates DNA code, frame : fwd1, fwd2, fwd3, rev1, rev2, rev3""" protein = "" if frame == 'f1' : dna = sequence elif frame == 'f2': dna = sequence[1:] elif frame == 'f3' : dna = sequence[2:] elif frame == 'r1' : dna = reverseComplement(sequence) elif frame == 'r2' : dna = reverseComplement(sequence) dna = dna[1:] elif frame == 'r3' : dna = reverseComplement(sequence) dna = dna[2:] else : raise ValueError('unknown reading frame: %s, should be one of the following: fwd1, fwd2, fwd3, rev1, rev2, rev3' % frame) for i in range(0, len(dna), 3) : codon = dna[i:i+3] # Check if variant messed with selenocysteine codon if '!' in codon and codon != '!GA': codon = codon.replace('!', 'T') if (len(codon) == 3) : try : # MC protein += translTable[translTable_id][codon] except KeyError : combinaisons = polymorphicCodonCombinaisons(list(codon)) translations = set() for ci in range(len(combinaisons)): translations.add(translTable[translTable_id][combinaisons[ci]]) protein += '/'.join(translations) return protein
[ "def", "translateDNA", "(", "sequence", ",", "frame", "=", "'f1'", ",", "translTable_id", "=", "'default'", ")", ":", "protein", "=", "\"\"", "if", "frame", "==", "'f1'", ":", "dna", "=", "sequence", "elif", "frame", "==", "'f2'", ":", "dna", "=", "seq...
Translates DNA code, frame : fwd1, fwd2, fwd3, rev1, rev2, rev3
[ "Translates", "DNA", "code", "frame", ":", "fwd1", "fwd2", "fwd3", "rev1", "rev2", "rev3" ]
python
train
mrcagney/gtfstk
gtfstk/validators.py
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/validators.py#L69-L79
def valid_date(x: str) -> bool: """ Retrun ``True`` if ``x`` is a valid YYYYMMDD date; otherwise return ``False``. """ try: if x != dt.datetime.strptime(x, DATE_FORMAT).strftime(DATE_FORMAT): raise ValueError return True except ValueError: return False
[ "def", "valid_date", "(", "x", ":", "str", ")", "->", "bool", ":", "try", ":", "if", "x", "!=", "dt", ".", "datetime", ".", "strptime", "(", "x", ",", "DATE_FORMAT", ")", ".", "strftime", "(", "DATE_FORMAT", ")", ":", "raise", "ValueError", "return",...
Retrun ``True`` if ``x`` is a valid YYYYMMDD date; otherwise return ``False``.
[ "Retrun", "True", "if", "x", "is", "a", "valid", "YYYYMMDD", "date", ";", "otherwise", "return", "False", "." ]
python
train
dsandersAzure/python_cowbull_game
python_cowbull_game/GameController.py
https://github.com/dsandersAzure/python_cowbull_game/blob/82a0d8ee127869123d4fad51a8cd1707879e368f/python_cowbull_game/GameController.py#L237-L248
def _start_again_message(self, message=None): """Simple method to form a start again message and give the answer in readable form.""" logging.debug("Start again message delivered: {}".format(message)) the_answer = ', '.join( [str(d) for d in self.game.answer][:-1] ) + ', and ' + [str(d) for d in self.game.answer][-1] return "{0}{1} The correct answer was {2}. Please start a new game.".format( message, "." if message[-1] not in [".", ",", ";", ":", "!"] else "", the_answer )
[ "def", "_start_again_message", "(", "self", ",", "message", "=", "None", ")", ":", "logging", ".", "debug", "(", "\"Start again message delivered: {}\"", ".", "format", "(", "message", ")", ")", "the_answer", "=", "', '", ".", "join", "(", "[", "str", "(", ...
Simple method to form a start again message and give the answer in readable form.
[ "Simple", "method", "to", "form", "a", "start", "again", "message", "and", "give", "the", "answer", "in", "readable", "form", "." ]
python
valid
CalebBell/thermo
thermo/eos.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/eos.py#L691-L737
def dPsat_dT(self, T): r'''Generic method to calculate the temperature derivative of vapor pressure for a specified `T`. Implements the analytical derivative of the two polynomials described in `Psat`. As with `Psat`, results above the critical temperature are meaningless. The first-order polynomial which is used to calculate it under 0.32 Tc may not be physicall meaningful, due to there normally not being a volume solution to the EOS which can produce that low of a pressure. Parameters ---------- T : float Temperature, [K] Returns ------- dPsat_dT : float Derivative of vapor pressure with respect to temperature, [Pa/K] Notes ----- There is a small step change at 0.32 Tc for all EOS due to the two switch between polynomials at that point. Useful for calculating enthalpy of vaporization with the Clausius Clapeyron Equation. Derived with SymPy's diff and cse. ''' a_alphas = self.a_alpha_and_derivatives(T) alpha, d_alpha_dT = a_alphas[0]/self.a, a_alphas[1]/self.a Tr = T/self.Tc if Tr >= 0.32: c = self.Psat_coeffs x0 = alpha/T x1 = -self.Tc*x0 + 1 x2 = c[0]*x1 x3 = c[2] - x1*(c[1] - x2) x4 = c[3] - x1*x3 x5 = c[4] - x1*x4 x6 = c[5] - x1*x5 x7 = c[6] - x1*x6 x8 = c[7] - x1*x7 x9 = c[8] - x1*x8 return self.Pc*(-(d_alpha_dT - x0)*(-c[9] + x1*x9 + x1*(-x1*(-x1*(-x1*(-x1*(-x1*(-x1*(-x1*(c[1] - 2*x2) + x3) + x4) + x5) + x6) + x7) + x8) + x9)) + 1./self.Tc)*exp(c[10] - x1*(c[9] - x1*(c[8] - x1*(c[7] - x1*(c[6] - x1*(c[5] - x1*(c[4] - x1*(c[3] - x1*(c[2] + x1*(-c[1] + x2)))))))))) else: c = self.Psat_coeffs_limiting return self.Pc*T*c[0]*(self.Tc*d_alpha_dT/T - self.Tc*alpha/(T*T))*exp(c[0]*(-1. + self.Tc*alpha/T) + c[1])/self.Tc + self.Pc*exp(c[0]*(-1. + self.Tc*alpha/T) + c[1])/self.Tc
[ "def", "dPsat_dT", "(", "self", ",", "T", ")", ":", "a_alphas", "=", "self", ".", "a_alpha_and_derivatives", "(", "T", ")", "alpha", ",", "d_alpha_dT", "=", "a_alphas", "[", "0", "]", "/", "self", ".", "a", ",", "a_alphas", "[", "1", "]", "/", "sel...
r'''Generic method to calculate the temperature derivative of vapor pressure for a specified `T`. Implements the analytical derivative of the two polynomials described in `Psat`. As with `Psat`, results above the critical temperature are meaningless. The first-order polynomial which is used to calculate it under 0.32 Tc may not be physicall meaningful, due to there normally not being a volume solution to the EOS which can produce that low of a pressure. Parameters ---------- T : float Temperature, [K] Returns ------- dPsat_dT : float Derivative of vapor pressure with respect to temperature, [Pa/K] Notes ----- There is a small step change at 0.32 Tc for all EOS due to the two switch between polynomials at that point. Useful for calculating enthalpy of vaporization with the Clausius Clapeyron Equation. Derived with SymPy's diff and cse.
[ "r", "Generic", "method", "to", "calculate", "the", "temperature", "derivative", "of", "vapor", "pressure", "for", "a", "specified", "T", ".", "Implements", "the", "analytical", "derivative", "of", "the", "two", "polynomials", "described", "in", "Psat", ".", "...
python
valid
noxdafox/clipspy
clips/classes.py
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/classes.py#L272-L281
def module(self): """The module in which the Class is defined. Python equivalent of the CLIPS defglobal-module command. """ modname = ffi.string(lib.EnvDefclassModule(self._env, self._cls)) defmodule = lib.EnvFindDefmodule(self._env, modname) return Module(self._env, defmodule)
[ "def", "module", "(", "self", ")", ":", "modname", "=", "ffi", ".", "string", "(", "lib", ".", "EnvDefclassModule", "(", "self", ".", "_env", ",", "self", ".", "_cls", ")", ")", "defmodule", "=", "lib", ".", "EnvFindDefmodule", "(", "self", ".", "_en...
The module in which the Class is defined. Python equivalent of the CLIPS defglobal-module command.
[ "The", "module", "in", "which", "the", "Class", "is", "defined", "." ]
python
train
softlayer/softlayer-python
SoftLayer/CLI/dns/zone_print.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/dns/zone_print.py#L14-L19
def cli(env, zone): """Print zone in BIND format.""" manager = SoftLayer.DNSManager(env.client) zone_id = helpers.resolve_id(manager.resolve_ids, zone, name='zone') env.fout(manager.dump_zone(zone_id))
[ "def", "cli", "(", "env", ",", "zone", ")", ":", "manager", "=", "SoftLayer", ".", "DNSManager", "(", "env", ".", "client", ")", "zone_id", "=", "helpers", ".", "resolve_id", "(", "manager", ".", "resolve_ids", ",", "zone", ",", "name", "=", "'zone'", ...
Print zone in BIND format.
[ "Print", "zone", "in", "BIND", "format", "." ]
python
train
KelSolaar/Umbra
umbra/components/factory/script_editor/script_editor.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/script_editor.py#L3525-L3542
def close_all_files(self, leave_first_editor=True): """ Closes every opened files and removes their associated **Script_Editor_tabWidget** Widget tabs. :return: Method success. :rtype: bool """ # self.__engine.start_processing("Closing All Files ...", len(self.list_editors())) success = True for file in self.list_files(): success *= True if self.close_file(file, leave_first_editor) else False if not success: break # self.__engine.step_processing() # self.__engine.stop_processing() return success
[ "def", "close_all_files", "(", "self", ",", "leave_first_editor", "=", "True", ")", ":", "# self.__engine.start_processing(\"Closing All Files ...\", len(self.list_editors()))", "success", "=", "True", "for", "file", "in", "self", ".", "list_files", "(", ")", ":", "succ...
Closes every opened files and removes their associated **Script_Editor_tabWidget** Widget tabs. :return: Method success. :rtype: bool
[ "Closes", "every", "opened", "files", "and", "removes", "their", "associated", "**", "Script_Editor_tabWidget", "**", "Widget", "tabs", "." ]
python
train
tanghaibao/jcvi
jcvi/formats/chain.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/chain.py#L195-L226
def blat(args): """ %prog blat old.fasta new.fasta Generate psl file using blat. """ p = OptionParser(blat.__doc__) p.add_option("--minscore", default=100, type="int", help="Matches minus mismatches gap penalty [default: %default]") p.add_option("--minid", default=98, type="int", help="Minimum sequence identity [default: %default]") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) oldfasta, newfasta = args twobitfiles = [] for fastafile in args: tbfile = faToTwoBit(fastafile) twobitfiles.append(tbfile) oldtwobit, newtwobit = twobitfiles cmd = "pblat -threads={0}".format(opts.cpus) if which("pblat") else "blat" cmd += " {0} {1}".format(oldtwobit, newfasta) cmd += " -tileSize=12 -minScore={0} -minIdentity={1} ".\ format(opts.minscore, opts.minid) pslfile = "{0}.{1}.psl".format(*(op.basename(x).split('.')[0] \ for x in (newfasta, oldfasta))) cmd += pslfile sh(cmd)
[ "def", "blat", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "blat", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--minscore\"", ",", "default", "=", "100", ",", "type", "=", "\"int\"", ",", "help", "=", "\"Matches minus mismatches gap pena...
%prog blat old.fasta new.fasta Generate psl file using blat.
[ "%prog", "blat", "old", ".", "fasta", "new", ".", "fasta" ]
python
train
bjodah/pyneqsys
pyneqsys/symbolic.py
https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/symbolic.py#L232-L281
def from_callback(cls, cb, transf_cbs, nx, nparams=0, pre_adj=None, **kwargs): """ Generate a TransformedSys instance from a callback Parameters ---------- cb : callable Should have the signature ``cb(x, p, backend) -> list of exprs``. The callback ``cb`` should return *untransformed* expressions. transf_cbs : pair or iterable of pairs of callables Callables for forward- and backward-transformations. Each callable should take a single parameter (expression) and return a single expression. nx : int Number of unkowns. nparams : int Number of parameters. pre_adj : callable, optional To tweak expression prior to transformation. Takes a sinlge argument (expression) and return a single argument rewritten expression. \\*\\*kwargs : Keyword arguments passed on to :class:`TransformedSys`. See also :class:`SymbolicSys` and :class:`pyneqsys.NeqSys`. Examples -------- >>> import sympy as sp >>> transformed = TransformedSys.from_callback(lambda x, p, be: [ ... x[0]*x[1] - p[0], ... be.exp(-x[0]) + be.exp(-x[1]) - p[0]**-2 ... ], (sp.log, sp.exp), 2, 1) ... """ be = Backend(kwargs.pop('backend', None)) x = be.real_symarray('x', nx) p = be.real_symarray('p', nparams) try: transf = [(transf_cbs[idx][0](xi), transf_cbs[idx][1](xi)) for idx, xi in enumerate(x)] except TypeError: transf = zip(_map2(transf_cbs[0], x), _map2(transf_cbs[1], x)) try: exprs = cb(x, p, be) except TypeError: exprs = _ensure_3args(cb)(x, p, be) return cls(x, _map2l(pre_adj, exprs), transf, p, backend=be, **kwargs)
[ "def", "from_callback", "(", "cls", ",", "cb", ",", "transf_cbs", ",", "nx", ",", "nparams", "=", "0", ",", "pre_adj", "=", "None", ",", "*", "*", "kwargs", ")", ":", "be", "=", "Backend", "(", "kwargs", ".", "pop", "(", "'backend'", ",", "None", ...
Generate a TransformedSys instance from a callback Parameters ---------- cb : callable Should have the signature ``cb(x, p, backend) -> list of exprs``. The callback ``cb`` should return *untransformed* expressions. transf_cbs : pair or iterable of pairs of callables Callables for forward- and backward-transformations. Each callable should take a single parameter (expression) and return a single expression. nx : int Number of unkowns. nparams : int Number of parameters. pre_adj : callable, optional To tweak expression prior to transformation. Takes a sinlge argument (expression) and return a single argument rewritten expression. \\*\\*kwargs : Keyword arguments passed on to :class:`TransformedSys`. See also :class:`SymbolicSys` and :class:`pyneqsys.NeqSys`. Examples -------- >>> import sympy as sp >>> transformed = TransformedSys.from_callback(lambda x, p, be: [ ... x[0]*x[1] - p[0], ... be.exp(-x[0]) + be.exp(-x[1]) - p[0]**-2 ... ], (sp.log, sp.exp), 2, 1) ...
[ "Generate", "a", "TransformedSys", "instance", "from", "a", "callback" ]
python
train
pgmpy/pgmpy
pgmpy/models/DynamicBayesianNetwork.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/models/DynamicBayesianNetwork.py#L374-L410
def get_cpds(self, node=None, time_slice=0): """ Returns the CPDs that have been associated with the network. Parameters ---------- node: tuple (node_name, time_slice) The node should be in the following form (node_name, time_slice). Here, node_name is the node that is inserted while the time_slice is an integer value, which denotes the index of the time_slice that the node belongs to. time_slice: int The time_slice should be a positive integer greater than or equal to zero. Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> from pgmpy.factors.discrete import TabularCPD >>> dbn = DBN() >>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('D',0),('D',1)),(('I',0),('I',1))]) >>> grade_cpd = TabularCPD(('G',0), 3, [[0.3,0.05,0.9,0.5], ... [0.4,0.25,0.8,0.03], ... [0.3,0.7,0.02,0.2]], [('I', 0),('D', 0)],[2,2]) >>> dbn.add_cpds(grade_cpd) >>> dbn.get_cpds() """ # TODO: fix bugs in this if node: if node not in super(DynamicBayesianNetwork, self).nodes(): raise ValueError('Node not present in the model.') else: for cpd in self.cpds: if cpd.variable == node: return cpd else: return [cpd for cpd in self.cpds if set(list(cpd.variables)).issubset(self.get_slice_nodes(time_slice))]
[ "def", "get_cpds", "(", "self", ",", "node", "=", "None", ",", "time_slice", "=", "0", ")", ":", "# TODO: fix bugs in this", "if", "node", ":", "if", "node", "not", "in", "super", "(", "DynamicBayesianNetwork", ",", "self", ")", ".", "nodes", "(", ")", ...
Returns the CPDs that have been associated with the network. Parameters ---------- node: tuple (node_name, time_slice) The node should be in the following form (node_name, time_slice). Here, node_name is the node that is inserted while the time_slice is an integer value, which denotes the index of the time_slice that the node belongs to. time_slice: int The time_slice should be a positive integer greater than or equal to zero. Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> from pgmpy.factors.discrete import TabularCPD >>> dbn = DBN() >>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('D',0),('D',1)),(('I',0),('I',1))]) >>> grade_cpd = TabularCPD(('G',0), 3, [[0.3,0.05,0.9,0.5], ... [0.4,0.25,0.8,0.03], ... [0.3,0.7,0.02,0.2]], [('I', 0),('D', 0)],[2,2]) >>> dbn.add_cpds(grade_cpd) >>> dbn.get_cpds()
[ "Returns", "the", "CPDs", "that", "have", "been", "associated", "with", "the", "network", "." ]
python
train
blockstack/blockstack-core
blockstack/lib/nameset/namedb.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/namedb.py#L1731-L1776
def commit_state_preorder( self, nameop, current_block_number ): """ Commit a state preorder (works for namespace_preorder and name_preorder), DO NOT CALL THIS DIRECTLY """ # have to have read-write disposition if self.disposition != DISPOSITION_RW: log.error("FATAL: borrowing violation: not a read-write connection") traceback.print_stack() os.abort() opcode = None try: opcode = nameop.get('opcode') assert opcode is not None, 'BUG: no preorder opcode' except Exception as e: log.exception(e) log.error("FATAL: no opcode in preorder") os.abort() # did we pay any tokens for this state? account_payment_info = state_preorder_get_account_payment_info(nameop) cur = self.db.cursor() # cannot have collided if BlockstackDB.nameop_is_collided( nameop ): log.debug("Not commiting '%s', since it collided" % nameop) self.log_reject(current_block_number, nameop['vtxindex'], nameop['op'], nameop) return [] self.log_accept( current_block_number, nameop['vtxindex'], nameop['op'], nameop ) commit_preorder = self.sanitize_op( nameop ) rc = namedb_preorder_insert( cur, commit_preorder ) if not rc: log.error("FATAL: failed to commit preorder '%s'" % commit_preorder['preorder_hash'] ) os.abort() # debit tokens, if present self.commit_account_debit(opcode, account_payment_info, current_block_number, nameop['vtxindex'], nameop['txid']) self.db.commit() return commit_preorder
[ "def", "commit_state_preorder", "(", "self", ",", "nameop", ",", "current_block_number", ")", ":", "# have to have read-write disposition ", "if", "self", ".", "disposition", "!=", "DISPOSITION_RW", ":", "log", ".", "error", "(", "\"FATAL: borrowing violation: not a read-...
Commit a state preorder (works for namespace_preorder and name_preorder), DO NOT CALL THIS DIRECTLY
[ "Commit", "a", "state", "preorder", "(", "works", "for", "namespace_preorder", "and", "name_preorder", ")" ]
python
train
tomokinakamaru/mapletree
mapletree/defaults/request/validators.py
https://github.com/tomokinakamaru/mapletree/blob/19ec68769ef2c1cd2e4164ed8623e0c4280279bb/mapletree/defaults/request/validators.py#L56-L70
def float_range(string, minimum, maximum, inf, sup): """ Requires values to be a number and range in a certain range. :param string: Value to validate :param minimum: Minimum value to accept :param maximum: Maximum value to accept :param inf: Infimum value to accept :param sup: Supremum value to accept :type string: str :type minimum: float :type maximum: float :type inf: float :type sup: float """ return _inrange(float(string), minimum, maximum, inf, sup)
[ "def", "float_range", "(", "string", ",", "minimum", ",", "maximum", ",", "inf", ",", "sup", ")", ":", "return", "_inrange", "(", "float", "(", "string", ")", ",", "minimum", ",", "maximum", ",", "inf", ",", "sup", ")" ]
Requires values to be a number and range in a certain range. :param string: Value to validate :param minimum: Minimum value to accept :param maximum: Maximum value to accept :param inf: Infimum value to accept :param sup: Supremum value to accept :type string: str :type minimum: float :type maximum: float :type inf: float :type sup: float
[ "Requires", "values", "to", "be", "a", "number", "and", "range", "in", "a", "certain", "range", "." ]
python
train
dask/dask-ml
dask_ml/preprocessing/data.py
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L837-L861
def transform(self, X, y=None): """Ordinal encode the categorical columns in X Parameters ---------- X : pd.DataFrame or dd.DataFrame y : ignored Returns ------- transformed : pd.DataFrame or dd.DataFrame Same type as the input """ if not X.columns.equals(self.columns_): raise ValueError( "Columns of 'X' do not match the training " "columns. Got {!r}, expected {!r}".format(X.columns, self.columns) ) if not isinstance(X, (pd.DataFrame, dd.DataFrame)): raise TypeError("Unexpected type {}".format(type(X))) X = X.copy() for col in self.categorical_columns_: X[col] = X[col].cat.codes return X
[ "def", "transform", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "if", "not", "X", ".", "columns", ".", "equals", "(", "self", ".", "columns_", ")", ":", "raise", "ValueError", "(", "\"Columns of 'X' do not match the training \"", "\"columns. Got ...
Ordinal encode the categorical columns in X Parameters ---------- X : pd.DataFrame or dd.DataFrame y : ignored Returns ------- transformed : pd.DataFrame or dd.DataFrame Same type as the input
[ "Ordinal", "encode", "the", "categorical", "columns", "in", "X" ]
python
train
sorgerlab/indra
indra/tools/assemble_corpus.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L786-L890
def filter_gene_list(stmts_in, gene_list, policy, allow_families=False, **kwargs): """Return statements that contain genes given in a list. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. gene_list : list[str] A list of gene symbols to filter for. policy : str The policy to apply when filtering for the list of genes. "one": keep statements that contain at least one of the list of genes and possibly others not in the list "all": keep statements that only contain genes given in the list allow_families : Optional[bool] Will include statements involving FamPlex families containing one of the genes in the gene list. Default: False save : Optional[str] The name of a pickle file to save the results (stmts_out) into. remove_bound: Optional[str] If true, removes bound conditions that are not genes in the list If false (default), looks at agents in the bound conditions in addition to those participating in the statement directly when applying the specified policy. invert : Optional[bool] If True, the statements that do not match according to the policy are returned. Default: False Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements. """ invert = kwargs.get('invert', False) remove_bound = kwargs.get('remove_bound', False) if policy not in ('one', 'all'): logger.error('Policy %s is invalid, not applying filter.' 
% policy) else: genes_str = ', '.join(gene_list) inv_str = 'not ' if invert else '' logger.info(('Filtering %d statements for ones %scontaining "%s" of: ' '%s...') % (len(stmts_in), inv_str, policy, genes_str)) # If we're allowing families, make a list of all FamPlex IDs that # contain members of the gene list, and add them to the filter list filter_list = copy(gene_list) if allow_families: for hgnc_name in gene_list: gene_uri = hierarchies['entity'].get_uri('HGNC', hgnc_name) parents = hierarchies['entity'].get_parents(gene_uri) for par_uri in parents: ns, id = hierarchies['entity'].ns_id_from_uri(par_uri) filter_list.append(id) stmts_out = [] if remove_bound: # If requested, remove agents whose names are not in the list from # all bound conditions if not invert: keep_criterion = lambda a: a.name in filter_list else: keep_criterion = lambda a: a.name not in filter_list for st in stmts_in: for agent in st.agent_list(): _remove_bound_conditions(agent, keep_criterion) if policy == 'one': for st in stmts_in: found_gene = False if not remove_bound: agent_list = st.agent_list_with_bound_condition_agents() else: agent_list = st.agent_list() for agent in agent_list: if agent is not None: if agent.name in filter_list: found_gene = True break if (found_gene and not invert) or (not found_gene and invert): stmts_out.append(st) elif policy == 'all': for st in stmts_in: found_genes = True if not remove_bound: agent_list = st.agent_list_with_bound_condition_agents() else: agent_list = st.agent_list() for agent in agent_list: if agent is not None: if agent.name not in filter_list: found_genes = False break if (found_genes and not invert) or (not found_genes and invert): stmts_out.append(st) else: stmts_out = stmts_in logger.info('%d statements after filter...' % len(stmts_out)) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
[ "def", "filter_gene_list", "(", "stmts_in", ",", "gene_list", ",", "policy", ",", "allow_families", "=", "False", ",", "*", "*", "kwargs", ")", ":", "invert", "=", "kwargs", ".", "get", "(", "'invert'", ",", "False", ")", "remove_bound", "=", "kwargs", "...
Return statements that contain genes given in a list. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. gene_list : list[str] A list of gene symbols to filter for. policy : str The policy to apply when filtering for the list of genes. "one": keep statements that contain at least one of the list of genes and possibly others not in the list "all": keep statements that only contain genes given in the list allow_families : Optional[bool] Will include statements involving FamPlex families containing one of the genes in the gene list. Default: False save : Optional[str] The name of a pickle file to save the results (stmts_out) into. remove_bound: Optional[str] If true, removes bound conditions that are not genes in the list If false (default), looks at agents in the bound conditions in addition to those participating in the statement directly when applying the specified policy. invert : Optional[bool] If True, the statements that do not match according to the policy are returned. Default: False Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements.
[ "Return", "statements", "that", "contain", "genes", "given", "in", "a", "list", "." ]
python
train
yandex/yandex-tank
yandextank/api/apiworker.py
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/api/apiworker.py#L22-L58
def init_logging(self, log_filename="tank.log"): """ Set up logging """ logger = logging.getLogger('') self.log_filename = log_filename self.core.add_artifact_file(self.log_filename) file_handler = logging.FileHandler(self.log_filename) file_handler.setLevel(logging.DEBUG) file_handler.setFormatter( logging.Formatter( "%(asctime)s [%(levelname)s] %(name)s %(message)s")) logger.addHandler(file_handler) console_handler = logging.StreamHandler(sys.stdout) stderr_hdl = logging.StreamHandler(sys.stderr) # fmt_verbose = logging.Formatter( # "%(asctime)s [%(levelname)s] %(name)s %(message)s") fmt_regular = logging.Formatter( "%(asctime)s %(levelname)s: %(message)s", "%H:%M:%S") console_handler.setLevel(logging.INFO) console_handler.setFormatter(fmt_regular) stderr_hdl.setFormatter(fmt_regular) f_err = SingleLevelFilter(logging.ERROR, True) f_warn = SingleLevelFilter(logging.WARNING, True) f_crit = SingleLevelFilter(logging.CRITICAL, True) console_handler.addFilter(f_err) console_handler.addFilter(f_warn) console_handler.addFilter(f_crit) logger.addHandler(console_handler) f_info = SingleLevelFilter(logging.INFO, True) f_debug = SingleLevelFilter(logging.DEBUG, True) stderr_hdl.addFilter(f_info) stderr_hdl.addFilter(f_debug) logger.addHandler(stderr_hdl)
[ "def", "init_logging", "(", "self", ",", "log_filename", "=", "\"tank.log\"", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "''", ")", "self", ".", "log_filename", "=", "log_filename", "self", ".", "core", ".", "add_artifact_file", "(", "self", ...
Set up logging
[ "Set", "up", "logging" ]
python
test
ladybug-tools/ladybug
ladybug/futil.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/futil.py#L146-L171
def bat_to_sh(file_path): """Convert honeybee .bat file to .sh file. WARNING: This is a very simple function and doesn't handle any edge cases. """ sh_file = file_path[:-4] + '.sh' with open(file_path, 'rb') as inf, open(sh_file, 'wb') as outf: outf.write('#!/usr/bin/env bash\n\n') for line in inf: # pass the path lines, etc to get to the commands if line.strip(): continue else: break for line in inf: if line.startswith('echo'): continue modified_line = line.replace('c:\\radiance\\bin\\', '').replace('\\', '/') outf.write(modified_line) print('bash file is created at:\n\t%s' % sh_file) # Heroku - Make command.sh executable st = os.stat(sh_file) os.chmod(sh_file, st.st_mode | 0o111) return sh_file
[ "def", "bat_to_sh", "(", "file_path", ")", ":", "sh_file", "=", "file_path", "[", ":", "-", "4", "]", "+", "'.sh'", "with", "open", "(", "file_path", ",", "'rb'", ")", "as", "inf", ",", "open", "(", "sh_file", ",", "'wb'", ")", "as", "outf", ":", ...
Convert honeybee .bat file to .sh file. WARNING: This is a very simple function and doesn't handle any edge cases.
[ "Convert", "honeybee", ".", "bat", "file", "to", ".", "sh", "file", "." ]
python
train
jazzband/django-pipeline
pipeline/compilers/__init__.py
https://github.com/jazzband/django-pipeline/blob/3cd2f93bb47bf8d34447e13ff691f7027e7b07a2/pipeline/compilers/__init__.py#L94-L153
def execute_command(self, command, cwd=None, stdout_captured=None): """Execute a command at cwd, saving its normal output at stdout_captured. Errors, defined as nonzero return code or a failure to start execution, will raise a CompilerError exception with a description of the cause. They do not write output. This is file-system safe (any valid file names are allowed, even with spaces or crazy characters) and OS agnostic (existing and future OSes that Python supports should already work). The only thing weird here is that any incoming command arg item may itself be a tuple. This allows compiler implementations to look clean while supporting historical string config settings and maintaining backwards compatibility. Thus, we flatten one layer deep. ((env, foocomp), infile, (-arg,)) -> (env, foocomp, infile, -arg) """ argument_list = [] for flattening_arg in command: if isinstance(flattening_arg, string_types): argument_list.append(flattening_arg) else: argument_list.extend(flattening_arg) # The first element in argument_list is the program that will be executed; if it is '', then # a PermissionError will be raised. Thus empty arguments are filtered out from argument_list argument_list = list(filter(None, argument_list)) stdout = None try: # We always catch stdout in a file, but we may not have a use for it. temp_file_container = cwd or os.path.dirname(stdout_captured or "") or os.getcwd() with NamedTemporaryFile(delete=False, dir=temp_file_container) as stdout: compiling = subprocess.Popen(argument_list, cwd=cwd, stdout=stdout, stderr=subprocess.PIPE) _, stderr = compiling.communicate() set_std_streams_blocking() if compiling.returncode != 0: stdout_captured = None # Don't save erroneous result. raise CompilerError( "{0!r} exit code {1}\n{2}".format(argument_list, compiling.returncode, stderr), command=argument_list, error_output=stderr) # User wants to see everything that happened. 
if self.verbose: with open(stdout.name) as out: print(out.read()) print(stderr) except OSError as e: stdout_captured = None # Don't save erroneous result. raise CompilerError(e, command=argument_list, error_output=text_type(e)) finally: # Decide what to do with captured stdout. if stdout: if stdout_captured: shutil.move(stdout.name, os.path.join(cwd or os.curdir, stdout_captured)) else: os.remove(stdout.name)
[ "def", "execute_command", "(", "self", ",", "command", ",", "cwd", "=", "None", ",", "stdout_captured", "=", "None", ")", ":", "argument_list", "=", "[", "]", "for", "flattening_arg", "in", "command", ":", "if", "isinstance", "(", "flattening_arg", ",", "s...
Execute a command at cwd, saving its normal output at stdout_captured. Errors, defined as nonzero return code or a failure to start execution, will raise a CompilerError exception with a description of the cause. They do not write output. This is file-system safe (any valid file names are allowed, even with spaces or crazy characters) and OS agnostic (existing and future OSes that Python supports should already work). The only thing weird here is that any incoming command arg item may itself be a tuple. This allows compiler implementations to look clean while supporting historical string config settings and maintaining backwards compatibility. Thus, we flatten one layer deep. ((env, foocomp), infile, (-arg,)) -> (env, foocomp, infile, -arg)
[ "Execute", "a", "command", "at", "cwd", "saving", "its", "normal", "output", "at", "stdout_captured", ".", "Errors", "defined", "as", "nonzero", "return", "code", "or", "a", "failure", "to", "start", "execution", "will", "raise", "a", "CompilerError", "excepti...
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/textio.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/textio.py#L473-L490
def integer(cls, integer, bits = None): """ @type integer: int @param integer: Integer. @type bits: int @param bits: (Optional) Number of bits of the target architecture. The default is platform dependent. See: L{HexDump.integer_size} @rtype: str @return: Text output. """ if bits is None: integer_size = cls.integer_size else: integer_size = bits / 4 return ('%%.%dX' % integer_size) % integer
[ "def", "integer", "(", "cls", ",", "integer", ",", "bits", "=", "None", ")", ":", "if", "bits", "is", "None", ":", "integer_size", "=", "cls", ".", "integer_size", "else", ":", "integer_size", "=", "bits", "/", "4", "return", "(", "'%%.%dX'", "%", "i...
@type integer: int @param integer: Integer. @type bits: int @param bits: (Optional) Number of bits of the target architecture. The default is platform dependent. See: L{HexDump.integer_size} @rtype: str @return: Text output.
[ "@type", "integer", ":", "int", "@param", "integer", ":", "Integer", "." ]
python
train
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/utils/winmanifest.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/utils/winmanifest.py#L312-L512
def find_files(self, ignore_policies=True): """ Search shared and private assemblies and return a list of files. If any files are not found, return an empty list. IMPORTANT NOTE: For the purpose of getting the dependent assembly files of an executable, the publisher configuration (aka policy) should be ignored (which is the default). Setting ignore_policies=False is only useful to find out which files are actually loaded at runtime. """ # Shared Assemblies: # http://msdn.microsoft.com/en-us/library/aa375996%28VS.85%29.aspx # # Private Assemblies: # http://msdn.microsoft.com/en-us/library/aa375674%28VS.85%29.aspx # # Assembly Searching Sequence: # http://msdn.microsoft.com/en-us/library/aa374224%28VS.85%29.aspx # # NOTE: # Multilanguage User Interface (MUI) support not yet implemented files = [] languages = [] if self.language not in (None, "", "*", "neutral"): languages.append(self.getlanguage()) if "-" in self.language: # language-culture syntax, e.g. en-us # Add only the language part languages.append(self.language.split("-")[0]) if self.language not in ("en-us", "en"): languages.append("en-us") if self.language != "en": languages.append("en") languages.append(self.getlanguage("*")) winsxs = os.path.join(compat.getenv("SystemRoot"), "WinSxS") if not os.path.isdir(winsxs): logger.warn("No such dir %s", winsxs) manifests = os.path.join(winsxs, "Manifests") if not os.path.isdir(manifests): logger.warn("No such dir %s", manifests) if not ignore_policies and self.version: if sys.getwindowsversion() < (6, ): # Windows XP pcfiles = os.path.join(winsxs, "Policies") if not os.path.isdir(pcfiles): logger.warn("No such dir %s", pcfiles) else: # Vista or later pcfiles = manifests for language in languages: version = self.version # Search for publisher configuration if not ignore_policies and version: # Publisher Configuration (aka policy) # A publisher configuration file globally redirects # applications and assemblies having a dependence on one # version of a side-by-side 
assembly to use another version of # the same assembly. This enables applications and assemblies # to use the updated assembly without having to rebuild all of # the affected applications. # http://msdn.microsoft.com/en-us/library/aa375680%28VS.85%29.aspx # # Under Windows XP and 2003, policies are stored as # <version>.policy files inside # %SystemRoot%\WinSxS\Policies\<name> # Under Vista and later, policies are stored as # <name>.manifest files inside %SystemRoot%\winsxs\Manifests redirected = False if os.path.isdir(pcfiles): logger.info("Searching for publisher configuration %s ...", self.getpolicyid(True, language=language)) if sys.getwindowsversion() < (6, ): # Windows XP policies = os.path.join(pcfiles, self.getpolicyid(True, language=language) + ".policy") else: # Vista or later policies = os.path.join(pcfiles, self.getpolicyid(True, language=language) + ".manifest") for manifestpth in glob(policies): if not os.path.isfile(manifestpth): logger.warn("Not a file %s", manifestpth) continue logger.info("Found %s", manifestpth) try: policy = ManifestFromXMLFile(manifestpth) except Exception, exc: logger.error("Could not parse file %s", manifestpth) logger.exception(exc) else: logger.info("Checking publisher policy for " "binding redirects") for assembly in policy.dependentAssemblies: if (not assembly.same_id(self, True) or assembly.optional): continue for redirect in assembly.bindingRedirects: if logger.isEnabledFor(logging.INFO): old = "-".join([".".join([str(i) for i in part]) for part in redirect[0]]) new = ".".join([str(i) for i in redirect[1]]) logger.info("Found redirect for " "version(s) %s -> %n", old, new) if (version >= redirect[0][0] and version <= redirect[0][-1] and version != redirect[1]): logger.info("Applying redirect " "%s -> %s", ".".join([str(i) for i in version]), new) version = redirect[1] redirected = True if not redirected: logger.info("Publisher configuration not used") # Search for assemblies according to assembly searching sequence 
paths = [] if os.path.isdir(manifests): # Add winsxs search paths paths.extend(glob(os.path.join(manifests, self.getid(language=language, version=version) + "_*.manifest"))) if self.filename: # Add private assembly search paths dirnm = os.path.dirname(self.filename) if language in (LANGUAGE_NEUTRAL_NT5, LANGUAGE_NEUTRAL_NT6): for ext in (".dll", ".manifest"): paths.extend(glob(os.path.join(dirnm, self.name + ext))) paths.extend(glob(os.path.join(dirnm, self.name, self.name + ext))) else: for ext in (".dll", ".manifest"): paths.extend(glob(os.path.join(dirnm, language, self.name + ext))) for ext in (".dll", ".manifest"): paths.extend(glob(os.path.join(dirnm, language, self.name, self.name + ext))) logger.info("Searching for assembly %s ...", self.getid(language=language, version=version)) for manifestpth in paths: if not os.path.isfile(manifestpth): logger.warn("Not a file %s", manifestpth) continue assemblynm = os.path.basename( os.path.splitext(manifestpth)[0]) try: if manifestpth.endswith(".dll"): logger.info("Found manifest in %s", manifestpth) manifest = ManifestFromResFile(manifestpth, [1]) else: logger.info("Found manifest %s", manifestpth) manifest = ManifestFromXMLFile(manifestpth) except Exception, exc: logger.error("Could not parse manifest %s", manifestpth) logger.exception(exc) else: if manifestpth.startswith(winsxs): assemblydir = os.path.join(winsxs, assemblynm) if not os.path.isdir(assemblydir): logger.warn("No such dir %s", assemblydir) logger.warn("Assembly incomplete") return [] else: assemblydir = os.path.dirname(manifestpth) files.append(manifestpth) for file_ in self.files or manifest.files: fn = file_.find(assemblydir) if fn: files.append(fn) else: # If any of our files does not exist, # the assembly is incomplete logger.warn("Assembly incomplete") return [] return files logger.warn("Assembly not found") return []
[ "def", "find_files", "(", "self", ",", "ignore_policies", "=", "True", ")", ":", "# Shared Assemblies:", "# http://msdn.microsoft.com/en-us/library/aa375996%28VS.85%29.aspx", "#", "# Private Assemblies:", "# http://msdn.microsoft.com/en-us/library/aa375674%28VS.85%29.aspx", "#", "# A...
Search shared and private assemblies and return a list of files. If any files are not found, return an empty list. IMPORTANT NOTE: For the purpose of getting the dependent assembly files of an executable, the publisher configuration (aka policy) should be ignored (which is the default). Setting ignore_policies=False is only useful to find out which files are actually loaded at runtime.
[ "Search", "shared", "and", "private", "assemblies", "and", "return", "a", "list", "of", "files", ".", "If", "any", "files", "are", "not", "found", "return", "an", "empty", "list", ".", "IMPORTANT", "NOTE", ":", "For", "the", "purpose", "of", "getting", "...
python
train
steelkiwi/django-skd-tools
skd_tools/utils.py
https://github.com/steelkiwi/django-skd-tools/blob/422dc3e49f12739a500302e4c494379684e9dc50/skd_tools/utils.py#L10-L20
def get_random_filename(instance, filename): """ Generates random filename for uploading file using uuid4 hashes You need to define UPLOADS_ROOT in your django settings something like this UPLOADS_ROOT = rel(MEDIA_ROOT, 'uploads') """ folder = settings.UPLOADS_ROOT ext = filename.split('.')[-1] filename = '{}.{}'.format(str(uuid4()), ext) return os.path.join(folder, filename)
[ "def", "get_random_filename", "(", "instance", ",", "filename", ")", ":", "folder", "=", "settings", ".", "UPLOADS_ROOT", "ext", "=", "filename", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "filename", "=", "'{}.{}'", ".", "format", "(", "str", ...
Generates random filename for uploading file using uuid4 hashes You need to define UPLOADS_ROOT in your django settings something like this UPLOADS_ROOT = rel(MEDIA_ROOT, 'uploads')
[ "Generates", "random", "filename", "for", "uploading", "file", "using", "uuid4", "hashes", "You", "need", "to", "define", "UPLOADS_ROOT", "in", "your", "django", "settings", "something", "like", "this", "UPLOADS_ROOT", "=", "rel", "(", "MEDIA_ROOT", "uploads", "...
python
valid
trailofbits/manticore
manticore/native/cpu/x86.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L1715-L1857
def IMUL(cpu, *operands): """ Signed multiply. Performs a signed multiplication of two operands. This instruction has three forms, depending on the number of operands. - One-operand form. This form is identical to that used by the MUL instruction. Here, the source operand (in a general-purpose register or memory location) is multiplied by the value in the AL, AX, or EAX register (depending on the operand size) and the product is stored in the AX, DX:AX, or EDX:EAX registers, respectively. - Two-operand form. With this form the destination operand (the first operand) is multiplied by the source operand (second operand). The destination operand is a general-purpose register and the source operand is an immediate value, a general-purpose register, or a memory location. The product is then stored in the destination operand location. - Three-operand form. This form requires a destination operand (the first operand) and two source operands (the second and the third operands). Here, the first source operand (which can be a general-purpose register or a memory location) is multiplied by the second source operand (an immediate value). The product is then stored in the destination operand (a general-purpose register). When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format. The CF and OF flags are set when significant bits are carried into the upper half of the result. The CF and OF flags are cleared when the result fits exactly in the lower half of the result. The three forms of the IMUL instruction are similar in that the length of the product is calculated to twice the length of the operands. With the one-operand form, the product is stored exactly in the destination. With the two- and three- operand forms, however, result is truncated to the length of the destination before it is stored in the destination register. 
Because of this truncation, the CF or OF flag should be tested to ensure that no significant bits are lost. The two- and three-operand forms may also be used with unsigned operands because the lower half of the product is the same regardless if the operands are signed or unsigned. The CF and OF flags, however, cannot be used to determine if the upper half of the result is non-zero:: IF (NumberOfOperands == 1) THEN IF (OperandSize == 8) THEN AX = AL * SRC (* Signed multiplication *) IF AL == AX THEN CF = 0; OF = 0; ELSE CF = 1; OF = 1; FI; ELSE IF OperandSize == 16 THEN DX:AX = AX * SRC (* Signed multiplication *) IF sign_extend_to_32 (AX) == DX:AX THEN CF = 0; OF = 0; ELSE CF = 1; OF = 1; FI; ELSE IF OperandSize == 32 THEN EDX:EAX = EAX * SRC (* Signed multiplication *) IF EAX == EDX:EAX THEN CF = 0; OF = 0; ELSE CF = 1; OF = 1; FI; ELSE (* OperandSize = 64 *) RDX:RAX = RAX * SRC (* Signed multiplication *) IF RAX == RDX:RAX THEN CF = 0; OF = 0; ELSE CF = 1; OF = 1; FI; FI; FI; ELSE IF (NumberOfOperands = 2) THEN temp = DEST * SRC (* Signed multiplication; temp is double DEST size *) DEST = DEST * SRC (* Signed multiplication *) IF temp != DEST THEN CF = 1; OF = 1; ELSE CF = 0; OF = 0; FI; ELSE (* NumberOfOperands = 3 *) DEST = SRC1 * SRC2 (* Signed multiplication *) temp = SRC1 * SRC2 (* Signed multiplication; temp is double SRC1 size *) IF temp != DEST THEN CF = 1; OF = 1; ELSE CF = 0; OF = 0; FI; FI; FI; :param cpu: current CPU. :param operands: variable list of operands. 
""" dest = operands[0] OperandSize = dest.size reg_name_h = {8: 'AH', 16: 'DX', 32: 'EDX', 64: 'RDX'}[OperandSize] reg_name_l = {8: 'AL', 16: 'AX', 32: 'EAX', 64: 'RAX'}[OperandSize] arg0 = dest.read() arg1 = None arg2 = None res = None if len(operands) == 1: arg1 = cpu.read_register(reg_name_l) temp = (Operators.SEXTEND(arg0, OperandSize, OperandSize * 2) * Operators.SEXTEND(arg1, OperandSize, OperandSize * 2)) temp = temp & ((1 << (OperandSize * 2)) - 1) cpu.write_register(reg_name_l, Operators.EXTRACT(temp, 0, OperandSize)) cpu.write_register(reg_name_h, Operators.EXTRACT(temp, OperandSize, OperandSize)) res = Operators.EXTRACT(temp, 0, OperandSize) elif len(operands) == 2: arg1 = operands[1].read() arg1 = Operators.SEXTEND(arg1, OperandSize, OperandSize * 2) temp = Operators.SEXTEND(arg0, OperandSize, OperandSize * 2) * arg1 temp = temp & ((1 << (OperandSize * 2)) - 1) res = dest.write(Operators.EXTRACT(temp, 0, OperandSize)) else: arg1 = operands[1].read() arg2 = operands[2].read() temp = (Operators.SEXTEND(arg1, OperandSize, OperandSize * 2) * Operators.SEXTEND(arg2, operands[2].size, OperandSize * 2)) temp = temp & ((1 << (OperandSize * 2)) - 1) res = dest.write(Operators.EXTRACT(temp, 0, OperandSize)) cpu.CF = (Operators.SEXTEND(res, OperandSize, OperandSize * 2) != temp) cpu.OF = cpu.CF
[ "def", "IMUL", "(", "cpu", ",", "*", "operands", ")", ":", "dest", "=", "operands", "[", "0", "]", "OperandSize", "=", "dest", ".", "size", "reg_name_h", "=", "{", "8", ":", "'AH'", ",", "16", ":", "'DX'", ",", "32", ":", "'EDX'", ",", "64", ":...
Signed multiply. Performs a signed multiplication of two operands. This instruction has three forms, depending on the number of operands. - One-operand form. This form is identical to that used by the MUL instruction. Here, the source operand (in a general-purpose register or memory location) is multiplied by the value in the AL, AX, or EAX register (depending on the operand size) and the product is stored in the AX, DX:AX, or EDX:EAX registers, respectively. - Two-operand form. With this form the destination operand (the first operand) is multiplied by the source operand (second operand). The destination operand is a general-purpose register and the source operand is an immediate value, a general-purpose register, or a memory location. The product is then stored in the destination operand location. - Three-operand form. This form requires a destination operand (the first operand) and two source operands (the second and the third operands). Here, the first source operand (which can be a general-purpose register or a memory location) is multiplied by the second source operand (an immediate value). The product is then stored in the destination operand (a general-purpose register). When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format. The CF and OF flags are set when significant bits are carried into the upper half of the result. The CF and OF flags are cleared when the result fits exactly in the lower half of the result. The three forms of the IMUL instruction are similar in that the length of the product is calculated to twice the length of the operands. With the one-operand form, the product is stored exactly in the destination. With the two- and three- operand forms, however, result is truncated to the length of the destination before it is stored in the destination register. Because of this truncation, the CF or OF flag should be tested to ensure that no significant bits are lost. 
The two- and three-operand forms may also be used with unsigned operands because the lower half of the product is the same regardless if the operands are signed or unsigned. The CF and OF flags, however, cannot be used to determine if the upper half of the result is non-zero:: IF (NumberOfOperands == 1) THEN IF (OperandSize == 8) THEN AX = AL * SRC (* Signed multiplication *) IF AL == AX THEN CF = 0; OF = 0; ELSE CF = 1; OF = 1; FI; ELSE IF OperandSize == 16 THEN DX:AX = AX * SRC (* Signed multiplication *) IF sign_extend_to_32 (AX) == DX:AX THEN CF = 0; OF = 0; ELSE CF = 1; OF = 1; FI; ELSE IF OperandSize == 32 THEN EDX:EAX = EAX * SRC (* Signed multiplication *) IF EAX == EDX:EAX THEN CF = 0; OF = 0; ELSE CF = 1; OF = 1; FI; ELSE (* OperandSize = 64 *) RDX:RAX = RAX * SRC (* Signed multiplication *) IF RAX == RDX:RAX THEN CF = 0; OF = 0; ELSE CF = 1; OF = 1; FI; FI; FI; ELSE IF (NumberOfOperands = 2) THEN temp = DEST * SRC (* Signed multiplication; temp is double DEST size *) DEST = DEST * SRC (* Signed multiplication *) IF temp != DEST THEN CF = 1; OF = 1; ELSE CF = 0; OF = 0; FI; ELSE (* NumberOfOperands = 3 *) DEST = SRC1 * SRC2 (* Signed multiplication *) temp = SRC1 * SRC2 (* Signed multiplication; temp is double SRC1 size *) IF temp != DEST THEN CF = 1; OF = 1; ELSE CF = 0; OF = 0; FI; FI; FI; :param cpu: current CPU. :param operands: variable list of operands.
[ "Signed", "multiply", "." ]
python
valid
robhowley/nhlscrapi
nhlscrapi/games/faceoffcomp.py
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/faceoffcomp.py#L47-L72
def head_to_head(self, home_num, away_num): """ Return the head-to-head face-off outcomes between two players. If the matchup didn't happen, ``{ }`` is returned. :param home_num: the number of the home team player :param away_num: the number of the away team player :returns: dict, either ``{ }`` or the following .. code:: python { 'home/away': { 'off/def/neut/all': { 'won': won, 'total': total } } } """ if home_num in self.home_fo and away_num in self.home_fo[home_num]['opps']: h_fo = self.home_fo[home_num]['opps'][away_num] a_fo = self.away_fo[away_num]['opps'][home_num] return { 'home': { k: h_fo[k] for k in self.__zones }, 'away': { k: a_fo[k] for k in self.__zones } } else: return { }
[ "def", "head_to_head", "(", "self", ",", "home_num", ",", "away_num", ")", ":", "if", "home_num", "in", "self", ".", "home_fo", "and", "away_num", "in", "self", ".", "home_fo", "[", "home_num", "]", "[", "'opps'", "]", ":", "h_fo", "=", "self", ".", ...
Return the head-to-head face-off outcomes between two players. If the matchup didn't happen, ``{ }`` is returned. :param home_num: the number of the home team player :param away_num: the number of the away team player :returns: dict, either ``{ }`` or the following .. code:: python { 'home/away': { 'off/def/neut/all': { 'won': won, 'total': total } } }
[ "Return", "the", "head", "-", "to", "-", "head", "face", "-", "off", "outcomes", "between", "two", "players", ".", "If", "the", "matchup", "didn", "t", "happen", "{", "}", "is", "returned", ".", ":", "param", "home_num", ":", "the", "number", "of", "...
python
train
kurtraschke/pyRFC3339
pyrfc3339/utils.py
https://github.com/kurtraschke/pyRFC3339/blob/e30cc1555adce0ecc7bd65509a2249d47e5a41b4/pyrfc3339/utils.py#L87-L119
def timedelta_seconds(td): ''' Return the offset stored by a :class:`datetime.timedelta` object as an integer number of seconds. Microseconds, if present, are rounded to the nearest second. Delegates to :meth:`timedelta.total_seconds() <datetime.timedelta.total_seconds()>` if available. >>> timedelta_seconds(timedelta(hours=1)) 3600 >>> timedelta_seconds(timedelta(hours=-1)) -3600 >>> timedelta_seconds(timedelta(hours=1, minutes=30)) 5400 >>> timedelta_seconds(timedelta(hours=1, minutes=30, ... microseconds=300000)) 5400 >>> timedelta_seconds(timedelta(hours=1, minutes=30, ... microseconds=900000)) 5401 ''' try: return int(round(td.total_seconds())) except AttributeError: days = td.days seconds = td.seconds microseconds = td.microseconds return int(round((days * 86400) + seconds + (microseconds / 1000000)))
[ "def", "timedelta_seconds", "(", "td", ")", ":", "try", ":", "return", "int", "(", "round", "(", "td", ".", "total_seconds", "(", ")", ")", ")", "except", "AttributeError", ":", "days", "=", "td", ".", "days", "seconds", "=", "td", ".", "seconds", "m...
Return the offset stored by a :class:`datetime.timedelta` object as an integer number of seconds. Microseconds, if present, are rounded to the nearest second. Delegates to :meth:`timedelta.total_seconds() <datetime.timedelta.total_seconds()>` if available. >>> timedelta_seconds(timedelta(hours=1)) 3600 >>> timedelta_seconds(timedelta(hours=-1)) -3600 >>> timedelta_seconds(timedelta(hours=1, minutes=30)) 5400 >>> timedelta_seconds(timedelta(hours=1, minutes=30, ... microseconds=300000)) 5400 >>> timedelta_seconds(timedelta(hours=1, minutes=30, ... microseconds=900000)) 5401
[ "Return", "the", "offset", "stored", "by", "a", ":", "class", ":", "datetime", ".", "timedelta", "object", "as", "an", "integer", "number", "of", "seconds", ".", "Microseconds", "if", "present", "are", "rounded", "to", "the", "nearest", "second", "." ]
python
train
RedHatInsights/insights-core
insights/core/dr.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/dr.py#L927-L970
def run(components=None, broker=None): """ Executes components in an order that satisfies their dependency relationships. Keyword Args: components: Can be one of a dependency graph, a single component, a component group, or a component type. If it's anything other than a dependency graph, the appropriate graph is built for you and before evaluation. broker (Broker): Optionally pass a broker to use for evaluation. One is created by default, but it's often useful to seed a broker with an initial dependency. Returns: Broker: The broker after evaluation. """ components = components or COMPONENTS[GROUPS.single] components = _determine_components(components) broker = broker or Broker() for component in run_order(components): start = time.time() try: if component not in broker and component in DELEGATES and is_enabled(component): log.info("Trying %s" % get_name(component)) result = DELEGATES[component].process(broker) broker[component] = result except MissingRequirements as mr: if log.isEnabledFor(logging.DEBUG): name = get_name(component) reqs = stringify_requirements(mr.requirements) log.debug("%s missing requirements %s" % (name, reqs)) broker.add_exception(component, mr) except SkipComponent: pass except Exception as ex: tb = traceback.format_exc() log.warn(tb) broker.add_exception(component, ex, tb) finally: broker.exec_times[component] = time.time() - start broker.fire_observers(component) return broker
[ "def", "run", "(", "components", "=", "None", ",", "broker", "=", "None", ")", ":", "components", "=", "components", "or", "COMPONENTS", "[", "GROUPS", ".", "single", "]", "components", "=", "_determine_components", "(", "components", ")", "broker", "=", "...
Executes components in an order that satisfies their dependency relationships. Keyword Args: components: Can be one of a dependency graph, a single component, a component group, or a component type. If it's anything other than a dependency graph, the appropriate graph is built for you and before evaluation. broker (Broker): Optionally pass a broker to use for evaluation. One is created by default, but it's often useful to seed a broker with an initial dependency. Returns: Broker: The broker after evaluation.
[ "Executes", "components", "in", "an", "order", "that", "satisfies", "their", "dependency", "relationships", "." ]
python
train
yahoo/TensorFlowOnSpark
examples/imagenet/inception/slim/ops.py
https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/examples/imagenet/inception/slim/ops.py#L446-L473
def repeat_op(repetitions, inputs, op, *args, **kwargs): """Build a sequential Tower starting from inputs by using an op repeatedly. It creates new scopes for each operation by increasing the counter. Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1') it will repeat the given op under the following variable_scopes: conv1/Conv conv1/Conv_1 conv1/Conv_2 Args: repetitions: number or repetitions. inputs: a tensor of size [batch_size, height, width, channels]. op: an operation. *args: args for the op. **kwargs: kwargs for the op. Returns: a tensor result of applying the operation op, num times. Raises: ValueError: if the op is unknown or wrong. """ scope = kwargs.pop('scope', None) with tf.variable_scope(scope, 'RepeatOp', [inputs]): tower = inputs for _ in range(repetitions): tower = op(tower, *args, **kwargs) return tower
[ "def", "repeat_op", "(", "repetitions", ",", "inputs", ",", "op", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "scope", "=", "kwargs", ".", "pop", "(", "'scope'", ",", "None", ")", "with", "tf", ".", "variable_scope", "(", "scope", ",", "'R...
Build a sequential Tower starting from inputs by using an op repeatedly. It creates new scopes for each operation by increasing the counter. Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1') it will repeat the given op under the following variable_scopes: conv1/Conv conv1/Conv_1 conv1/Conv_2 Args: repetitions: number or repetitions. inputs: a tensor of size [batch_size, height, width, channels]. op: an operation. *args: args for the op. **kwargs: kwargs for the op. Returns: a tensor result of applying the operation op, num times. Raises: ValueError: if the op is unknown or wrong.
[ "Build", "a", "sequential", "Tower", "starting", "from", "inputs", "by", "using", "an", "op", "repeatedly", "." ]
python
train
Erotemic/utool
utool/util_sysreq.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_sysreq.py#L61-L82
def get_global_dist_packages_dir(): """ Attempts to work around virtualenvs and find the system dist_pacakges. Essentially this is implmenented as a lookuptable """ import utool as ut if not ut.in_virtual_env(): # Non venv case return get_site_packages_dir() else: candidates = [] if ut.LINUX: import sys candidates += [ '/usr/lib/python%s/dist-packages' % (sys.version[0:3],), '/usr/lib/python%s/dist-packages' % (sys.version[0:1],), ] else: raise NotImplementedError() for path in candidates: if ut.checkpath(path): return path
[ "def", "get_global_dist_packages_dir", "(", ")", ":", "import", "utool", "as", "ut", "if", "not", "ut", ".", "in_virtual_env", "(", ")", ":", "# Non venv case", "return", "get_site_packages_dir", "(", ")", "else", ":", "candidates", "=", "[", "]", "if", "ut"...
Attempts to work around virtualenvs and find the system dist_pacakges. Essentially this is implmenented as a lookuptable
[ "Attempts", "to", "work", "around", "virtualenvs", "and", "find", "the", "system", "dist_pacakges", ".", "Essentially", "this", "is", "implmenented", "as", "a", "lookuptable" ]
python
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/client.py#L1935-L1943
def _schema_from_json_file_object(self, file_obj): """Helper function for schema_from_json that takes a file object that describes a table schema. Returns: List of schema field objects. """ json_data = json.load(file_obj) return [SchemaField.from_api_repr(field) for field in json_data]
[ "def", "_schema_from_json_file_object", "(", "self", ",", "file_obj", ")", ":", "json_data", "=", "json", ".", "load", "(", "file_obj", ")", "return", "[", "SchemaField", ".", "from_api_repr", "(", "field", ")", "for", "field", "in", "json_data", "]" ]
Helper function for schema_from_json that takes a file object that describes a table schema. Returns: List of schema field objects.
[ "Helper", "function", "for", "schema_from_json", "that", "takes", "a", "file", "object", "that", "describes", "a", "table", "schema", "." ]
python
train
twisted/epsilon
epsilon/extime.py
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/extime.py#L607-L619
def fromPOSIXTimestamp(klass, secs): """Return a new Time instance from seconds since the POSIX epoch. The POSIX epoch is midnight Jan 1, 1970 UTC. According to POSIX, leap seconds don't exist, so one UTC day is exactly 86400 seconds, even if it wasn't. @param secs: a number of seconds, represented as an integer, long or float. """ self = klass.fromDatetime(_EPOCH + datetime.timedelta(seconds=secs)) self.resolution = datetime.timedelta() return self
[ "def", "fromPOSIXTimestamp", "(", "klass", ",", "secs", ")", ":", "self", "=", "klass", ".", "fromDatetime", "(", "_EPOCH", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "secs", ")", ")", "self", ".", "resolution", "=", "datetime", ".", "timede...
Return a new Time instance from seconds since the POSIX epoch. The POSIX epoch is midnight Jan 1, 1970 UTC. According to POSIX, leap seconds don't exist, so one UTC day is exactly 86400 seconds, even if it wasn't. @param secs: a number of seconds, represented as an integer, long or float.
[ "Return", "a", "new", "Time", "instance", "from", "seconds", "since", "the", "POSIX", "epoch", "." ]
python
train
jarrekk/imgkit
imgkit/imgkit.py
https://github.com/jarrekk/imgkit/blob/763296cc2e81b16b9c3ebd2cd4355ddd02d5ab16/imgkit/imgkit.py#L130-L152
def _normalize_options(self, options): """ Generator of 2-tuples (option-key, option-value). When options spec is a list, generate a 2-tuples per list item. :param options: dict {option: value} returns: iterator (option-key, option-value) - option names lower cased and prepended with '--' if necessary. Non-empty values cast to str """ for key, value in list(options.items()): if '--' in key: normalized_key = self._normalize_arg(key) else: normalized_key = '--%s' % self._normalize_arg(key) if isinstance(value, (list, tuple)): for opt_val in value: yield (normalized_key, opt_val) else: yield (normalized_key, str(value) if value else value)
[ "def", "_normalize_options", "(", "self", ",", "options", ")", ":", "for", "key", ",", "value", "in", "list", "(", "options", ".", "items", "(", ")", ")", ":", "if", "'--'", "in", "key", ":", "normalized_key", "=", "self", ".", "_normalize_arg", "(", ...
Generator of 2-tuples (option-key, option-value). When options spec is a list, generate a 2-tuples per list item. :param options: dict {option: value} returns: iterator (option-key, option-value) - option names lower cased and prepended with '--' if necessary. Non-empty values cast to str
[ "Generator", "of", "2", "-", "tuples", "(", "option", "-", "key", "option", "-", "value", ")", ".", "When", "options", "spec", "is", "a", "list", "generate", "a", "2", "-", "tuples", "per", "list", "item", "." ]
python
train
Rapptz/discord.py
discord/ext/commands/bot.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/ext/commands/bot.py#L868-L894
async def process_commands(self, message): """|coro| This function processes the commands that have been registered to the bot and other groups. Without this coroutine, none of the commands will be triggered. By default, this coroutine is called inside the :func:`.on_message` event. If you choose to override the :func:`.on_message` event, then you should invoke this coroutine as well. This is built using other low level tools, and is equivalent to a call to :meth:`~.Bot.get_context` followed by a call to :meth:`~.Bot.invoke`. This also checks if the message's author is a bot and doesn't call :meth:`~.Bot.get_context` or :meth:`~.Bot.invoke` if so. Parameters ----------- message: :class:`discord.Message` The message to process commands for. """ if message.author.bot: return ctx = await self.get_context(message) await self.invoke(ctx)
[ "async", "def", "process_commands", "(", "self", ",", "message", ")", ":", "if", "message", ".", "author", ".", "bot", ":", "return", "ctx", "=", "await", "self", ".", "get_context", "(", "message", ")", "await", "self", ".", "invoke", "(", "ctx", ")" ...
|coro| This function processes the commands that have been registered to the bot and other groups. Without this coroutine, none of the commands will be triggered. By default, this coroutine is called inside the :func:`.on_message` event. If you choose to override the :func:`.on_message` event, then you should invoke this coroutine as well. This is built using other low level tools, and is equivalent to a call to :meth:`~.Bot.get_context` followed by a call to :meth:`~.Bot.invoke`. This also checks if the message's author is a bot and doesn't call :meth:`~.Bot.get_context` or :meth:`~.Bot.invoke` if so. Parameters ----------- message: :class:`discord.Message` The message to process commands for.
[ "|coro|" ]
python
train
marshmallow-code/apispec
src/apispec/ext/marshmallow/openapi.py
https://github.com/marshmallow-code/apispec/blob/e92ceffd12b2e392b8d199ed314bd2a7e6512dff/src/apispec/ext/marshmallow/openapi.py#L343-L365
def field2pattern(self, field, **kwargs): """Return the dictionary of OpenAPI field attributes for a set of :class:`Range <marshmallow.validators.Regexp>` validators. :param Field field: A marshmallow field. :rtype: dict """ regex_validators = ( v for v in field.validators if isinstance(getattr(v, "regex", None), RegexType) ) v = next(regex_validators, None) attributes = {} if v is None else {"pattern": v.regex.pattern} if next(regex_validators, None) is not None: warnings.warn( "More than one regex validator defined on {} field. Only the " "first one will be used in the output spec.".format(type(field)), UserWarning, ) return attributes
[ "def", "field2pattern", "(", "self", ",", "field", ",", "*", "*", "kwargs", ")", ":", "regex_validators", "=", "(", "v", "for", "v", "in", "field", ".", "validators", "if", "isinstance", "(", "getattr", "(", "v", ",", "\"regex\"", ",", "None", ")", "...
Return the dictionary of OpenAPI field attributes for a set of :class:`Range <marshmallow.validators.Regexp>` validators. :param Field field: A marshmallow field. :rtype: dict
[ "Return", "the", "dictionary", "of", "OpenAPI", "field", "attributes", "for", "a", "set", "of", ":", "class", ":", "Range", "<marshmallow", ".", "validators", ".", "Regexp", ">", "validators", "." ]
python
train
raphaelm/python-fints
fints/client.py
https://github.com/raphaelm/python-fints/blob/fee55ae37d3182d0adb40507d4acb98b06057e4a/fints/client.py#L455-L472
def _find_highest_supported_command(self, *segment_classes, **kwargs): """Search the BPD for the highest supported version of a segment.""" return_parameter_segment = kwargs.get("return_parameter_segment", False) parameter_segment_name = "{}I{}S".format(segment_classes[0].TYPE[0], segment_classes[0].TYPE[2:]) version_map = dict((clazz.VERSION, clazz) for clazz in segment_classes) max_version = self.bpd.find_segment_highest_version(parameter_segment_name, version_map.keys()) if not max_version: raise FinTSUnsupportedOperation('No supported {} version found. I support {}, bank supports {}.'.format( parameter_segment_name, tuple(version_map.keys()), tuple(v.header.version for v in self.bpd.find_segments(parameter_segment_name)) )) if return_parameter_segment: return max_version, version_map.get(max_version.header.version) else: return version_map.get(max_version.header.version)
[ "def", "_find_highest_supported_command", "(", "self", ",", "*", "segment_classes", ",", "*", "*", "kwargs", ")", ":", "return_parameter_segment", "=", "kwargs", ".", "get", "(", "\"return_parameter_segment\"", ",", "False", ")", "parameter_segment_name", "=", "\"{}...
Search the BPD for the highest supported version of a segment.
[ "Search", "the", "BPD", "for", "the", "highest", "supported", "version", "of", "a", "segment", "." ]
python
train
ozgur/python-firebase
firebase/decorators.py
https://github.com/ozgur/python-firebase/blob/6b96b326f6d8f477503ca42fdfbd81bcbe1f9e0d/firebase/decorators.py#L5-L23
def http_connection(timeout): """ Decorator function that injects a requests.Session instance into the decorated function's actual parameters if not given. """ def wrapper(f): def wrapped(*args, **kwargs): if not ('connection' in kwargs) or not kwargs['connection']: connection = requests.Session() kwargs['connection'] = connection else: connection = kwargs['connection'] if not getattr(connection, 'timeout', False): connection.timeout = timeout connection.headers.update({'Content-type': 'application/json'}) return f(*args, **kwargs) return wraps(f)(wrapped) return wrapper
[ "def", "http_connection", "(", "timeout", ")", ":", "def", "wrapper", "(", "f", ")", ":", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "(", "'connection'", "in", "kwargs", ")", "or", "not", "kwargs", "[", "'co...
Decorator function that injects a requests.Session instance into the decorated function's actual parameters if not given.
[ "Decorator", "function", "that", "injects", "a", "requests", ".", "Session", "instance", "into", "the", "decorated", "function", "s", "actual", "parameters", "if", "not", "given", "." ]
python
valid
LonamiWebs/Telethon
telethon/client/uploads.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/client/uploads.py#L85-L293
async def send_file( self, entity, file, *, caption=None, force_document=False, progress_callback=None, reply_to=None, attributes=None, thumb=None, allow_cache=True, parse_mode=(), voice_note=False, video_note=False, buttons=None, silent=None, supports_streaming=False, **kwargs): """ Sends a file to the specified entity. Args: entity (`entity`): Who will receive the file. file (`str` | `bytes` | `file` | `media`): The file to send, which can be one of: * A local file path to an in-disk file. The file name will be the path's base name. * A `bytes` byte array with the file's data to send (for example, by using ``text.encode('utf-8')``). A default file name will be used. * A bytes `io.IOBase` stream over the file to send (for example, by using ``open(file, 'rb')``). Its ``.name`` property will be used for the file name, or a default if it doesn't have one. * An external URL to a file over the internet. This will send the file as "external" media, and Telegram is the one that will fetch the media and send it. * A Bot API-like ``file_id``. You can convert previously sent media to file IDs for later reusing with `telethon.utils.pack_bot_file_id`. * A handle to an existing file (for example, if you sent a message with media before, you can use its ``message.media`` as a file here). * A handle to an uploaded file (from `upload_file`). To send an album, you should provide a list in this parameter. If a list or similar is provided, the files in it will be sent as an album in the order in which they appear, sliced in chunks of 10 if more than 10 are given. caption (`str`, optional): Optional caption for the sent media message. When sending an album, the caption may be a list of strings, which will be assigned to the files pairwise. force_document (`bool`, optional): If left to ``False`` and the file is a path that ends with the extension of an image file or a video file, it will be sent as such. Otherwise always as a document. 
progress_callback (`callable`, optional): A callback function accepting two parameters: ``(sent bytes, total)``. reply_to (`int` | `Message <telethon.tl.custom.message.Message>`): Same as `reply_to` from `send_message`. attributes (`list`, optional): Optional attributes that override the inferred ones, like :tl:`DocumentAttributeFilename` and so on. thumb (`str` | `bytes` | `file`, optional): Optional JPEG thumbnail (for documents). **Telegram will ignore this parameter** unless you pass a ``.jpg`` file! The file must also be small in dimensions and in-disk size. Successful thumbnails were files below 20kb and 200x200px. Width/height and dimensions/size ratios may be important. allow_cache (`bool`, optional): Whether to allow using the cached version stored in the database or not. Defaults to ``True`` to avoid re-uploads. Must be ``False`` if you wish to use different attributes or thumb than those that were used when the file was cached. parse_mode (`object`, optional): See the `TelegramClient.parse_mode <telethon.client.messageparse.MessageParseMethods.parse_mode>` property for allowed values. Markdown parsing will be used by default. voice_note (`bool`, optional): If ``True`` the audio will be sent as a voice note. Set `allow_cache` to ``False`` if you sent the same file without this setting before for it to work. video_note (`bool`, optional): If ``True`` the video will be sent as a video note, also known as a round video message. Set `allow_cache` to ``False`` if you sent the same file without this setting before for it to work. buttons (`list`, `custom.Button <telethon.tl.custom.button.Button>`, :tl:`KeyboardButton`): The matrix (list of lists), row list or button to be shown after sending the message. This parameter will only work if you have signed in as a bot. You can also pass your own :tl:`ReplyMarkup` here. silent (`bool`, optional): Whether the message should notify people in a broadcast channel or not. 
Defaults to ``False``, which means it will notify them. Set it to ``True`` to alter this behaviour. supports_streaming (`bool`, optional): Whether the sent video supports streaming or not. Note that Telegram only recognizes as streamable some formats like MP4, and others like AVI or MKV will not work. You should convert these to MP4 before sending if you want them to be streamable. Unsupported formats will result in ``VideoContentTypeError``. Notes: If the ``hachoir3`` package (``hachoir`` module) is installed, it will be used to determine metadata from audio and video files. If the `pillow` package is installed and you are sending a photo, it will be resized to fit within the maximum dimensions allowed by Telegram to avoid ``errors.PhotoInvalidDimensionsError``. This cannot be done if you are sending :tl:`InputFile`, however. Returns: The `telethon.tl.custom.message.Message` (or messages) containing the sent file, or messages if a list of them was passed. """ # i.e. ``None`` was used if not file: raise TypeError('Cannot use {!r} as file'.format(file)) if not caption: caption = '' # First check if the user passed an iterable, in which case # we may want to send as an album if all are photo files. 
if utils.is_list_like(file): # TODO Fix progress_callback images = [] if force_document: documents = file else: documents = [] for x in file: if utils.is_image(x): images.append(x) else: documents.append(x) result = [] while images: result += await self._send_album( entity, images[:10], caption=caption, progress_callback=progress_callback, reply_to=reply_to, parse_mode=parse_mode, silent=silent ) images = images[10:] for x in documents: result.append(await self.send_file( entity, x, allow_cache=allow_cache, caption=caption, force_document=force_document, progress_callback=progress_callback, reply_to=reply_to, attributes=attributes, thumb=thumb, voice_note=voice_note, video_note=video_note, buttons=buttons, silent=silent, supports_streaming=supports_streaming, **kwargs )) return result entity = await self.get_input_entity(entity) reply_to = utils.get_message_id(reply_to) # Not document since it's subject to change. # Needed when a Message is passed to send_message and it has media. if 'entities' in kwargs: msg_entities = kwargs['entities'] else: caption, msg_entities =\ await self._parse_message_text(caption, parse_mode) file_handle, media, image = await self._file_to_media( file, force_document=force_document, progress_callback=progress_callback, attributes=attributes, allow_cache=allow_cache, thumb=thumb, voice_note=voice_note, video_note=video_note, supports_streaming=supports_streaming ) # e.g. invalid cast from :tl:`MessageMediaWebPage` if not media: raise TypeError('Cannot use {!r} as file'.format(file)) markup = self.build_reply_markup(buttons) request = functions.messages.SendMediaRequest( entity, media, reply_to_msg_id=reply_to, message=caption, entities=msg_entities, reply_markup=markup, silent=silent ) msg = self._get_response_message(request, await self(request), entity) await self._cache_media(msg, file, file_handle, image=image) return msg
[ "async", "def", "send_file", "(", "self", ",", "entity", ",", "file", ",", "*", ",", "caption", "=", "None", ",", "force_document", "=", "False", ",", "progress_callback", "=", "None", ",", "reply_to", "=", "None", ",", "attributes", "=", "None", ",", ...
Sends a file to the specified entity. Args: entity (`entity`): Who will receive the file. file (`str` | `bytes` | `file` | `media`): The file to send, which can be one of: * A local file path to an in-disk file. The file name will be the path's base name. * A `bytes` byte array with the file's data to send (for example, by using ``text.encode('utf-8')``). A default file name will be used. * A bytes `io.IOBase` stream over the file to send (for example, by using ``open(file, 'rb')``). Its ``.name`` property will be used for the file name, or a default if it doesn't have one. * An external URL to a file over the internet. This will send the file as "external" media, and Telegram is the one that will fetch the media and send it. * A Bot API-like ``file_id``. You can convert previously sent media to file IDs for later reusing with `telethon.utils.pack_bot_file_id`. * A handle to an existing file (for example, if you sent a message with media before, you can use its ``message.media`` as a file here). * A handle to an uploaded file (from `upload_file`). To send an album, you should provide a list in this parameter. If a list or similar is provided, the files in it will be sent as an album in the order in which they appear, sliced in chunks of 10 if more than 10 are given. caption (`str`, optional): Optional caption for the sent media message. When sending an album, the caption may be a list of strings, which will be assigned to the files pairwise. force_document (`bool`, optional): If left to ``False`` and the file is a path that ends with the extension of an image file or a video file, it will be sent as such. Otherwise always as a document. progress_callback (`callable`, optional): A callback function accepting two parameters: ``(sent bytes, total)``. reply_to (`int` | `Message <telethon.tl.custom.message.Message>`): Same as `reply_to` from `send_message`. 
attributes (`list`, optional): Optional attributes that override the inferred ones, like :tl:`DocumentAttributeFilename` and so on. thumb (`str` | `bytes` | `file`, optional): Optional JPEG thumbnail (for documents). **Telegram will ignore this parameter** unless you pass a ``.jpg`` file! The file must also be small in dimensions and in-disk size. Successful thumbnails were files below 20kb and 200x200px. Width/height and dimensions/size ratios may be important. allow_cache (`bool`, optional): Whether to allow using the cached version stored in the database or not. Defaults to ``True`` to avoid re-uploads. Must be ``False`` if you wish to use different attributes or thumb than those that were used when the file was cached. parse_mode (`object`, optional): See the `TelegramClient.parse_mode <telethon.client.messageparse.MessageParseMethods.parse_mode>` property for allowed values. Markdown parsing will be used by default. voice_note (`bool`, optional): If ``True`` the audio will be sent as a voice note. Set `allow_cache` to ``False`` if you sent the same file without this setting before for it to work. video_note (`bool`, optional): If ``True`` the video will be sent as a video note, also known as a round video message. Set `allow_cache` to ``False`` if you sent the same file without this setting before for it to work. buttons (`list`, `custom.Button <telethon.tl.custom.button.Button>`, :tl:`KeyboardButton`): The matrix (list of lists), row list or button to be shown after sending the message. This parameter will only work if you have signed in as a bot. You can also pass your own :tl:`ReplyMarkup` here. silent (`bool`, optional): Whether the message should notify people in a broadcast channel or not. Defaults to ``False``, which means it will notify them. Set it to ``True`` to alter this behaviour. supports_streaming (`bool`, optional): Whether the sent video supports streaming or not. 
Note that Telegram only recognizes as streamable some formats like MP4, and others like AVI or MKV will not work. You should convert these to MP4 before sending if you want them to be streamable. Unsupported formats will result in ``VideoContentTypeError``. Notes: If the ``hachoir3`` package (``hachoir`` module) is installed, it will be used to determine metadata from audio and video files. If the `pillow` package is installed and you are sending a photo, it will be resized to fit within the maximum dimensions allowed by Telegram to avoid ``errors.PhotoInvalidDimensionsError``. This cannot be done if you are sending :tl:`InputFile`, however. Returns: The `telethon.tl.custom.message.Message` (or messages) containing the sent file, or messages if a list of them was passed.
[ "Sends", "a", "file", "to", "the", "specified", "entity", "." ]
python
train
WebarchivCZ/WA-KAT
src/wa_kat/data_model.py
https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/data_model.py#L31-L38
def _compose_func(func, args_func=lambda req_info: [req_info.index]): """ Compose function used to compose arguments to function. Arguments for the functions are composed from the :class:`.RequestInfo` object from the ZODB. """ return FuncInfo(func=func, args_func=args_func)
[ "def", "_compose_func", "(", "func", ",", "args_func", "=", "lambda", "req_info", ":", "[", "req_info", ".", "index", "]", ")", ":", "return", "FuncInfo", "(", "func", "=", "func", ",", "args_func", "=", "args_func", ")" ]
Compose function used to compose arguments to function. Arguments for the functions are composed from the :class:`.RequestInfo` object from the ZODB.
[ "Compose", "function", "used", "to", "compose", "arguments", "to", "function", "." ]
python
train
pantsbuild/pants
contrib/python/src/python/pants/contrib/python/checks/checker/checker.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/contrib/python/src/python/pants/contrib/python/checks/checker/checker.py#L125-L145
def plugins(): """Returns a tuple of the plugin classes registered with the python style checker. :rtype: tuple of :class:`pants.contrib.python.checks.checker.common.CheckstylePlugin` subtypes """ return ( ClassFactoring, ConstantLogic, ExceptStatements, FutureCompatibility, ImportOrder, Indentation, MissingContextManager, NewStyleClasses, Newlines, PrintStatements, TrailingWhitespace, PEP8VariableNames, PyflakesChecker, PyCodeStyleChecker, )
[ "def", "plugins", "(", ")", ":", "return", "(", "ClassFactoring", ",", "ConstantLogic", ",", "ExceptStatements", ",", "FutureCompatibility", ",", "ImportOrder", ",", "Indentation", ",", "MissingContextManager", ",", "NewStyleClasses", ",", "Newlines", ",", "PrintSta...
Returns a tuple of the plugin classes registered with the python style checker. :rtype: tuple of :class:`pants.contrib.python.checks.checker.common.CheckstylePlugin` subtypes
[ "Returns", "a", "tuple", "of", "the", "plugin", "classes", "registered", "with", "the", "python", "style", "checker", "." ]
python
train
materialsvirtuallab/monty
monty/os/path.py
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/os/path.py#L15-L41
def which(cmd): """ Returns full path to a executable. Args: cmd (str): Executable command to search for. Returns: (str) Full path to command. None if it is not found. Example:: full_path_to_python = which("python") """ def is_exe(fp): return os.path.isfile(fp) and os.access(fp, os.X_OK) fpath, fname = os.path.split(cmd) if fpath: if is_exe(cmd): return cmd else: for path in os.environ["PATH"].split(os.pathsep): exe_file = os.path.join(path, cmd) if is_exe(exe_file): return exe_file return None
[ "def", "which", "(", "cmd", ")", ":", "def", "is_exe", "(", "fp", ")", ":", "return", "os", ".", "path", ".", "isfile", "(", "fp", ")", "and", "os", ".", "access", "(", "fp", ",", "os", ".", "X_OK", ")", "fpath", ",", "fname", "=", "os", ".",...
Returns full path to a executable. Args: cmd (str): Executable command to search for. Returns: (str) Full path to command. None if it is not found. Example:: full_path_to_python = which("python")
[ "Returns", "full", "path", "to", "a", "executable", "." ]
python
train
pgjones/quart
quart/routing.py
https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/routing.py#L335-L360
def match(self, path: str) -> Tuple[Optional[Dict[str, Any]], bool]: """Check if the path matches this Rule. If it does it returns a dict of matched and converted values, otherwise None is returned. """ match = self._pattern.match(path) if match is not None: # If the route is a branch (not leaf) and the path is # missing a trailing slash then it needs one to be # considered a match in the strict slashes mode. needs_slash = ( self.strict_slashes and not self.is_leaf and match.groupdict()['__slash__'] != '/' ) try: converted_varaibles = { name: self._converters[name].to_python(value) for name, value in match.groupdict().items() if name != '__slash__' } except ValidationError: # Doesn't meet conversion rules, no match return None, False else: return {**self.defaults, **converted_varaibles}, needs_slash else: return None, False
[ "def", "match", "(", "self", ",", "path", ":", "str", ")", "->", "Tuple", "[", "Optional", "[", "Dict", "[", "str", ",", "Any", "]", "]", ",", "bool", "]", ":", "match", "=", "self", ".", "_pattern", ".", "match", "(", "path", ")", "if", "match...
Check if the path matches this Rule. If it does it returns a dict of matched and converted values, otherwise None is returned.
[ "Check", "if", "the", "path", "matches", "this", "Rule", "." ]
python
train
TeamHG-Memex/eli5
eli5/_feature_names.py
https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/_feature_names.py#L161-L179
def add_feature(self, feature): # type: (Any) -> int """ Add a new feature name, return it's index. """ # A copy of self.feature_names is always made, because it might be # "owned" by someone else. # It's possible to make the copy only at the first call to # self.add_feature to improve performance. idx = self.n_features if isinstance(self.feature_names, (list, np.ndarray)): self.feature_names = list(self.feature_names) self.feature_names.append(feature) elif isinstance(self.feature_names, dict): self.feature_names = dict(self.feature_names) self.feature_names[idx] = feature elif self.feature_names is None: self.feature_names = {idx: feature} self.n_features += 1 return idx
[ "def", "add_feature", "(", "self", ",", "feature", ")", ":", "# type: (Any) -> int", "# A copy of self.feature_names is always made, because it might be", "# \"owned\" by someone else.", "# It's possible to make the copy only at the first call to", "# self.add_feature to improve performance....
Add a new feature name, return it's index.
[ "Add", "a", "new", "feature", "name", "return", "it", "s", "index", "." ]
python
train
onnx/onnxmltools
onnxmltools/convert/coreml/shape_calculators/neural_network/Pad.py
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/convert/coreml/shape_calculators/neural_network/Pad.py#L13-L30
def calculate_padding_output_shapes(operator): ''' Allowed input/output patterns are 1. [N, C, H, W] ---> [N, C, H', W'] ''' check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1) check_input_and_output_types(operator, good_input_types=[FloatTensorType]) output_shape = copy.deepcopy(operator.inputs[0].type.shape) params = operator.raw_operator.padding if len(params.paddingAmounts.borderAmounts) > 0: output_shape[2] += params.paddingAmounts.borderAmounts[0].startEdgeSize output_shape[2] += params.paddingAmounts.borderAmounts[0].endEdgeSize output_shape[3] += params.paddingAmounts.borderAmounts[1].startEdgeSize output_shape[3] += params.paddingAmounts.borderAmounts[1].endEdgeSize operator.outputs[0].type.shape = output_shape
[ "def", "calculate_padding_output_shapes", "(", "operator", ")", ":", "check_input_and_output_numbers", "(", "operator", ",", "input_count_range", "=", "1", ",", "output_count_range", "=", "1", ")", "check_input_and_output_types", "(", "operator", ",", "good_input_types", ...
Allowed input/output patterns are 1. [N, C, H, W] ---> [N, C, H', W']
[ "Allowed", "input", "/", "output", "patterns", "are", "1", ".", "[", "N", "C", "H", "W", "]", "---", ">", "[", "N", "C", "H", "W", "]" ]
python
train
tanghaibao/jcvi
jcvi/assembly/sspace.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/sspace.py#L145-L165
def close(args): """ %prog close scaffolds.fasta PE*.fastq Run GapFiller to fill gaps. """ p = OptionParser(close.__doc__) p.set_home("gapfiller") p.set_cpus() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) scaffolds = args[0] libtxt = write_libraries(args[1:], aligner="bwa") cmd = "perl " + op.join(opts.gapfiller_home, "GapFiller.pl") cmd += " -l {0} -s {1} -T {2}".format(libtxt, scaffolds, opts.cpus) runsh = "run.sh" write_file(runsh, cmd)
[ "def", "close", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "close", ".", "__doc__", ")", "p", ".", "set_home", "(", "\"gapfiller\"", ")", "p", ".", "set_cpus", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")",...
%prog close scaffolds.fasta PE*.fastq Run GapFiller to fill gaps.
[ "%prog", "close", "scaffolds", ".", "fasta", "PE", "*", ".", "fastq" ]
python
train
gunthercox/ChatterBot
chatterbot/trainers.py
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/trainers.py#L30-L37
def get_preprocessed_statement(self, input_statement): """ Preprocess the input statement. """ for preprocessor in self.chatbot.preprocessors: input_statement = preprocessor(input_statement) return input_statement
[ "def", "get_preprocessed_statement", "(", "self", ",", "input_statement", ")", ":", "for", "preprocessor", "in", "self", ".", "chatbot", ".", "preprocessors", ":", "input_statement", "=", "preprocessor", "(", "input_statement", ")", "return", "input_statement" ]
Preprocess the input statement.
[ "Preprocess", "the", "input", "statement", "." ]
python
train
pandas-dev/pandas
pandas/io/json/json.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/json/json.py#L558-L580
def _get_object_parser(self, json): """ Parses a json document into a pandas object. """ typ = self.typ dtype = self.dtype kwargs = { "orient": self.orient, "dtype": self.dtype, "convert_axes": self.convert_axes, "convert_dates": self.convert_dates, "keep_default_dates": self.keep_default_dates, "numpy": self.numpy, "precise_float": self.precise_float, "date_unit": self.date_unit } obj = None if typ == 'frame': obj = FrameParser(json, **kwargs).parse() if typ == 'series' or obj is None: if not isinstance(dtype, bool): kwargs['dtype'] = dtype obj = SeriesParser(json, **kwargs).parse() return obj
[ "def", "_get_object_parser", "(", "self", ",", "json", ")", ":", "typ", "=", "self", ".", "typ", "dtype", "=", "self", ".", "dtype", "kwargs", "=", "{", "\"orient\"", ":", "self", ".", "orient", ",", "\"dtype\"", ":", "self", ".", "dtype", ",", "\"co...
Parses a json document into a pandas object.
[ "Parses", "a", "json", "document", "into", "a", "pandas", "object", "." ]
python
train
peopledoc/django-pimpmytheme
pimpmytheme/management/commands/update_themefolder_from_git.py
https://github.com/peopledoc/django-pimpmytheme/blob/302c65bec52bbd9e0a17de68ad337ef4a6205556/pimpmytheme/management/commands/update_themefolder_from_git.py#L99-L128
def update_git_repository(self, folder, git_repository): """Updates git remote for the managed theme folder if has changed. :param git_repository: git url of the theme folder :param folder: path of the git managed theme folder """ # load repo object from path repo = git.Repo(folder) # keep local_head_name for to reset folder remote head later local_head_name = repo.head.ref.name # test if git repository url has changed remote = repo.remote('origin') if remote.url == git_repository: return # remove/add new remote repository origin remote.remove(repo, 'origin') origin = remote.add(repo, 'origin', git_repository) # fetch available branches origin.fetch() # get remote head according previously store local head name remote_head = getattr(origin.refs, local_head_name) # reset repository tracking branch according deduced remote head repo.create_head(local_head_name, remote_head)\ .set_tracking_branch(remote_head)
[ "def", "update_git_repository", "(", "self", ",", "folder", ",", "git_repository", ")", ":", "# load repo object from path", "repo", "=", "git", ".", "Repo", "(", "folder", ")", "# keep local_head_name for to reset folder remote head later", "local_head_name", "=", "repo"...
Updates git remote for the managed theme folder if has changed. :param git_repository: git url of the theme folder :param folder: path of the git managed theme folder
[ "Updates", "git", "remote", "for", "the", "managed", "theme", "folder", "if", "has", "changed", "." ]
python
train
dropbox/pyannotate
pyannotate_runtime/collect_types.py
https://github.com/dropbox/pyannotate/blob/d128c76b8a86f208e5c78716f2a917003650cebc/pyannotate_runtime/collect_types.py#L821-L903
def _trace_dispatch(frame, event, arg): # type: (Any, str, Optional[Any]) -> None """ This is the main hook passed to setprofile(). It implement python profiler interface. Arguments are described in https://docs.python.org/2/library/sys.html#sys.settrace """ # Bail if we're not tracing. if not running: return # Get counter for this code object. Bail if we don't care about this function. # An explicit None is stored in the table when we no longer care. code = frame.f_code key = id(code) n = sampling_counters.get(key, 0) if n is None: return if event == 'call': # Bump counter and bail depending on sampling sequence. sampling_counters[key] = n + 1 # Each function gets traced at most MAX_SAMPLES_PER_FUNC times per run. # NOTE: There's a race condition if two threads call the same function. # I don't think we should care, so what if it gets probed an extra time. if n not in sampling_sequence: if n > LAST_SAMPLE: sampling_counters[key] = None # We're no longer interested in this function. call_pending.discard(key) # Avoid getting events out of sync return # Mark that we are looking for a return from this code object. call_pending.add(key) elif event == 'return': if key not in call_pending: # No pending call event -- ignore this event. We only collect # return events when we know the corresponding call event. return call_pending.discard(key) # Avoid race conditions else: # Ignore other events, such as c_call and c_return. return # Track calls under current directory only. filename = _filter_filename(code.co_filename) if filename: func_name = get_function_name_from_frame(frame) if not func_name or func_name[0] == '<': # Could be a lambda or a comprehension; we're not interested. 
sampling_counters[key] = None else: function_key = FunctionKey(filename, code.co_firstlineno, func_name) if event == 'call': # TODO(guido): Make this faster arg_info = inspect.getargvalues(frame) # type: ArgInfo resolved_types = prep_args(arg_info) _task_queue.put(KeyAndTypes(function_key, resolved_types)) elif event == 'return': # This event is also triggered if a function yields or raises an exception. # We can tell the difference by looking at the bytecode. # (We don't get here for C functions so the bytecode always exists.) last_opcode = code.co_code[frame.f_lasti] if last_opcode == RETURN_VALUE_OPCODE: if code.co_flags & CO_GENERATOR: # Return from a generator. t = resolve_type(FakeIterator([])) else: t = resolve_type(arg) elif last_opcode == YIELD_VALUE_OPCODE: # Yield from a generator. # TODO: Unify generators -- currently each YIELD is turned into # a separate call, so a function yielding ints and strs will be # typed as Union[Iterator[int], Iterator[str]] -- this should be # Iterator[Union[int, str]]. t = resolve_type(FakeIterator([arg])) else: # This branch is also taken when returning from a generator. # TODO: returning non-trivial values from generators, per PEP 380; # and async def / await stuff. t = NoReturnType _task_queue.put(KeyAndReturn(function_key, t)) else: sampling_counters[key] = None
[ "def", "_trace_dispatch", "(", "frame", ",", "event", ",", "arg", ")", ":", "# type: (Any, str, Optional[Any]) -> None", "# Bail if we're not tracing.", "if", "not", "running", ":", "return", "# Get counter for this code object. Bail if we don't care about this function.", "# An...
This is the main hook passed to setprofile(). It implement python profiler interface. Arguments are described in https://docs.python.org/2/library/sys.html#sys.settrace
[ "This", "is", "the", "main", "hook", "passed", "to", "setprofile", "()", ".", "It", "implement", "python", "profiler", "interface", "." ]
python
train
coghost/izen
izen/amq.py
https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/amq.py#L201-L204
def _on_connect(client, userdata, flags, rc): """默认 topic 连接处理方法""" log.debug('[MQTT] Connected with result code ' + str(rc)) client.subscribe('test_topic', qos=2)
[ "def", "_on_connect", "(", "client", ",", "userdata", ",", "flags", ",", "rc", ")", ":", "log", ".", "debug", "(", "'[MQTT] Connected with result code '", "+", "str", "(", "rc", ")", ")", "client", ".", "subscribe", "(", "'test_topic'", ",", "qos", "=", ...
默认 topic 连接处理方法
[ "默认", "topic", "连接处理方法" ]
python
train
mithro/python-datetime-tz
datetime_tz/update_win32tz_map.py
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/update_win32tz_map.py#L95-L134
def update_stored_win32tz_map(): """Downloads the cldr win32 timezone map and stores it in win32tz_map.py.""" windows_zones_xml = download_cldr_win32tz_map_xml() source_hash = hashlib.md5(windows_zones_xml).hexdigest() if hasattr(windows_zones_xml, "decode"): windows_zones_xml = windows_zones_xml.decode("utf-8") map_zones = create_win32tz_map(windows_zones_xml) map_dir = os.path.dirname(os.path.abspath(__file__)) map_filename = os.path.join(map_dir, "win32tz_map.py") if os.path.exists(map_filename): reload(win32tz_map) current_hash = getattr(win32tz_map, "source_hash", None) if current_hash == source_hash: return False map_file = open(map_filename, "w") comment = "Map between Windows and Olson timezones taken from %s" % ( _CLDR_WINZONES_URL,) comment2 = "Generated automatically from datetime_tz.py" map_file.write("'''%s\n" % comment) map_file.write("%s'''\n" % comment2) map_file.write("source_hash = '%s' # md5 sum of xml source data\n" % ( source_hash)) map_file.write("win32timezones = {\n") for win32_name, territory, olson_name, comment in map_zones: if territory == '001': map_file.write(" %r: %r, # %s\n" % ( str(win32_name), str(olson_name), comment or "")) else: map_file.write(" %r: %r, # %s\n" % ( (str(win32_name), str(territory)), str(olson_name), comment or "")) map_file.write("}\n") map_file.close() return True
[ "def", "update_stored_win32tz_map", "(", ")", ":", "windows_zones_xml", "=", "download_cldr_win32tz_map_xml", "(", ")", "source_hash", "=", "hashlib", ".", "md5", "(", "windows_zones_xml", ")", ".", "hexdigest", "(", ")", "if", "hasattr", "(", "windows_zones_xml", ...
Downloads the cldr win32 timezone map and stores it in win32tz_map.py.
[ "Downloads", "the", "cldr", "win32", "timezone", "map", "and", "stores", "it", "in", "win32tz_map", ".", "py", "." ]
python
train
rainwoodman/kdcount
kdcount/sphere.py
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/sphere.py#L167-L219
def ang2pix(nside, theta, phi):
    r"""Convert angle :math:`\theta` :math:`\phi` to pixel.

    This is translated from chealpix.c; but refer to Section 4.1 of
    http://adsabs.harvard.edu/abs/2005ApJ...622..759G

    The three arguments are broadcast against each other, so scalars and
    arrays may be mixed; the result is an integer ('i8') array of pixel
    indices with the broadcast shape.
    """
    nside, theta, phi = numpy.lib.stride_tricks.broadcast_arrays(nside, theta, phi)

    def equatorial(nside, tt, z):
        # Pixel index for points in the equatorial belt (|z| <= 2/3).
        t1 = nside * (0.5 + tt)
        t2 = nside * z * 0.75
        jp = (t1 - t2).astype('i8')
        jm = (t1 + t2).astype('i8')
        ir = nside + 1 + jp - jm  # in {1, 2n + 1}
        kshift = 1 - (ir & 1)  # kshift=1 if ir even, 0 odd
        ip = (jp + jm - nside + kshift + 1) // 2  # in {0, 4n - 1}
        ip = ip % (4 * nside)
        return nside * (nside - 1) * 2 + (ir - 1) * 4 * nside + ip

    def polecaps(nside, tt, z, s):
        # Pixel index for points in the polar caps (|z| > 2/3).
        tp = tt - numpy.floor(tt)
        za = numpy.abs(z)
        tmp = nside * s / ((1 + za) / 3) ** 0.5
        # Very close to the poles, switch to the expression in (1 - |z|),
        # which is numerically better behaved there.
        mp = za > 0.99
        tmp[mp] = nside[mp] * (3 *(1-za[mp])) ** 0.5
        jp = (tp * tmp).astype('i8')
        jm = ((1 - tp) * tmp).astype('i8')
        ir = jp + jm + 1  # ring counted from the nearest pole
        ip = (tt * ir).astype('i8')
        ip = ip % (4 * ir)
        r1 = 2 * ir * (ir - 1)
        r2 = 2 * ir * (ir + 1)
        r = numpy.empty_like(r1)
        # North cap indices count up from 0; south cap counts back from
        # the final pixel (12 * nside**2 total pixels).
        r[z > 0] = r1[z > 0] + ip[z > 0]
        r[z < 0] = 12 * nside[z < 0] * nside[z < 0] - r2[z < 0] + ip[z < 0]
        return r

    z = numpy.cos(theta)
    s = numpy.sin(theta)
    tt = (phi / (0.5 * numpy.pi) ) % 4  # in [0, 4]

    result = numpy.zeros(z.shape, dtype='i8')
    # Belt vs. cap split at |z| = 2/3, matching the two helpers above.
    mask = (z < 2. / 3) & (z > -2. / 3)

    result[mask] = equatorial(nside[mask], tt[mask], z[mask])
    result[~mask] = polecaps(nside[~mask], tt[~mask], z[~mask], s[~mask])
    return result
[ "def", "ang2pix", "(", "nside", ",", "theta", ",", "phi", ")", ":", "nside", ",", "theta", ",", "phi", "=", "numpy", ".", "lib", ".", "stride_tricks", ".", "broadcast_arrays", "(", "nside", ",", "theta", ",", "phi", ")", "def", "equatorial", "(", "ns...
r"""Convert angle :math:`\theta` :math:`\phi` to pixel. This is translated from chealpix.c; but refer to Section 4.1 of http://adsabs.harvard.edu/abs/2005ApJ...622..759G
[ "r", "Convert", "angle", ":", "math", ":", "\\", "theta", ":", "math", ":", "\\", "phi", "to", "pixel", "." ]
python
train
cirruscluster/cirruscluster
cirruscluster/ext/ansible/utils/__init__.py
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/utils/__init__.py#L580-L596
def make_sudo_cmd(sudo_user, executable, cmd): """ helper function for connection plugins to create sudo commands """ # Rather than detect if sudo wants a password this time, -k makes # sudo always ask for a password if one is required. # Passing a quoted compound command to sudo (or sudo -s) # directly doesn't work, so we shellquote it with pipes.quote() # and pass the quoted string to the user's shell. We loop reading # output until we see the randomly-generated sudo prompt set with # the -p option. randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) prompt = '[sudo via ansible, key=%s] password: ' % randbits sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % ( C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_FLAGS, prompt, sudo_user, executable or '$SHELL', pipes.quote(cmd)) return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt)
[ "def", "make_sudo_cmd", "(", "sudo_user", ",", "executable", ",", "cmd", ")", ":", "# Rather than detect if sudo wants a password this time, -k makes", "# sudo always ask for a password if one is required.", "# Passing a quoted compound command to sudo (or sudo -s)", "# directly doesn't wo...
helper function for connection plugins to create sudo commands
[ "helper", "function", "for", "connection", "plugins", "to", "create", "sudo", "commands" ]
python
train
sammchardy/python-binance
binance/depthcache.py
https://github.com/sammchardy/python-binance/blob/31c0d0a32f9edd528c6c2c1dd3044d9a34ce43cc/binance/depthcache.py#L183-L197
def _start_socket(self):
    """Start the depth cache socket.

    Lazily creates the socket manager, subscribes to this symbol's depth
    stream, and blocks until at least one depth event has been buffered.

    :return:
    """
    if self._bm is None:
        self._bm = BinanceSocketManager(self._client)
    self._conn_key = self._bm.start_depth_socket(self._symbol, self._depth_event)
    if not self._bm.is_alive():
        self._bm.start()

    # wait for some socket responses — busy-wait (1s poll) until the
    # depth_event callback has buffered at least one message
    while not len(self._depth_message_buffer):
        time.sleep(1)
[ "def", "_start_socket", "(", "self", ")", ":", "if", "self", ".", "_bm", "is", "None", ":", "self", ".", "_bm", "=", "BinanceSocketManager", "(", "self", ".", "_client", ")", "self", ".", "_conn_key", "=", "self", ".", "_bm", ".", "start_depth_socket", ...
Start the depth cache socket :return:
[ "Start", "the", "depth", "cache", "socket" ]
python
train
senaite/senaite.jsonapi
src/senaite/jsonapi/v1/routes/users.py
https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/v1/routes/users.py#L97-L107
def auth(context, request):
    """ Basic Authentication

    Challenges anonymous callers with HTTP 401 plus a ``WWW-Authenticate``
    header so the client retries with basic-auth credentials; already
    authenticated callers just get an empty payload.
    """
    if ploneapi.user.is_anonymous():
        request.response.setStatus(401)
        # The realm string here is what the client displays in its
        # credentials prompt.
        request.response.setHeader('WWW-Authenticate',
                                   'basic realm="JSONAPI AUTH"', 1)
    # Logged unconditionally, even for authenticated requests.
    logger.info("*** BASIC AUTHENTICATE ***")
    return {}
[ "def", "auth", "(", "context", ",", "request", ")", ":", "if", "ploneapi", ".", "user", ".", "is_anonymous", "(", ")", ":", "request", ".", "response", ".", "setStatus", "(", "401", ")", "request", ".", "response", ".", "setHeader", "(", "'WWW-Authentica...
Basic Authentication
[ "Basic", "Authentication" ]
python
train
pkkid/python-plexapi
plexapi/utils.py
https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/utils.py#L322-L346
def getMyPlexAccount(opts=None):  # pragma: no cover
    """ Helper function tries to get a MyPlex Account instance by checking
        the following locations for a username and password. This is useful
        to create user-friendly command line tools.
        1. command-line options (opts).
        2. environment variables and config.ini
        3. Prompt on the command line.
    """
    # Imported locally — presumably to avoid a circular import at module
    # load time; confirm before moving these to the top of the file.
    from plexapi import CONFIG
    from plexapi.myplex import MyPlexAccount
    # 1. Check command-line options
    if opts and opts.username and opts.password:
        print('Authenticating with Plex.tv as %s..' % opts.username)
        return MyPlexAccount(opts.username, opts.password)
    # 2. Check Plexconfig (environment variables and config.ini)
    config_username = CONFIG.get('auth.myplex_username')
    config_password = CONFIG.get('auth.myplex_password')
    if config_username and config_password:
        print('Authenticating with Plex.tv as %s..' % config_username)
        return MyPlexAccount(config_username, config_password)
    # 3. Prompt for username and password on the command line
    username = input('What is your plex.tv username: ')
    password = getpass('What is your plex.tv password: ')
    print('Authenticating with Plex.tv as %s..' % username)
    return MyPlexAccount(username, password)
[ "def", "getMyPlexAccount", "(", "opts", "=", "None", ")", ":", "# pragma: no cover", "from", "plexapi", "import", "CONFIG", "from", "plexapi", ".", "myplex", "import", "MyPlexAccount", "# 1. Check command-line options", "if", "opts", "and", "opts", ".", "username", ...
Helper function tries to get a MyPlex Account instance by checking the the following locations for a username and password. This is useful to create user-friendly command line tools. 1. command-line options (opts). 2. environment variables and config.ini 3. Prompt on the command line.
[ "Helper", "function", "tries", "to", "get", "a", "MyPlex", "Account", "instance", "by", "checking", "the", "the", "following", "locations", "for", "a", "username", "and", "password", ".", "This", "is", "useful", "to", "create", "user", "-", "friendly", "comm...
python
train
pywavefront/PyWavefront
pywavefront/obj.py
https://github.com/pywavefront/PyWavefront/blob/39ee5186cb37750d4654d19ebe43f723ecd01e2f/pywavefront/obj.py#L83-L93
def parse(self):
    """Trigger cache load or call superclass parse()

    Attempts the binary cache first when caching is enabled; falls back
    to the full text parse otherwise, and logs total load time.
    """
    start = time.time()
    if self.cache:
        self.load_cache()

    # Presumably load_cache() sets cache_loaded on success — TODO confirm;
    # when it stays false we do the full superclass parse.
    if not self.cache_loaded:
        super(ObjParser, self).parse()

    logger.info("%s: Load time: %s", self.file_name, time.time() - start)
[ "def", "parse", "(", "self", ")", ":", "start", "=", "time", ".", "time", "(", ")", "if", "self", ".", "cache", ":", "self", ".", "load_cache", "(", ")", "if", "not", "self", ".", "cache_loaded", ":", "super", "(", "ObjParser", ",", "self", ")", ...
Trigger cache load or call superclass parse()
[ "Trigger", "cache", "load", "or", "call", "superclass", "parse", "()" ]
python
train
uw-it-aca/uw-restclients-canvas
uw_canvas/roles.py
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/roles.py#L21-L27
def get_roles_by_account_sis_id(self, account_sis_id, params={}):
    """
    List the roles for an account, for the passed account SIS ID.

    Thin wrapper that converts the SIS ID into the canonical account
    identifier and delegates to get_roles_in_account.
    """
    # NOTE(review): params={} is a mutable default argument; safe only while
    # callees never mutate it — consider params=None in a future refactor.
    return self.get_roles_in_account(self._sis_id(account_sis_id,
                                                  sis_field="account"),
                                     params)
[ "def", "get_roles_by_account_sis_id", "(", "self", ",", "account_sis_id", ",", "params", "=", "{", "}", ")", ":", "return", "self", ".", "get_roles_in_account", "(", "self", ".", "_sis_id", "(", "account_sis_id", ",", "sis_field", "=", "\"account\"", ")", ",",...
List the roles for an account, for the passed account SIS ID.
[ "List", "the", "roles", "for", "an", "account", "for", "the", "passed", "account", "SIS", "ID", "." ]
python
test
aio-libs/aioredis
aioredis/pool.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/pool.py#L254-L260
async def _wait_execute(self, address, command, args, kw):
    """Acquire connection and execute command.

    The connection is always released back to the pool, even when the
    command raises.  ``address`` is accepted but unused in this
    implementation.
    """
    conn = await self.acquire(command, args)
    try:
        return (await conn.execute(command, *args, **kw))
    finally:
        # Must run whether execute succeeded or raised, or the pool leaks
        # the connection.
        self.release(conn)
[ "async", "def", "_wait_execute", "(", "self", ",", "address", ",", "command", ",", "args", ",", "kw", ")", ":", "conn", "=", "await", "self", ".", "acquire", "(", "command", ",", "args", ")", "try", ":", "return", "(", "await", "conn", ".", "execute"...
Acquire connection and execute command.
[ "Acquire", "connection", "and", "execute", "command", "." ]
python
train
yyuu/botornado
boto/rds/__init__.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/rds/__init__.py#L33-L54
def regions():
    """Get all available regions for the RDS service.

    :rtype: list
    :return: A list of :class:`boto.rds.regioninfo.RDSRegionInfo`
    """
    # Region name -> RDS endpoint host, in the order the service
    # historically listed them.
    endpoints = (
        ('us-east-1', 'rds.us-east-1.amazonaws.com'),
        ('eu-west-1', 'rds.eu-west-1.amazonaws.com'),
        ('us-west-1', 'rds.us-west-1.amazonaws.com'),
        ('us-west-2', 'rds.us-west-2.amazonaws.com'),
        ('sa-east-1', 'rds.sa-east-1.amazonaws.com'),
        ('ap-northeast-1', 'rds.ap-northeast-1.amazonaws.com'),
        ('ap-southeast-1', 'rds.ap-southeast-1.amazonaws.com'),
    )
    return [RDSRegionInfo(name=region, endpoint=host)
            for region, host in endpoints]
[ "def", "regions", "(", ")", ":", "return", "[", "RDSRegionInfo", "(", "name", "=", "'us-east-1'", ",", "endpoint", "=", "'rds.us-east-1.amazonaws.com'", ")", ",", "RDSRegionInfo", "(", "name", "=", "'eu-west-1'", ",", "endpoint", "=", "'rds.eu-west-1.amazonaws.com...
Get all available regions for the RDS service. :rtype: list :return: A list of :class:`boto.rds.regioninfo.RDSRegionInfo`
[ "Get", "all", "available", "regions", "for", "the", "RDS", "service", "." ]
python
train
radical-cybertools/radical.entk
src/radical/entk/pipeline/pipeline.py
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/pipeline/pipeline.py#L330-L346
def _validate_entities(self, stages):
    """
    Purpose: Validate that the argument is a Stage or a list of Stage
             objects, normalizing a single Stage to a one-element list.

    :argument: a Stage instance or list of Stage objects
    :return: the validated list of Stage objects
    """
    # Empty/None input is rejected up front (project TypeError takes
    # expected_type/actual_type keyword arguments).
    if not stages:
        raise TypeError(expected_type=Stage, actual_type=type(stages))

    # Normalize a bare Stage into a one-element list.
    stage_list = stages if isinstance(stages, list) else [stages]

    for stage in stage_list:
        if not isinstance(stage, Stage):
            raise TypeError(expected_type=Stage, actual_type=type(stage))

    return stage_list
[ "def", "_validate_entities", "(", "self", ",", "stages", ")", ":", "if", "not", "stages", ":", "raise", "TypeError", "(", "expected_type", "=", "Stage", ",", "actual_type", "=", "type", "(", "stages", ")", ")", "if", "not", "isinstance", "(", "stages", "...
Purpose: Validate whether the argument 'stages' is of list of Stage objects :argument: list of Stage objects
[ "Purpose", ":", "Validate", "whether", "the", "argument", "stages", "is", "of", "list", "of", "Stage", "objects" ]
python
train
kyrus/python-junit-xml
junit_xml/__init__.py
https://github.com/kyrus/python-junit-xml/blob/9bb2675bf0058742da04285dcdcf8781eee03db0/junit_xml/__init__.py#L57-L73
def decode(var, encoding):
    """
    Return *var* as unicode text.

    On Python 3 everything goes straight through ``str``; on Python 2,
    byte strings are decoded with *encoding* when one is given, and
    anything else is coerced with ``unicode``.
    """
    if not PY2:
        return str(var)
    # --- Python 2 paths below ---
    if isinstance(var, unicode):
        return var
    if isinstance(var, str) and encoding:
        return var.decode(encoding)
    # str without an encoding, or any other object: coerce.
    return unicode(var)
[ "def", "decode", "(", "var", ",", "encoding", ")", ":", "if", "PY2", ":", "if", "isinstance", "(", "var", ",", "unicode", ")", ":", "ret", "=", "var", "elif", "isinstance", "(", "var", ",", "str", ")", ":", "if", "encoding", ":", "ret", "=", "var...
If not already unicode, decode it.
[ "If", "not", "already", "unicode", "decode", "it", "." ]
python
train
lobocv/crashreporter
crashreporter/crashreporter.py
https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/crashreporter.py#L471-L485
def _watcher_thread(self):
    """
    Periodically attempt to upload the crash reports. If any upload method
    is successful, delete the saved reports.

    Loops until the watcher is stopped externally (_watcher_running
    cleared) or no offline reports remain.
    """
    while 1:
        # Sleep first so a freshly started watcher doesn't immediately
        # hit the upload endpoints.
        time.sleep(self.check_interval)
        if not self._watcher_running:
            break
        self.logger.info('CrashReporter: Attempting to send offline reports.')
        self.submit_offline_reports()
        remaining_reports = len(self.get_offline_reports())
        if remaining_reports == 0:
            break
    # Clear the handle to signal the watcher has exited.
    self._watcher = None
    self.logger.info('CrashReporter: Watcher stopped.')
[ "def", "_watcher_thread", "(", "self", ")", ":", "while", "1", ":", "time", ".", "sleep", "(", "self", ".", "check_interval", ")", "if", "not", "self", ".", "_watcher_running", ":", "break", "self", ".", "logger", ".", "info", "(", "'CrashReporter: Attempt...
Periodically attempt to upload the crash reports. If any upload method is successful, delete the saved reports.
[ "Periodically", "attempt", "to", "upload", "the", "crash", "reports", ".", "If", "any", "upload", "method", "is", "successful", "delete", "the", "saved", "reports", "." ]
python
train
lepture/flask-oauthlib
flask_oauthlib/provider/oauth1.py
https://github.com/lepture/flask-oauthlib/blob/9e6f152a5bb360e7496210da21561c3e6d41b0e1/flask_oauthlib/provider/oauth1.py#L879-L899
def save_verifier(self, token, verifier, request):
    """Save verifier to database. A verifiersetter is required.

    It would be better to combine request token and verifier together::

        def verifiersetter(token, verifier, request):
            tok = Grant.query.filter_by(token=token).first()
            tok.verifier = verifier['oauth_verifier']
            tok.user = get_current_user()
            return tok.save()

    .. admonition:: Note:

       A user is required on verifier, remember to attach current
       user to verifier.
    """
    log.debug('Save verifier %r for %r', verifier, token)
    # Persistence is entirely delegated to the user-supplied callback.
    self._verifiersetter(
        token=token, verifier=verifier, request=request
    )
[ "def", "save_verifier", "(", "self", ",", "token", ",", "verifier", ",", "request", ")", ":", "log", ".", "debug", "(", "'Save verifier %r for %r'", ",", "verifier", ",", "token", ")", "self", ".", "_verifiersetter", "(", "token", "=", "token", ",", "verif...
Save verifier to database. A verifiersetter is required. It would be better to combine request token and verifier together:: def verifiersetter(token, verifier, request): tok = Grant.query.filter_by(token=token).first() tok.verifier = verifier['oauth_verifier'] tok.user = get_current_user() return tok.save() .. admonition:: Note: A user is required on verifier, remember to attach current user to verifier.
[ "Save", "verifier", "to", "database", "." ]
python
test
openstack/python-scciclient
scciclient/irmc/snmp.py
https://github.com/openstack/python-scciclient/blob/4585ce2f76853b9773fb190ca0cfff0aa04a7cf8/scciclient/irmc/snmp.py#L105-L118
def get_server_model(snmp_client):
    """Get server model of the node.

    :param snmp_client: an SNMP client object.
    :raises: SNMPServerModelFailure if the SNMP GET fails.
    :returns: a string of server model.
    """
    try:
        server_model = snmp_client.get(SERVER_MODEL_OID)
    except SNMPFailure as e:
        # Re-raise with an operation-specific message so callers can tell
        # exactly which SNMP query failed.
        raise SNMPServerModelFailure(
            SNMP_FAILURE_MSG % ("GET SERVER MODEL", e))
    return six.text_type(server_model)
[ "def", "get_server_model", "(", "snmp_client", ")", ":", "try", ":", "server_model", "=", "snmp_client", ".", "get", "(", "SERVER_MODEL_OID", ")", "return", "six", ".", "text_type", "(", "server_model", ")", "except", "SNMPFailure", "as", "e", ":", "raise", ...
Get server model of the node. :param snmp_client: an SNMP client object. :raises: SNMPFailure if SNMP operation failed. :returns: a string of server model.
[ "Get", "server", "model", "of", "the", "node", "." ]
python
train
saltstack/salt
salt/modules/zookeeper.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zookeeper.py#L190-L231
def ensure_path(path, acls=None, profile=None, hosts=None, scheme=None,
                username=None, password=None, default_acl=None):
    '''
    Ensure Znode path exists

    path
        Parent path to create

    acls
        list of acls dictionaries to be assigned (Default: None)

    profile
        Configured Zookeeper profile to authenticate with (Default: None)

    hosts
        Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)

    scheme
        Scheme to authenticate with (Default: 'digest')

    username
        Username to authenticate (Default: None)

    password
        Password to authenticate (Default: None)

    default_acl
        Default acls to assign if a node is created in this connection (Default: None)

    CLI Example:

    .. code-block:: bash

        salt minion1 zookeeper.ensure_path /test/name profile=prod
    '''
    if acls is None:
        acls = []
    # Convert each ACL description dict into a digest ACL object for the
    # underlying client library.
    acls = [make_digest_acl(**acl) for acl in acls]

    conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme,
                        username=username, password=password,
                        default_acl=default_acl)
    return conn.ensure_path(path, acls)
[ "def", "ensure_path", "(", "path", ",", "acls", "=", "None", ",", "profile", "=", "None", ",", "hosts", "=", "None", ",", "scheme", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "default_acl", "=", "None", ")", ":", ...
Ensure Znode path exists path Parent path to create acls list of acls dictionaries to be assigned (Default: None) profile Configured Zookeeper profile to authenticate with (Default: None) hosts Lists of Zookeeper Hosts (Default: '127.0.0.1:2181) scheme Scheme to authenticate with (Default: 'digest') username Username to authenticate (Default: None) password Password to authenticate (Default: None) default_acl Default acls to assign if a node is created in this connection (Default: None) CLI Example: .. code-block:: bash salt minion1 zookeeper.ensure_path /test/name profile=prod
[ "Ensure", "Znode", "path", "exists" ]
python
train
michael-lazar/rtv
rtv/content.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/content.py#L479-L507
def get(self, index, n_cols=70):
    """
    Grab the `i`th submission, with the title field formatted to fit
    inside of a window of width `n`

    ``index == -1`` refers to the submission itself; ``index >= 0``
    indexes into the loaded comments.  The returned dict is the cached
    item data, annotated in place with layout fields (``split_*``,
    ``n_rows``, ``h_offset``).
    """
    if index < -1:
        raise IndexError
    elif index == -1:
        data = self._submission_data
        # Wrap title and self-text to the window width (minus borders).
        data['split_title'] = self.wrap_text(data['title'], width=n_cols-2)
        data['split_text'] = self.wrap_text(data['text'], width=n_cols-2)
        # +5 presumably accounts for surrounding header rows — TODO confirm.
        data['n_rows'] = len(data['split_title'] + data['split_text']) + 5
        data['h_offset'] = 0
    else:
        data = self._comment_data[index]
        # Clamp indentation so deeply nested threads stay readable.
        indent_level = min(data['level'], self.max_indent_level)
        data['h_offset'] = indent_level * self.indent_size

        if data['type'] == 'Comment':
            width = min(n_cols - data['h_offset'], self._max_comment_cols)
            data['split_body'] = self.wrap_text(data['body'], width=width)
            data['n_rows'] = len(data['split_body']) + 1
        else:
            # Non-Comment items occupy a single row.
            data['n_rows'] = 1

    return data
[ "def", "get", "(", "self", ",", "index", ",", "n_cols", "=", "70", ")", ":", "if", "index", "<", "-", "1", ":", "raise", "IndexError", "elif", "index", "==", "-", "1", ":", "data", "=", "self", ".", "_submission_data", "data", "[", "'split_title'", ...
Grab the `i`th submission, with the title field formatted to fit inside of a window of width `n`
[ "Grab", "the", "i", "th", "submission", "with", "the", "title", "field", "formatted", "to", "fit", "inside", "of", "a", "window", "of", "width", "n" ]
python
train
networks-lab/metaknowledge
metaknowledge/graphHelpers.py
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/graphHelpers.py#L691-L732
def mergeGraphs(targetGraph, addedGraph, incrementedNodeVal = 'count', incrementedEdgeVal = 'weight'):
    """A quick way of merging graphs, this is meant to be quick and is only intended for graphs generated by metaknowledge. This does not check anything and as such may cause unexpected results if the source and target were not generated by the same method.

    **mergeGraphs**() will **modify** _targetGraph_ in place by adding the nodes and edges found in the second, _addedGraph_. If a node or edge exists _targetGraph_ is given precedence, but the edge and node attributes given by _incrementedNodeVal_ and incrementedEdgeVal are added instead of being overwritten.

    # Parameters

    _targetGraph_ : `networkx Graph`

    > the graph to be modified, it has precedence.

    _addedGraph_ : `networkx Graph`

    > the graph that is unmodified, it is added and does **not** have precedence.

    _incrementedNodeVal_ : `optional [str]`

    > default `'count'`, the name of the count attribute for the graph's nodes. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.

    _incrementedEdgeVal_ : `optional [str]`

    > default `'weight'`, the name of the weight attribute for the graph's edges. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
    """
    for addedNode, attribs in addedGraph.nodes(data = True):
        if incrementedNodeVal:
            try:
                # Node (and its counter attribute) already present:
                # accumulate instead of overwriting.
                # Uses the nx2 `.nodes` mapping (`.node` was removed in
                # networkx 2.4; this function already relies on nx2-only
                # `.edges[...]` subscripting below).
                targetGraph.nodes[addedNode][incrementedNodeVal] += attribs[incrementedNodeVal]
            except KeyError:
                # Missing node or missing counter attribute: copy it over.
                targetGraph.add_node(addedNode, **attribs)
        else:
            if not targetGraph.has_node(addedNode):
                targetGraph.add_node(addedNode, **attribs)
    for edgeNode1, edgeNode2, attribs in addedGraph.edges(data = True):
        if incrementedEdgeVal:
            try:
                # Edge already present: accumulate its weight attribute.
                targetGraph.edges[edgeNode1, edgeNode2][incrementedEdgeVal] += attribs[incrementedEdgeVal]
            except KeyError:
                targetGraph.add_edge(edgeNode1, edgeNode2, **attribs)
        else:
            # BUG FIX: was `targetGraph.Graph.has_edge(...)`, which raises
            # AttributeError on a networkx Graph — call has_edge directly,
            # mirroring the has_node branch above.
            if not targetGraph.has_edge(edgeNode1, edgeNode2):
                targetGraph.add_edge(edgeNode1, edgeNode2, **attribs)
[ "def", "mergeGraphs", "(", "targetGraph", ",", "addedGraph", ",", "incrementedNodeVal", "=", "'count'", ",", "incrementedEdgeVal", "=", "'weight'", ")", ":", "for", "addedNode", ",", "attribs", "in", "addedGraph", ".", "nodes", "(", "data", "=", "True", ")", ...
A quick way of merging graphs, this is meant to be quick and is only intended for graphs generated by metaknowledge. This does not check anything and as such may cause unexpected results if the source and target were not generated by the same method. **mergeGraphs**() will **modify** _targetGraph_ in place by adding the nodes and edges found in the second, _addedGraph_. If a node or edge exists _targetGraph_ is given precedence, but the edge and node attributes given by _incrementedNodeVal_ and incrementedEdgeVal are added instead of being overwritten. # Parameters _targetGraph_ : `networkx Graph` > the graph to be modified, it has precedence. _addedGraph_ : `networkx Graph` > the graph that is unmodified, it is added and does **not** have precedence. _incrementedNodeVal_ : `optional [str]` > default `'count'`, the name of the count attribute for the graph's nodes. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value. _incrementedEdgeVal_ : `optional [str]` > default `'weight'`, the name of the weight attribute for the graph's edges. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
[ "A", "quick", "way", "of", "merging", "graphs", "this", "is", "meant", "to", "be", "quick", "and", "is", "only", "intended", "for", "graphs", "generated", "by", "metaknowledge", ".", "This", "does", "not", "check", "anything", "and", "as", "such", "may", ...
python
train
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/check_manifest.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/check_manifest.py#L478-L490
def read_manifest(): """Read existing configuration from MANIFEST.in. We use that to ignore anything the MANIFEST.in ignores. """ # XXX modifies global state, which is kind of evil if not os.path.isfile('MANIFEST.in'): return with open('MANIFEST.in') as manifest: contents = manifest.read() ignore, ignore_regexps = _get_ignore_from_manifest(contents) IGNORE.extend(ignore) IGNORE_REGEXPS.extend(ignore_regexps)
[ "def", "read_manifest", "(", ")", ":", "# XXX modifies global state, which is kind of evil", "if", "not", "os", ".", "path", ".", "isfile", "(", "'MANIFEST.in'", ")", ":", "return", "with", "open", "(", "'MANIFEST.in'", ")", "as", "manifest", ":", "contents", "=...
Read existing configuration from MANIFEST.in. We use that to ignore anything the MANIFEST.in ignores.
[ "Read", "existing", "configuration", "from", "MANIFEST", ".", "in", "." ]
python
test
michael-lazar/rtv
rtv/packages/praw/__init__.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L1045-L1052
def get_redditor(self, user_name, *args, **kwargs):
    """Return a Redditor instance for the user_name specified.

    The additional parameters are passed directly into the
    :class:`.Redditor` constructor.

    """
    # Thin factory: binds this reddit session (self) to the new Redditor.
    return objects.Redditor(self, user_name, *args, **kwargs)
[ "def", "get_redditor", "(", "self", ",", "user_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "objects", ".", "Redditor", "(", "self", ",", "user_name", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Return a Redditor instance for the user_name specified. The additional parameters are passed directly into the :class:`.Redditor` constructor.
[ "Return", "a", "Redditor", "instance", "for", "the", "user_name", "specified", "." ]
python
train
google/grr
grr/server/grr_response_server/databases/mysql_cronjobs.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_cronjobs.py#L259-L263
def DeleteOldCronJobRuns(self, cutoff_timestamp, cursor=None):
    """Deletes cron job runs that are older than the given timestamp.

    Args:
      cutoff_timestamp: runs written strictly before this time are removed.
      cursor: MySQL cursor — presumably injected by a connection/pool
        decorator rather than passed by callers; confirm before relying
        on the default.
    """
    # Compare against write_time, converting the RDF timestamp into a
    # UNIX epoch value for MySQL's FROM_UNIXTIME.
    query = "DELETE FROM cron_job_runs WHERE write_time < FROM_UNIXTIME(%s)"
    cursor.execute(query,
                   [mysql_utils.RDFDatetimeToTimestamp(cutoff_timestamp)])
[ "def", "DeleteOldCronJobRuns", "(", "self", ",", "cutoff_timestamp", ",", "cursor", "=", "None", ")", ":", "query", "=", "\"DELETE FROM cron_job_runs WHERE write_time < FROM_UNIXTIME(%s)\"", "cursor", ".", "execute", "(", "query", ",", "[", "mysql_utils", ".", "RDFDat...
Deletes cron job runs that are older then the given timestamp.
[ "Deletes", "cron", "job", "runs", "that", "are", "older", "then", "the", "given", "timestamp", "." ]
python
train
apache/airflow
airflow/hooks/presto_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/presto_hook.py#L67-L78
def _get_pretty_exception_message(e): """ Parses some DatabaseError to provide a better error message """ if (hasattr(e, 'message') and 'errorName' in e.message and 'message' in e.message): return ('{name}: {message}'.format( name=e.message['errorName'], message=e.message['message'])) else: return str(e)
[ "def", "_get_pretty_exception_message", "(", "e", ")", ":", "if", "(", "hasattr", "(", "e", ",", "'message'", ")", "and", "'errorName'", "in", "e", ".", "message", "and", "'message'", "in", "e", ".", "message", ")", ":", "return", "(", "'{name}: {message}'...
Parses some DatabaseError to provide a better error message
[ "Parses", "some", "DatabaseError", "to", "provide", "a", "better", "error", "message" ]
python
test
pyQode/pyqode.core
pyqode/core/api/code_edit.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/api/code_edit.py#L945-L978
def keyPressEvent(self, event):
    """
    Overrides the keyPressEvent to emit the key_pressed signal.

    Also takes care of indenting and handling smarter home key.

    :param event: QKeyEvent
    """
    if self.isReadOnly():
        return
    initial_state = event.isAccepted()
    event.ignore()
    # Give key_pressed listeners a first chance to consume the event.
    self.key_pressed.emit(event)
    state = event.isAccepted()
    if not event.isAccepted():
        # Built-in handling: Tab indents, Backtab (Shift+Tab) un-indents,
        # and Home without Ctrl goes through the smarter-home handler.
        if event.key() == QtCore.Qt.Key_Tab and event.modifiers() == \
                QtCore.Qt.NoModifier:
            self.indent()
            event.accept()
        elif event.key() == QtCore.Qt.Key_Backtab and \
                event.modifiers() == QtCore.Qt.NoModifier:
            self.un_indent()
            event.accept()
        elif event.key() == QtCore.Qt.Key_Home and \
                int(event.modifiers()) & QtCore.Qt.ControlModifier == 0:
            self._do_home_key(
                event, int(event.modifiers()) & QtCore.Qt.ShiftModifier)
    if not event.isAccepted():
        # Nothing handled it: restore the original accepted flag and let
        # the base class process the key normally.
        event.setAccepted(initial_state)
        super(CodeEdit, self).keyPressEvent(event)
    new_state = event.isAccepted()
    # Emit post_key_pressed with the accepted state as it was right after
    # key_pressed, then restore the final state once listeners have run.
    event.setAccepted(state)
    self.post_key_pressed.emit(event)
    event.setAccepted(new_state)
[ "def", "keyPressEvent", "(", "self", ",", "event", ")", ":", "if", "self", ".", "isReadOnly", "(", ")", ":", "return", "initial_state", "=", "event", ".", "isAccepted", "(", ")", "event", ".", "ignore", "(", ")", "self", ".", "key_pressed", ".", "emit"...
Overrides the keyPressEvent to emit the key_pressed signal. Also takes care of indenting and handling smarter home key. :param event: QKeyEvent
[ "Overrides", "the", "keyPressEvent", "to", "emit", "the", "key_pressed", "signal", "." ]
python
train
quodlibet/mutagen
mutagen/_senf/_print.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/_senf/_print.py#L282-L314
def _encode_codepage(codepage, text):
    """
    Args:
        codepage (int)
        text (text)
    Returns:
        `bytes`

    Encode text using the given code page. Will not fail if a char
    can't be encoded using that codepage.
    """
    assert isinstance(text, text_type)

    if not text:
        return b""

    # WideCharToMultiByte counts input in WCHARs (UTF-16 code units), so
    # measure via a utf-16-le encode that preserves surrogates.
    size = (len(text.encode("utf-16-le", _surrogatepass)) //
            ctypes.sizeof(winapi.WCHAR))

    # First call with a NULL buffer: get the required buffer size
    length = winapi.WideCharToMultiByte(
        codepage, 0, text, size, None, 0, None, None)
    if length == 0:
        raise ctypes.WinError()

    # Second call performs the actual conversion into the buffer
    buf = ctypes.create_string_buffer(length)
    length = winapi.WideCharToMultiByte(
        codepage, 0, text, size, buf, length, None, None)
    if length == 0:
        raise ctypes.WinError()
    return buf[:length]
[ "def", "_encode_codepage", "(", "codepage", ",", "text", ")", ":", "assert", "isinstance", "(", "text", ",", "text_type", ")", "if", "not", "text", ":", "return", "b\"\"", "size", "=", "(", "len", "(", "text", ".", "encode", "(", "\"utf-16-le\"", ",", ...
Args: codepage (int) text (text) Returns: `bytes` Encode text using the given code page. Will not fail if a char can't be encoded using that codepage.
[ "Args", ":", "codepage", "(", "int", ")", "text", "(", "text", ")", "Returns", ":", "bytes" ]
python
train
PMEAL/porespy
porespy/filters/__funcs__.py
https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/filters/__funcs__.py#L838-L862
def region_size(im): r""" Replace each voxel with size of region to which it belongs Parameters ---------- im : ND-array Either a boolean image wtih ``True`` indicating the features of interest, in which case ``scipy.ndimage.label`` will be applied to find regions, or a greyscale image with integer values indicating regions. Returns ------- image : ND-array A copy of ``im`` with each voxel value indicating the size of the region to which it belongs. This is particularly useful for finding chord sizes on the image produced by ``apply_chords``. """ if im.dtype == bool: im = spim.label(im)[0] counts = sp.bincount(im.flatten()) counts[0] = 0 chords = counts[im] return chords
[ "def", "region_size", "(", "im", ")", ":", "if", "im", ".", "dtype", "==", "bool", ":", "im", "=", "spim", ".", "label", "(", "im", ")", "[", "0", "]", "counts", "=", "sp", ".", "bincount", "(", "im", ".", "flatten", "(", ")", ")", "counts", ...
r""" Replace each voxel with size of region to which it belongs Parameters ---------- im : ND-array Either a boolean image wtih ``True`` indicating the features of interest, in which case ``scipy.ndimage.label`` will be applied to find regions, or a greyscale image with integer values indicating regions. Returns ------- image : ND-array A copy of ``im`` with each voxel value indicating the size of the region to which it belongs. This is particularly useful for finding chord sizes on the image produced by ``apply_chords``.
[ "r", "Replace", "each", "voxel", "with", "size", "of", "region", "to", "which", "it", "belongs" ]
python
train
droope/droopescan
dscan/plugins/silverstripe.py
https://github.com/droope/droopescan/blob/424c48a0f9d12b4536dbef5a786f0fbd4ce9519a/dscan/plugins/silverstripe.py#L119-L161
def _convert_to_folder(self, packages): """ Silverstripe's page contains a list of composer packages. This function converts those to folder names. These may be different due to installer-name. Implemented exponential backoff in order to prevent packager from being overly sensitive about the number of requests I was making. @see: https://github.com/composer/installers#custom-install-names @see: https://github.com/richardsjoqvist/silverstripe-localdate/issues/7 """ url = 'http://packagist.org/p/%s.json' with ThreadPoolExecutor(max_workers=12) as executor: futures = [] for package in packages: future = executor.submit(self._get, url, package) futures.append({ 'future': future, 'package': package }) folders = [] for i, future in enumerate(futures, start=1): r = future['future'].result() package = future['package'] if not 'installer-name' in r.text: folder_name = package.split('/')[1] else: splat = list(filter(None, re.split(r'[^a-zA-Z0-9-_.,]', r.text))) folder_name = splat[splat.index('installer-name') + 1] if not folder_name in folders: folders.append(folder_name) else: print("Folder %s is duplicated (current %s, previous %s)" % (folder_name, package, folders.index(folder_name))) if i % 25 == 0: print("Done %s." % i) return folders
[ "def", "_convert_to_folder", "(", "self", ",", "packages", ")", ":", "url", "=", "'http://packagist.org/p/%s.json'", "with", "ThreadPoolExecutor", "(", "max_workers", "=", "12", ")", "as", "executor", ":", "futures", "=", "[", "]", "for", "package", "in", "pac...
Silverstripe's page contains a list of composer packages. This function converts those to folder names. These may be different due to installer-name. Implemented exponential backoff in order to prevent packager from being overly sensitive about the number of requests I was making. @see: https://github.com/composer/installers#custom-install-names @see: https://github.com/richardsjoqvist/silverstripe-localdate/issues/7
[ "Silverstripe", "s", "page", "contains", "a", "list", "of", "composer", "packages", ".", "This", "function", "converts", "those", "to", "folder", "names", ".", "These", "may", "be", "different", "due", "to", "installer", "-", "name", "." ]
python
train
moonlitesolutions/SolrClient
SolrClient/solrresp.py
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L331-L347
def json_facet(self, field=None): ''' EXPERIMENTAL Tried to kick back the json.fact output. ''' facets = self.data['facets'] if field is None: temp_fields = [x for x in facets.keys() if x != 'count'] if len(temp_fields) != 1: raise ValueError("field argument not specified and it looks like there is more than one field in facets. Specify the field to get json.facet from. ") field = temp_fields[0] if field not in self.data['facets']: raise ValueError("Facet Field {} Not found in response, available fields are {}".format( field, self.data['facets'].keys() )) return self.data['facets'][field]
[ "def", "json_facet", "(", "self", ",", "field", "=", "None", ")", ":", "facets", "=", "self", ".", "data", "[", "'facets'", "]", "if", "field", "is", "None", ":", "temp_fields", "=", "[", "x", "for", "x", "in", "facets", ".", "keys", "(", ")", "i...
EXPERIMENTAL Tried to kick back the json.fact output.
[ "EXPERIMENTAL" ]
python
train
widdowquinn/pyani
pyani/pyani_config.py
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_config.py#L107-L130
def params_mpl(df): """Returns dict of matplotlib parameters, dependent on dataframe.""" return {'ANIb_alignment_lengths': ('afmhot', df.values.min(), df.values.max()), 'ANIb_percentage_identity': ('spbnd_BuRd', 0, 1), 'ANIb_alignment_coverage': ('BuRd', 0, 1), 'ANIb_hadamard': ('hadamard_BuRd', 0, 1), 'ANIb_similarity_errors': ('afmhot', df.values.min(), df.values.max()), 'ANIm_alignment_lengths': ('afmhot', df.values.min(), df.values.max()), 'ANIm_percentage_identity': ('spbnd_BuRd', 0, 1), 'ANIm_alignment_coverage': ('BuRd', 0, 1), 'ANIm_hadamard': ('hadamard_BuRd', 0, 1), 'ANIm_similarity_errors': ('afmhot', df.values.min(), df.values.max()), 'TETRA_correlations': ('spbnd_BuRd', 0, 1), 'ANIblastall_alignment_lengths': ('afmhot', df.values.min(), df.values.max()), 'ANIblastall_percentage_identity': ('spbnd_BuRd', 0, 1), 'ANIblastall_alignment_coverage': ('BuRd', 0, 1), 'ANIblastall_hadamard': ('hadamard_BuRd', 0, 1), 'ANIblastall_similarity_errors': ('afmhot', df.values.min(), df.values.max())}
[ "def", "params_mpl", "(", "df", ")", ":", "return", "{", "'ANIb_alignment_lengths'", ":", "(", "'afmhot'", ",", "df", ".", "values", ".", "min", "(", ")", ",", "df", ".", "values", ".", "max", "(", ")", ")", ",", "'ANIb_percentage_identity'", ":", "(",...
Returns dict of matplotlib parameters, dependent on dataframe.
[ "Returns", "dict", "of", "matplotlib", "parameters", "dependent", "on", "dataframe", "." ]
python
train
wummel/dosage
dosagelib/plugins/p.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/plugins/p.py#L93-L99
def starter(cls): """Get bounced start URL.""" data = cls.getPage(cls.url) url1 = cls.fetchUrl(cls.url, data, cls.prevSearch) data = cls.getPage(url1) url2 = cls.fetchUrl(url1, data, cls.nextSearch) return cls.prevUrlModifier(url2)
[ "def", "starter", "(", "cls", ")", ":", "data", "=", "cls", ".", "getPage", "(", "cls", ".", "url", ")", "url1", "=", "cls", ".", "fetchUrl", "(", "cls", ".", "url", ",", "data", ",", "cls", ".", "prevSearch", ")", "data", "=", "cls", ".", "get...
Get bounced start URL.
[ "Get", "bounced", "start", "URL", "." ]
python
train
blockstack/blockstack-core
blockstack/lib/atlas.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L532-L537
def atlasdb_format_query( query, values ): """ Turn a query into a string for printing. Useful for debugging. """ return "".join( ["%s %s" % (frag, "'%s'" % val if type(val) in [str, unicode] else val) for (frag, val) in zip(query.split("?"), values + ("",))] )
[ "def", "atlasdb_format_query", "(", "query", ",", "values", ")", ":", "return", "\"\"", ".", "join", "(", "[", "\"%s %s\"", "%", "(", "frag", ",", "\"'%s'\"", "%", "val", "if", "type", "(", "val", ")", "in", "[", "str", ",", "unicode", "]", "else", ...
Turn a query into a string for printing. Useful for debugging.
[ "Turn", "a", "query", "into", "a", "string", "for", "printing", ".", "Useful", "for", "debugging", "." ]
python
train
okunishinishi/python-stringcase
stringcase.py
https://github.com/okunishinishi/python-stringcase/blob/700ad111be16b384aadaddcf8199f9390575c7b6/stringcase.py#L86-L100
def pathcase(string): """Convert string into path case. Join punctuation with slash. Args: string: String to convert. Returns: string: Path cased string. """ string = snakecase(string) if not string: return string return re.sub(r"_", "/", string)
[ "def", "pathcase", "(", "string", ")", ":", "string", "=", "snakecase", "(", "string", ")", "if", "not", "string", ":", "return", "string", "return", "re", ".", "sub", "(", "r\"_\"", ",", "\"/\"", ",", "string", ")" ]
Convert string into path case. Join punctuation with slash. Args: string: String to convert. Returns: string: Path cased string.
[ "Convert", "string", "into", "path", "case", ".", "Join", "punctuation", "with", "slash", "." ]
python
valid
harlowja/notifier
notifier/_notifier.py
https://github.com/harlowja/notifier/blob/35bf58e6350b1d3a3e8c4224e9d01178df70d753/notifier/_notifier.py#L168-L191
def is_equivalent(self, callback, details_filter=None): """Check if the callback provided is the same as the internal one. :param callback: callback used for comparison :param details_filter: callback used for comparison :returns: false if not the same callback, otherwise true :rtype: boolean """ cb = self.callback if cb is None and callback is not None: return False if cb is not None and callback is None: return False if cb is not None and callback is not None \ and not reflection.is_same_callback(cb, callback): return False if details_filter is not None: if self._details_filter is None: return False else: return reflection.is_same_callback(self._details_filter, details_filter) else: return self._details_filter is None
[ "def", "is_equivalent", "(", "self", ",", "callback", ",", "details_filter", "=", "None", ")", ":", "cb", "=", "self", ".", "callback", "if", "cb", "is", "None", "and", "callback", "is", "not", "None", ":", "return", "False", "if", "cb", "is", "not", ...
Check if the callback provided is the same as the internal one. :param callback: callback used for comparison :param details_filter: callback used for comparison :returns: false if not the same callback, otherwise true :rtype: boolean
[ "Check", "if", "the", "callback", "provided", "is", "the", "same", "as", "the", "internal", "one", "." ]
python
train
PetrochukM/PyTorch-NLP
torchnlp/utils.py
https://github.com/PetrochukM/PyTorch-NLP/blob/5f7320da5c8d781df072fab3f7e421c6347e5bfa/torchnlp/utils.py#L105-L130
def resplit_datasets(dataset, other_dataset, random_seed=None, split=None): """Deterministic shuffle and split algorithm. Given the same two datasets and the same ``random_seed``, the split happens the same exact way every call. Args: dataset (lib.datasets.Dataset): First dataset. other_dataset (lib.datasets.Dataset): Another dataset. random_seed (int, optional): Seed to control the shuffle of both datasets. split (float, optional): If defined it is the percentage of rows that first dataset gets after split otherwise the original proportions are kept. Returns: :class:`lib.datasets.Dataset`, :class:`lib.datasets.Dataset`: Resplit datasets. """ # Prevent circular dependency from torchnlp.datasets import Dataset concat = dataset.rows + other_dataset.rows shuffle(concat, random_seed=random_seed) if split is None: return Dataset(concat[:len(dataset)]), Dataset(concat[len(dataset):]) else: split = max(min(round(len(concat) * split), len(concat)), 0) return Dataset(concat[:split]), Dataset(concat[split:])
[ "def", "resplit_datasets", "(", "dataset", ",", "other_dataset", ",", "random_seed", "=", "None", ",", "split", "=", "None", ")", ":", "# Prevent circular dependency", "from", "torchnlp", ".", "datasets", "import", "Dataset", "concat", "=", "dataset", ".", "rows...
Deterministic shuffle and split algorithm. Given the same two datasets and the same ``random_seed``, the split happens the same exact way every call. Args: dataset (lib.datasets.Dataset): First dataset. other_dataset (lib.datasets.Dataset): Another dataset. random_seed (int, optional): Seed to control the shuffle of both datasets. split (float, optional): If defined it is the percentage of rows that first dataset gets after split otherwise the original proportions are kept. Returns: :class:`lib.datasets.Dataset`, :class:`lib.datasets.Dataset`: Resplit datasets.
[ "Deterministic", "shuffle", "and", "split", "algorithm", "." ]
python
train
opennode/waldur-core
waldur_core/quotas/views.py
https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/quotas/views.py#L35-L71
def retrieve(self, request, *args, **kwargs): """ To set quota limit issue a **PUT** request against */api/quotas/<quota uuid>** with limit values. Please note that if a quota is a cache of a backend quota (e.g. 'storage' size of an OpenStack tenant), it will be impossible to modify it through */api/quotas/<quota uuid>** endpoint. Example of changing quota limit: .. code-block:: http POST /api/quotas/6ad5f49d6d6c49648573b2b71f44a42b/ HTTP/1.1 Content-Type: application/json Accept: application/json Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com { "limit": 2000.0 } Example of changing quota threshold: .. code-block:: http PUT /api/quotas/6ad5f49d6d6c49648573b2b71f44a42b/ HTTP/1.1 Content-Type: application/json Accept: application/json Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com { "threshold": 100.0 } """ return super(QuotaViewSet, self).retrieve(request, *args, **kwargs)
[ "def", "retrieve", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "QuotaViewSet", ",", "self", ")", ".", "retrieve", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
To set quota limit issue a **PUT** request against */api/quotas/<quota uuid>** with limit values. Please note that if a quota is a cache of a backend quota (e.g. 'storage' size of an OpenStack tenant), it will be impossible to modify it through */api/quotas/<quota uuid>** endpoint. Example of changing quota limit: .. code-block:: http POST /api/quotas/6ad5f49d6d6c49648573b2b71f44a42b/ HTTP/1.1 Content-Type: application/json Accept: application/json Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com { "limit": 2000.0 } Example of changing quota threshold: .. code-block:: http PUT /api/quotas/6ad5f49d6d6c49648573b2b71f44a42b/ HTTP/1.1 Content-Type: application/json Accept: application/json Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com { "threshold": 100.0 }
[ "To", "set", "quota", "limit", "issue", "a", "**", "PUT", "**", "request", "against", "*", "/", "api", "/", "quotas", "/", "<quota", "uuid", ">", "**", "with", "limit", "values", "." ]
python
train
spdx/tools-python
spdx/parsers/rdf.py
https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/parsers/rdf.py#L255-L277
def _handle_license_list(self, lics_set, cls=None): """ Return a license representing a `cls` object (LicenseConjunction or LicenseDisjunction) from a list of license resources or None. """ licenses = [] for _, _, lics_member in self.graph.triples( (lics_set, self.spdx_namespace['member'], None)): try: if (lics_member, RDF.type, self.spdx_namespace['ExtractedLicensingInfo']) in self.graph: lics = self.handle_extracted_license(lics_member) if lics is not None: licenses.append(lics) else: licenses.append(self.handle_lics(lics_member)) except CardinalityError: self.value_error('LICS_LIST_MEMBER', lics_member) break if len(licenses) > 1: return reduce(lambda a, b: cls(a, b), licenses) else: self.value_error('PKG_CONC_LIST', '') return
[ "def", "_handle_license_list", "(", "self", ",", "lics_set", ",", "cls", "=", "None", ")", ":", "licenses", "=", "[", "]", "for", "_", ",", "_", ",", "lics_member", "in", "self", ".", "graph", ".", "triples", "(", "(", "lics_set", ",", "self", ".", ...
Return a license representing a `cls` object (LicenseConjunction or LicenseDisjunction) from a list of license resources or None.
[ "Return", "a", "license", "representing", "a", "cls", "object", "(", "LicenseConjunction", "or", "LicenseDisjunction", ")", "from", "a", "list", "of", "license", "resources", "or", "None", "." ]
python
valid
camsci/meteor-pi
src/pythonModules/meteorpi_server/meteorpi_server/importer_api.py
https://github.com/camsci/meteor-pi/blob/7b01527650bd1b2b76d6f364e8122e25b8812c8d/src/pythonModules/meteorpi_server/meteorpi_server/importer_api.py#L204-L223
def process_request(): """ Retrieve a CameraStatus, Event or FileRecord from the request, based on the supplied type and ID. If the type is 'cached_request' then the ID must be specified in 'cached_request_id' - if this ID is not for an entity in the cache this method will return None and clear the cache (this should only happen under conditions where we've failed to correctly handle caching, such as a server restart or under extreme load, but will result in the server having to re-request a previous value from the exporting party). :return: A dict containing 'entity' - the entity for this request or None if there was an issue causing an unexpected cache miss, and 'entity-id' which will be the UUID of the entity requested. The entity corresponding to this request, or None if we had an issue and there was an unexpected cache miss. """ g.request_dict = safe_load(request.get_data()) entity_type = g.request_dict['type'] entity_id = g.request_dict[entity_type]['id'] ImportRequest.logger.debug("Received request, type={0}, id={1}".format(entity_type, entity_id)) entity = ImportRequest._get_entity(entity_id) ImportRequest.logger.debug("Entity with id={0} was {1}".format(entity_id, entity)) return ImportRequest(entity=entity, entity_id=entity_id)
[ "def", "process_request", "(", ")", ":", "g", ".", "request_dict", "=", "safe_load", "(", "request", ".", "get_data", "(", ")", ")", "entity_type", "=", "g", ".", "request_dict", "[", "'type'", "]", "entity_id", "=", "g", ".", "request_dict", "[", "entit...
Retrieve a CameraStatus, Event or FileRecord from the request, based on the supplied type and ID. If the type is 'cached_request' then the ID must be specified in 'cached_request_id' - if this ID is not for an entity in the cache this method will return None and clear the cache (this should only happen under conditions where we've failed to correctly handle caching, such as a server restart or under extreme load, but will result in the server having to re-request a previous value from the exporting party). :return: A dict containing 'entity' - the entity for this request or None if there was an issue causing an unexpected cache miss, and 'entity-id' which will be the UUID of the entity requested. The entity corresponding to this request, or None if we had an issue and there was an unexpected cache miss.
[ "Retrieve", "a", "CameraStatus", "Event", "or", "FileRecord", "from", "the", "request", "based", "on", "the", "supplied", "type", "and", "ID", ".", "If", "the", "type", "is", "cached_request", "then", "the", "ID", "must", "be", "specified", "in", "cached_req...
python
train
postmanlabs/httpbin
httpbin/filters.py
https://github.com/postmanlabs/httpbin/blob/f8ec666b4d1b654e4ff6aedd356f510dcac09f83/httpbin/filters.py#L96-L115
def brotli(f, *args, **kwargs): """Brotli Flask Response Decorator""" data = f(*args, **kwargs) if isinstance(data, Response): content = data.data else: content = data deflated_data = _brotli.compress(content) if isinstance(data, Response): data.data = deflated_data data.headers['Content-Encoding'] = 'br' data.headers['Content-Length'] = str(len(data.data)) return data return deflated_data
[ "def", "brotli", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "data", "=", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "data", ",", "Response", ")", ":", "content", "=", "data", ".", "data", ...
Brotli Flask Response Decorator
[ "Brotli", "Flask", "Response", "Decorator" ]
python
train
rosenbrockc/fortpy
fortpy/isense/context.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/isense/context.py#L126-L132
def short_symbol(self): """Gets the symbol for the current cursor *excluding* the last character under the cursor.""" if self._short_symbol is None: self._short_symbol = self._symbol_extract(cache.RE_CURSOR, False) return self._short_symbol
[ "def", "short_symbol", "(", "self", ")", ":", "if", "self", ".", "_short_symbol", "is", "None", ":", "self", ".", "_short_symbol", "=", "self", ".", "_symbol_extract", "(", "cache", ".", "RE_CURSOR", ",", "False", ")", "return", "self", ".", "_short_symbol...
Gets the symbol for the current cursor *excluding* the last character under the cursor.
[ "Gets", "the", "symbol", "for", "the", "current", "cursor", "*", "excluding", "*", "the", "last", "character", "under", "the", "cursor", "." ]
python
train
kivy/python-for-android
pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/filters.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/filters.py#L200-L219
def do_default(value, default_value=u'', boolean=False): """If the value is undefined it will return the passed default value, otherwise the value of the variable: .. sourcecode:: jinja {{ my_variable|default('my_variable is not defined') }} This will output the value of ``my_variable`` if the variable was defined, otherwise ``'my_variable is not defined'``. If you want to use default with variables that evaluate to false you have to set the second parameter to `true`: .. sourcecode:: jinja {{ ''|default('the string was empty', true) }} """ if (boolean and not value) or isinstance(value, Undefined): return default_value return value
[ "def", "do_default", "(", "value", ",", "default_value", "=", "u''", ",", "boolean", "=", "False", ")", ":", "if", "(", "boolean", "and", "not", "value", ")", "or", "isinstance", "(", "value", ",", "Undefined", ")", ":", "return", "default_value", "retur...
If the value is undefined it will return the passed default value, otherwise the value of the variable: .. sourcecode:: jinja {{ my_variable|default('my_variable is not defined') }} This will output the value of ``my_variable`` if the variable was defined, otherwise ``'my_variable is not defined'``. If you want to use default with variables that evaluate to false you have to set the second parameter to `true`: .. sourcecode:: jinja {{ ''|default('the string was empty', true) }}
[ "If", "the", "value", "is", "undefined", "it", "will", "return", "the", "passed", "default", "value", "otherwise", "the", "value", "of", "the", "variable", ":" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/formatters.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/formatters.py#L274-L298
def for_type_by_name(self, type_module, type_name, func): """Add a format function for a type specified by the full dotted module and name of the type, rather than the type of the object. Parameters ---------- type_module : str The full dotted name of the module the type is defined in, like ``numpy``. type_name : str The name of the type (the class name), like ``dtype`` func : callable The callable that will be called to compute the format data. The call signature of this function is simple, it must take the object to be formatted and return the raw data for the given format. Subclasses may use a different call signature for the `func` argument. """ key = (type_module, type_name) oldfunc = self.deferred_printers.get(key, None) if func is not None: # To support easy restoration of old printers, we need to ignore # Nones. self.deferred_printers[key] = func return oldfunc
[ "def", "for_type_by_name", "(", "self", ",", "type_module", ",", "type_name", ",", "func", ")", ":", "key", "=", "(", "type_module", ",", "type_name", ")", "oldfunc", "=", "self", ".", "deferred_printers", ".", "get", "(", "key", ",", "None", ")", "if", ...
Add a format function for a type specified by the full dotted module and name of the type, rather than the type of the object. Parameters ---------- type_module : str The full dotted name of the module the type is defined in, like ``numpy``. type_name : str The name of the type (the class name), like ``dtype`` func : callable The callable that will be called to compute the format data. The call signature of this function is simple, it must take the object to be formatted and return the raw data for the given format. Subclasses may use a different call signature for the `func` argument.
[ "Add", "a", "format", "function", "for", "a", "type", "specified", "by", "the", "full", "dotted", "module", "and", "name", "of", "the", "type", "rather", "than", "the", "type", "of", "the", "object", "." ]
python
test
tonybaloney/wily
wily/commands/build.py
https://github.com/tonybaloney/wily/blob/bae259354a91b57d56603f0ca7403186f086a84c/wily/commands/build.py#L27-L130
def build(config, archiver, operators): """ Build the history given a archiver and collection of operators. :param config: The wily configuration :type config: :namedtuple:`wily.config.WilyConfig` :param archiver: The archiver to use :type archiver: :namedtuple:`wily.archivers.Archiver` :param operators: The list of operators to execute :type operators: `list` of :namedtuple:`wily.operators.Operator` """ try: logger.debug(f"Using {archiver.name} archiver module") archiver = archiver.cls(config) revisions = archiver.revisions(config.path, config.max_revisions) except InvalidGitRepositoryError: # TODO: This logic shouldn't really be here (SoC) logger.info(f"Defaulting back to the filesystem archiver, not a valid git repo") archiver = FilesystemArchiver(config) revisions = archiver.revisions(config.path, config.max_revisions) except Exception as e: if hasattr(e, "message"): logger.error(f"Failed to setup archiver: '{e.message}'") else: logger.error(f"Failed to setup archiver: '{type(e)} - {e}'") exit(1) state = State(config, archiver=archiver) # Check for existence of cache, else provision state.ensure_exists() index = state.index[archiver.name] # remove existing revisions from the list revisions = [revision for revision in revisions if revision not in index] logger.info( f"Found {len(revisions)} revisions from '{archiver.name}' archiver in '{config.path}'." 
) _op_desc = ",".join([operator.name for operator in operators]) logger.info(f"Running operators - {_op_desc}") bar = Bar("Processing", max=len(revisions) * len(operators)) state.operators = operators try: with multiprocessing.Pool(processes=len(operators)) as pool: for revision in revisions: # Checkout target revision archiver.checkout(revision, config.checkout_options) stats = {"operator_data": {}} # Run each operator as a seperate process data = pool.starmap( run_operator, [(operator, revision, config) for operator in operators], ) # Map the data back into a dictionary for operator_name, result in data: # aggregate values to directories roots = [] # find all unique directories in the results for entry in result.keys(): parent = pathlib.Path(entry).parents[0] if parent not in roots: roots.append(parent) for root in roots: # find all matching entries recursively aggregates = [ path for path in result.keys() if root in pathlib.Path(path).parents ] result[str(root)] = {} # aggregate values for metric in resolve_operator(operator_name).cls.metrics: func = metric.aggregate values = [ result[aggregate][metric.name] for aggregate in aggregates if aggregate in result and metric.name in result[aggregate] ] if len(values) > 0: result[str(root)][metric.name] = func(values) stats["operator_data"][operator_name] = result bar.next() ir = index.add(revision, operators=operators) ir.store(config, archiver, stats) index.save() bar.finish() except Exception as e: logger.error(f"Failed to build cache: '{e}'") raise e finally: # Reset the archive after every run back to the head of the branch archiver.finish()
[ "def", "build", "(", "config", ",", "archiver", ",", "operators", ")", ":", "try", ":", "logger", ".", "debug", "(", "f\"Using {archiver.name} archiver module\"", ")", "archiver", "=", "archiver", ".", "cls", "(", "config", ")", "revisions", "=", "archiver", ...
Build the history given a archiver and collection of operators. :param config: The wily configuration :type config: :namedtuple:`wily.config.WilyConfig` :param archiver: The archiver to use :type archiver: :namedtuple:`wily.archivers.Archiver` :param operators: The list of operators to execute :type operators: `list` of :namedtuple:`wily.operators.Operator`
[ "Build", "the", "history", "given", "a", "archiver", "and", "collection", "of", "operators", "." ]
python
train
wbond/asn1crypto
asn1crypto/core.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L2916-L2942
def dotted(self): """ :return: A unicode string of the object identifier in dotted notation, thus ignoring any mapped value """ if self._dotted is None: output = [] part = 0 for byte in self.contents: if _PY2: byte = ord(byte) part = part * 128 part += byte & 127 # Last byte in subidentifier has the eighth bit set to 0 if byte & 0x80 == 0: if len(output) == 0: output.append(str_cls(part // 40)) output.append(str_cls(part % 40)) else: output.append(str_cls(part)) part = 0 self._dotted = '.'.join(output) return self._dotted
[ "def", "dotted", "(", "self", ")", ":", "if", "self", ".", "_dotted", "is", "None", ":", "output", "=", "[", "]", "part", "=", "0", "for", "byte", "in", "self", ".", "contents", ":", "if", "_PY2", ":", "byte", "=", "ord", "(", "byte", ")", "par...
:return: A unicode string of the object identifier in dotted notation, thus ignoring any mapped value
[ ":", "return", ":", "A", "unicode", "string", "of", "the", "object", "identifier", "in", "dotted", "notation", "thus", "ignoring", "any", "mapped", "value" ]
python
train
rwl/PyCIM
PyCIM/RDFXMLReader.py
https://github.com/rwl/PyCIM/blob/4a12ebb5a7fb03c7790d396910daef9b97c4ef99/PyCIM/RDFXMLReader.py#L226-L244
def xmlns(source): """ Returns a map of prefix to namespace for the given XML file. """ namespaces = {} events=("end", "start-ns", "end-ns") for (event, elem) in iterparse(source, events): if event == "start-ns": prefix, ns = elem namespaces[prefix] = ns elif event == "end": break # Reset stream if hasattr(source, "seek"): source.seek(0) return namespaces
[ "def", "xmlns", "(", "source", ")", ":", "namespaces", "=", "{", "}", "events", "=", "(", "\"end\"", ",", "\"start-ns\"", ",", "\"end-ns\"", ")", "for", "(", "event", ",", "elem", ")", "in", "iterparse", "(", "source", ",", "events", ")", ":", "if", ...
Returns a map of prefix to namespace for the given XML file.
[ "Returns", "a", "map", "of", "prefix", "to", "namespace", "for", "the", "given", "XML", "file", "." ]
python
train
garyp/sifter
sifter/grammar/lexer.py
https://github.com/garyp/sifter/blob/9c472af76853c1196387141e017114d282637474/sifter/grammar/lexer.py#L55-L66
def t_QUOTED_STRING(t): r'"([^"\\]|\\["\\])*"' # TODO: Add support for: # - An undefined escape sequence (such as "\a" in a context where "a" # has no special meaning) is interpreted as if there were no backslash # (in this case, "\a" is just "a"), though that may be changed by # extensions. # - Non-printing characters such as tabs, CRLF, and control characters # are permitted in quoted strings. Quoted strings MAY span multiple # lines. An unencoded NUL (US-ASCII 0) is not allowed in strings. t.value = t.value.strip('"').replace(r'\"', '"').replace(r'\\', '\\') return t
[ "def", "t_QUOTED_STRING", "(", "t", ")", ":", "# TODO: Add support for:", "# - An undefined escape sequence (such as \"\\a\" in a context where \"a\"", "# has no special meaning) is interpreted as if there were no backslash", "# (in this case, \"\\a\" is just \"a\"), though that may be changed by"...
r'"([^"\\]|\\["\\])*"
[ "r", "(", "[", "^", "\\\\", "]", "|", "\\\\", "[", "\\\\", "]", ")", "*" ]
python
train