repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
jaredLunde/vital-tools
vital/security/__init__.py
https://github.com/jaredLunde/vital-tools/blob/ea924c9bbb6ec22aa66f8095f018b1ee0099ac04/vital/security/__init__.py#L405-L430
def randkey(bits, keyspace=string.ascii_letters + string.digits + '#/.', rng=None): """ Returns a cryptographically secure random key of desired @bits of entropy within @keyspace using :class:random.SystemRandom @bits: (#int) minimum bits of entropy @keyspace: (#str) or iterable allowed output chars @rng: the random number generator to use. Defaults to :class:random.SystemRandom. Must have a |choice| method -> (#str) random key .. from vital.security import randkey randkey(24) # -> '9qaX' randkey(48) # -> 'iPJ5YWs9' randkey(64) # - > 'C..VJ.KLdxg' randkey(64, keyspace="abc", rng=random) # -> 'aabcccbabcaacaccccabcaabbabcacabacbbbaaab' .. """ return "".join(char for char in iter_random_chars(bits, keyspace, rng))
[ "def", "randkey", "(", "bits", ",", "keyspace", "=", "string", ".", "ascii_letters", "+", "string", ".", "digits", "+", "'#/.'", ",", "rng", "=", "None", ")", ":", "return", "\"\"", ".", "join", "(", "char", "for", "char", "in", "iter_random_chars", "(...
Returns a cryptographically secure random key of desired @bits of entropy within @keyspace using :class:random.SystemRandom @bits: (#int) minimum bits of entropy @keyspace: (#str) or iterable allowed output chars @rng: the random number generator to use. Defaults to :class:random.SystemRandom. Must have a |choice| method -> (#str) random key .. from vital.security import randkey randkey(24) # -> '9qaX' randkey(48) # -> 'iPJ5YWs9' randkey(64) # - > 'C..VJ.KLdxg' randkey(64, keyspace="abc", rng=random) # -> 'aabcccbabcaacaccccabcaabbabcacabacbbbaaab' ..
[ "Returns", "a", "cryptographically", "secure", "random", "key", "of", "desired", "@bits", "of", "entropy", "within", "@keyspace", "using", ":", "class", ":", "random", ".", "SystemRandom" ]
python
train
34.615385
flowersteam/explauto
explauto/sensorimotor_model/inverse/cma.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L1504-L1512
def idx_infeasible(self, solution_genotype): """return indices of "infeasible" variables, that is, variables that do not directly map into the feasible domain such that ``tf.inverse(tf(x)) == x``. """ res = [i for i, x in enumerate(solution_genotype) if not self.is_feasible_i(x, i)] return res
[ "def", "idx_infeasible", "(", "self", ",", "solution_genotype", ")", ":", "res", "=", "[", "i", "for", "i", ",", "x", "in", "enumerate", "(", "solution_genotype", ")", "if", "not", "self", ".", "is_feasible_i", "(", "x", ",", "i", ")", "]", "return", ...
return indices of "infeasible" variables, that is, variables that do not directly map into the feasible domain such that ``tf.inverse(tf(x)) == x``.
[ "return", "indices", "of", "infeasible", "variables", "that", "is", "variables", "that", "do", "not", "directly", "map", "into", "the", "feasible", "domain", "such", "that", "tf", ".", "inverse", "(", "tf", "(", "x", "))", "==", "x", "." ]
python
train
40.777778
rflamary/POT
ot/smooth.py
https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/smooth.py#L510-L600
def smooth_ot_semi_dual(a, b, M, reg, reg_type='l2', method="L-BFGS-B", stopThr=1e-9, numItermax=500, verbose=False, log=False): r""" Solve the regularized OT problem in the semi-dual and return the OT matrix The function solves the smooth relaxed dual formulation (10) in [17]_ : .. math:: \max_{\alpha}\quad a^T\alpha-OT_\Omega^*(\alpha,b) where : .. math:: OT_\Omega^*(\alpha,b)=\sum_j b_j - :math:`\mathbf{m}_j` is the jth column of the cost matrix - :math:`OT_\Omega^*(\alpha,b)` is defined in Eq. (9) in [17] - a and b are source and target weights (sum to 1) The OT matrix can is reconstructed using [17]_ Proposition 2. The optimization algorithm is using gradient decent (L-BFGS by default). Parameters ---------- a : np.ndarray (ns,) samples weights in the source domain b : np.ndarray (nt,) or np.ndarray (nt,nbb) samples in the target domain, compute sinkhorn with multiple targets and fixed M if b is a matrix (return OT loss + dual variables in log) M : np.ndarray (ns,nt) loss matrix reg : float Regularization term >0 reg_type : str Regularization type, can be the following (default ='l2'): - 'kl' : Kullback Leibler (~ Neg-entropy used in sinkhorn [2]_) - 'l2' : Squared Euclidean regularization method : str Solver to use for scipy.optimize.minimize numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshol on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma : (ns x nt) ndarray Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters References ---------- .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 .. [17] Blondel, M., Seguy, V., & Rolet, A. (2018). Smooth and Sparse Optimal Transport. 
Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS). See Also -------- ot.lp.emd : Unregularized OT ot.sinhorn : Entropic regularized OT ot.optim.cg : General regularized OT """ if reg_type.lower() in ['l2', 'squaredl2']: regul = SquaredL2(gamma=reg) elif reg_type.lower() in ['entropic', 'negentropy', 'kl']: regul = NegEntropy(gamma=reg) else: raise NotImplementedError('Unknown regularization') # solve dual alpha, res = solve_semi_dual(a, b, M, regul, max_iter=numItermax, tol=stopThr, verbose=verbose) # reconstruct transport matrix G = get_plan_from_semi_dual(alpha, b, M, regul) if log: log = {'alpha': alpha, 'res': res} return G, log else: return G
[ "def", "smooth_ot_semi_dual", "(", "a", ",", "b", ",", "M", ",", "reg", ",", "reg_type", "=", "'l2'", ",", "method", "=", "\"L-BFGS-B\"", ",", "stopThr", "=", "1e-9", ",", "numItermax", "=", "500", ",", "verbose", "=", "False", ",", "log", "=", "Fals...
r""" Solve the regularized OT problem in the semi-dual and return the OT matrix The function solves the smooth relaxed dual formulation (10) in [17]_ : .. math:: \max_{\alpha}\quad a^T\alpha-OT_\Omega^*(\alpha,b) where : .. math:: OT_\Omega^*(\alpha,b)=\sum_j b_j - :math:`\mathbf{m}_j` is the jth column of the cost matrix - :math:`OT_\Omega^*(\alpha,b)` is defined in Eq. (9) in [17] - a and b are source and target weights (sum to 1) The OT matrix can is reconstructed using [17]_ Proposition 2. The optimization algorithm is using gradient decent (L-BFGS by default). Parameters ---------- a : np.ndarray (ns,) samples weights in the source domain b : np.ndarray (nt,) or np.ndarray (nt,nbb) samples in the target domain, compute sinkhorn with multiple targets and fixed M if b is a matrix (return OT loss + dual variables in log) M : np.ndarray (ns,nt) loss matrix reg : float Regularization term >0 reg_type : str Regularization type, can be the following (default ='l2'): - 'kl' : Kullback Leibler (~ Neg-entropy used in sinkhorn [2]_) - 'l2' : Squared Euclidean regularization method : str Solver to use for scipy.optimize.minimize numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshol on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma : (ns x nt) ndarray Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters References ---------- .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 .. [17] Blondel, M., Seguy, V., & Rolet, A. (2018). Smooth and Sparse Optimal Transport. Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS). See Also -------- ot.lp.emd : Unregularized OT ot.sinhorn : Entropic regularized OT ot.optim.cg : General regularized OT
[ "r", "Solve", "the", "regularized", "OT", "problem", "in", "the", "semi", "-", "dual", "and", "return", "the", "OT", "matrix" ]
python
train
32.604396
zencoder/zencoder-py
zencoder/core.py
https://github.com/zencoder/zencoder-py/blob/9d762e33e2bb2edadb0e5da0bb80a61e27636426/zencoder/core.py#L103-L110
def post(self, url, body=None): """ Executes an HTTP POST request for the given URL. """ response = self.http.post(url, headers=self.headers, data=body, **self.requests_params) return self.process(response)
[ "def", "post", "(", "self", ",", "url", ",", "body", "=", "None", ")", ":", "response", "=", "self", ".", "http", ".", "post", "(", "url", ",", "headers", "=", "self", ".", "headers", ",", "data", "=", "body", ",", "*", "*", "self", ".", "reque...
Executes an HTTP POST request for the given URL.
[ "Executes", "an", "HTTP", "POST", "request", "for", "the", "given", "URL", "." ]
python
train
40.75
CZ-NIC/yangson
yangson/datamodel.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/datamodel.py#L169-L177
def schema_digest(self) -> str: """Generate schema digest (to be used primarily by clients). Returns: Condensed information about the schema in JSON format. """ res = self.schema._node_digest() res["config"] = True return json.dumps(res)
[ "def", "schema_digest", "(", "self", ")", "->", "str", ":", "res", "=", "self", ".", "schema", ".", "_node_digest", "(", ")", "res", "[", "\"config\"", "]", "=", "True", "return", "json", ".", "dumps", "(", "res", ")" ]
Generate schema digest (to be used primarily by clients). Returns: Condensed information about the schema in JSON format.
[ "Generate", "schema", "digest", "(", "to", "be", "used", "primarily", "by", "clients", ")", "." ]
python
train
32.222222
tensorpack/tensorpack
examples/basics/export-model.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/basics/export-model.py#L106-L113
def export_serving(model_path): """Export trained model to use it in TensorFlow Serving or cloudML. """ pred_config = PredictConfig( session_init=get_model_loader(model_path), model=InferenceOnlyModel(), input_names=['input_img_bytes'], output_names=['prediction_img_bytes']) ModelExporter(pred_config).export_serving('/tmp/exported')
[ "def", "export_serving", "(", "model_path", ")", ":", "pred_config", "=", "PredictConfig", "(", "session_init", "=", "get_model_loader", "(", "model_path", ")", ",", "model", "=", "InferenceOnlyModel", "(", ")", ",", "input_names", "=", "[", "'input_img_bytes'", ...
Export trained model to use it in TensorFlow Serving or cloudML.
[ "Export", "trained", "model", "to", "use", "it", "in", "TensorFlow", "Serving", "or", "cloudML", "." ]
python
train
46.375
singularityhub/sregistry-cli
sregistry/main/google_build/push.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/google_build/push.py#L26-L58
def push(self, path, name, tag=None): '''push an image to Google Cloud Storage, meaning uploading it path: should correspond to an absolte image path (or derive it) name: should be the complete uri that the user has requested to push. tag: should correspond with an image tag. This is provided to mirror Docker ''' path = os.path.abspath(path) bot.debug("PUSH %s" % path) if not os.path.exists(path): bot.error('%s does not exist.' %path) sys.exit(1) # This returns a data structure with collection, container, based on uri names = parse_image_name(remove_uri(name),tag=tag) if names['version'] is None: version = get_image_hash(path) names = parse_image_name(remove_uri(name), tag=tag, version=version) # Update metadata with names metadata = self.get_metadata(path, names=names) if "data" in metadata: metadata = metadata['data'] metadata.update(names) manifest = self._upload(source=path, destination=names['storage'], metadata=metadata) print(manifest['mediaLink'])
[ "def", "push", "(", "self", ",", "path", ",", "name", ",", "tag", "=", "None", ")", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "bot", ".", "debug", "(", "\"PUSH %s\"", "%", "path", ")", "if", "not", "os", ".", "path"...
push an image to Google Cloud Storage, meaning uploading it path: should correspond to an absolte image path (or derive it) name: should be the complete uri that the user has requested to push. tag: should correspond with an image tag. This is provided to mirror Docker
[ "push", "an", "image", "to", "Google", "Cloud", "Storage", "meaning", "uploading", "it", "path", ":", "should", "correspond", "to", "an", "absolte", "image", "path", "(", "or", "derive", "it", ")", "name", ":", "should", "be", "the", "complete", "uri", "...
python
test
33.878788
Parsely/birding
src/birding/config.py
https://github.com/Parsely/birding/blob/c7f6eee56424234e361b1a455595de202e744dac/src/birding/config.py#L231-L261
def import_name(name, default_ns=None): """Import an object based on the dotted string. >>> import_name('textwrap') # doctest: +ELLIPSIS <module 'textwrap' from '...'> >>> import_name('birding.config') # doctest: +ELLIPSIS <module 'birding.config' from '...'> >>> import_name('birding.config.get_config') # doctest: +ELLIPSIS <function get_config at ...> >>> If `ns` is provided, use it as the namespace if `name` does not have a dot. >>> ns = 'birding.config' >>> x = import_name('birding.config.get_config') >>> x # doctest: +ELLIPSIS <function get_config at ...> >>> x == import_name('get_config', default_ns=ns) True >>> x == import_name('birding.config.get_config', default_ns=ns) True >>> """ if '.' not in name: if default_ns is None: return importlib.import_module(name) else: name = default_ns + '.' + name module_name, object_name = name.rsplit('.', 1) module = importlib.import_module(module_name) return getattr(module, object_name)
[ "def", "import_name", "(", "name", ",", "default_ns", "=", "None", ")", ":", "if", "'.'", "not", "in", "name", ":", "if", "default_ns", "is", "None", ":", "return", "importlib", ".", "import_module", "(", "name", ")", "else", ":", "name", "=", "default...
Import an object based on the dotted string. >>> import_name('textwrap') # doctest: +ELLIPSIS <module 'textwrap' from '...'> >>> import_name('birding.config') # doctest: +ELLIPSIS <module 'birding.config' from '...'> >>> import_name('birding.config.get_config') # doctest: +ELLIPSIS <function get_config at ...> >>> If `ns` is provided, use it as the namespace if `name` does not have a dot. >>> ns = 'birding.config' >>> x = import_name('birding.config.get_config') >>> x # doctest: +ELLIPSIS <function get_config at ...> >>> x == import_name('get_config', default_ns=ns) True >>> x == import_name('birding.config.get_config', default_ns=ns) True >>>
[ "Import", "an", "object", "based", "on", "the", "dotted", "string", "." ]
python
train
33.83871
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/account_management/account_management.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/account_management/account_management.py#L257-L268
def list_groups(self, **kwargs): """List all groups in organisation. :param int limit: The number of groups to retrieve :param str order: The ordering direction, ascending (asc) or descending (desc) :param str after: Get groups after/starting at given group ID :returns: a list of :py:class:`Group` objects. :rtype: PaginatedResponse """ kwargs = self._verify_sort_options(kwargs) api = self._get_api(iam.DeveloperApi) return PaginatedResponse(api.get_all_groups, lwrap_type=Group, **kwargs)
[ "def", "list_groups", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "self", ".", "_verify_sort_options", "(", "kwargs", ")", "api", "=", "self", ".", "_get_api", "(", "iam", ".", "DeveloperApi", ")", "return", "PaginatedResponse", "(", "...
List all groups in organisation. :param int limit: The number of groups to retrieve :param str order: The ordering direction, ascending (asc) or descending (desc) :param str after: Get groups after/starting at given group ID :returns: a list of :py:class:`Group` objects. :rtype: PaginatedResponse
[ "List", "all", "groups", "in", "organisation", "." ]
python
train
46.75
zvolsky/wfpdf
wfpdf.py
https://github.com/zvolsky/wfpdf/blob/d3625a6420ae1fb6722d81cddf0636af496c42bb/wfpdf.py#L116-L127
def header(self, item0, *items): """print string item0 to the current position and next strings to defined positions example: .header("Name", 75, "Quantity", 100, "Unit") """ self.txt(item0) at_x = None for item in items: if at_x is None: at_x = item else: self.txt(item, at_x=at_x) at_x = None
[ "def", "header", "(", "self", ",", "item0", ",", "*", "items", ")", ":", "self", ".", "txt", "(", "item0", ")", "at_x", "=", "None", "for", "item", "in", "items", ":", "if", "at_x", "is", "None", ":", "at_x", "=", "item", "else", ":", "self", "...
print string item0 to the current position and next strings to defined positions example: .header("Name", 75, "Quantity", 100, "Unit")
[ "print", "string", "item0", "to", "the", "current", "position", "and", "next", "strings", "to", "defined", "positions", "example", ":", ".", "header", "(", "Name", "75", "Quantity", "100", "Unit", ")" ]
python
train
33.583333
Clinical-Genomics/scout
scout/adapter/mongo/user.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/user.py#L72-L84
def user(self, email): """Fetch a user from the database. Args: email(str) Returns: user_obj(dict) """ LOG.info("Fetching user %s", email) user_obj = self.user_collection.find_one({'_id': email}) return user_obj
[ "def", "user", "(", "self", ",", "email", ")", ":", "LOG", ".", "info", "(", "\"Fetching user %s\"", ",", "email", ")", "user_obj", "=", "self", ".", "user_collection", ".", "find_one", "(", "{", "'_id'", ":", "email", "}", ")", "return", "user_obj" ]
Fetch a user from the database. Args: email(str) Returns: user_obj(dict)
[ "Fetch", "a", "user", "from", "the", "database", ".", "Args", ":", "email", "(", "str", ")", "Returns", ":", "user_obj", "(", "dict", ")" ]
python
test
24.153846
openego/eDisGo
edisgo/tools/edisgo_run.py
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/tools/edisgo_run.py#L168-L215
def run_edisgo_twice(run_args): """ Run grid analysis twice on same grid: once w/ and once w/o new generators First run without connection of new generators approves sufficient grid hosting capacity. Otherwise, grid is reinforced. Second run assessment grid extension needs in terms of RES integration Parameters ---------- run_args : list Optional parameters for :func:`run_edisgo_basic`. Returns ------- all_costs_before_geno_import : :pandas:`pandas.Dataframe<dataframe>` Grid extension cost before grid connection of new generators all_grid_issues_before_geno_import : dict Remaining overloading or over-voltage issues in grid all_costs : :pandas:`pandas.Dataframe<dataframe>` Grid extension cost due to grid connection of new generators all_grid_issues : dict Remaining overloading or over-voltage issues in grid """ # base case with no generator import edisgo_grid, \ costs_before_geno_import, \ grid_issues_before_geno_import = run_edisgo_basic(*run_args) if edisgo_grid: # clear the pypsa object and results from edisgo_grid edisgo_grid.network.results = Results(edisgo_grid.network) edisgo_grid.network.pypsa = None # case after generator import # run_args = [ding0_filename] # run_args.extend(run_args_opt) run_args.append(edisgo_grid) _, costs, \ grid_issues = run_edisgo_basic(*run_args) return costs_before_geno_import, grid_issues_before_geno_import, \ costs, grid_issues else: return costs_before_geno_import, grid_issues_before_geno_import, \ costs_before_geno_import, grid_issues_before_geno_import
[ "def", "run_edisgo_twice", "(", "run_args", ")", ":", "# base case with no generator import", "edisgo_grid", ",", "costs_before_geno_import", ",", "grid_issues_before_geno_import", "=", "run_edisgo_basic", "(", "*", "run_args", ")", "if", "edisgo_grid", ":", "# clear the py...
Run grid analysis twice on same grid: once w/ and once w/o new generators First run without connection of new generators approves sufficient grid hosting capacity. Otherwise, grid is reinforced. Second run assessment grid extension needs in terms of RES integration Parameters ---------- run_args : list Optional parameters for :func:`run_edisgo_basic`. Returns ------- all_costs_before_geno_import : :pandas:`pandas.Dataframe<dataframe>` Grid extension cost before grid connection of new generators all_grid_issues_before_geno_import : dict Remaining overloading or over-voltage issues in grid all_costs : :pandas:`pandas.Dataframe<dataframe>` Grid extension cost due to grid connection of new generators all_grid_issues : dict Remaining overloading or over-voltage issues in grid
[ "Run", "grid", "analysis", "twice", "on", "same", "grid", ":", "once", "w", "/", "and", "once", "w", "/", "o", "new", "generators" ]
python
train
35.791667
svetlyak40wt/python-repr
src/magic_repr/__init__.py
https://github.com/svetlyak40wt/python-repr/blob/49e358e77b97d74f29f4977ea009ab2d64c254e8/src/magic_repr/__init__.py#L125-L177
def format_value(value): """This function should return unicode representation of the value """ value_id = id(value) if value_id in recursion_breaker.processed: return u'<recursion>' recursion_breaker.processed.add(value_id) try: if isinstance(value, six.binary_type): # suppose, all byte strings are in unicode # don't know if everybody in the world uses anything else? return u"'{0}'".format(value.decode('utf-8')) elif isinstance(value, six.text_type): return u"u'{0}'".format(value) elif isinstance(value, (list, tuple)): # long lists or lists with multiline items # will be shown vertically values = list(map(format_value, value)) result = serialize_list(u'[', values, delimiter=u',') + u']' return force_unicode(result) elif isinstance(value, dict): items = six.iteritems(value) # format each key/value pair as a text, # calling format_value recursively items = (tuple(map(format_value, item)) for item in items) items = list(items) # sort by keys for readability items.sort() # for each item value items = [ serialize_text( u'{0}: '.format(key), item_value) for key, item_value in items] # and serialize these pieces as a list, enclosing # them into a curve brackets result = serialize_list(u'{', items, delimiter=u',') + u'}' return force_unicode(result) return force_unicode(repr(value)) finally: recursion_breaker.processed.remove(value_id)
[ "def", "format_value", "(", "value", ")", ":", "value_id", "=", "id", "(", "value", ")", "if", "value_id", "in", "recursion_breaker", ".", "processed", ":", "return", "u'<recursion>'", "recursion_breaker", ".", "processed", ".", "add", "(", "value_id", ")", ...
This function should return unicode representation of the value
[ "This", "function", "should", "return", "unicode", "representation", "of", "the", "value" ]
python
valid
33.018868
noxdafox/vminspect
vminspect/comparator.py
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/comparator.py#L367-L374
def files_type(fs0, fs1, files): """Inspects the file type of the given files.""" for file_meta in files['deleted_files']: file_meta['type'] = fs0.file(file_meta['path']) for file_meta in files['created_files'] + files['modified_files']: file_meta['type'] = fs1.file(file_meta['path']) return files
[ "def", "files_type", "(", "fs0", ",", "fs1", ",", "files", ")", ":", "for", "file_meta", "in", "files", "[", "'deleted_files'", "]", ":", "file_meta", "[", "'type'", "]", "=", "fs0", ".", "file", "(", "file_meta", "[", "'path'", "]", ")", "for", "fil...
Inspects the file type of the given files.
[ "Inspects", "the", "file", "type", "of", "the", "given", "files", "." ]
python
train
40.5
tango-controls/pytango
tango/utils.py
https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/utils.py#L635-L652
def is_float(tg_type, inc_array=False): """Tells if the given tango type is float :param tg_type: tango type :type tg_type: :class:`tango.CmdArgType` :param inc_array: (optional, default is False) determines if include array in the list of checked types :type inc_array: :py:obj:`bool` :return: True if the given tango type is float or False otherwise :rtype: :py:obj:`bool` """ global _scalar_float_types, _array_float_types if tg_type in _scalar_float_types: return True if not inc_array: return False return tg_type in _array_float_types
[ "def", "is_float", "(", "tg_type", ",", "inc_array", "=", "False", ")", ":", "global", "_scalar_float_types", ",", "_array_float_types", "if", "tg_type", "in", "_scalar_float_types", ":", "return", "True", "if", "not", "inc_array", ":", "return", "False", "retur...
Tells if the given tango type is float :param tg_type: tango type :type tg_type: :class:`tango.CmdArgType` :param inc_array: (optional, default is False) determines if include array in the list of checked types :type inc_array: :py:obj:`bool` :return: True if the given tango type is float or False otherwise :rtype: :py:obj:`bool`
[ "Tells", "if", "the", "given", "tango", "type", "is", "float" ]
python
train
33.944444
SiLab-Bonn/pyBAR
pybar/scans/analyze_timewalk.py
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/scans/analyze_timewalk.py#L36-L41
def get_charge_calibration(calibation_file, max_tdc): ''' Open the hit or calibration file and return the calibration per pixel''' with tb.open_file(calibation_file, mode="r") as in_file_calibration_h5: tdc_calibration = in_file_calibration_h5.root.HitOrCalibration[:, :, :, 1] tdc_calibration_values = in_file_calibration_h5.root.HitOrCalibration.attrs.scan_parameter_values[:] return get_charge(max_tdc, tdc_calibration_values, tdc_calibration)
[ "def", "get_charge_calibration", "(", "calibation_file", ",", "max_tdc", ")", ":", "with", "tb", ".", "open_file", "(", "calibation_file", ",", "mode", "=", "\"r\"", ")", "as", "in_file_calibration_h5", ":", "tdc_calibration", "=", "in_file_calibration_h5", ".", "...
Open the hit or calibration file and return the calibration per pixel
[ "Open", "the", "hit", "or", "calibration", "file", "and", "return", "the", "calibration", "per", "pixel" ]
python
train
78.166667
svenevs/exhale
exhale/graph.py
https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/graph.py#L498-L557
def toConsole(self, level, fmt_spec, printChildren=True): ''' Debugging tool for printing hierarchies / ownership to the console. Recursively calls children ``toConsole`` if this node is not a directory or a file, and ``printChildren == True``. .. todo:: fmt_spec docs needed. keys are ``kind`` and values are color spec :Parameters: ``level`` (int) The indentation level to be used, should be greater than or equal to 0. ``printChildren`` (bool) Whether or not the ``toConsole`` method for the children found in ``self.children`` should be called with ``level+1``. Default is True, set to False for directories and files. ''' indent = " " * level utils.verbose_log("{indent}- [{kind}]: {name}".format( indent=indent, kind=utils._use_color(self.kind, fmt_spec[self.kind], sys.stderr), name=self.name )) # files are children of directories, the file section will print those children if self.kind == "dir": for c in self.children: c.toConsole(level + 1, fmt_spec, printChildren=False) elif printChildren: if self.kind == "file": next_indent = " " * (level + 1) utils.verbose_log("{next_indent}[[[ location=\"{loc}\" ]]]".format( next_indent=next_indent, loc=self.location )) for incl in self.includes: utils.verbose_log("{next_indent}- #include <{incl}>".format( next_indent=next_indent, incl=incl )) for ref, name in self.included_by: utils.verbose_log("{next_indent}- included by: [{name}]".format( next_indent=next_indent, name=name )) for n in self.namespaces_used: n.toConsole(level + 1, fmt_spec, printChildren=False) for c in self.children: c.toConsole(level + 1, fmt_spec) elif self.kind == "class" or self.kind == "struct": relevant_children = [] for c in self.children: if c.kind == "class" or c.kind == "struct" or \ c.kind == "enum" or c.kind == "union": relevant_children.append(c) for rc in sorted(relevant_children): rc.toConsole(level + 1, fmt_spec) elif self.kind != "union": for c in self.children: c.toConsole(level + 1, fmt_spec)
[ "def", "toConsole", "(", "self", ",", "level", ",", "fmt_spec", ",", "printChildren", "=", "True", ")", ":", "indent", "=", "\" \"", "*", "level", "utils", ".", "verbose_log", "(", "\"{indent}- [{kind}]: {name}\"", ".", "format", "(", "indent", "=", "indent...
Debugging tool for printing hierarchies / ownership to the console. Recursively calls children ``toConsole`` if this node is not a directory or a file, and ``printChildren == True``. .. todo:: fmt_spec docs needed. keys are ``kind`` and values are color spec :Parameters: ``level`` (int) The indentation level to be used, should be greater than or equal to 0. ``printChildren`` (bool) Whether or not the ``toConsole`` method for the children found in ``self.children`` should be called with ``level+1``. Default is True, set to False for directories and files.
[ "Debugging", "tool", "for", "printing", "hierarchies", "/", "ownership", "to", "the", "console", ".", "Recursively", "calls", "children", "toConsole", "if", "this", "node", "is", "not", "a", "directory", "or", "a", "file", "and", "printChildren", "==", "True",...
python
train
45.483333
firecat53/urlscan
urlscan/urlchoose.py
https://github.com/firecat53/urlscan/blob/2d10807d01167873733da3b478c784f8fa21bbc0/urlscan/urlchoose.py#L547-L586
def _search(self): """ Search - search URLs and text. """ text = "Search: {}".format(self.search_string) footerwid = urwid.AttrMap(urwid.Text(text), 'footer') self.top.footer = footerwid search_items = [] for grp in self.items_org: done = False for idx, item in enumerate(grp): if isinstance(item, urwid.Columns): for col_idx, col in enumerate(item.contents): if isinstance(col[0], urwid.decoration.AttrMap): grp[idx][col_idx].set_label(splittext(col[0].base_widget.label, self.search_string, '')) if self.search_string.lower() in col[0].base_widget.label.lower(): grp[idx][col_idx].set_label(splittext(col[0].base_widget.label, self.search_string, 'search')) done = True elif isinstance(item, urwid.Text): grp[idx].set_text(splittext(item.text, self.search_string, '')) if self.search_string.lower() in item.text.lower(): grp[idx].set_text(splittext(item.text, self.search_string, 'search')) done = True if done is True: search_items.extend(grp) self.items = search_items self.top.body = urwid.ListBox(self.items) if self.items: self.top.body.focus_position = 2 if self.compact is False else 0 # Trick urwid into redisplaying the cursor self.top.keypress(self.tui.get_cols_rows(), "") self.no_matches = False else: self.no_matches = True footerwid = urwid.AttrMap(urwid.Text(text + " No Matches"), 'footer') self.top.footer = footerwid
[ "def", "_search", "(", "self", ")", ":", "text", "=", "\"Search: {}\"", ".", "format", "(", "self", ".", "search_string", ")", "footerwid", "=", "urwid", ".", "AttrMap", "(", "urwid", ".", "Text", "(", "text", ")", ",", "'footer'", ")", "self", ".", ...
Search - search URLs and text.
[ "Search", "-", "search", "URLs", "and", "text", "." ]
python
train
51.975
Jajcus/pyxmpp2
pyxmpp2/sasl/digest_md5.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/sasl/digest_md5.py#L61-L76
def _quote(data): """Prepare a string for quoting for DIGEST-MD5 challenge or response. Don't add the quotes, only escape '"' and "\\" with backslashes. :Parameters: - `data`: a raw string. :Types: - `data`: `bytes` :return: `data` with '"' and "\\" escaped using "\\". :returntype: `bytes` """ data = data.replace(b'\\', b'\\\\') data = data.replace(b'"', b'\\"') return data
[ "def", "_quote", "(", "data", ")", ":", "data", "=", "data", ".", "replace", "(", "b'\\\\'", ",", "b'\\\\\\\\'", ")", "data", "=", "data", ".", "replace", "(", "b'\"'", ",", "b'\\\\\"'", ")", "return", "data" ]
Prepare a string for quoting for DIGEST-MD5 challenge or response. Don't add the quotes, only escape '"' and "\\" with backslashes. :Parameters: - `data`: a raw string. :Types: - `data`: `bytes` :return: `data` with '"' and "\\" escaped using "\\". :returntype: `bytes`
[ "Prepare", "a", "string", "for", "quoting", "for", "DIGEST", "-", "MD5", "challenge", "or", "response", "." ]
python
valid
26.25
mandiant/ioc_writer
ioc_writer/ioc_common.py
https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/ioc_common.py#L951-L963
def make_serviceitem_servicedllmd5sum(servicedll_md5, condition='is', negate=False): """ Create a node for ServiceItem/serviceDLLmd5sum :return: A IndicatorItem represented as an Element node """ document = 'ServiceItem' search = 'ServiceItem/serviceDLLmd5sum' content_type = 'md5' content = servicedll_md5 ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content, negate=negate) return ii_node
[ "def", "make_serviceitem_servicedllmd5sum", "(", "servicedll_md5", ",", "condition", "=", "'is'", ",", "negate", "=", "False", ")", ":", "document", "=", "'ServiceItem'", "search", "=", "'ServiceItem/serviceDLLmd5sum'", "content_type", "=", "'md5'", "content", "=", ...
Create a node for ServiceItem/serviceDLLmd5sum :return: A IndicatorItem represented as an Element node
[ "Create", "a", "node", "for", "ServiceItem", "/", "serviceDLLmd5sum", ":", "return", ":", "A", "IndicatorItem", "represented", "as", "an", "Element", "node" ]
python
train
39.153846
spotify/luigi
luigi/notifications.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/notifications.py#L264-L288
def send_email_sns(sender, subject, message, topic_ARN, image_png): """ Sends notification through AWS SNS. Takes Topic ARN from recipients. Does not handle access keys. Use either 1/ configuration file 2/ EC2 instance profile See also https://boto3.readthedocs.io/en/latest/guide/configuration.html. """ from boto3 import resource as boto3_resource sns = boto3_resource('sns') topic = sns.Topic(topic_ARN[0]) # Subject is max 100 chars if len(subject) > 100: subject = subject[0:48] + '...' + subject[-49:] response = topic.publish(Subject=subject, Message=message) logger.debug(("Message sent to SNS.\nMessageId: {},\nRequestId: {},\n" "HTTPSStatusCode: {}").format(response['MessageId'], response['ResponseMetadata']['RequestId'], response['ResponseMetadata']['HTTPStatusCode']))
[ "def", "send_email_sns", "(", "sender", ",", "subject", ",", "message", ",", "topic_ARN", ",", "image_png", ")", ":", "from", "boto3", "import", "resource", "as", "boto3_resource", "sns", "=", "boto3_resource", "(", "'sns'", ")", "topic", "=", "sns", ".", ...
Sends notification through AWS SNS. Takes Topic ARN from recipients. Does not handle access keys. Use either 1/ configuration file 2/ EC2 instance profile See also https://boto3.readthedocs.io/en/latest/guide/configuration.html.
[ "Sends", "notification", "through", "AWS", "SNS", ".", "Takes", "Topic", "ARN", "from", "recipients", "." ]
python
train
37.76
flowersteam/explauto
explauto/sensorimotor_model/inverse/cma.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L7038-L7064
def reeval(self, X, fit, func, ask, args=()): """store two fitness lists, `fit` and ``fitre`` reevaluating some solutions in `X`. ``self.evaluations`` evaluations are done for each reevaluated fitness value. See `__call__()`, where `reeval()` is called. """ self.fit = list(fit) self.fitre = list(fit) self.idx = self.indices(fit) if not len(self.idx): return self.idx evals = int(self.evaluations) if self.f_aggregate else 1 fagg = np.median if self.f_aggregate is None else self.f_aggregate for i in self.idx: X_i = X[i] if self.epsilon: if self.parallel: self.fitre[i] = fagg(func(ask(evals, X_i, self.epsilon), *args)) else: self.fitre[i] = fagg([func(ask(1, X_i, self.epsilon)[0], *args) for _k in xrange(evals)]) else: self.fitre[i] = fagg([func(X_i, *args) for _k in xrange(evals)]) self.evaluations_just_done = evals * len(self.idx) return self.fit, self.fitre, self.idx
[ "def", "reeval", "(", "self", ",", "X", ",", "fit", ",", "func", ",", "ask", ",", "args", "=", "(", ")", ")", ":", "self", ".", "fit", "=", "list", "(", "fit", ")", "self", ".", "fitre", "=", "list", "(", "fit", ")", "self", ".", "idx", "="...
store two fitness lists, `fit` and ``fitre`` reevaluating some solutions in `X`. ``self.evaluations`` evaluations are done for each reevaluated fitness value. See `__call__()`, where `reeval()` is called.
[ "store", "two", "fitness", "lists", "fit", "and", "fitre", "reevaluating", "some", "solutions", "in", "X", ".", "self", ".", "evaluations", "evaluations", "are", "done", "for", "each", "reevaluated", "fitness", "value", ".", "See", "__call__", "()", "where", ...
python
train
42.703704
niolabs/python-xbee
xbee/frame.py
https://github.com/niolabs/python-xbee/blob/b91be3d0ee7ccaa1990120b5b5490999d8e6cbc7/xbee/frame.py#L124-L140
def fill(self, byte): """ fill: byte -> None Adds the given raw byte to this APIFrame. If this APIFrame is marked as escaped and this byte is an escape byte, the next byte in a call to fill() will be unescaped. """ if self._unescape_next_byte: byte = intToByte(byteToInt(byte) ^ 0x20) self._unescape_next_byte = False elif self.escaped and byte == APIFrame.ESCAPE_BYTE: self._unescape_next_byte = True return self.raw_data += intToByte(byteToInt(byte))
[ "def", "fill", "(", "self", ",", "byte", ")", ":", "if", "self", ".", "_unescape_next_byte", ":", "byte", "=", "intToByte", "(", "byteToInt", "(", "byte", ")", "^", "0x20", ")", "self", ".", "_unescape_next_byte", "=", "False", "elif", "self", ".", "es...
fill: byte -> None Adds the given raw byte to this APIFrame. If this APIFrame is marked as escaped and this byte is an escape byte, the next byte in a call to fill() will be unescaped.
[ "fill", ":", "byte", "-", ">", "None" ]
python
train
32.882353
bitshares/uptick
uptick/callorders.py
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/callorders.py#L20-L57
def calls(ctx, obj, limit): """ List call/short positions of an account or an asset """ if obj.upper() == obj: # Asset from bitshares.asset import Asset asset = Asset(obj, full=True) calls = asset.get_call_orders(limit) t = [["acount", "debt", "collateral", "call price", "ratio"]] for call in calls: t.append( [ str(call["account"]["name"]), str(call["debt"]), str(call["collateral"]), str(call["call_price"]), "%.2f" % (call["ratio"]), ] ) print_table(t) else: # Account from bitshares.dex import Dex dex = Dex(bitshares_instance=ctx.bitshares) calls = dex.list_debt_positions(account=obj) t = [["debt", "collateral", "call price", "ratio"]] for symbol in calls: t.append( [ str(calls[symbol]["debt"]), str(calls[symbol]["collateral"]), str(calls[symbol]["call_price"]), "%.2f" % (calls[symbol]["ratio"]), ] ) print_table(t)
[ "def", "calls", "(", "ctx", ",", "obj", ",", "limit", ")", ":", "if", "obj", ".", "upper", "(", ")", "==", "obj", ":", "# Asset", "from", "bitshares", ".", "asset", "import", "Asset", "asset", "=", "Asset", "(", "obj", ",", "full", "=", "True", "...
List call/short positions of an account or an asset
[ "List", "call", "/", "short", "positions", "of", "an", "account", "or", "an", "asset" ]
python
train
31.868421
NoviceLive/intellicoder
intellicoder/intellisense/database.py
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/database.py#L267-L285
def make_export(self, exports): """Populate library exported function data.""" sql = 'drop table if exists export' logging.debug(sql) self.cursor.execute(sql) sql = 'create table if not exists export ' \ '(func text unique, module text)' logging.debug(sql) self.cursor.execute(sql) for module in exports: logging.debug(_('insering exports from %s'), module) sql = 'insert into export values (?, ?)' for func in exports[module]: if func: try: self.cursor.execute(sql, (func, module)) except sqlite3.IntegrityError: pass self.con.commit()
[ "def", "make_export", "(", "self", ",", "exports", ")", ":", "sql", "=", "'drop table if exists export'", "logging", ".", "debug", "(", "sql", ")", "self", ".", "cursor", ".", "execute", "(", "sql", ")", "sql", "=", "'create table if not exists export '", "'(f...
Populate library exported function data.
[ "Populate", "library", "exported", "function", "data", "." ]
python
train
39.157895
romanz/trezor-agent
libagent/gpg/agent.py
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/agent.py#L24-L28
def sig_encode(r, s): """Serialize ECDSA signature data into GPG S-expression.""" r = util.assuan_serialize(util.num2bytes(r, 32)) s = util.assuan_serialize(util.num2bytes(s, 32)) return b'(7:sig-val(5:ecdsa(1:r32:' + r + b')(1:s32:' + s + b')))'
[ "def", "sig_encode", "(", "r", ",", "s", ")", ":", "r", "=", "util", ".", "assuan_serialize", "(", "util", ".", "num2bytes", "(", "r", ",", "32", ")", ")", "s", "=", "util", ".", "assuan_serialize", "(", "util", ".", "num2bytes", "(", "s", ",", "...
Serialize ECDSA signature data into GPG S-expression.
[ "Serialize", "ECDSA", "signature", "data", "into", "GPG", "S", "-", "expression", "." ]
python
train
51.6
gorakhargosh/pepe
pepe/__init__.py
https://github.com/gorakhargosh/pepe/blob/1e40853378d515c99f03b3f59efa9b943d26eb62/pepe/__init__.py#L210-L454
def preprocess(input_file, output_file, defines=None, options=None, content_types_db=None, _preprocessed_files=None, _depth=0): """ Preprocesses the specified file. :param input_filename: The input path. :param output_filename: The output file (NOT path). :param defines: a dictionary of defined variables that will be understood in preprocessor statements. Keys must be strings and, currently, only the truth value of any key's value matters. :param options: A ``Namespace`` of command-line options. :param content_types_db: is an instance of ``ContentTypesDatabase``. :param _preprocessed_files: (for internal use only) is used to ensure files are not recursively preprocessed. :param _depth: When the call reaches _depth == 0, the output file is actually written. For all internal recursive calls _depth == 1. :return: Modified dictionary of defines or raises ``PreprocessorError`` if an error occurred. """ # Options that can later be turned into function parameters. include_paths = options.include_paths should_keep_lines = options.should_keep_lines should_substitute = options.should_substitute default_content_type = options.default_content_type input_filename = input_file.name defines = defines or {} # Ensure preprocessing isn't cyclic(?). _preprocessed_files = _preprocessed_files or [] input_file_absolute_path = absolute_path(input_filename) if input_file_absolute_path in _preprocessed_files: raise PreprocessorError("detected recursive #include of '%s'"\ % input_filename) _preprocessed_files.append(input_file_absolute_path) # Determine the content type and comment info for the input file. comment_groups = content_types_db.get_comment_group_for_path(input_filename, default_content_type) statement_regexps = get_statement_regexps(comment_groups) # Process the input file. # (Would be helpful if I knew anything about lexing and parsing # simple grammars.) input_lines = input_file.readlines() if _depth == 0: # Only at recursion depth 0 is the temporary buffer created. 
temp_output_buffer = StringIO() else: # At deeper levels, the temporary buffer is the output file. temp_output_buffer = output_file defines['__FILE__'] = input_filename SKIP, EMIT = range(2) # states states = [(EMIT, # a state is (<emit-or-skip-lines-in-this-section>, 0, # <have-emitted-in-this-if-block>, 0)] # <have-seen-'else'-in-this-if-block>) line_number = 0 for line in input_lines: line_number += 1 logger.debug("line %d: %r", line_number, line) defines['__LINE__'] = line_number # Is this line a preprocessor stmt line? #XXX Could probably speed this up by optimizing common case of # line NOT being a preprocessor stmt line. for statement_regexp in statement_regexps: match = statement_regexp.match(line) if match: break else: match = None if match: op = match.group("op") logger.debug("%r stmt (states: %r)", op, states) if op == "define": if not (states and states[-1][0] == SKIP): var, val = match.group("var", "val") if val is None: val = None else: try: val = eval(val, {}, {}) except: pass defines[var] = val elif op == "undef": if not (states and states[-1][0] == SKIP): var = match.group("var") try: del defines[var] except KeyError: pass elif op == "include": if not (states and states[-1][0] == SKIP): if "var" in match.groupdict(): # This is the second include form: #include VAR var = match.group("var") f = defines[var] else: # This is the first include form: #include "path" f = match.group("fname") for d in [os.path.dirname(input_filename)] + include_paths: fname = os.path.normpath(os.path.join(d, f)) if os.path.exists(fname): break else: raise PreprocessorError( "could not find #include'd file "\ "\"%s\" on include path: %r"\ % (f, include_paths)) with open(fname, 'rb') as f: defines = preprocess(f, temp_output_buffer, defines=defines, options=options, content_types_db=content_types_db, _preprocessed_files=_preprocessed_files, _depth=1) elif op in ("if", "ifdef", "ifndef"): if op == "if": expr = match.group("expr") elif op == "ifdef": expr = "defined('%s')" % 
match.group("expr") elif op == "ifndef": expr = "not defined('%s')" % match.group("expr") try: if states and states[-1][0] == SKIP: # Were are nested in a SKIP-portion of an if-block. states.append((SKIP, 0, 0)) elif _evaluate(expr, defines): states.append((EMIT, 1, 0)) else: states.append((SKIP, 0, 0)) except KeyError: raise PreprocessorError("use of undefined variable in "\ "#%s stmt" % op, defines['__FILE__'] , defines['__LINE__'], line) elif op == "elif": expr = match.group("expr") try: if states[-1][2]: # already had #else in this if-block raise PreprocessorError("illegal #elif after #else in "\ "same #if block", defines['__FILE__'], defines['__LINE__'], line) elif states[-1][1]: # if have emitted in this if-block states[-1] = (SKIP, 1, 0) elif states[:-1] and states[-2][0] == SKIP: # Were are nested in a SKIP-portion of an if-block. states[-1] = (SKIP, 0, 0) elif _evaluate(expr, defines): states[-1] = (EMIT, 1, 0) else: states[-1] = (SKIP, 0, 0) except IndexError: raise PreprocessorError("#elif stmt without leading #if "\ "stmt", defines['__FILE__'], defines['__LINE__'], line) elif op == "else": try: if states[-1][2]: # already had #else in this if-block raise PreprocessorError("illegal #else after #else in "\ "same #if block", defines['__FILE__'], defines['__LINE__'], line) elif states[-1][1]: # if have emitted in this if-block states[-1] = (SKIP, 1, 1) elif states[:-1] and states[-2][0] == SKIP: # Were are nested in a SKIP-portion of an if-block. 
states[-1] = (SKIP, 0, 1) else: states[-1] = (EMIT, 1, 1) except IndexError: raise PreprocessorError("#else stmt without leading #if "\ "stmt", defines['__FILE__'], defines['__LINE__'], line) elif op == "endif": try: states.pop() except IndexError: raise PreprocessorError("#endif stmt without leading #if"\ "stmt", defines['__FILE__'], defines['__LINE__'], line) elif op == "error": if not (states and states[-1][0] == SKIP): error = match.group("error") raise PreprocessorError("#error: " + error, defines['__FILE__'], defines['__LINE__'], line) logger.debug("states: %r", states) if should_keep_lines: temp_output_buffer.write("\n") else: try: if states[-1][0] == EMIT: logger.debug("emit line (%s)" % states[-1][1]) # Substitute all defines into line. # XXX Should avoid recursive substitutions. But that # would be a pain right now. sline = line if should_substitute: for name in reversed(sorted(defines, key=len)): value = defines[name] sline = sline.replace(name, str(value)) temp_output_buffer.write(sline) elif should_keep_lines: logger.debug("keep blank line (%s)" % states[-1][1]) temp_output_buffer.write("\n") else: logger.debug("skip line (%s)" % states[-1][1]) except IndexError: raise PreprocessorError("superfluous #endif before this line", defines['__FILE__'], defines['__LINE__']) if len(states) > 1: raise PreprocessorError("unterminated #if block", defines['__FILE__'], defines['__LINE__']) elif len(states) < 1: raise PreprocessorError("superfluous #endif on or before this line", defines['__FILE__'], defines['__LINE__']) #if temp_output_buffer != output_file: # temp_output_buffer.close() if _depth == 0: output_file.write(temp_output_buffer.getvalue()) temp_output_buffer.close() return defines
[ "def", "preprocess", "(", "input_file", ",", "output_file", ",", "defines", "=", "None", ",", "options", "=", "None", ",", "content_types_db", "=", "None", ",", "_preprocessed_files", "=", "None", ",", "_depth", "=", "0", ")", ":", "# Options that can later be...
Preprocesses the specified file. :param input_filename: The input path. :param output_filename: The output file (NOT path). :param defines: a dictionary of defined variables that will be understood in preprocessor statements. Keys must be strings and, currently, only the truth value of any key's value matters. :param options: A ``Namespace`` of command-line options. :param content_types_db: is an instance of ``ContentTypesDatabase``. :param _preprocessed_files: (for internal use only) is used to ensure files are not recursively preprocessed. :param _depth: When the call reaches _depth == 0, the output file is actually written. For all internal recursive calls _depth == 1. :return: Modified dictionary of defines or raises ``PreprocessorError`` if an error occurred.
[ "Preprocesses", "the", "specified", "file", "." ]
python
train
45.457143
albahnsen/CostSensitiveClassification
costcla/datasets/base.py
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/datasets/base.py#L183-L249
def load_creditscoring1(cost_mat_parameters=None): """Load and return the credit scoring Kaggle Credit competition dataset (classification). The credit scoring is a easily transformable example-dependent cost-sensitive classification dataset. Parameters ---------- cost_mat_parameters : Dictionary-like object, optional (default=None) If not None, must include 'int_r', 'int_cf', 'cl_max', 'n_term', 'k','lgd' Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: 'data', the data to learn, 'target', the classification labels, 'cost_mat', the cost matrix of each example, 'target_names', the meaning of the labels, 'feature_names', the meaning of the features, and 'DESCR', the full description of the dataset. References ---------- .. [1] A. Correa Bahnsen, D.Aouada, B, Ottersten, "Example-Dependent Cost-Sensitive Logistic Regression for Credit Scoring", in Proceedings of the International Conference on Machine Learning and Applications, , 2014. Examples -------- Let's say you are interested in the samples 10, 25, and 50 >>> from costcla.datasets import load_creditscoring1 >>> data = load_creditscoring1() >>> data.target[[10, 17, 400]] array([0, 1, 0]) >>> data.cost_mat[[10, 17, 400]] array([[ 1023.73054104, 18750. , 0. , 0. ], [ 717.25781516, 6749.25 , 0. , 0. ], [ 1004.32819923, 17990.25 , 0. , 0. 
]]) """ module_path = dirname(__file__) raw_data = pd.read_csv(join(module_path, 'data', 'creditscoring1.csv.gz'), delimiter=',', compression='gzip') descr = open(join(module_path, 'descr', 'creditscoring1.rst')).read() # Exclude MonthlyIncome = nan or =0 or DebtRatio >1 raw_data = raw_data.dropna() raw_data = raw_data.loc[(raw_data['MonthlyIncome'] > 0)] raw_data = raw_data.loc[(raw_data['DebtRatio'] < 1)] target = raw_data['SeriousDlqin2yrs'].values.astype(np.int) data = raw_data.drop(['SeriousDlqin2yrs', 'id'], 1) # Calculate cost_mat (see[1]) if cost_mat_parameters is None: cost_mat_parameters = {'int_r': 0.0479 / 12, 'int_cf': 0.0294 / 12, 'cl_max': 25000, 'n_term': 24, 'k': 3, 'lgd': .75} pi_1 = target.mean() cost_mat = _creditscoring_costmat(data['MonthlyIncome'].values, data['DebtRatio'].values, pi_1, cost_mat_parameters) return Bunch(data=data.values, target=target, cost_mat=cost_mat, target_names=['no', 'yes'], DESCR=descr, feature_names=data.columns.values, name='CreditScoring_Kaggle2011')
[ "def", "load_creditscoring1", "(", "cost_mat_parameters", "=", "None", ")", ":", "module_path", "=", "dirname", "(", "__file__", ")", "raw_data", "=", "pd", ".", "read_csv", "(", "join", "(", "module_path", ",", "'data'", ",", "'creditscoring1.csv.gz'", ")", "...
Load and return the credit scoring Kaggle Credit competition dataset (classification). The credit scoring is a easily transformable example-dependent cost-sensitive classification dataset. Parameters ---------- cost_mat_parameters : Dictionary-like object, optional (default=None) If not None, must include 'int_r', 'int_cf', 'cl_max', 'n_term', 'k','lgd' Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: 'data', the data to learn, 'target', the classification labels, 'cost_mat', the cost matrix of each example, 'target_names', the meaning of the labels, 'feature_names', the meaning of the features, and 'DESCR', the full description of the dataset. References ---------- .. [1] A. Correa Bahnsen, D.Aouada, B, Ottersten, "Example-Dependent Cost-Sensitive Logistic Regression for Credit Scoring", in Proceedings of the International Conference on Machine Learning and Applications, , 2014. Examples -------- Let's say you are interested in the samples 10, 25, and 50 >>> from costcla.datasets import load_creditscoring1 >>> data = load_creditscoring1() >>> data.target[[10, 17, 400]] array([0, 1, 0]) >>> data.cost_mat[[10, 17, 400]] array([[ 1023.73054104, 18750. , 0. , 0. ], [ 717.25781516, 6749.25 , 0. , 0. ], [ 1004.32819923, 17990.25 , 0. , 0. ]])
[ "Load", "and", "return", "the", "credit", "scoring", "Kaggle", "Credit", "competition", "dataset", "(", "classification", ")", "." ]
python
train
42.41791
xflr6/bitsets
bitsets/bases.py
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/bases.py#L60-L64
def members(self, as_set=False): """Return the set members tuple/frozenset.""" if as_set: return frozenset(map(self._members.__getitem__, self._indexes())) return tuple(map(self._members.__getitem__, self._indexes()))
[ "def", "members", "(", "self", ",", "as_set", "=", "False", ")", ":", "if", "as_set", ":", "return", "frozenset", "(", "map", "(", "self", ".", "_members", ".", "__getitem__", ",", "self", ".", "_indexes", "(", ")", ")", ")", "return", "tuple", "(", ...
Return the set members tuple/frozenset.
[ "Return", "the", "set", "members", "tuple", "/", "frozenset", "." ]
python
train
49.8
brmscheiner/ideogram
ideogram/converter.py
https://github.com/brmscheiner/ideogram/blob/422bf566c51fd56f7bbb6e75b16d18d52b4c7568/ideogram/converter.py#L104-L111
def formatBodyNode(root,path): '''Format the root node for use as the body node.''' body = root body.name = "body" body.weight = calcFnWeight(body) body.path = path body.pclass = None return body
[ "def", "formatBodyNode", "(", "root", ",", "path", ")", ":", "body", "=", "root", "body", ".", "name", "=", "\"body\"", "body", ".", "weight", "=", "calcFnWeight", "(", "body", ")", "body", ".", "path", "=", "path", "body", ".", "pclass", "=", "None"...
Format the root node for use as the body node.
[ "Format", "the", "root", "node", "for", "use", "as", "the", "body", "node", "." ]
python
train
28.375
pytroll/satpy
satpy/readers/utils.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/utils.py#L178-L193
def get_sub_area(area, xslice, yslice): """Apply slices to the area_extent and size of the area.""" new_area_extent = ((area.pixel_upper_left[0] + (xslice.start - 0.5) * area.pixel_size_x), (area.pixel_upper_left[1] - (yslice.stop - 0.5) * area.pixel_size_y), (area.pixel_upper_left[0] + (xslice.stop - 0.5) * area.pixel_size_x), (area.pixel_upper_left[1] - (yslice.start - 0.5) * area.pixel_size_y)) return AreaDefinition(area.area_id, area.name, area.proj_id, area.proj_dict, xslice.stop - xslice.start, yslice.stop - yslice.start, new_area_extent)
[ "def", "get_sub_area", "(", "area", ",", "xslice", ",", "yslice", ")", ":", "new_area_extent", "=", "(", "(", "area", ".", "pixel_upper_left", "[", "0", "]", "+", "(", "xslice", ".", "start", "-", "0.5", ")", "*", "area", ".", "pixel_size_x", ")", ",...
Apply slices to the area_extent and size of the area.
[ "Apply", "slices", "to", "the", "area_extent", "and", "size", "of", "the", "area", "." ]
python
train
51.0625
NiklasRosenstein-Python/nr-deprecated
nr/py/context.py
https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/py/context.py#L61-L77
def skip(stackframe=1): """ Must be called from within `__enter__()`. Performs some magic to have a #ContextSkipped exception be raised the moment the with context is entered. The #ContextSkipped must then be handled in `__exit__()` to suppress the propagation of the exception. > Important: This function does not raise an exception by itself, thus > the `__enter__()` method will continue to execute after using this function. """ def trace(frame, event, args): raise ContextSkipped sys.settrace(lambda *args, **kwargs: None) frame = sys._getframe(stackframe + 1) frame.f_trace = trace
[ "def", "skip", "(", "stackframe", "=", "1", ")", ":", "def", "trace", "(", "frame", ",", "event", ",", "args", ")", ":", "raise", "ContextSkipped", "sys", ".", "settrace", "(", "lambda", "*", "args", ",", "*", "*", "kwargs", ":", "None", ")", "fram...
Must be called from within `__enter__()`. Performs some magic to have a #ContextSkipped exception be raised the moment the with context is entered. The #ContextSkipped must then be handled in `__exit__()` to suppress the propagation of the exception. > Important: This function does not raise an exception by itself, thus > the `__enter__()` method will continue to execute after using this function.
[ "Must", "be", "called", "from", "within", "__enter__", "()", ".", "Performs", "some", "magic", "to", "have", "a", "#ContextSkipped", "exception", "be", "raised", "the", "moment", "the", "with", "context", "is", "entered", ".", "The", "#ContextSkipped", "must",...
python
train
35.411765
jrief/django-websocket-redis
ws4redis/websocket.py
https://github.com/jrief/django-websocket-redis/blob/abcddaad2f579d71dbf375e5e34bc35eef795a81/ws4redis/websocket.py#L157-L207
def read_message(self): """ Return the next text or binary message from the socket. This is an internal method as calling this will not cleanup correctly if an exception is called. Use `receive` instead. """ opcode = None message = None while True: header, payload = self.read_frame() f_opcode = header.opcode if f_opcode in (self.OPCODE_TEXT, self.OPCODE_BINARY): # a new frame if opcode: raise WebSocketError("The opcode in non-fin frame is expected to be zero, got {0!r}".format(f_opcode)) # Start reading a new message, reset the validator self.utf8validator.reset() self.utf8validate_last = (True, True, 0, 0) opcode = f_opcode elif f_opcode == self.OPCODE_CONTINUATION: if not opcode: raise WebSocketError("Unexpected frame with opcode=0") elif f_opcode == self.OPCODE_PING: self.handle_ping(header, payload) continue elif f_opcode == self.OPCODE_PONG: self.handle_pong(header, payload) continue elif f_opcode == self.OPCODE_CLOSE: self.handle_close(header, payload) return else: raise WebSocketError("Unexpected opcode={0!r}".format(f_opcode)) if opcode == self.OPCODE_TEXT: self.validate_utf8(payload) if six.PY3: payload = payload.decode() if message is None: message = six.text_type() if opcode == self.OPCODE_TEXT else six.binary_type() message += payload if header.fin: break if opcode == self.OPCODE_TEXT: if six.PY2: self.validate_utf8(message) else: self.validate_utf8(message.encode()) return message else: return bytearray(message)
[ "def", "read_message", "(", "self", ")", ":", "opcode", "=", "None", "message", "=", "None", "while", "True", ":", "header", ",", "payload", "=", "self", ".", "read_frame", "(", ")", "f_opcode", "=", "header", ".", "opcode", "if", "f_opcode", "in", "("...
Return the next text or binary message from the socket. This is an internal method as calling this will not cleanup correctly if an exception is called. Use `receive` instead.
[ "Return", "the", "next", "text", "or", "binary", "message", "from", "the", "socket", "." ]
python
train
40.215686
googleapis/google-cloud-python
storage/google/cloud/storage/notification.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/notification.py#L291-L321
def reload(self, client=None): """Update this notification from the server configuration. See: https://cloud.google.com/storage/docs/json_api/v1/notifications/get If :attr:`user_project` is set on the bucket, bills the API request to that project. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :rtype: bool :returns: True, if the notification exists, else False. :raises ValueError: if the notification has no ID. """ if self.notification_id is None: raise ValueError("Notification not intialized by server") client = self._require_client(client) query_params = {} if self.bucket.user_project is not None: query_params["userProject"] = self.bucket.user_project response = client._connection.api_request( method="GET", path=self.path, query_params=query_params ) self._set_properties(response)
[ "def", "reload", "(", "self", ",", "client", "=", "None", ")", ":", "if", "self", ".", "notification_id", "is", "None", ":", "raise", "ValueError", "(", "\"Notification not intialized by server\"", ")", "client", "=", "self", ".", "_require_client", "(", "clie...
Update this notification from the server configuration. See: https://cloud.google.com/storage/docs/json_api/v1/notifications/get If :attr:`user_project` is set on the bucket, bills the API request to that project. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :rtype: bool :returns: True, if the notification exists, else False. :raises ValueError: if the notification has no ID.
[ "Update", "this", "notification", "from", "the", "server", "configuration", "." ]
python
train
36.83871
Karaage-Cluster/karaage
karaage/plugins/kgusage/tasks.py
https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/plugins/kgusage/tasks.py#L365-L477
def _gen_project_trend_graph(project, start, end, force_overwrite=False): """Generates a bar graph for a project Keyword arguments: project -- Project start -- start date end -- end date """ filename = graphs.get_project_trend_graph_filename(project, start, end) csv_filename = os.path.join(GRAPH_ROOT, filename + '.csv') png_filename = os.path.join(GRAPH_ROOT, filename + '.png') _check_directory_exists(csv_filename) _check_directory_exists(png_filename) if not settings.GRAPH_DEBUG or force_overwrite: if os.path.exists(csv_filename): if os.path.exists(png_filename): return query = CPUJob.objects.filter( project=project, date__range=(start, end) ) query = query.values('account', 'account__username', 'date') query = query.annotate(Sum('cpu_usage')).order_by('account', 'date') t_start = start t_end = end start_str = start.strftime('%Y-%m-%d') end_str = end.strftime('%Y-%m-%d') fig, ax = plt.subplots(figsize=(6, 4)) ax.set_xlim(start, end + datetime.timedelta(days=1)) ax.set_title('%s %s - %s' % (project.pid, start_str, end_str)) ax.set_ylabel("CPU Time (hours)") ax.set_xlabel("Date") locator = mdates.AutoDateLocator() ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(locator)) ax.xaxis.set_minor_locator(mdates.DayLocator()) data = {} x_data = {} y_data = {} with open(csv_filename, 'w') as csv_file: csv_writer = csv.writer(csv_file) for row in query.iterator(): csv_writer.writerow([ row['account__username'], row['date'], row['cpu_usage__sum'] / 3600.00 ]) account = row['account'] date = row['date'] if account not in data: data[account] = {} x_data[account] = [] y_data[account] = [] data[account][date] = row['cpu_usage__sum'] for account, dates in six.iteritems(data): start = t_start end = t_end while start <= end: total = 0 if start in dates: total = dates[start] x_data[account].append(start) y_data[account].append(total / 3600.00) start = start + datetime.timedelta(days=1) del data totals = [] start = t_start end = t_end while 
start <= end: totals.append(0) start = start + datetime.timedelta(days=1) count = 0 for account in x_data.keys(): ax.bar( x_data[account], y_data[account], bottom=totals, color=graphs.get_colour(count), edgecolor=graphs.get_colour(count), align='edge') count = count + 1 i = 0 start = t_start end = t_end while start <= end: totals[i] += y_data[account][i] i = i + 1 start = start + datetime.timedelta(days=1) del x_data del y_data del totals fig.autofmt_xdate() plt.tight_layout() plt.savefig(png_filename) plt.close()
[ "def", "_gen_project_trend_graph", "(", "project", ",", "start", ",", "end", ",", "force_overwrite", "=", "False", ")", ":", "filename", "=", "graphs", ".", "get_project_trend_graph_filename", "(", "project", ",", "start", ",", "end", ")", "csv_filename", "=", ...
Generates a bar graph for a project Keyword arguments: project -- Project start -- start date end -- end date
[ "Generates", "a", "bar", "graph", "for", "a", "project" ]
python
train
27.415929
aws/sagemaker-python-sdk
src/sagemaker/amazon/common.py
https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/amazon/common.py#L113-L150
def write_spmatrix_to_sparse_tensor(file, array, labels=None): """Writes a scipy sparse matrix to a sparse tensor""" if not issparse(array): raise TypeError("Array must be sparse") # Validate shape of array and labels, resolve array and label types if not len(array.shape) == 2: raise ValueError("Array must be a Matrix") if labels is not None: if not len(labels.shape) == 1: raise ValueError("Labels must be a Vector") if labels.shape[0] not in array.shape: raise ValueError("Label shape {} not compatible with array shape {}".format( labels.shape, array.shape)) resolved_label_type = _resolve_type(labels.dtype) resolved_type = _resolve_type(array.dtype) csr_array = array.tocsr() n_rows, n_cols = csr_array.shape record = Record() for row_idx in range(n_rows): record.Clear() row = csr_array.getrow(row_idx) # Write values _write_feature_tensor(resolved_type, record, row.data) # Write keys _write_keys_tensor(resolved_type, record, row.indices.astype(np.uint64)) # Write labels if labels is not None: _write_label_tensor(resolved_label_type, record, labels[row_idx]) # Write shape _write_shape(resolved_type, record, n_cols) _write_recordio(file, record.SerializeToString())
[ "def", "write_spmatrix_to_sparse_tensor", "(", "file", ",", "array", ",", "labels", "=", "None", ")", ":", "if", "not", "issparse", "(", "array", ")", ":", "raise", "TypeError", "(", "\"Array must be sparse\"", ")", "# Validate shape of array and labels, resolve array...
Writes a scipy sparse matrix to a sparse tensor
[ "Writes", "a", "scipy", "sparse", "matrix", "to", "a", "sparse", "tensor" ]
python
train
36.342105
Guake/guake
guake/prefs.py
https://github.com/Guake/guake/blob/4153ef38f9044cbed6494075fce80acd5809df2b/guake/prefs.py#L907-L916
def on_palette_name_changed(self, combo): """Changes the value of palette in dconf """ palette_name = combo.get_active_text() if palette_name not in PALETTES: return self.settings.styleFont.set_string('palette', PALETTES[palette_name]) self.settings.styleFont.set_string('palette-name', palette_name) self.set_palette_colors(PALETTES[palette_name]) self.update_demo_palette(PALETTES[palette_name])
[ "def", "on_palette_name_changed", "(", "self", ",", "combo", ")", ":", "palette_name", "=", "combo", ".", "get_active_text", "(", ")", "if", "palette_name", "not", "in", "PALETTES", ":", "return", "self", ".", "settings", ".", "styleFont", ".", "set_string", ...
Changes the value of palette in dconf
[ "Changes", "the", "value", "of", "palette", "in", "dconf" ]
python
train
46.4
django-treebeard/django-treebeard
treebeard/mp_tree.py
https://github.com/django-treebeard/django-treebeard/blob/8042ee939cb45394909237da447f8925e3cc6aa3/treebeard/mp_tree.py#L1039-L1042
def get_root(self): """:returns: the root node for the current node object.""" return get_result_class(self.__class__).objects.get( path=self.path[0:self.steplen])
[ "def", "get_root", "(", "self", ")", ":", "return", "get_result_class", "(", "self", ".", "__class__", ")", ".", "objects", ".", "get", "(", "path", "=", "self", ".", "path", "[", "0", ":", "self", ".", "steplen", "]", ")" ]
:returns: the root node for the current node object.
[ ":", "returns", ":", "the", "root", "node", "for", "the", "current", "node", "object", "." ]
python
train
47
Cog-Creators/Red-Lavalink
lavalink/utils.py
https://github.com/Cog-Creators/Red-Lavalink/blob/5b3fc6eb31ee5db8bd2b633a523cf69749957111/lavalink/utils.py#L1-L6
def format_time(time): """ Formats the given time into HH:MM:SS """ h, r = divmod(time / 1000, 3600) m, s = divmod(r, 60) return "%02d:%02d:%02d" % (h, m, s)
[ "def", "format_time", "(", "time", ")", ":", "h", ",", "r", "=", "divmod", "(", "time", "/", "1000", ",", "3600", ")", "m", ",", "s", "=", "divmod", "(", "r", ",", "60", ")", "return", "\"%02d:%02d:%02d\"", "%", "(", "h", ",", "m", ",", "s", ...
Formats the given time into HH:MM:SS
[ "Formats", "the", "given", "time", "into", "HH", ":", "MM", ":", "SS" ]
python
train
28.166667
BerkeleyAutomation/perception
perception/image.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/image.py#L3351-L3379
def resize(self, size, interp='nearest'): """Resize the image. Parameters ---------- size : int, float, or tuple * int - Percentage of current size. * float - Fraction of current size. * tuple - Size of the output image. interp : :obj:`str`, optional Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear', 'bicubic', or 'cubic') Returns ------- :obj:`PointCloudImage` The resized image. """ resized_data_0 = sm.imresize(self._data[:,:,0], size, interp=interp, mode='F') resized_data_1 = sm.imresize(self._data[:,:,1], size, interp=interp, mode='F') resized_data_2 = sm.imresize(self._data[:,:,2], size, interp=interp, mode='F') resized_data = np.zeros([resized_data_0.shape[0], resized_data_0.shape[1], self.channels]) resized_data[:,:,0] = resized_data_0 resized_data[:,:,1] = resized_data_1 resized_data[:,:,2] = resized_data_2 return PointCloudImage(resized_data, self._frame)
[ "def", "resize", "(", "self", ",", "size", ",", "interp", "=", "'nearest'", ")", ":", "resized_data_0", "=", "sm", ".", "imresize", "(", "self", ".", "_data", "[", ":", ",", ":", ",", "0", "]", ",", "size", ",", "interp", "=", "interp", ",", "mod...
Resize the image. Parameters ---------- size : int, float, or tuple * int - Percentage of current size. * float - Fraction of current size. * tuple - Size of the output image. interp : :obj:`str`, optional Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear', 'bicubic', or 'cubic') Returns ------- :obj:`PointCloudImage` The resized image.
[ "Resize", "the", "image", "." ]
python
train
39.482759
jleclanche/fireplace
fireplace/dsl/evaluator.py
https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/dsl/evaluator.py#L52-L60
def trigger(self, source): """ Triggers all actions meant to trigger on the board state from `source`. """ actions = self.evaluate(source) if actions: if not hasattr(actions, "__iter__"): actions = (actions, ) source.game.trigger_actions(source, actions)
[ "def", "trigger", "(", "self", ",", "source", ")", ":", "actions", "=", "self", ".", "evaluate", "(", "source", ")", "if", "actions", ":", "if", "not", "hasattr", "(", "actions", ",", "\"__iter__\"", ")", ":", "actions", "=", "(", "actions", ",", ")"...
Triggers all actions meant to trigger on the board state from `source`.
[ "Triggers", "all", "actions", "meant", "to", "trigger", "on", "the", "board", "state", "from", "source", "." ]
python
train
29.555556
adobe-apiplatform/umapi-client.py
umapi_client/connection.py
https://github.com/adobe-apiplatform/umapi-client.py/blob/1c446d79643cc8615adaa23e12dce3ac5782cf76/umapi_client/connection.py#L386-L412
def _execute_batch(self, actions): """ Execute a single batch of Actions. For each action that has a problem, we annotate the action with the error information for that action, and we return the number of successful actions in the batch. :param actions: the list of Action objects to be executed :return: count of successful actions """ wire_form = [a.wire_dict() for a in actions] if self.test_mode: result = self.make_call("/action/%s?testOnly=true" % self.org_id, wire_form) else: result = self.make_call("/action/%s" % self.org_id, wire_form) body = result.json() if body.get("errors", None) is None: if body.get("result") != "success": if self.logger: self.logger.warning("Server action result: no errors, but no success:\n%s", body) return len(actions) try: if body.get("result") == "success": if self.logger: self.logger.warning("Server action result: errors, but success report:\n%s", body) for error in body["errors"]: actions[error["index"]].report_command_error(error) except: raise ClientError(str(body), result) return body.get("completed", 0)
[ "def", "_execute_batch", "(", "self", ",", "actions", ")", ":", "wire_form", "=", "[", "a", ".", "wire_dict", "(", ")", "for", "a", "in", "actions", "]", "if", "self", ".", "test_mode", ":", "result", "=", "self", ".", "make_call", "(", "\"/action/%s?t...
Execute a single batch of Actions. For each action that has a problem, we annotate the action with the error information for that action, and we return the number of successful actions in the batch. :param actions: the list of Action objects to be executed :return: count of successful actions
[ "Execute", "a", "single", "batch", "of", "Actions", ".", "For", "each", "action", "that", "has", "a", "problem", "we", "annotate", "the", "action", "with", "the", "error", "information", "for", "that", "action", "and", "we", "return", "the", "number", "of"...
python
train
47.777778
saltstack/salt
salt/modules/boto_iam.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_iam.py#L160-L175
def role_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if an IAM role exists. CLI Example: .. code-block:: bash salt myminion boto_iam.role_exists myirole ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.get_role(name) return True except boto.exception.BotoServerError: return False
[ "def", "role_exists", "(", "name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", ...
Check to see if an IAM role exists. CLI Example: .. code-block:: bash salt myminion boto_iam.role_exists myirole
[ "Check", "to", "see", "if", "an", "IAM", "role", "exists", "." ]
python
train
25.25
Alignak-monitoring/alignak
alignak/scheduler.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L503-L520
def add_notification(self, notification): """Add a notification into actions list :param notification: notification to add :type notification: alignak.notification.Notification :return: None """ if notification.uuid in self.actions: logger.warning("Already existing notification: %s", notification) return logger.debug("Adding a notification: %s", notification) self.actions[notification.uuid] = notification self.nb_notifications += 1 # A notification which is not a master one asks for a brok if notification.contact is not None: self.add(notification.get_initial_status_brok())
[ "def", "add_notification", "(", "self", ",", "notification", ")", ":", "if", "notification", ".", "uuid", "in", "self", ".", "actions", ":", "logger", ".", "warning", "(", "\"Already existing notification: %s\"", ",", "notification", ")", "return", "logger", "."...
Add a notification into actions list :param notification: notification to add :type notification: alignak.notification.Notification :return: None
[ "Add", "a", "notification", "into", "actions", "list" ]
python
train
38.333333
ibis-project/ibis
ibis/expr/api.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/api.py#L1642-L1656
def geo_perimeter(arg, use_spheroid=None): """ Compute perimeter of a geo spatial data Parameters ---------- arg : geometry or geography use_spheroid : default None Returns ------- perimeter : double scalar """ op = ops.GeoPerimeter(arg, use_spheroid) return op.to_expr()
[ "def", "geo_perimeter", "(", "arg", ",", "use_spheroid", "=", "None", ")", ":", "op", "=", "ops", ".", "GeoPerimeter", "(", "arg", ",", "use_spheroid", ")", "return", "op", ".", "to_expr", "(", ")" ]
Compute perimeter of a geo spatial data Parameters ---------- arg : geometry or geography use_spheroid : default None Returns ------- perimeter : double scalar
[ "Compute", "perimeter", "of", "a", "geo", "spatial", "data" ]
python
train
20.466667
Ceasar/staticjinja
staticjinja/staticjinja.py
https://github.com/Ceasar/staticjinja/blob/57b8cac81da7fee3387510af4843e1bd1fd3ba28/staticjinja/staticjinja.py#L321-L338
def is_template(self, filename): """Check if a file is a template. A file is a considered a template if it is neither a partial nor ignored. :param filename: the name of the file to check """ if self.is_partial(filename): return False if self.is_ignored(filename): return False if self.is_static(filename): return False return True
[ "def", "is_template", "(", "self", ",", "filename", ")", ":", "if", "self", ".", "is_partial", "(", "filename", ")", ":", "return", "False", "if", "self", ".", "is_ignored", "(", "filename", ")", ":", "return", "False", "if", "self", ".", "is_static", ...
Check if a file is a template. A file is a considered a template if it is neither a partial nor ignored. :param filename: the name of the file to check
[ "Check", "if", "a", "file", "is", "a", "template", "." ]
python
train
23.722222
saltstack/salt
salt/modules/mdata.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mdata.py#L158-L183
def delete_(*keyname): ''' Delete metadata prop : string name of property CLI Example: .. code-block:: bash salt '*' mdata.get salt:role salt '*' mdata.get user-script salt:role ''' mdata = _check_mdata_delete() valid_keynames = list_() ret = {} for k in keyname: if mdata and k in valid_keynames: cmd = '{0} {1}'.format(mdata, k) ret[k] = __salt__['cmd.run_all'](cmd, ignore_retcode=True)['retcode'] == 0 else: ret[k] = True return ret
[ "def", "delete_", "(", "*", "keyname", ")", ":", "mdata", "=", "_check_mdata_delete", "(", ")", "valid_keynames", "=", "list_", "(", ")", "ret", "=", "{", "}", "for", "k", "in", "keyname", ":", "if", "mdata", "and", "k", "in", "valid_keynames", ":", ...
Delete metadata prop : string name of property CLI Example: .. code-block:: bash salt '*' mdata.get salt:role salt '*' mdata.get user-script salt:role
[ "Delete", "metadata" ]
python
train
20.653846
prompt-toolkit/pyvim
pyvim/commands/commands.py
https://github.com/prompt-toolkit/pyvim/blob/5928b53b9d700863c1a06d2181a034a955f94594/pyvim/commands/commands.py#L229-L241
def _buffer(editor, variables, force=False): """ Go to one of the open buffers. """ eb = editor.window_arrangement.active_editor_buffer force = bool(variables['force']) buffer_name = variables.get('buffer_name') if buffer_name: if not force and eb.has_unsaved_changes: editor.show_message(_NO_WRITE_SINCE_LAST_CHANGE_TEXT) else: editor.window_arrangement.go_to_buffer(buffer_name)
[ "def", "_buffer", "(", "editor", ",", "variables", ",", "force", "=", "False", ")", ":", "eb", "=", "editor", ".", "window_arrangement", ".", "active_editor_buffer", "force", "=", "bool", "(", "variables", "[", "'force'", "]", ")", "buffer_name", "=", "var...
Go to one of the open buffers.
[ "Go", "to", "one", "of", "the", "open", "buffers", "." ]
python
train
33.615385
waqasbhatti/astrobase
astrobase/magnitudes.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/magnitudes.py#L296-L316
def jhk_to_sdssg(jmag,hmag,kmag): '''Converts given J, H, Ks mags to an SDSS g magnitude value. Parameters ---------- jmag,hmag,kmag : float 2MASS J, H, Ks mags of the object. Returns ------- float The converted SDSS g band magnitude. ''' return convert_constants(jmag,hmag,kmag, SDSSG_JHK, SDSSG_JH, SDSSG_JK, SDSSG_HK, SDSSG_J, SDSSG_H, SDSSG_K)
[ "def", "jhk_to_sdssg", "(", "jmag", ",", "hmag", ",", "kmag", ")", ":", "return", "convert_constants", "(", "jmag", ",", "hmag", ",", "kmag", ",", "SDSSG_JHK", ",", "SDSSG_JH", ",", "SDSSG_JK", ",", "SDSSG_HK", ",", "SDSSG_J", ",", "SDSSG_H", ",", "SDSSG...
Converts given J, H, Ks mags to an SDSS g magnitude value. Parameters ---------- jmag,hmag,kmag : float 2MASS J, H, Ks mags of the object. Returns ------- float The converted SDSS g band magnitude.
[ "Converts", "given", "J", "H", "Ks", "mags", "to", "an", "SDSS", "g", "magnitude", "value", "." ]
python
valid
22.47619
pandas-dev/pandas
pandas/core/internals/blocks.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1952-L1979
def to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.', quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ values = self.values if slicer is not None: values = values[:, slicer] # see gh-13418: no special formatting is desired at the # output (important for appropriate 'quoting' behaviour), # so do not pass it through the FloatArrayFormatter if float_format is None and decimal == '.': mask = isna(values) if not quoting: values = values.astype(str) else: values = np.array(values, dtype='object') values[mask] = na_rep return values from pandas.io.formats.format import FloatArrayFormatter formatter = FloatArrayFormatter(values, na_rep=na_rep, float_format=float_format, decimal=decimal, quoting=quoting, fixed_width=False) return formatter.get_result_as_array()
[ "def", "to_native_types", "(", "self", ",", "slicer", "=", "None", ",", "na_rep", "=", "''", ",", "float_format", "=", "None", ",", "decimal", "=", "'.'", ",", "quoting", "=", "None", ",", "*", "*", "kwargs", ")", ":", "values", "=", "self", ".", "...
convert to our native types format, slicing if desired
[ "convert", "to", "our", "native", "types", "format", "slicing", "if", "desired" ]
python
train
40.535714
resonai/ybt
yabt/docker.py
https://github.com/resonai/ybt/blob/5b40df0922ef3383eb85f2b04a26a2db4b81b3fd/yabt/docker.py#L152-L181
def handle_build_cache( conf: Config, name: str, tag: str, icb: ImageCachingBehavior): """Handle Docker image build cache. Return image ID if image is cached, and there's no need to redo the build. Return None if need to build the image (whether cached locally or not). Raise RuntimeError if not allowed to build the image because of state of local cache. TODO(itamar): figure out a better name for this function, that reflects what it returns (e.g. `get_cached_image_id`), without "surprising" the caller with the potential of long and non-trivial operations that are not usually expected from functions with such names. """ if icb.pull_if_cached or (icb.pull_if_not_cached and get_cached_image_id(icb.remote_image) is None): try: pull_docker_image(icb.remote_image, conf.docker_pull_cmd) except CalledProcessError: pass local_image = '{}:{}'.format(name, tag) if (icb.skip_build_if_cached and get_cached_image_id(icb.remote_image) is not None): tag_docker_image(icb.remote_image, local_image) return get_cached_image_id(local_image) if ((not icb.allow_build_if_not_cached) and get_cached_image_id(icb.remote_image) is None): raise RuntimeError('No cached image for {}'.format(local_image)) return None
[ "def", "handle_build_cache", "(", "conf", ":", "Config", ",", "name", ":", "str", ",", "tag", ":", "str", ",", "icb", ":", "ImageCachingBehavior", ")", ":", "if", "icb", ".", "pull_if_cached", "or", "(", "icb", ".", "pull_if_not_cached", "and", "get_cached...
Handle Docker image build cache. Return image ID if image is cached, and there's no need to redo the build. Return None if need to build the image (whether cached locally or not). Raise RuntimeError if not allowed to build the image because of state of local cache. TODO(itamar): figure out a better name for this function, that reflects what it returns (e.g. `get_cached_image_id`), without "surprising" the caller with the potential of long and non-trivial operations that are not usually expected from functions with such names.
[ "Handle", "Docker", "image", "build", "cache", "." ]
python
train
45.533333
jobovy/galpy
galpy/util/bovy_coords.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/util/bovy_coords.py#L871-L903
def cov_dvrpmllbb_to_vxyz_single(d,e_d,e_vr,pmll,pmbb,cov_pmllbb,l,b): """ NAME: cov_dvrpmllbb_to_vxyz PURPOSE: propagate distance, radial velocity, and proper motion uncertainties to Galactic coordinates for scalar inputs INPUT: d - distance [kpc, as/mas for plx] e_d - distance uncertainty [kpc, [as/mas] for plx] e_vr - low velocity uncertainty [km/s] pmll - proper motion in l (*cos(b)) [ [as/mas]/yr ] pmbb - proper motion in b [ [as/mas]/yr ] cov_pmllbb - uncertainty covariance for proper motion l - Galactic longitude [rad] b - Galactic lattitude [rad] OUTPUT: cov(vx,vy,vz) [3,3] HISTORY: 2010-04-12 - Written - Bovy (NYU) """ M= _K*sc.array([[pmll,d,0.],[pmbb,0.,d]]) cov_dpmllbb= sc.zeros((3,3)) cov_dpmllbb[0,0]= e_d**2. cov_dpmllbb[1:3,1:3]= cov_pmllbb cov_vlvb= sc.dot(M,sc.dot(cov_dpmllbb,M.T)) cov_vrvlvb= sc.zeros((3,3)) cov_vrvlvb[0,0]= e_vr**2. cov_vrvlvb[1:3,1:3]= cov_vlvb R= sc.array([[m.cos(l)*m.cos(b), m.sin(l)*m.cos(b), m.sin(b)], [-m.sin(l),m.cos(l),0.], [-m.cos(l)*m.sin(b),-m.sin(l)*m.sin(b), m.cos(b)]]) return sc.dot(R.T,sc.dot(cov_vrvlvb,R))
[ "def", "cov_dvrpmllbb_to_vxyz_single", "(", "d", ",", "e_d", ",", "e_vr", ",", "pmll", ",", "pmbb", ",", "cov_pmllbb", ",", "l", ",", "b", ")", ":", "M", "=", "_K", "*", "sc", ".", "array", "(", "[", "[", "pmll", ",", "d", ",", "0.", "]", ",", ...
NAME: cov_dvrpmllbb_to_vxyz PURPOSE: propagate distance, radial velocity, and proper motion uncertainties to Galactic coordinates for scalar inputs INPUT: d - distance [kpc, as/mas for plx] e_d - distance uncertainty [kpc, [as/mas] for plx] e_vr - low velocity uncertainty [km/s] pmll - proper motion in l (*cos(b)) [ [as/mas]/yr ] pmbb - proper motion in b [ [as/mas]/yr ] cov_pmllbb - uncertainty covariance for proper motion l - Galactic longitude [rad] b - Galactic lattitude [rad] OUTPUT: cov(vx,vy,vz) [3,3] HISTORY: 2010-04-12 - Written - Bovy (NYU)
[ "NAME", ":", "cov_dvrpmllbb_to_vxyz", "PURPOSE", ":", "propagate", "distance", "radial", "velocity", "and", "proper", "motion", "uncertainties", "to", "Galactic", "coordinates", "for", "scalar", "inputs", "INPUT", ":", "d", "-", "distance", "[", "kpc", "as", "/"...
python
train
37.424242
Clinical-Genomics/scout
scout/parse/ensembl.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/parse/ensembl.py#L340-L395
def parse_ensembl_exon_request(result): """Parse a dataframe with ensembl exon information Args: res(pandas.DataFrame) Yields: gene_info(dict) """ keys = [ 'chrom', 'gene', 'transcript', 'exon_id', 'exon_chrom_start', 'exon_chrom_end', '5_utr_start', '5_utr_end', '3_utr_start', '3_utr_end', 'strand', 'rank' ] # for res in result.itertuples(): for res in zip(result['Chromosome/scaffold name'], result['Gene stable ID'], result['Transcript stable ID'], result['Exon stable ID'], result['Exon region start (bp)'], result['Exon region end (bp)'], result["5' UTR start"], result["5' UTR end"], result["3' UTR start"], result["3' UTR end"], result["Strand"], result["Exon rank in transcript"]): ensembl_info = dict(zip(keys, res)) # Recalculate start and stop (taking UTR regions into account for end exons) if ensembl_info['strand'] == 1: # highest position: start of exon or end of 5' UTR # If no 5' UTR make sure exon_start is allways choosen start = max(ensembl_info['exon_chrom_start'], ensembl_info['5_utr_end'] or -1) # lowest position: end of exon or start of 3' UTR end = min(ensembl_info['exon_chrom_end'], ensembl_info['3_utr_start'] or float('inf')) elif ensembl_info['strand'] == -1: # highest position: start of exon or end of 3' UTR start = max(ensembl_info['exon_chrom_start'], ensembl_info['3_utr_end'] or -1) # lowest position: end of exon or start of 5' UTR end = min(ensembl_info['exon_chrom_end'], ensembl_info['5_utr_start'] or float('inf')) ensembl_info['start'] = start ensembl_info['end'] = end yield ensembl_info
[ "def", "parse_ensembl_exon_request", "(", "result", ")", ":", "keys", "=", "[", "'chrom'", ",", "'gene'", ",", "'transcript'", ",", "'exon_id'", ",", "'exon_chrom_start'", ",", "'exon_chrom_end'", ",", "'5_utr_start'", ",", "'5_utr_end'", ",", "'3_utr_start'", ","...
Parse a dataframe with ensembl exon information Args: res(pandas.DataFrame) Yields: gene_info(dict)
[ "Parse", "a", "dataframe", "with", "ensembl", "exon", "information" ]
python
test
35.767857
edx/edx-django-utils
edx_django_utils/monitoring/middleware.py
https://github.com/edx/edx-django-utils/blob/16cb4ac617e53c572bf68ccd19d24afeff1ca769/edx_django_utils/monitoring/middleware.py#L145-L154
def _log_prefix(self, prefix, request): """ Returns a formatted prefix for logging for the given request. """ # After a celery task runs, the request cache is cleared. So if celery # tasks are running synchronously (CELERY_ALWAYS _EAGER), "guid_key" # will no longer be in the request cache when process_response executes. cached_guid_response = self._cache.get_cached_response(self.guid_key) cached_guid = cached_guid_response.get_value_or_default(u"without_guid") return u"{} request '{} {} {}'".format(prefix, request.method, request.path, cached_guid)
[ "def", "_log_prefix", "(", "self", ",", "prefix", ",", "request", ")", ":", "# After a celery task runs, the request cache is cleared. So if celery", "# tasks are running synchronously (CELERY_ALWAYS _EAGER), \"guid_key\"", "# will no longer be in the request cache when process_response exec...
Returns a formatted prefix for logging for the given request.
[ "Returns", "a", "formatted", "prefix", "for", "logging", "for", "the", "given", "request", "." ]
python
train
61.8
buildbot/buildbot
master/buildbot/www/service.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/www/service.py#L91-L108
def updateSession(self, request): """ Update the cookie after session object was modified @param request: the request object which should get a new cookie """ # we actually need to copy some hardcoded constants from twisted :-( # Make sure we aren't creating a secure session on a non-secure page secure = request.isSecure() if not secure: cookieString = b"TWISTED_SESSION" else: cookieString = b"TWISTED_SECURE_SESSION" cookiename = b"_".join([cookieString] + request.sitepath) request.addCookie(cookiename, self.uid, path=b"/", secure=secure)
[ "def", "updateSession", "(", "self", ",", "request", ")", ":", "# we actually need to copy some hardcoded constants from twisted :-(", "# Make sure we aren't creating a secure session on a non-secure page", "secure", "=", "request", ".", "isSecure", "(", ")", "if", "not", "secu...
Update the cookie after session object was modified @param request: the request object which should get a new cookie
[ "Update", "the", "cookie", "after", "session", "object", "was", "modified" ]
python
train
37.111111
odlgroup/odl
odl/util/ufuncs.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/ufuncs.py#L164-L174
def max(self, axis=None, dtype=None, out=None, keepdims=False): """Return the maximum of ``self``. See Also -------- numpy.amax min """ return self.elem.__array_ufunc__( np.maximum, 'reduce', self.elem, axis=axis, dtype=dtype, out=(out,), keepdims=keepdims)
[ "def", "max", "(", "self", ",", "axis", "=", "None", ",", "dtype", "=", "None", ",", "out", "=", "None", ",", "keepdims", "=", "False", ")", ":", "return", "self", ".", "elem", ".", "__array_ufunc__", "(", "np", ".", "maximum", ",", "'reduce'", ","...
Return the maximum of ``self``. See Also -------- numpy.amax min
[ "Return", "the", "maximum", "of", "self", "." ]
python
train
29.818182
viralogic/py-enumerable
py_linq/py_linq.py
https://github.com/viralogic/py-enumerable/blob/63363649bccef223379e1e87056747240c83aa9d/py_linq/py_linq.py#L428-L437
def any(self, predicate): """ Returns true if any elements that satisfy predicate are found :param predicate: condition to satisfy as lambda expression :return: boolean True or False """ if predicate is None: raise NullArgumentError( u"predicate lambda expression is necessary") return self.where(predicate).count() > 0
[ "def", "any", "(", "self", ",", "predicate", ")", ":", "if", "predicate", "is", "None", ":", "raise", "NullArgumentError", "(", "u\"predicate lambda expression is necessary\"", ")", "return", "self", ".", "where", "(", "predicate", ")", ".", "count", "(", ")",...
Returns true if any elements that satisfy predicate are found :param predicate: condition to satisfy as lambda expression :return: boolean True or False
[ "Returns", "true", "if", "any", "elements", "that", "satisfy", "predicate", "are", "found", ":", "param", "predicate", ":", "condition", "to", "satisfy", "as", "lambda", "expression", ":", "return", ":", "boolean", "True", "or", "False" ]
python
train
39.4
tensorflow/tensor2tensor
tensor2tensor/data_generators/text_problems.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_problems.py#L607-L611
def txt_line_iterator(txt_path): """Iterate through lines of file.""" with tf.gfile.Open(txt_path) as f: for line in f: yield line.strip()
[ "def", "txt_line_iterator", "(", "txt_path", ")", ":", "with", "tf", ".", "gfile", ".", "Open", "(", "txt_path", ")", "as", "f", ":", "for", "line", "in", "f", ":", "yield", "line", ".", "strip", "(", ")" ]
Iterate through lines of file.
[ "Iterate", "through", "lines", "of", "file", "." ]
python
train
29.6
ChrisTimperley/Kaskara
python/kaskara/functions.py
https://github.com/ChrisTimperley/Kaskara/blob/3d182d95b2938508e5990eddd30321be15e2f2ef/python/kaskara/functions.py#L97-L106
def encloses(self, location: FileLocation ) -> Optional[FunctionDesc]: """ Returns the function, if any, that encloses a given location. """ for func in self.in_file(location.filename): if location in func.location: return func return None
[ "def", "encloses", "(", "self", ",", "location", ":", "FileLocation", ")", "->", "Optional", "[", "FunctionDesc", "]", ":", "for", "func", "in", "self", ".", "in_file", "(", "location", ".", "filename", ")", ":", "if", "location", "in", "func", ".", "l...
Returns the function, if any, that encloses a given location.
[ "Returns", "the", "function", "if", "any", "that", "encloses", "a", "given", "location", "." ]
python
train
33.2
QualiSystems/vCenterShell
package/cloudshell/cp/vcenter/common/vcenter/vmomi_service.py
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/common/vcenter/vmomi_service.py#L160-L168
def find_datastore_by_name(self, si, path, name): """ Finds datastore in the vCenter or returns "None" :param si: pyvmomi 'ServiceInstance' :param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...') :param name: the datastore name to return """ return self.find_obj_by_path(si, path, name, self.Datastore)
[ "def", "find_datastore_by_name", "(", "self", ",", "si", ",", "path", ",", "name", ")", ":", "return", "self", ".", "find_obj_by_path", "(", "si", ",", "path", ",", "name", ",", "self", ".", "Datastore", ")" ]
Finds datastore in the vCenter or returns "None" :param si: pyvmomi 'ServiceInstance' :param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...') :param name: the datastore name to return
[ "Finds", "datastore", "in", "the", "vCenter", "or", "returns", "None" ]
python
train
45.222222
rdeits/meshcat-python
src/meshcat/animation.py
https://github.com/rdeits/meshcat-python/blob/aa3865143120f5ace8e62aab71d825e33674d277/src/meshcat/animation.py#L132-L165
def convert_frames_to_video(tar_file_path, output_path="output.mp4", framerate=60, overwrite=False): """ Try to convert a tar file containing a sequence of frames saved by the meshcat viewer into a single video file. This relies on having `ffmpeg` installed on your system. """ output_path = os.path.abspath(output_path) if os.path.isfile(output_path) and not overwrite: raise ValueError("The output path {:s} already exists. To overwrite that file, you can pass overwrite=True to this function.".format(output_path)) with tempfile.TemporaryDirectory() as tmp_dir: with tarfile.open(tar_file_path) as tar: tar.extractall(tmp_dir) args = ["ffmpeg", "-r", str(framerate), "-i", r"%07d.png", "-vcodec", "libx264", "-preset", "slow", "-crf", "18"] if overwrite: args.append("-y") args.append(output_path) try: subprocess.check_call(args, cwd=tmp_dir) except subprocess.CalledProcessError as e: print(""" Could not call `ffmpeg` to convert your frames into a video. If you want to convert the frames manually, you can extract the .tar archive into a directory, cd to that directory, and run: ffmpeg -r 60 -i %07d.png \\\n\t -vcodec libx264 \\\n\t -preset slow \\\n\t -crf 18 \\\n\t output.mp4 """) raise print("Saved output as {:s}".format(output_path)) return output_path
[ "def", "convert_frames_to_video", "(", "tar_file_path", ",", "output_path", "=", "\"output.mp4\"", ",", "framerate", "=", "60", ",", "overwrite", "=", "False", ")", ":", "output_path", "=", "os", ".", "path", ".", "abspath", "(", "output_path", ")", "if", "o...
Try to convert a tar file containing a sequence of frames saved by the meshcat viewer into a single video file. This relies on having `ffmpeg` installed on your system.
[ "Try", "to", "convert", "a", "tar", "file", "containing", "a", "sequence", "of", "frames", "saved", "by", "the", "meshcat", "viewer", "into", "a", "single", "video", "file", "." ]
python
valid
43.735294
google/tangent
tangent/optimization.py
https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/optimization.py#L62-L88
def dead_code_elimination(node): """Perform a simple form of dead code elimination on a Python AST. This method performs reaching definitions analysis on all function definitions. It then looks for the definition of variables that are not used elsewhere and removes those definitions. This function takes into consideration push and pop statements; if a pop statement is removed, it will also try to remove the accompanying push statement. Note that this *requires dead code elimination to be performed on the primal and adjoint simultaneously*. Args: node: The AST to optimize. Returns: The optimized AST. """ to_remove = set(def_[1] for def_ in annotate.unused(node) if not isinstance(def_[1], (gast.arguments, gast.For))) for n in list(to_remove): for succ in gast.walk(n): if anno.getanno(succ, 'push', False): to_remove.add(anno.getanno(succ, 'push')) transformers.Remove(to_remove).visit(node) anno.clearanno(node) return node
[ "def", "dead_code_elimination", "(", "node", ")", ":", "to_remove", "=", "set", "(", "def_", "[", "1", "]", "for", "def_", "in", "annotate", ".", "unused", "(", "node", ")", "if", "not", "isinstance", "(", "def_", "[", "1", "]", ",", "(", "gast", "...
Perform a simple form of dead code elimination on a Python AST. This method performs reaching definitions analysis on all function definitions. It then looks for the definition of variables that are not used elsewhere and removes those definitions. This function takes into consideration push and pop statements; if a pop statement is removed, it will also try to remove the accompanying push statement. Note that this *requires dead code elimination to be performed on the primal and adjoint simultaneously*. Args: node: The AST to optimize. Returns: The optimized AST.
[ "Perform", "a", "simple", "form", "of", "dead", "code", "elimination", "on", "a", "Python", "AST", "." ]
python
train
36.518519
aiortc/aiortc
aiortc/rtcdtlstransport.py
https://github.com/aiortc/aiortc/blob/60ed036abf4575bd63985724b4493d569e6da29b/aiortc/rtcdtlstransport.py#L378-L459
async def start(self, remoteParameters): """ Start DTLS transport negotiation with the parameters of the remote DTLS transport. :param: remoteParameters: An :class:`RTCDtlsParameters`. """ assert self._state == State.NEW assert len(remoteParameters.fingerprints) if self.transport.role == 'controlling': self._role = 'server' lib.SSL_set_accept_state(self.ssl) else: self._role = 'client' lib.SSL_set_connect_state(self.ssl) self._set_state(State.CONNECTING) try: while not self.encrypted: result = lib.SSL_do_handshake(self.ssl) await self._write_ssl() if result > 0: self.encrypted = True break error = lib.SSL_get_error(self.ssl, result) if error == lib.SSL_ERROR_WANT_READ: await self._recv_next() else: self.__log_debug('x DTLS handshake failed (error %d)', error) for info in get_error_queue(): self.__log_debug('x %s', ':'.join(info)) self._set_state(State.FAILED) return except ConnectionError: self.__log_debug('x DTLS handshake failed (connection error)') self._set_state(State.FAILED) return # check remote fingerprint x509 = lib.SSL_get_peer_certificate(self.ssl) remote_fingerprint = certificate_digest(x509) fingerprint_is_valid = False for f in remoteParameters.fingerprints: if f.algorithm.lower() == 'sha-256' and f.value.lower() == remote_fingerprint.lower(): fingerprint_is_valid = True break if not fingerprint_is_valid: self.__log_debug('x DTLS handshake failed (fingerprint mismatch)') self._set_state(State.FAILED) return # generate keying material buf = ffi.new('unsigned char[]', 2 * (SRTP_KEY_LEN + SRTP_SALT_LEN)) extractor = b'EXTRACTOR-dtls_srtp' _openssl_assert(lib.SSL_export_keying_material( self.ssl, buf, len(buf), extractor, len(extractor), ffi.NULL, 0, 0) == 1) view = ffi.buffer(buf) if self._role == 'server': srtp_tx_key = get_srtp_key_salt(view, 1) srtp_rx_key = get_srtp_key_salt(view, 0) else: srtp_tx_key = get_srtp_key_salt(view, 0) srtp_rx_key = get_srtp_key_salt(view, 1) rx_policy = Policy(key=srtp_rx_key, ssrc_type=Policy.SSRC_ANY_INBOUND) 
rx_policy.allow_repeat_tx = True rx_policy.window_size = 1024 self._rx_srtp = Session(rx_policy) tx_policy = Policy(key=srtp_tx_key, ssrc_type=Policy.SSRC_ANY_OUTBOUND) tx_policy.allow_repeat_tx = True tx_policy.window_size = 1024 self._tx_srtp = Session(tx_policy) # start data pump self.__log_debug('- DTLS handshake complete') self._set_state(State.CONNECTED) self._task = asyncio.ensure_future(self.__run())
[ "async", "def", "start", "(", "self", ",", "remoteParameters", ")", ":", "assert", "self", ".", "_state", "==", "State", ".", "NEW", "assert", "len", "(", "remoteParameters", ".", "fingerprints", ")", "if", "self", ".", "transport", ".", "role", "==", "'...
Start DTLS transport negotiation with the parameters of the remote DTLS transport. :param: remoteParameters: An :class:`RTCDtlsParameters`.
[ "Start", "DTLS", "transport", "negotiation", "with", "the", "parameters", "of", "the", "remote", "DTLS", "transport", "." ]
python
train
38.073171
romanvm/django-tinymce4-lite
tinymce/settings.py
https://github.com/romanvm/django-tinymce4-lite/blob/3b9221db5f0327e1e08c79b7b8cdbdcb1848a390/tinymce/settings.py#L12-L23
def is_managed(): """ Check if a Django project is being managed with ``manage.py`` or ``django-admin`` scripts :return: Check result :rtype: bool """ for item in sys.argv: if re.search(r'manage.py|django-admin|django', item) is not None: return True return False
[ "def", "is_managed", "(", ")", ":", "for", "item", "in", "sys", ".", "argv", ":", "if", "re", ".", "search", "(", "r'manage.py|django-admin|django'", ",", "item", ")", "is", "not", "None", ":", "return", "True", "return", "False" ]
Check if a Django project is being managed with ``manage.py`` or ``django-admin`` scripts :return: Check result :rtype: bool
[ "Check", "if", "a", "Django", "project", "is", "being", "managed", "with", "manage", ".", "py", "or", "django", "-", "admin", "scripts" ]
python
train
25.416667
wandb/client
wandb/vendor/prompt_toolkit/cache.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/cache.py#L96-L111
def memoized(maxsize=1024): """ Momoization decorator for immutable classes and pure functions. """ cache = SimpleCache(maxsize=maxsize) def decorator(obj): @wraps(obj) def new_callable(*a, **kw): def create_new(): return obj(*a, **kw) key = (a, tuple(kw.items())) return cache.get(key, create_new) return new_callable return decorator
[ "def", "memoized", "(", "maxsize", "=", "1024", ")", ":", "cache", "=", "SimpleCache", "(", "maxsize", "=", "maxsize", ")", "def", "decorator", "(", "obj", ")", ":", "@", "wraps", "(", "obj", ")", "def", "new_callable", "(", "*", "a", ",", "*", "*"...
Momoization decorator for immutable classes and pure functions.
[ "Momoization", "decorator", "for", "immutable", "classes", "and", "pure", "functions", "." ]
python
train
26.375
ncrocfer/similar
similar/similar.py
https://github.com/ncrocfer/similar/blob/94adde7d6f4fa924a6fc3e6be2dee62e48d0f608/similar/similar.py#L24-L37
def results(self): """ Returns a list of tuple, ordered by similarity. """ d = dict() words = [word.strip() for word in self.haystack] if not words: raise NoResultException('No similar word found.') for w in words: d[w] = Levenshtein.ratio(self.needle, w) return sorted(d.items(), key=operator.itemgetter(1), reverse=True)
[ "def", "results", "(", "self", ")", ":", "d", "=", "dict", "(", ")", "words", "=", "[", "word", ".", "strip", "(", ")", "for", "word", "in", "self", ".", "haystack", "]", "if", "not", "words", ":", "raise", "NoResultException", "(", "'No similar word...
Returns a list of tuple, ordered by similarity.
[ "Returns", "a", "list", "of", "tuple", "ordered", "by", "similarity", "." ]
python
train
28.571429
azraq27/neural
neural/general.py
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/general.py#L24-L30
def voxel_loop(self): '''iterator that loops through each voxel and yields the coords and time series as a tuple''' # Prob not the most efficient, but the best I can do for now: for x in xrange(len(self.data)): for y in xrange(len(self.data[x])): for z in xrange(len(self.data[x][y])): yield ((x,y,z),self.data[x][y][z])
[ "def", "voxel_loop", "(", "self", ")", ":", "# Prob not the most efficient, but the best I can do for now:", "for", "x", "in", "xrange", "(", "len", "(", "self", ".", "data", ")", ")", ":", "for", "y", "in", "xrange", "(", "len", "(", "self", ".", "data", ...
iterator that loops through each voxel and yields the coords and time series as a tuple
[ "iterator", "that", "loops", "through", "each", "voxel", "and", "yields", "the", "coords", "and", "time", "series", "as", "a", "tuple" ]
python
train
55.142857
Kozea/cairocffi
cairocffi/context.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/context.py#L1839-L1850
def get_font_options(self): """Retrieves font rendering options set via :meth:`set_font_options`. Note that the returned options do not include any options derived from the underlying surface; they are literally the options passed to :meth:`set_font_options`. :return: A new :class:`FontOptions` object. """ font_options = FontOptions() cairo.cairo_get_font_options(self._pointer, font_options._pointer) return font_options
[ "def", "get_font_options", "(", "self", ")", ":", "font_options", "=", "FontOptions", "(", ")", "cairo", ".", "cairo_get_font_options", "(", "self", ".", "_pointer", ",", "font_options", ".", "_pointer", ")", "return", "font_options" ]
Retrieves font rendering options set via :meth:`set_font_options`. Note that the returned options do not include any options derived from the underlying surface; they are literally the options passed to :meth:`set_font_options`. :return: A new :class:`FontOptions` object.
[ "Retrieves", "font", "rendering", "options", "set", "via", ":", "meth", ":", "set_font_options", ".", "Note", "that", "the", "returned", "options", "do", "not", "include", "any", "options", "derived", "from", "the", "underlying", "surface", ";", "they", "are",...
python
train
40.5
mitsei/dlkit
dlkit/json_/learning/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/sessions.py#L6241-L6262
def is_child_of_objective_bank(self, id_, objective_bank_id): """Tests if an objective bank is a direct child of another. arg: id (osid.id.Id): an ``Id`` arg: objective_bank_id (osid.id.Id): the ``Id`` of an objective bank return: (boolean) - ``true`` if the ``id`` is a child of ``objective_bank_id,`` ``false`` otherwise raise: NotFound - ``objective_bank_id`` is not found raise: NullArgument - ``id`` or ``objective_bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``. """ # Implemented from template for # osid.resource.BinHierarchySession.is_child_of_bin if self._catalog_session is not None: return self._catalog_session.is_child_of_catalog(id_=id_, catalog_id=objective_bank_id) return self._hierarchy_session.is_child(id_=objective_bank_id, child_id=id_)
[ "def", "is_child_of_objective_bank", "(", "self", ",", "id_", ",", "objective_bank_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.is_child_of_bin", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", "...
Tests if an objective bank is a direct child of another. arg: id (osid.id.Id): an ``Id`` arg: objective_bank_id (osid.id.Id): the ``Id`` of an objective bank return: (boolean) - ``true`` if the ``id`` is a child of ``objective_bank_id,`` ``false`` otherwise raise: NotFound - ``objective_bank_id`` is not found raise: NullArgument - ``id`` or ``objective_bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``.
[ "Tests", "if", "an", "objective", "bank", "is", "a", "direct", "child", "of", "another", "." ]
python
train
51.181818
toumorokoshi/sprinter
sprinter/formula/perforce.py
https://github.com/toumorokoshi/sprinter/blob/846697a7a087e69c61d075232e754d6975a64152/sprinter/formula/perforce.py#L144-L164
def __install_perforce(self, config): """ install perforce binary """ if not system.is_64_bit(): self.logger.warn("Perforce formula is only designed for 64 bit systems! Not install executables...") return False version = config.get('version', 'r13.2') key = 'osx' if system.is_osx() else 'linux' perforce_packages = package_dict[version][key] d = self.directory.install_directory(self.feature_name) if not os.path.exists(d): os.makedirs(d) self.logger.info("Downloading p4 executable...") with open(os.path.join(d, "p4"), 'wb+') as fh: fh.write(lib.cleaned_request('get', url_prefix + perforce_packages['p4']).content) self.directory.symlink_to_bin("p4", os.path.join(d, "p4")) self.p4_command = os.path.join(d, "p4") self.logger.info("Installing p4v...") if system.is_osx(): return self._install_p4v_osx(url_prefix + perforce_packages['p4v']) else: return self._install_p4v_linux(url_prefix + perforce_packages['p4v'])
[ "def", "__install_perforce", "(", "self", ",", "config", ")", ":", "if", "not", "system", ".", "is_64_bit", "(", ")", ":", "self", ".", "logger", ".", "warn", "(", "\"Perforce formula is only designed for 64 bit systems! Not install executables...\"", ")", "return", ...
install perforce binary
[ "install", "perforce", "binary" ]
python
train
51.571429
edx/i18n-tools
i18n/dummy.py
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/dummy.py#L217-L221
def new_filename(original_filename, new_locale): """Returns a filename derived from original_filename, using new_locale as the locale""" orig_file = Path(original_filename) new_file = orig_file.parent.parent.parent / new_locale / orig_file.parent.name / orig_file.name return new_file.abspath()
[ "def", "new_filename", "(", "original_filename", ",", "new_locale", ")", ":", "orig_file", "=", "Path", "(", "original_filename", ")", "new_file", "=", "orig_file", ".", "parent", ".", "parent", ".", "parent", "/", "new_locale", "/", "orig_file", ".", "parent"...
Returns a filename derived from original_filename, using new_locale as the locale
[ "Returns", "a", "filename", "derived", "from", "original_filename", "using", "new_locale", "as", "the", "locale" ]
python
train
61.2
blockstack-packages/keychain-manager-py
keychain/utils.py
https://github.com/blockstack-packages/keychain-manager-py/blob/c15c4ed8f3ed155f71ccac7c13ee08f081d38c06/keychain/utils.py#L41-L55
def bip32_deserialize(data): """ Derived from code from pybitcointools (https://github.com/vbuterin/pybitcointools) by Vitalik Buterin """ dbin = changebase(data, 58, 256) if bin_dbl_sha256(dbin[:-4])[:4] != dbin[-4:]: raise Exception("Invalid checksum") vbytes = dbin[0:4] depth = from_byte_to_int(dbin[4]) fingerprint = dbin[5:9] i = decode(dbin[9:13], 256) chaincode = dbin[13:45] key = dbin[46:78]+b'\x01' if vbytes in PRIVATE else dbin[45:78] return (vbytes, depth, fingerprint, i, chaincode, key)
[ "def", "bip32_deserialize", "(", "data", ")", ":", "dbin", "=", "changebase", "(", "data", ",", "58", ",", "256", ")", "if", "bin_dbl_sha256", "(", "dbin", "[", ":", "-", "4", "]", ")", "[", ":", "4", "]", "!=", "dbin", "[", "-", "4", ":", "]",...
Derived from code from pybitcointools (https://github.com/vbuterin/pybitcointools) by Vitalik Buterin
[ "Derived", "from", "code", "from", "pybitcointools", "(", "https", ":", "//", "github", ".", "com", "/", "vbuterin", "/", "pybitcointools", ")", "by", "Vitalik", "Buterin" ]
python
test
36.533333
googleapis/google-cloud-python
logging/google/cloud/logging/_http.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging/_http.py#L354-L391
def list_metrics(self, project, page_size=None, page_token=None): """List metrics for the project associated with this client. See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list :type project: str :param project: ID of the project whose metrics are to be listed. :type page_size: int :param page_size: maximum number of metrics to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: opaque marker for the next "page" of metrics. If not passed, the API will return the first page of metrics. :rtype: :class:`~google.api_core.page_iterator.Iterator` :returns: Iterator of :class:`~google.cloud.logging.metric.Metric` accessible to the current API. """ extra_params = {} if page_size is not None: extra_params["pageSize"] = page_size path = "/projects/%s/metrics" % (project,) return page_iterator.HTTPIterator( client=self._client, api_request=self._client._connection.api_request, path=path, item_to_value=_item_to_metric, items_key="metrics", page_token=page_token, extra_params=extra_params, )
[ "def", "list_metrics", "(", "self", ",", "project", ",", "page_size", "=", "None", ",", "page_token", "=", "None", ")", ":", "extra_params", "=", "{", "}", "if", "page_size", "is", "not", "None", ":", "extra_params", "[", "\"pageSize\"", "]", "=", "page_...
List metrics for the project associated with this client. See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list :type project: str :param project: ID of the project whose metrics are to be listed. :type page_size: int :param page_size: maximum number of metrics to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: opaque marker for the next "page" of metrics. If not passed, the API will return the first page of metrics. :rtype: :class:`~google.api_core.page_iterator.Iterator` :returns: Iterator of :class:`~google.cloud.logging.metric.Metric` accessible to the current API.
[ "List", "metrics", "for", "the", "project", "associated", "with", "this", "client", "." ]
python
train
36.684211
cltk/cltk
cltk/prosody/greek/scanner.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/greek/scanner.py#L46-L62
def _clean_text(self, text): """Clean the text of extraneous punction. By default, ':', ';', and '.' are defined as stops. :param text: raw text :return: clean text :rtype : string """ clean = [] for char in text: if char in self.punc_stops: clean += '.' elif char not in self.punc: clean += char else: pass return (''.join(clean)).lower()
[ "def", "_clean_text", "(", "self", ",", "text", ")", ":", "clean", "=", "[", "]", "for", "char", "in", "text", ":", "if", "char", "in", "self", ".", "punc_stops", ":", "clean", "+=", "'.'", "elif", "char", "not", "in", "self", ".", "punc", ":", "...
Clean the text of extraneous punction. By default, ':', ';', and '.' are defined as stops. :param text: raw text :return: clean text :rtype : string
[ "Clean", "the", "text", "of", "extraneous", "punction", "." ]
python
train
28.235294
msikma/kanaconv
kanaconv/converter.py
https://github.com/msikma/kanaconv/blob/194f142e616ab5dd6d13a687b96b9f8abd1b4ea8/kanaconv/converter.py#L722-L730
def _add_punctuation_spacing(self, input): ''' Adds additional spacing to punctuation characters. For example, this puts an extra space after a fullwidth full stop. ''' for replacement in punct_spacing: input = re.sub(replacement[0], replacement[1], input) return input
[ "def", "_add_punctuation_spacing", "(", "self", ",", "input", ")", ":", "for", "replacement", "in", "punct_spacing", ":", "input", "=", "re", ".", "sub", "(", "replacement", "[", "0", "]", ",", "replacement", "[", "1", "]", ",", "input", ")", "return", ...
Adds additional spacing to punctuation characters. For example, this puts an extra space after a fullwidth full stop.
[ "Adds", "additional", "spacing", "to", "punctuation", "characters", ".", "For", "example", "this", "puts", "an", "extra", "space", "after", "a", "fullwidth", "full", "stop", "." ]
python
train
35.777778
saltstack/salt
salt/crypt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/crypt.py#L1428-L1464
def decrypt(self, data): ''' verify HMAC-SHA256 signature and decrypt data with AES-CBC ''' aes_key, hmac_key = self.keys sig = data[-self.SIG_SIZE:] data = data[:-self.SIG_SIZE] if six.PY3 and not isinstance(data, bytes): data = salt.utils.stringutils.to_bytes(data) mac_bytes = hmac.new(hmac_key, data, hashlib.sha256).digest() if len(mac_bytes) != len(sig): log.debug('Failed to authenticate message') raise AuthenticationError('message authentication failed') result = 0 if six.PY2: for zipped_x, zipped_y in zip(mac_bytes, sig): result |= ord(zipped_x) ^ ord(zipped_y) else: for zipped_x, zipped_y in zip(mac_bytes, sig): result |= zipped_x ^ zipped_y if result != 0: log.debug('Failed to authenticate message') raise AuthenticationError('message authentication failed') iv_bytes = data[:self.AES_BLOCK_SIZE] data = data[self.AES_BLOCK_SIZE:] if HAS_M2: cypher = EVP.Cipher(alg='aes_192_cbc', key=aes_key, iv=iv_bytes, op=0, padding=False) encr = cypher.update(data) data = encr + cypher.final() else: cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes) data = cypher.decrypt(data) if six.PY2: return data[:-ord(data[-1])] else: return data[:-data[-1]]
[ "def", "decrypt", "(", "self", ",", "data", ")", ":", "aes_key", ",", "hmac_key", "=", "self", ".", "keys", "sig", "=", "data", "[", "-", "self", ".", "SIG_SIZE", ":", "]", "data", "=", "data", "[", ":", "-", "self", ".", "SIG_SIZE", "]", "if", ...
verify HMAC-SHA256 signature and decrypt data with AES-CBC
[ "verify", "HMAC", "-", "SHA256", "signature", "and", "decrypt", "data", "with", "AES", "-", "CBC" ]
python
train
39.783784
google/grr
grr/server/grr_response_server/databases/mysql_utils.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_utils.py#L141-L161
def ComponentsToPath(components): """Converts a list of path components to a canonical path representation. Args: components: A sequence of path components. Returns: A canonical MySQL path representation. """ precondition.AssertIterableType(components, Text) for component in components: if not component: raise ValueError("Empty path component in: {}".format(components)) if "/" in component: raise ValueError("Path component with '/' in: {}".format(components)) if components: return "/" + "/".join(components) else: return ""
[ "def", "ComponentsToPath", "(", "components", ")", ":", "precondition", ".", "AssertIterableType", "(", "components", ",", "Text", ")", "for", "component", "in", "components", ":", "if", "not", "component", ":", "raise", "ValueError", "(", "\"Empty path component ...
Converts a list of path components to a canonical path representation. Args: components: A sequence of path components. Returns: A canonical MySQL path representation.
[ "Converts", "a", "list", "of", "path", "components", "to", "a", "canonical", "path", "representation", "." ]
python
train
26.857143
jorgenkg/python-neural-network
nimblenet/cost_functions.py
https://github.com/jorgenkg/python-neural-network/blob/617b9940fa157d54d7831c42c0f7ba6857239b9a/nimblenet/cost_functions.py#L41-L50
def softmax_categorical_cross_entropy_cost( outputs, targets, derivative=False, epsilon=1e-11 ): """ The output signals should be in the range [0, 1] """ outputs = np.clip(outputs, epsilon, 1 - epsilon) if derivative: return outputs - targets else: return np.mean(-np.sum(targets * np.log( outputs ), axis=1))
[ "def", "softmax_categorical_cross_entropy_cost", "(", "outputs", ",", "targets", ",", "derivative", "=", "False", ",", "epsilon", "=", "1e-11", ")", ":", "outputs", "=", "np", ".", "clip", "(", "outputs", ",", "epsilon", ",", "1", "-", "epsilon", ")", "if"...
The output signals should be in the range [0, 1]
[ "The", "output", "signals", "should", "be", "in", "the", "range", "[", "0", "1", "]" ]
python
train
34.5
deschler/django-modeltranslation
modeltranslation/manager.py
https://github.com/deschler/django-modeltranslation/blob/18fec04a5105cbd83fc3759f4fda20135b3a848c/modeltranslation/manager.py#L79-L87
def append_translated(model, fields): "If translated field is encountered, add also all its translation fields." fields = set(fields) from modeltranslation.translator import translator opts = translator.get_options_for_model(model) for key, translated in opts.fields.items(): if key in fields: fields = fields.union(f.name for f in translated) return fields
[ "def", "append_translated", "(", "model", ",", "fields", ")", ":", "fields", "=", "set", "(", "fields", ")", "from", "modeltranslation", ".", "translator", "import", "translator", "opts", "=", "translator", ".", "get_options_for_model", "(", "model", ")", "for...
If translated field is encountered, add also all its translation fields.
[ "If", "translated", "field", "is", "encountered", "add", "also", "all", "its", "translation", "fields", "." ]
python
train
43.666667
django-import-export/django-import-export
import_export/resources.py
https://github.com/django-import-export/django-import-export/blob/127f00d03fd0ad282615b064b7f444a639e6ff0c/import_export/resources.py#L910-L924
def after_import(self, dataset, result, using_transactions, dry_run, **kwargs): """ Reset the SQL sequences after new objects are imported """ # Adapted from django's loaddata if not dry_run and any(r.import_type == RowResult.IMPORT_TYPE_NEW for r in result.rows): connection = connections[DEFAULT_DB_ALIAS] sequence_sql = connection.ops.sequence_reset_sql(no_style(), [self._meta.model]) if sequence_sql: cursor = connection.cursor() try: for line in sequence_sql: cursor.execute(line) finally: cursor.close()
[ "def", "after_import", "(", "self", ",", "dataset", ",", "result", ",", "using_transactions", ",", "dry_run", ",", "*", "*", "kwargs", ")", ":", "# Adapted from django's loaddata", "if", "not", "dry_run", "and", "any", "(", "r", ".", "import_type", "==", "Ro...
Reset the SQL sequences after new objects are imported
[ "Reset", "the", "SQL", "sequences", "after", "new", "objects", "are", "imported" ]
python
train
45.6
Nekroze/librarian
librarian/deck.py
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/deck.py#L22-L45
def get_card(self, index=-1, cache=True, remove=True):
    """
    Retrieve a card any number of cards from the top. Returns a
    ``Card`` object loaded from a library if one is specified,
    otherwise it will simply return the card's code.

    If `index` is not set then the top card (index ``-1``) is retrieved.

    If `cache` is True (the default) the library is told to cache the
    returned card for faster look-ups in the future.

    If `remove` is true the card is removed from the deck before being
    returned.

    Returns ``None`` when `index` does not refer to a card currently in
    the deck (including when the deck is empty).
    """
    # Accept exactly the indices Python list indexing accepts:
    # -len(cards) <= index < len(cards).  The previous guard
    # (`len(self.cards) < index`) let `index == len(self.cards)`,
    # out-of-range negative indices, and the default -1 on an empty
    # deck fall through and raise IndexError instead of returning None.
    if not -len(self.cards) <= index < len(self.cards):
        return None
    retriever = self.cards.pop if remove else self.cards.__getitem__
    code = retriever(index)
    if self.library:
        return self.library.load_card(code, cache)
    return code
[ "def", "get_card", "(", "self", ",", "index", "=", "-", "1", ",", "cache", "=", "True", ",", "remove", "=", "True", ")", ":", "if", "len", "(", "self", ".", "cards", ")", "<", "index", ":", "return", "None", "retriever", "=", "self", ".", "cards"...
Retrieve a card any number of cards from the top. Returns a ``Card`` object loaded from a library if one is specified otherwise just it will simply return its code. If `index` is not set then the top card will be retrieved. If cache is set to True (the default) it will tell the library to cache the returned card for faster look-ups in the future. If remove is true then the card will be removed from the deck before returning it.
[ "Retrieve", "a", "card", "any", "number", "of", "cards", "from", "the", "top", ".", "Returns", "a", "Card", "object", "loaded", "from", "a", "library", "if", "one", "is", "specified", "otherwise", "just", "it", "will", "simply", "return", "its", "code", ...
python
train
35
Diaoul/subliminal
subliminal/subtitle.py
https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/subtitle.py#L185-L244
def guess_matches(video, guess, partial=False):
    """Get matches between a `video` and a `guess`.

    If a guess is `partial`, the absence information won't be counted as a match.

    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param guess: the guess.
    :type guess: dict
    :param bool partial: whether or not the guess is partial.
    :return: matches between the `video` and the `guess`.
    :rtype: set
    """
    matches = set()

    def _text_match(attr, key):
        # String property comparison, normalised through sanitize().
        value = getattr(video, attr)
        return value and key in guess and sanitize(guess[key]) == sanitize(value)

    def _exact_match(attr, key):
        # Plain equality comparison for non-text properties.
        value = getattr(video, attr)
        return value and key in guess and guess[key] == value

    if isinstance(video, Episode):
        # series
        if _text_match('series', 'title'):
            matches.add('series')
        # title
        if _text_match('title', 'episode_title'):
            matches.add('title')
        # season
        if _exact_match('season', 'season'):
            matches.add('season')
        # episode
        if _exact_match('episode', 'episode'):
            matches.add('episode')
        # year
        if _exact_match('year', 'year'):
            matches.add('year')
        # count "no year" as an information
        if not partial and video.original_series and 'year' not in guess:
            matches.add('year')
    elif isinstance(video, Movie):
        # year
        if _exact_match('year', 'year'):
            matches.add('year')
        # title
        if _text_match('title', 'title'):
            matches.add('title')

    # release_group: compare through the equivalence groups
    if (video.release_group and 'release_group' in guess and
            sanitize_release_group(guess['release_group']) in
            get_equivalent_release_groups(sanitize_release_group(video.release_group))):
        matches.add('release_group')
    # resolution
    if _exact_match('resolution', 'screen_size'):
        matches.add('resolution')
    # format: compared case-insensitively
    if video.format and 'format' in guess and guess['format'].lower() == video.format.lower():
        matches.add('format')
    # video_codec
    if _exact_match('video_codec', 'video_codec'):
        matches.add('video_codec')
    # audio_codec
    if _exact_match('audio_codec', 'audio_codec'):
        matches.add('audio_codec')

    return matches
[ "def", "guess_matches", "(", "video", ",", "guess", ",", "partial", "=", "False", ")", ":", "matches", "=", "set", "(", ")", "if", "isinstance", "(", "video", ",", "Episode", ")", ":", "# series", "if", "video", ".", "series", "and", "'title'", "in", ...
Get matches between a `video` and a `guess`. If a guess is `partial`, the absence information won't be counted as a match. :param video: the video. :type video: :class:`~subliminal.video.Video` :param guess: the guess. :type guess: dict :param bool partial: whether or not the guess is partial. :return: matches between the `video` and the `guess`. :rtype: set
[ "Get", "matches", "between", "a", "video", "and", "a", "guess", "." ]
python
train
41.666667
singnet/snet-cli
snet_cli/mpe_channel_command.py
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_channel_command.py#L21-L25
def _get_persistent_mpe_dir(self): """ get persistent storage for mpe """ mpe_address = self.get_mpe_address().lower() registry_address = self.get_registry_address().lower() return Path.home().joinpath(".snet", "mpe_client", "%s_%s"%(mpe_address, registry_address))
[ "def", "_get_persistent_mpe_dir", "(", "self", ")", ":", "mpe_address", "=", "self", ".", "get_mpe_address", "(", ")", ".", "lower", "(", ")", "registry_address", "=", "self", ".", "get_registry_address", "(", ")", ".", "lower", "(", ")", "return", "Path", ...
get persistent storage for mpe
[ "get", "persistent", "storage", "for", "mpe" ]
python
train
59.6
bcbio/bcbio-nextgen
bcbio/variation/validateplot.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L22-L33
def classifyplot_from_plotfiles(plot_files, out_csv, outtype="png", title=None, size=None):
    """Create a plot from individual summary csv files with classification metrics.
    """
    frames = [pd.read_csv(plot_file) for plot_file in plot_files]
    # Collect sample names in first-seen order across all input files.
    samples = []
    for frame in frames:
        for sample in frame["sample"].unique():
            if sample not in samples:
                samples.append(sample)
    combined = pd.concat(frames)
    combined.to_csv(out_csv, index=False)
    return classifyplot_from_valfile(out_csv, outtype, title, size, samples)
[ "def", "classifyplot_from_plotfiles", "(", "plot_files", ",", "out_csv", ",", "outtype", "=", "\"png\"", ",", "title", "=", "None", ",", "size", "=", "None", ")", ":", "dfs", "=", "[", "pd", ".", "read_csv", "(", "x", ")", "for", "x", "in", "plot_files...
Create a plot from individual summary csv files with classification metrics.
[ "Create", "a", "plot", "from", "individual", "summary", "csv", "files", "with", "classification", "metrics", "." ]
python
train
42.833333
bcbio/bcbio-nextgen
bcbio/variation/vardict.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vardict.py#L129-L138
def _get_jvm_opts(data, out_file):
    """Retrieve JVM options when running the Java version of VarDict.
    """
    if get_vardict_command(data) != "vardict-java":
        # Non-Java VarDict needs no JVM environment setup.
        return ""
    resources = config_utils.get_resources("vardict", data["config"])
    jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx4g"])
    jvm_opts += broad.get_default_jvm_opts(os.path.dirname(out_file))
    return "export VAR_DICT_OPTS='%s' && " % " ".join(jvm_opts)
[ "def", "_get_jvm_opts", "(", "data", ",", "out_file", ")", ":", "if", "get_vardict_command", "(", "data", ")", "==", "\"vardict-java\"", ":", "resources", "=", "config_utils", ".", "get_resources", "(", "\"vardict\"", ",", "data", "[", "\"config\"", "]", ")", ...
Retrieve JVM options when running the Java version of VarDict.
[ "Retrieve", "JVM", "options", "when", "running", "the", "Java", "version", "of", "VarDict", "." ]
python
train
46.8
selik/xport
xport/v56.py
https://github.com/selik/xport/blob/fafd15a24ccd102fc92d0c0123b9877a0c752182/xport/v56.py#L292-L309
def header_match(cls, header):
    '''
    Parse the 4-line (320-byte) library member header.
    '''
    mo = cls.header_re.match(header)
    if mo is None:
        raise ValueError(f'Expected {cls.header_re.pattern!r}, got {header!r}')

    def text(field):
        # Header fields are fixed-width bytes; decode and drop the padding.
        return mo[field].decode().strip()

    return {
        'name': text('name'),
        'label': text('label'),
        'type': text('type'),
        'created': strptime(mo['created']),
        'modified': strptime(mo['modified']),
        'sas_version': float(mo['version']),
        'os_version': text('os'),
        'namestr_size': mo['descriptor_size'],
    }
[ "def", "header_match", "(", "cls", ",", "header", ")", ":", "mo", "=", "cls", ".", "header_re", ".", "match", "(", "header", ")", "if", "mo", "is", "None", ":", "msg", "=", "f'Expected {cls.header_re.pattern!r}, got {header!r}'", "raise", "ValueError", "(", ...
Parse the 4-line (320-byte) library member header.
[ "Parse", "the", "4", "-", "line", "(", "320", "-", "byte", ")", "library", "member", "header", "." ]
python
train
38.5
tensorflow/probability
experimental/neutra/neutra_kernel.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/neutra/neutra_kernel.py#L350-L381
def one_step(self, current_state, previous_kernel_results):
    """Runs one iteration of NeuTra.

    Args:
      current_state: `Tensor` or Python `list` of `Tensor`s representing the
        current state(s) of the Markov chain(s). The first `r` dimensions
        index independent chains,
        `r = tf.rank(target_log_prob_fn(*current_state))`.
      previous_kernel_results: `collections.namedtuple` containing `Tensor`s
        representing values from previous calls to this function (or from the
        `bootstrap_results` function.)

    Returns:
      next_state: Tensor or Python list of `Tensor`s representing the state(s)
        of the Markov chain(s) after taking exactly one step. Has same type
        and shape as `current_state`.
      kernel_results: `collections.namedtuple` of internal calculations used
        to advance the chain.
    """
    @tfp.mcmc.internal.util.make_innermost_setter
    def _set_leapfrog_steps(kernel_results, num_leapfrog_steps):
        # Rewrite the innermost accepted_results with the new step count.
        return kernel_results._replace(
            accepted_results=kernel_results.accepted_results._replace(
                num_leapfrog_steps=num_leapfrog_steps))

    # Recompute the leapfrog step count from the adapted step size before
    # delegating to the wrapped kernel.
    current_step_size = previous_kernel_results.new_step_size
    updated_results = _set_leapfrog_steps(
        previous_kernel_results, self._num_leapfrog_steps(current_step_size))

    flat_state = self._flatten_state(current_state)
    next_flat_state, kernel_results = self._kernel.one_step(
        flat_state, updated_results)
    return self._unflatten_state(next_flat_state), kernel_results
[ "def", "one_step", "(", "self", ",", "current_state", ",", "previous_kernel_results", ")", ":", "@", "tfp", ".", "mcmc", ".", "internal", ".", "util", ".", "make_innermost_setter", "def", "set_num_leapfrog_steps", "(", "kernel_results", ",", "num_leapfrog_steps", ...
Runs one iteration of NeuTra. Args: current_state: `Tensor` or Python `list` of `Tensor`s representing the current state(s) of the Markov chain(s). The first `r` dimensions index independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`. previous_kernel_results: `collections.namedtuple` containing `Tensor`s representing values from previous calls to this function (or from the `bootstrap_results` function.) Returns: next_state: Tensor or Python list of `Tensor`s representing the state(s) of the Markov chain(s) after taking exactly one step. Has same type and shape as `current_state`. kernel_results: `collections.namedtuple` of internal calculations used to advance the chain.
[ "Runs", "one", "iteration", "of", "NeuTra", "." ]
python
test
46
tdryer/hangups
hangups/client.py
https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/client.py#L114-L148
async def connect(self):
    """Establish a connection to the chat server.

    Returns when an error has occurred, or :func:`disconnect` has been
    called.
    """
    proxy_url = os.environ.get('HTTP_PROXY')
    self._session = http_utils.Session(self._cookies, proxy=proxy_url)
    try:
        self._channel = channel.Channel(
            self._session, self._max_retries, self._retry_backoff_base
        )

        # Forward the Channel events to the Client events.
        self._channel.on_connect.add_observer(self.on_connect.fire)
        self._channel.on_reconnect.add_observer(self.on_reconnect.fire)
        self._channel.on_disconnect.add_observer(self.on_disconnect.fire)
        self._channel.on_receive_array.add_observer(self._on_receive_array)

        # Wrap the coroutine in a Future so it can be cancelled.
        self._listen_future = asyncio.ensure_future(self._channel.listen())
        # Listen for StateUpdate messages from the Channel until it
        # disconnects.
        try:
            await self._listen_future
        except asyncio.CancelledError:
            # If this task is cancelled, we need to cancel our child task
            # as well. We don't need an additional yield because listen
            # cancels immediately.
            self._listen_future.cancel()
        logger.info(
            'Client.connect returning because Channel.listen returned'
        )
    finally:
        # Always release the HTTP session, even on error/cancellation.
        await self._session.close()
[ "async", "def", "connect", "(", "self", ")", ":", "proxy", "=", "os", ".", "environ", ".", "get", "(", "'HTTP_PROXY'", ")", "self", ".", "_session", "=", "http_utils", ".", "Session", "(", "self", ".", "_cookies", ",", "proxy", "=", "proxy", ")", "tr...
Establish a connection to the chat server. Returns when an error has occurred, or :func:`disconnect` has been called.
[ "Establish", "a", "connection", "to", "the", "chat", "server", "." ]
python
valid
44.028571
balloob/pychromecast
pychromecast/socket_client.py
https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/socket_client.py#L586-L608
def _cleanup(self):
    """ Cleanup open channels and handlers """
    def _best_effort(func, *args):
        # Teardown failures must not abort the rest of the cleanup.
        try:
            func(*args)
        except Exception:  # pylint: disable=broad-except
            pass

    for channel in self._open_channels:
        _best_effort(self.disconnect_channel, channel)

    for handler in self._handlers.values():
        _best_effort(handler.tear_down)

    try:
        self.socket.close()
    except Exception:  # pylint: disable=broad-except
        self.logger.exception(
            "[%s:%s] _cleanup", self.fn or self.host, self.port)

    self._report_connection_status(
        ConnectionStatus(CONNECTION_STATUS_DISCONNECTED,
                         NetworkAddress(self.host, self.port)))

    self.connecting = True
[ "def", "_cleanup", "(", "self", ")", ":", "for", "channel", "in", "self", ".", "_open_channels", ":", "try", ":", "self", ".", "disconnect_channel", "(", "channel", ")", "except", "Exception", ":", "# pylint: disable=broad-except", "pass", "for", "handler", "i...
Cleanup open channels and handlers
[ "Cleanup", "open", "channels", "and", "handlers" ]
python
train
36.26087
pypa/pipenv
pipenv/vendor/pipdeptree.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pipdeptree.py#L460-L476
def conflicting_deps(tree):
    """Returns dependencies which are not present or conflict with the
    requirements of other packages.

    e.g. will warn if pkg1 requires pkg2==2.0 and pkg2==1.0 is installed

    :param tree: the requirements tree (dict)
    :returns: dict of DistPackage -> list of unsatisfied/unknown ReqPackage
    :rtype: dict
    """
    result = defaultdict(list)
    for pkg, requirements in tree.items():
        bad_reqs = [req for req in requirements if req.is_conflicting()]
        if bad_reqs:
            result[pkg].extend(bad_reqs)
    return result
[ "def", "conflicting_deps", "(", "tree", ")", ":", "conflicting", "=", "defaultdict", "(", "list", ")", "for", "p", ",", "rs", "in", "tree", ".", "items", "(", ")", ":", "for", "req", "in", "rs", ":", "if", "req", ".", "is_conflicting", "(", ")", ":...
Returns dependencies which are not present or conflict with the requirements of other packages. e.g. will warn if pkg1 requires pkg2==2.0 and pkg2==1.0 is installed :param tree: the requirements tree (dict) :returns: dict of DistPackage -> list of unsatisfied/unknown ReqPackage :rtype: dict
[ "Returns", "dependencies", "which", "are", "not", "present", "or", "conflict", "with", "the", "requirements", "of", "other", "packages", "." ]
python
train
31.411765
saltstack/salt
salt/modules/napalm_network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_network.py#L2166-L2193
def config_changed(inherit_napalm_device=None, **kwargs):  # pylint: disable=unused-argument
    '''
    Will prompt if the configuration has been changed.

    :return: A tuple with a boolean that specifies if the config was changed on the device.\
        And a string that provides more details of the reason why the configuration was not changed.

    CLI Example:

    .. code-block:: bash

        salt '*' net.config_changed
    '''
    diff = compare_config(inherit_napalm_device=napalm_device)  # pylint: disable=undefined-variable
    if not diff.get('result'):
        # The comparison itself failed; report its comment as the reason.
        return False, diff.get('comment')
    if diff.get('out'):
        # A non-empty diff means the running config differs.
        return True, ''
    return False, 'Configuration was not changed on the device.'
[ "def", "config_changed", "(", "inherit_napalm_device", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=unused-argument", "is_config_changed", "=", "False", "reason", "=", "''", "try_compare", "=", "compare_config", "(", "inherit_napalm_device", "=",...
Will prompt if the configuration has been changed. :return: A tuple with a boolean that specifies if the config was changed on the device.\ And a string that provides more details of the reason why the configuration was not changed. CLI Example: .. code-block:: bash salt '*' net.config_changed
[ "Will", "prompt", "if", "the", "configuration", "has", "been", "changed", "." ]
python
train
30.178571
fboender/ansible-cmdb
src/ansiblecmdb/ansible.py
https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/src/ansiblecmdb/ansible.py#L348-L361
def hosts_in_group(self, groupname):
    """
    Return a list of hostnames that are in a group.
    """
    members = []
    for hostname, hostinfo in self.hosts.items():
        if groupname == 'all':
            members.append(hostname)
        elif 'groups' not in hostinfo:
            # NOTE(review): preserves the original side effect of
            # initialising a missing 'groups' key with the queried group
            # name — presumably intentional; verify against callers.
            hostinfo['groups'] = [groupname]
        elif groupname in hostinfo['groups']:
            members.append(hostname)
    return members
[ "def", "hosts_in_group", "(", "self", ",", "groupname", ")", ":", "result", "=", "[", "]", "for", "hostname", ",", "hostinfo", "in", "self", ".", "hosts", ".", "items", "(", ")", ":", "if", "groupname", "==", "'all'", ":", "result", ".", "append", "(...
Return a list of hostnames that are in a group.
[ "Return", "a", "list", "of", "hostnames", "that", "are", "in", "a", "group", "." ]
python
train
34
portfors-lab/sparkle
sparkle/gui/plotting/calibration_explore_display.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/plotting/calibration_explore_display.py#L136-L143
def setXlimits(self, lims):
    """Sets the X axis limits of the signal plots

    :param lims: (min, max) of x axis, in same units as data
    :type lims: (float, float)
    """
    # Keep the stimulus and response signal plots in lock-step.
    for plot in (self.responseSignalPlot, self.stimSignalPlot):
        plot.setXlim(lims)
[ "def", "setXlimits", "(", "self", ",", "lims", ")", ":", "self", ".", "responseSignalPlot", ".", "setXlim", "(", "lims", ")", "self", ".", "stimSignalPlot", ".", "setXlim", "(", "lims", ")" ]
Sets the X axis limits of the signal plots :param lims: (min, max) of x axis, in same units as data :type lims: (float, float)
[ "Sets", "the", "X", "axis", "limits", "of", "the", "signal", "plots" ]
python
train
34.375
mitsei/dlkit
dlkit/json_/repository/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/objects.py#L1049-L1061
def clear_provider_links(self):
    """Removes the provider chain.

    raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
           ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for osid.learning.ActivityForm.clear_assets_template
    metadata = self.get_provider_links_metadata()
    if metadata.is_read_only() or metadata.is_required():
        raise errors.NoAccess()
    self._my_map['providerLinkIds'] = self._provider_links_default
[ "def", "clear_provider_links", "(", "self", ")", ":", "# Implemented from template for osid.learning.ActivityForm.clear_assets_template", "if", "(", "self", ".", "get_provider_links_metadata", "(", ")", ".", "is_read_only", "(", ")", "or", "self", ".", "get_provider_links_m...
Removes the provider chain. raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
[ "Removes", "the", "provider", "chain", "." ]
python
train
45.384615
JoelBender/bacpypes
py25/bacpypes/bvll.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/bvll.py#L89-L103
def bvlci_contents(self, use_dict=None, as_class=dict):
    """Return the contents of an object as a dict."""
    if _debug: BVLCI._debug("bvlci_contents use_dict=%r as_class=%r", use_dict, as_class)

    # create the target mapping if the caller didn't supply one
    if use_dict is None:
        use_dict = as_class()

    # record the BVLCI header fields
    for key, value in (('type', self.bvlciType),
                       ('function', self.bvlciFunction),
                       ('length', self.bvlciLength)):
        use_dict.__setitem__(key, value)

    # return what we built/updated
    return use_dict
[ "def", "bvlci_contents", "(", "self", ",", "use_dict", "=", "None", ",", "as_class", "=", "dict", ")", ":", "if", "_debug", ":", "BVLCI", ".", "_debug", "(", "\"bvlci_contents use_dict=%r as_class=%r\"", ",", "use_dict", ",", "as_class", ")", "# make/extend the ...
Return the contents of an object as a dict.
[ "Return", "the", "contents", "of", "an", "object", "as", "a", "dict", "." ]
python
train
38.2
CZ-NIC/yangson
yangson/schpattern.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/schpattern.py#L138-L140
def nullable(self, ctype: ContentType) -> bool:
    """Override the superclass method."""
    # A failed "when" condition makes the node vacuously nullable.
    if not self.check_when():
        return True
    return self.pattern.nullable(ctype)
[ "def", "nullable", "(", "self", ",", "ctype", ":", "ContentType", ")", "->", "bool", ":", "return", "(", "not", "self", ".", "check_when", "(", ")", "or", "self", ".", "pattern", ".", "nullable", "(", "ctype", ")", ")" ]
Override the superclass method.
[ "Override", "the", "superclass", "method", "." ]
python
train
54
pantsbuild/pants
src/python/pants/java/nailgun_executor.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/java/nailgun_executor.py#L113-L128
def _fingerprint(jvm_options, classpath, java_version):
    """Compute a fingerprint for this invocation of a Java task.

    :param list jvm_options: JVM options passed to the java invocation
    :param list classpath: The -cp arguments passed to the java invocation
    :param Revision java_version: return value from Distribution.version()
    :return: a hexstring representing a fingerprint of the java invocation
    """
    sha = hashlib.sha1()
    # TODO(John Sirois): hash classpath contents?
    fingerprint_inputs = (
        [option.encode('utf-8') for option in sorted(jvm_options)],
        [entry.encode('utf-8') for entry in sorted(classpath)],
        repr(java_version).encode('utf-8'),
    )
    for item in fingerprint_inputs:
        sha.update(str(item).encode('utf-8'))
    return sha.hexdigest() if PY3 else sha.hexdigest().decode('utf-8')
[ "def", "_fingerprint", "(", "jvm_options", ",", "classpath", ",", "java_version", ")", ":", "digest", "=", "hashlib", ".", "sha1", "(", ")", "# TODO(John Sirois): hash classpath contents?", "encoded_jvm_options", "=", "[", "option", ".", "encode", "(", "'utf-8'", ...
Compute a fingerprint for this invocation of a Java task. :param list jvm_options: JVM options passed to the java invocation :param list classpath: The -cp arguments passed to the java invocation :param Revision java_version: return value from Distribution.version() :return: a hexstring representing a fingerprint of the java invocation
[ "Compute", "a", "fingerprint", "for", "this", "invocation", "of", "a", "Java", "task", "." ]
python
train
57.75
msmbuilder/msmbuilder
msmbuilder/msm/ratematrix.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/msm/ratematrix.py#L271-L293
def _initial_guess(self, countsmat):
    """Generate an initial guess for the rate-matrix parameter vector theta.
    """
    # A previously fitted theta takes precedence over any guess strategy.
    if self.theta_ is not None:
        return self.theta_

    guess = self.guess
    if guess == 'log':
        transmat, pi = _transmat_mle_prinz(countsmat)
        K = np.real(scipy.linalg.logm(transmat)) / self.lag_time
    elif guess == 'pseudo':
        transmat, pi = _transmat_mle_prinz(countsmat)
        K = (transmat - np.eye(self.n_states_)) / self.lag_time
    elif isinstance(guess, np.ndarray):
        pi = _solve_ratemat_eigensystem(guess)[1][:, 0]
        K = guess

    S = np.multiply(np.sqrt(np.outer(pi, 1 / pi)), K)
    # Off-diagonal entries of S, clipped at zero, followed by log(pi).
    sflat = np.maximum(S[np.triu_indices_from(countsmat, k=1)], 0)
    return np.concatenate((sflat, np.log(pi)))
[ "def", "_initial_guess", "(", "self", ",", "countsmat", ")", ":", "if", "self", ".", "theta_", "is", "not", "None", ":", "return", "self", ".", "theta_", "if", "self", ".", "guess", "==", "'log'", ":", "transmat", ",", "pi", "=", "_transmat_mle_prinz", ...
Generate an initial guess for \theta.
[ "Generate", "an", "initial", "guess", "for", "\\", "theta", "." ]
python
train
35.304348
josuebrunel/myql
myql/utils.py
https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/utils.py#L12-L16
def pretty_xml(data):
    """Return a pretty-formatted XML document as UTF-8 bytes.

    :param data: raw XML document as UTF-8 encoded bytes.
    """
    document = minidom.parseString(data.decode('utf-8'))
    return document.toprettyxml(indent='\t', encoding='utf-8')
[ "def", "pretty_xml", "(", "data", ")", ":", "parsed_string", "=", "minidom", ".", "parseString", "(", "data", ".", "decode", "(", "'utf-8'", ")", ")", "return", "parsed_string", ".", "toprettyxml", "(", "indent", "=", "'\\t'", ",", "encoding", "=", "'utf-8...
Return a pretty formated xml
[ "Return", "a", "pretty", "formated", "xml" ]
python
train
38.2
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/PharLapCommon.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/PharLapCommon.py#L68-L86
def getPharLapVersion():
    """Returns the version of the installed ETS Tool Suite as a
    decimal number.  This version comes from the ETS_VER #define in
    the embkern.h header.  For example, '#define ETS_VER 1010' (which
    is what Phar Lap 10.1 defines) would cause this method to return
    1010.  Phar Lap 9.1 does not have such a #define, but this method
    will return 910 as a default.

    Raises UserError if no installed version of Phar Lap can be found."""

    include_path = os.path.join(getPharLapPath(), os.path.normpath("include/embkern.h"))
    if not os.path.exists(include_path):
        raise SCons.Errors.UserError("Cannot find embkern.h in ETS include directory.\nIs Phar Lap ETS installed properly?")
    # Read the header via a context manager so the file handle is closed
    # promptly (the original open(...).read() leaked the file object).
    with open(include_path, 'r') as header_file:
        mo = REGEX_ETS_VER.search(header_file.read())
    if mo:
        return int(mo.group(1))
    # Default return for Phar Lap 9.1
    return 910
[ "def", "getPharLapVersion", "(", ")", ":", "include_path", "=", "os", ".", "path", ".", "join", "(", "getPharLapPath", "(", ")", ",", "os", ".", "path", ".", "normpath", "(", "\"include/embkern.h\"", ")", ")", "if", "not", "os", ".", "path", ".", "exis...
Returns the version of the installed ETS Tool Suite as a decimal number. This version comes from the ETS_VER #define in the embkern.h header. For example, '#define ETS_VER 1010' (which is what Phar Lap 10.1 defines) would cause this method to return 1010. Phar Lap 9.1 does not have such a #define, but this method will return 910 as a default. Raises UserError if no installed version of Phar Lap can be found.
[ "Returns", "the", "version", "of", "the", "installed", "ETS", "Tool", "Suite", "as", "a", "decimal", "number", ".", "This", "version", "comes", "from", "the", "ETS_VER", "#define", "in", "the", "embkern", ".", "h", "header", ".", "For", "example", "#define...
python
train
45.947368