repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
smarie/python-parsyfiles
parsyfiles/plugins_optional/support_for_pandas.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/plugins_optional/support_for_pandas.py#L156-L180
def single_row_or_col_df_to_series(desired_type: Type[T], single_rowcol_df: pd.DataFrame, logger: Logger, **kwargs) \
        -> pd.Series:
    """
    Helper method to convert a dataframe with one row, or one or two columns, into a Series.

    :param desired_type: the target type (unused here; kept for converter signature compatibility)
    :param single_rowcol_df: the dataframe to convert (was misdocumented as ``single_col_df``)
    :param logger: logger (unused here; kept for converter signature compatibility)
    :param kwargs: additional converter options (ignored)
    :return: a pandas Series built from the single row or column
    :raises ValueError: if the dataframe is not 1 row, 1 column, or 2 columns over a RangeIndex
    """
    if single_rowcol_df.shape[0] == 1:
        # one row: transpose so the row becomes the single column, then select it
        return single_rowcol_df.transpose()[0]
    elif single_rowcol_df.shape[1] == 2 and isinstance(single_rowcol_df.index, pd.RangeIndex):
        # two columns but the index contains nothing but the row number:
        # we can use the first column as the index
        d = single_rowcol_df.set_index(single_rowcol_df.columns[0])
        return d[d.columns[0]]
    elif single_rowcol_df.shape[1] == 1:
        # one column and one index
        d = single_rowcol_df
        return d[d.columns[0]]
    else:
        raise ValueError('Unable to convert provided dataframe to a series : '
                         'expected exactly 1 row or 1 column, found : ' + str(single_rowcol_df.shape))
[ "def", "single_row_or_col_df_to_series", "(", "desired_type", ":", "Type", "[", "T", "]", ",", "single_rowcol_df", ":", "pd", ".", "DataFrame", ",", "logger", ":", "Logger", ",", "*", "*", "kwargs", ")", "->", "pd", ".", "Series", ":", "if", "single_rowcol...
Helper method to convert a dataframe with one row or one or two columns into a Series :param desired_type: :param single_col_df: :param logger: :param kwargs: :return:
[ "Helper", "method", "to", "convert", "a", "dataframe", "with", "one", "row", "or", "one", "or", "two", "columns", "into", "a", "Series" ]
python
train
DataBiosphere/toil
src/toil/jobStores/fileJobStore.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/fileJobStore.py#L323-L343
def _getUserCodeFunctionName(self): """ Get the name of the function 4 levels up the stack (above this function, our caller, and whatever Toil code delegated to the JobStore implementation). Returns a string usable in a filename, and returns a placeholder string if the function name is unsuitable or can't be gotten. """ # Record the name of the job/function writing the file in the file name try: # It ought to be fourth-to-last on the stack, above us, the write # function, and the FileStore or context manager. Probably. sourceFunctionName = traceback.extract_stack()[-4][2] except: sourceFunctionName = "UNKNOWNJOB" # make sure the function name fetched has no spaces or oddities if not re.match("^[A-Za-z0-9_-]*$", sourceFunctionName): sourceFunctionName = "ODDLYNAMEDJOB" return sourceFunctionName
[ "def", "_getUserCodeFunctionName", "(", "self", ")", ":", "# Record the name of the job/function writing the file in the file name", "try", ":", "# It ought to be fourth-to-last on the stack, above us, the write", "# function, and the FileStore or context manager. Probably.", "sourceFunctionNa...
Get the name of the function 4 levels up the stack (above this function, our caller, and whatever Toil code delegated to the JobStore implementation). Returns a string usable in a filename, and returns a placeholder string if the function name is unsuitable or can't be gotten.
[ "Get", "the", "name", "of", "the", "function", "4", "levels", "up", "the", "stack", "(", "above", "this", "function", "our", "caller", "and", "whatever", "Toil", "code", "delegated", "to", "the", "JobStore", "implementation", ")", ".", "Returns", "a", "str...
python
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1198-L1211
def items(self):
    """Return items as ``(key, value)`` pairs.

    Returns:
        Iterable[Tuple[str, object]]:
            The ``(key, value)`` pairs representing this row.

    Examples:

        >>> list(Row(('a', 'b'), {'x': 0, 'y': 1}).items())
        [('x', 'a'), ('y', 'b')]
    """
    # Deep-copy each value so callers cannot mutate the row's internal
    # storage through the returned objects.
    # MODERNIZED: dict.items() replaces six.iteritems (same behavior,
    # drops the py2-compat dependency).
    for key, index in self._xxx_field_to_index.items():
        yield (key, copy.deepcopy(self._xxx_values[index]))
[ "def", "items", "(", "self", ")", ":", "for", "key", ",", "index", "in", "six", ".", "iteritems", "(", "self", ".", "_xxx_field_to_index", ")", ":", "yield", "(", "key", ",", "copy", ".", "deepcopy", "(", "self", ".", "_xxx_values", "[", "index", "]"...
Return items as ``(key, value)`` pairs. Returns: Iterable[Tuple[str, object]]: The ``(key, value)`` pairs representing this row. Examples: >>> list(Row(('a', 'b'), {'x': 0, 'y': 1}).items()) [('x', 'a'), ('y', 'b')]
[ "Return", "items", "as", "(", "key", "value", ")", "pairs", "." ]
python
train
Metatab/metatab
metatab/appurl.py
https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/appurl.py#L92-L96
def doc(self):
    """Return the metatab document for the URL"""
    from metatab import MetatabDoc

    target = self.get_resource().get_target()
    return MetatabDoc(target.inner)
[ "def", "doc", "(", "self", ")", ":", "from", "metatab", "import", "MetatabDoc", "t", "=", "self", ".", "get_resource", "(", ")", ".", "get_target", "(", ")", "return", "MetatabDoc", "(", "t", ".", "inner", ")" ]
Return the metatab document for the URL
[ "Return", "the", "metatab", "document", "for", "the", "URL" ]
python
train
ulule/django-linguist
linguist/admin.py
https://github.com/ulule/django-linguist/blob/d2b95a6ab921039d56d5eeb352badfe5be9e8f77/linguist/admin.py#L49-L56
def languages_column(self, obj):
    """
    Adds languages columns.
    """
    available = " ".join(self.get_available_languages(obj))
    return '<span class="available-languages">{0}</span>'.format(available)
[ "def", "languages_column", "(", "self", ",", "obj", ")", ":", "languages", "=", "self", ".", "get_available_languages", "(", "obj", ")", "return", "'<span class=\"available-languages\">{0}</span>'", ".", "format", "(", "\" \"", ".", "join", "(", "languages", ")", ...
Adds languages columns.
[ "Adds", "languages", "columns", "." ]
python
train
gristlabs/asttokens
asttokens/asttokens.py
https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/asttokens.py#L111-L116
def get_token_from_offset(self, offset):
    """
    Returns the token containing the given character offset (0-based position
    in source text), or the preceeding token if the position is between tokens.
    """
    # bisect finds the insertion point; the token covering `offset`
    # starts at the previous entry.
    idx = bisect.bisect(self._token_offsets, offset) - 1
    return self._tokens[idx]
[ "def", "get_token_from_offset", "(", "self", ",", "offset", ")", ":", "return", "self", ".", "_tokens", "[", "bisect", ".", "bisect", "(", "self", ".", "_token_offsets", ",", "offset", ")", "-", "1", "]" ]
Returns the token containing the given character offset (0-based position in source text), or the preceeding token if the position is between tokens.
[ "Returns", "the", "token", "containing", "the", "given", "character", "offset", "(", "0", "-", "based", "position", "in", "source", "text", ")", "or", "the", "preceeding", "token", "if", "the", "position", "is", "between", "tokens", "." ]
python
train
rbuffat/pyepw
pyepw/epw.py
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L241-L264
def wmo(self, value=None):
    """Corresponds to IDD Field `wmo` usually a 6 digit field. Used as
    alpha in EnergyPlus.

    Args:
        value (str): value for IDD Field `wmo` if `value` is None it
            will not be checked against the specification and is
            assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # None means "missing value": store it without validation.
        self._wmo = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError('value {} need to be of type str '
                         'for field `wmo`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma '
                         'for field `wmo`')
    self._wmo = value
[ "def", "wmo", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "str", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type str '", "'...
Corresponds to IDD Field `wmo` usually a 6 digit field. Used as alpha in EnergyPlus. Args: value (str): value for IDD Field `wmo` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "wmo", "usually", "a", "6", "digit", "field", ".", "Used", "as", "alpha", "in", "EnergyPlus", "." ]
python
train
apple/turicreate
src/unity/python/turicreate/extensions.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/extensions.py#L501-L584
def ext_import(soname, module_subpath=""):
    """
    Loads a turicreate toolkit module (a shared library) into the
    tc.extensions namespace.

    Toolkit module created via SDK can either be directly imported,
    e.g. ``import example`` or via this function, e.g.
    ``turicreate.ext_import("example.so")``. Use ``ext_import`` when you
    need more namespace control, or when the shared library is not local,
    e.g. in http, s3 or hdfs.

    Parameters
    ----------
    soname : string
        The filename of the shared library to load. This can be a URL, or
        a HDFS location. For instance if soname is somewhere/outthere/toolkit.so
        The functions in toolkit.so will appear in tc.extensions.toolkit.*

    module_subpath : string, optional
        Any additional module paths to prepend to the toolkit module after
        it is imported. For instance if soname is
        somewhere/outthere/toolkit.so, by default the functions in
        toolkit.so will appear in tc.extensions.toolkit.*. However, if
        module_subpath="somewhere.outthere", the functions in toolkit.so
        will appear in tc.extensions.somewhere.outthere.toolkit.*

    Returns
    -------
    out : a list of functions and classes loaded.

    Examples
    --------
    >>> turicreate.ext_import('example1.so')
    ['example1.square_root']
    >>> turicreate.extensions.example1.square_root(9)
    3.0

    We can customize the import location with module_subpath which can be
    used to avoid namespace conflicts when you have multiple toolkits with
    the same filename.

    >>> turicreate.ext_import('example1.so', 'math')
    ['math.example1.square_root']
    >>> turicreate.extensions.math.example1.square_root(9)
    3.0

    The module can also be imported directly, but turicreate *must* be
    imported first. turicreate will intercept the module loading process
    to load the toolkit.

    >>> import turicreate
    >>> import example1 #searches for example1.so in all the python paths
    >>> example1.square_root(9)
    3.0
    """
    unity = _get_unity()

    import os

    # Local files are resolved to an absolute path; anything else
    # (http, s3, hdfs, ...) is normalized to an internal URL.
    if os.path.exists(soname):
        soname = os.path.abspath(soname)
    else:
        soname = _make_internal_url(soname)

    errors = unity.load_toolkit(soname, module_subpath)
    if len(errors) > 0:
        raise RuntimeError(errors)

    # push the functions into the corresponding module namespace
    _publish()

    functions = unity.list_toolkit_functions_in_dynamic_module(soname)
    classes = unity.list_toolkit_classes_in_dynamic_module(soname)
    return functions + classes
[ "def", "ext_import", "(", "soname", ",", "module_subpath", "=", "\"\"", ")", ":", "unity", "=", "_get_unity", "(", ")", "import", "os", "if", "os", ".", "path", ".", "exists", "(", "soname", ")", ":", "soname", "=", "os", ".", "path", ".", "abspath",...
Loads a turicreate toolkit module (a shared library) into the tc.extensions namespace. Toolkit module created via SDK can either be directly imported, e.g. ``import example`` or via this function, e.g. ``turicreate.ext_import("example.so")``. Use ``ext_import`` when you need more namespace control, or when the shared library is not local, e.g. in http, s3 or hdfs. Parameters ---------- soname : string The filename of the shared library to load. This can be a URL, or a HDFS location. For instance if soname is somewhere/outthere/toolkit.so The functions in toolkit.so will appear in tc.extensions.toolkit.* module_subpath : string, optional Any additional module paths to prepend to the toolkit module after it is imported. For instance if soname is somewhere/outthere/toolkit.so, by default the functions in toolkit.so will appear in tc.extensions.toolkit.*. However, if I module_subpath="somewhere.outthere", the functions in toolkit.so will appear in tc.extensions.somewhere.outthere.toolkit.* Returns ------- out : a list of functions and classes loaded. Examples -------- For instance, given a module which implements the function "square_root", .. code-block:: c++ #include <cmath> #include <turicreate/sdk/toolkit_function_macros.hpp> double square_root(double a) { return sqrt(a); } BEGIN_FUNCTION_REGISTRATION REGISTER_FUNCTION(square_root, "a"); END_FUNCTION_REGISTRATION compiled into example.so >>> turicreate.ext_import('example1.so') ['example1.square_root'] >>> turicreate.extensions.example1.square_root(9) 3.0 We can customize the import location with module_subpath which can be used to avoid namespace conflicts when you have multiple toolkits with the same filename. >>> turicreate.ext_import('example1.so', 'math') ['math.example1.square_root'] >>> turicreate.extensions.math.example1.square_root(9) 3.0 The module can also be imported directly, but turicreate *must* be imported first. turicreate will intercept the module loading process to load the toolkit. 
>>> import turicreate >>> import example1 #searches for example1.so in all the python paths >>> example1.square_root(9) 3.0
[ "Loads", "a", "turicreate", "toolkit", "module", "(", "a", "shared", "library", ")", "into", "the", "tc", ".", "extensions", "namespace", "." ]
python
train
GaryLee/cmdlet
cmdlet/cmds.py
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L248-L287
def pack(prev, n, rest=False, **kw):
    """pack pipe takes n elements from previous generator and yield one
    list to next.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param n: Number of elements grouped into each yielded list.
    :type n: integer
    :param rest: Set True to allow to output the rest part of last elements.
    :type rest: boolean
    :param padding: Specify the padding element for the rest part of last
        elements. Supplying ``padding`` implies that the incomplete last
        group is emitted (padded up to *n* elements).
    :returns: generator

    :Example:
    >>> result([1,2,3,4,5,6,7] | pack(3))
    [[1, 2, 3], [4, 5, 6]]

    >>> result([1,2,3,4,5,6,7] | pack(3, rest=True))
    [[1, 2, 3], [4, 5, 6], [7,]]

    >>> result([1,2,3,4,5,6,7] | pack(3, padding=None))
    [[1, 2, 3], [4, 5, 6], [7, None, None]]
    """
    # Distinguish "padding explicitly provided" from "no padding", since
    # None is a legal padding value.
    use_padding = 'padding' in kw
    padding = kw.get('padding')

    items = []
    i = 0  # survives the loop; 0 only when prev is empty (items is then empty too)
    for i, data in enumerate(prev, 1):
        items.append(data)
        if (i % n) == 0:
            yield items
            items = []
    # BUGFIX: the third docstring example (padding without rest=True)
    # previously yielded nothing for the incomplete group, because only
    # `rest` was checked. Supplying a padding now implies emitting the
    # padded remainder, matching the documented behavior.
    if items and (rest or use_padding):
        if use_padding:
            items.extend([padding] * (n - (i % n)))
        yield items
[ "def", "pack", "(", "prev", ",", "n", ",", "rest", "=", "False", ",", "*", "*", "kw", ")", ":", "if", "'padding'", "in", "kw", ":", "use_padding", "=", "True", "padding", "=", "kw", "[", "'padding'", "]", "else", ":", "use_padding", "=", "False", ...
pack pipe takes n elements from previous generator and yield one list to next. :param prev: The previous iterator of pipe. :type prev: Pipe :param rest: Set True to allow to output the rest part of last elements. :type prev: boolean :param padding: Specify the padding element for the rest part of last elements. :type prev: boolean :returns: generator :Example: >>> result([1,2,3,4,5,6,7] | pack(3)) [[1, 2, 3], [4, 5, 6]] >>> result([1,2,3,4,5,6,7] | pack(3, rest=True)) [[1, 2, 3], [4, 5, 6], [7,]] >>> result([1,2,3,4,5,6,7] | pack(3, padding=None)) [[1, 2, 3], [4, 5, 6], [7, None, None]]
[ "pack", "pipe", "takes", "n", "elements", "from", "previous", "generator", "and", "yield", "one", "list", "to", "next", "." ]
python
valid
guaix-ucm/pyemir
emirdrp/processing/bardetect.py
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/bardetect.py#L110-L120
def simple_prot(x, start):
    """Find the first peak to the right of start"""
    # start must be >= 1 so that x[idx - 1] is a valid access
    for idx in range(start, len(x) - 1):
        prev_v, cur_v, next_v = x[idx - 1], x[idx], x[idx + 1]
        # strictly rising into idx, non-increasing after it => peak
        if cur_v - prev_v > 0 and cur_v - next_v >= 0:
            return idx
    return None
[ "def", "simple_prot", "(", "x", ",", "start", ")", ":", "# start must b >= 1", "for", "i", "in", "range", "(", "start", ",", "len", "(", "x", ")", "-", "1", ")", ":", "a", ",", "b", ",", "c", "=", "x", "[", "i", "-", "1", "]", ",", "x", "["...
Find the first peak to the right of start
[ "Find", "the", "first", "peak", "to", "the", "right", "of", "start" ]
python
train
pybel/pybel
src/pybel/canonicalize.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/canonicalize.py#L166-L175
def _set_annotation_to_str(annotation_data: Mapping[str, Mapping[str, bool]], key: str) -> str: """Return a set annotation string.""" value = annotation_data[key] if len(value) == 1: return 'SET {} = "{}"'.format(key, list(value)[0]) x = ('"{}"'.format(v) for v in sorted(value)) return 'SET {} = {{{}}}'.format(key, ', '.join(x))
[ "def", "_set_annotation_to_str", "(", "annotation_data", ":", "Mapping", "[", "str", ",", "Mapping", "[", "str", ",", "bool", "]", "]", ",", "key", ":", "str", ")", "->", "str", ":", "value", "=", "annotation_data", "[", "key", "]", "if", "len", "(", ...
Return a set annotation string.
[ "Return", "a", "set", "annotation", "string", "." ]
python
train
gwpy/gwpy
gwpy/time/__main__.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/time/__main__.py#L35-L63
def main(args=None):
    """Parse command-line arguments, tconvert inputs, and print
    """
    # define command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("-V", "--version", action="version",
                        version=__version__,
                        help="show version number and exit")
    parser.add_argument("-l", "--local", action="store_true",
                        default=False,
                        help="print datetimes in local timezone")
    parser.add_argument("-f", "--format", type=str, action="store",
                        default=r"%Y-%m-%d %H:%M:%S.%f %Z",
                        help="output datetime format (default: %(default)r)")
    parser.add_argument("input", nargs="*",
                        help="GPS or datetime string to convert")

    # parse and convert
    opts = parser.parse_args(args)
    result = tconvert(" ".join(opts.input))

    # print (now with timezones!)
    if isinstance(result, datetime.datetime):
        result = result.replace(tzinfo=tz.tzutc())
        if opts.local:
            result = result.astimezone(tz.tzlocal())
        print(result.strftime(opts.format))
    else:
        print(result)
[ "def", "main", "(", "args", "=", "None", ")", ":", "# define command line arguments", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "__doc__", ")", "parser", ".", "add_argument", "(", "\"-V\"", ",", "\"--version\"", ",", "action", ...
Parse command-line arguments, tconvert inputs, and print
[ "Parse", "command", "-", "line", "arguments", "tconvert", "inputs", "and", "print" ]
python
train
alerta/python-alerta-client
alertaclient/commands/cmd_version.py
https://github.com/alerta/python-alerta-client/blob/7eb367b5fe87d5fc20b54dea8cddd7f09e251afa/alertaclient/commands/cmd_version.py#L11-L18
def cli(ctx, obj):
    """Show Alerta server and client versions."""
    client = obj['client']
    server_version = client.mgmt_status()['version']
    for line in (
        'alerta {}'.format(server_version),
        'alerta client {}'.format(client_version),
        'requests {}'.format(requests_version),
        'click {}'.format(click.__version__),
    ):
        click.echo(line)
    ctx.exit()
[ "def", "cli", "(", "ctx", ",", "obj", ")", ":", "client", "=", "obj", "[", "'client'", "]", "click", ".", "echo", "(", "'alerta {}'", ".", "format", "(", "client", ".", "mgmt_status", "(", ")", "[", "'version'", "]", ")", ")", "click", ".", "echo",...
Show Alerta server and client versions.
[ "Show", "Alerta", "server", "and", "client", "versions", "." ]
python
train
alpacahq/pipeline-live
pipeline_live/engine.py
https://github.com/alpacahq/pipeline-live/blob/6f42d64354a17e2546ca74f18004454f2019bd83/pipeline_live/engine.py#L146-L180
def _inputs_for_term(term, workspace, graph):
    """
    Compute inputs for the given term.

    This is mostly complicated by the fact that for each input we store
    as many rows as will be necessary to serve **any** computation
    requiring that input.

    :param term: pipeline term whose inputs are being gathered; its
        ``windowed``, ``inputs`` and (when windowed) ``window_length``
        attributes are read
    :param workspace: mapping of input term -> already-computed data
    :param graph: dependency graph; ``graph.offset`` maps
        ``(term, input)`` pairs to row offsets
    :return: list of input arrays, one per entry of ``term.inputs``,
        in the same order
    """
    offsets = graph.offset
    out = []
    if term.windowed:
        # If term is windowed, then all input data should be instances of
        # AdjustedArray.
        for input_ in term.inputs:
            adjusted_array = ensure_adjusted_array(
                workspace[input_], input_.missing_value,
            )
            # traverse() produces the rolling-window view at this term's
            # offset for this particular input.
            out.append(
                adjusted_array.traverse(
                    window_length=term.window_length,
                    offset=offsets[term, input_],
                )
            )
    else:
        # If term is not windowed, input_data may be an AdjustedArray or
        # np.ndarray. Coerce the former to the latter.
        for input_ in term.inputs:
            input_data = ensure_ndarray(workspace[input_])
            offset = offsets[term, input_]
            # OPTIMIZATION: Don't make a copy by doing input_data[0:] if
            # offset is zero.
            if offset:
                input_data = input_data[offset:]
            out.append(input_data)
    return out
[ "def", "_inputs_for_term", "(", "term", ",", "workspace", ",", "graph", ")", ":", "offsets", "=", "graph", ".", "offset", "out", "=", "[", "]", "if", "term", ".", "windowed", ":", "# If term is windowed, then all input data should be instances of", "# AdjustedArray....
Compute inputs for the given term. This is mostly complicated by the fact that for each input we store as many rows as will be necessary to serve **any** computation requiring that input.
[ "Compute", "inputs", "for", "the", "given", "term", "." ]
python
train
Chilipp/psyplot
psyplot/project.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/project.py#L2298-L2310
def _scp(p, main=False):
    """scp version that allows a bit more control over whether the project
    is a main project or not"""
    global _current_subproject
    global _current_project
    if p is None:
        # No project given: attach a fresh subproject either to the
        # current main project or to a brand-new one.
        if main or _current_project is None:
            mp = project()
        else:
            mp = _current_project
        _current_subproject = Project(main=mp)
    elif not main:
        _current_subproject = p
    else:
        _current_project = p
[ "def", "_scp", "(", "p", ",", "main", "=", "False", ")", ":", "global", "_current_subproject", "global", "_current_project", "if", "p", "is", "None", ":", "mp", "=", "project", "(", ")", "if", "main", "or", "_current_project", "is", "None", "else", "_cur...
scp version that allows a bit more control over whether the project is a main project or not
[ "scp", "version", "that", "allows", "a", "bit", "more", "control", "over", "whether", "the", "project", "is", "a", "main", "project", "or", "not" ]
python
train
remram44/usagestats
wsgi/usagestats_server.py
https://github.com/remram44/usagestats/blob/6ffd1a51d81d1b4570916c1594aee6a98089fa71/wsgi/usagestats_server.py#L50-L83
def application(environ, start_response):
    """WSGI interface.
    """
    def send_response(status, body):
        # WSGI bodies must be bytes; encode text responses as UTF-8.
        payload = body if isinstance(body, bytes) else body.encode('utf-8')
        headers = [('Content-Type', 'text/plain'),
                   ('Content-Length', '%d' % len(payload))]
        start_response(status, headers)
        return [payload]

    if environ['REQUEST_METHOD'] != 'POST':
        return send_response('403 Forbidden', "invalid request")

    # Gets the posted input
    try:
        request_body_size = int(environ['CONTENT_LENGTH'])
    except (KeyError, ValueError):
        return send_response('400 Bad Request', "invalid content length")
    if request_body_size > MAX_SIZE:
        return send_response('403 Forbidden', "report too big")
    request_body = environ['wsgi.input'].read(request_body_size)

    # Tries to store
    response_body = store(request_body, environ.get('REMOTE_ADDR'))
    if not response_body:
        status = '200 OK'
        response_body = "stored"
    else:
        # NOTE(review): '501 Server Error' is a nonstandard reason phrase
        # (501 is "Not Implemented"); confirm whether 500 was intended.
        status = '501 Server Error'

    # Sends the response
    return send_response(status, response_body)
[ "def", "application", "(", "environ", ",", "start_response", ")", ":", "def", "send_response", "(", "status", ",", "body", ")", ":", "if", "not", "isinstance", "(", "body", ",", "bytes", ")", ":", "body", "=", "body", ".", "encode", "(", "'utf-8'", ")"...
WSGI interface.
[ "WSGI", "interface", "." ]
python
train
lappis-unb/salic-ml
src/salicml/metrics/finance/proponent_projects.py
https://github.com/lappis-unb/salic-ml/blob/1b3ebc4f8067740999897ccffd9892dc94482a93/src/salicml/metrics/finance/proponent_projects.py#L9-L46
def proponent_projects(pronac, data):
    """
    Checks the CNPJ/CPF of the proponent of project with the given pronac
    and returns all the projects that have been submitted by this
    proponent and all projects that have already been analyzed.

    :param pronac: project identifier used to look up the proponent
    :param data: not referenced in this body; presumably kept for
        metric-function signature compatibility -- TODO confirm
    :return: dict with the proponent's ``cpf_cnpj`` and the submitted /
        analyzed project summaries
    """
    cpf_cnpj = get_cpf_cnpj_by_pronac(pronac)
    proponent_submitted_projects = {}
    proponent_analyzed_projects = {}
    if cpf_cnpj:
        submitted_projects = get_proponent_submitted_projects(cpf_cnpj)
        analyzed_projects = get_proponent_analyzed_projects(cpf_cnpj)
        # Each helper result is expected to expose 'num_pronacs' and
        # 'pronac_list'; a missing key leaves the summary dict empty.
        try:
            proponent_submitted_projects = {
                'number_of_projects': submitted_projects['num_pronacs'],
                'pronacs_of_this_proponent': submitted_projects['pronac_list']
            }
        except KeyError:
            pass
        try:
            proponent_analyzed_projects = {
                'number_of_projects': analyzed_projects['num_pronacs'],
                'pronacs_of_this_proponent': analyzed_projects['pronac_list']
            }
        except KeyError:
            pass
    return {
        'cpf_cnpj': cpf_cnpj,
        # NOTE(review): len() of this summary dict is always 0 or 2; if
        # 'valor' is meant to be the number of projects, this looks
        # suspicious -- confirm the intended semantics before changing.
        'valor': len(proponent_submitted_projects),
        'projetos_submetidos': proponent_submitted_projects,
        'projetos_analizados': proponent_analyzed_projects,
    }
[ "def", "proponent_projects", "(", "pronac", ",", "data", ")", ":", "cpf_cnpj", "=", "get_cpf_cnpj_by_pronac", "(", "pronac", ")", "proponent_submitted_projects", "=", "{", "}", "proponent_analyzed_projects", "=", "{", "}", "if", "cpf_cnpj", ":", "submitted_projects"...
Checks the CNPJ/CPF of the proponent of project with the given pronac and returns all the projects that have been submitted by this proponent and all projects that have already been analyzed.
[ "Checks", "the", "CNPJ", "/", "CPF", "of", "the", "proponent", "of", "project", "with", "the", "given", "pronac", "and", "returns", "all", "the", "projects", "that", "have", "been", "submitted", "by", "this", "proponent", "and", "all", "projects", "that", ...
python
train
Azure/azure-sdk-for-python
azure-servicebus/azure/servicebus/servicebus_client.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicebus/azure/servicebus/servicebus_client.py#L104-L133
def get_queue(self, queue_name):
    """Get a client for a queue entity.

    :param queue_name: The name of the queue.
    :type queue_name: str
    :rtype: ~azure.servicebus.servicebus_client.QueueClient
    :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the
     namespace is not found.
    :raises: ~azure.servicebus.common.errors.ServiceBusResourceNotFound if the
     queue is not found.

    Example:
        .. literalinclude:: ../examples/test_examples.py
            :start-after: [START get_queue_client]
            :end-before: [END get_queue_client]
            :language: python
            :dedent: 8
            :caption: Get the specific queue client from Service Bus client

    """
    try:
        queue = self.mgmt_client.get_queue(queue_name)
    except requests.exceptions.ConnectionError as e:
        raise ServiceBusConnectionError("Namespace: {} not found".format(self.service_namespace), e)
    except AzureServiceBusResourceNotFound:
        # BUGFIX: corrected typo in user-facing error message
        # ("Specificed" -> "Specified").
        raise ServiceBusResourceNotFound("Specified queue does not exist.")
    return QueueClient.from_entity(
        self._get_host(), queue,
        shared_access_key_name=self.shared_access_key_name,
        shared_access_key_value=self.shared_access_key_value,
        mgmt_client=self.mgmt_client,
        debug=self.debug)
[ "def", "get_queue", "(", "self", ",", "queue_name", ")", ":", "try", ":", "queue", "=", "self", ".", "mgmt_client", ".", "get_queue", "(", "queue_name", ")", "except", "requests", ".", "exceptions", ".", "ConnectionError", "as", "e", ":", "raise", "Service...
Get a client for a queue entity. :param queue_name: The name of the queue. :type queue_name: str :rtype: ~azure.servicebus.servicebus_client.QueueClient :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found. :raises: ~azure.servicebus.common.errors.ServiceBusResourceNotFound if the queue is not found. Example: .. literalinclude:: ../examples/test_examples.py :start-after: [START get_queue_client] :end-before: [END get_queue_client] :language: python :dedent: 8 :caption: Get the specific queue client from Service Bus client
[ "Get", "a", "client", "for", "a", "queue", "entity", "." ]
python
test
twisted/vertex
vertex/tcpdfa.py
https://github.com/twisted/vertex/blob/feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca/vertex/tcpdfa.py#L241-L252
def maybeReceiveAck(self, ackPacket):
    """
    Receive an L{ack} or L{synAck} input from the given packet.
    """
    # Consume the pending predicate: after this call, further plain ACKs
    # are ignored until a new predicate is installed.
    pendingPredicate = self.ackPredicate
    self.ackPredicate = lambda packet: False

    if ackPacket.syn:
        # New SYN packets are always news.
        self.synAck()
    elif pendingPredicate(ackPacket):
        self.ack()
[ "def", "maybeReceiveAck", "(", "self", ",", "ackPacket", ")", ":", "ackPredicate", "=", "self", ".", "ackPredicate", "self", ".", "ackPredicate", "=", "lambda", "packet", ":", "False", "if", "ackPacket", ".", "syn", ":", "# New SYN packets are always news.", "se...
Receive an L{ack} or L{synAck} input from the given packet.
[ "Receive", "an", "L", "{", "ack", "}", "or", "L", "{", "synAck", "}", "input", "from", "the", "given", "packet", "." ]
python
train
cocaine/cocaine-tools
cocaine/tools/dispatch.py
https://github.com/cocaine/cocaine-tools/blob/d8834f8e04ca42817d5f4e368d471484d4b3419f/cocaine/tools/dispatch.py#L1472-L1480
def unicorn_edit(path, **kwargs):
    """Edit Unicorn node interactively.
    """
    ctx = Context(**kwargs)
    ctx.timeout = None  # interactive editing must not time out
    action_kwargs = {
        'unicorn': ctx.repo.create_secure_service('unicorn'),
        'path': path,
    }
    ctx.execute_action('unicorn:edit', **action_kwargs)
[ "def", "unicorn_edit", "(", "path", ",", "*", "*", "kwargs", ")", ":", "ctx", "=", "Context", "(", "*", "*", "kwargs", ")", "ctx", ".", "timeout", "=", "None", "ctx", ".", "execute_action", "(", "'unicorn:edit'", ",", "*", "*", "{", "'unicorn'", ":",...
Edit Unicorn node interactively.
[ "Edit", "Unicorn", "node", "interactively", "." ]
python
train
deep-compute/logagg
logagg/formatters.py
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/formatters.py#L273-L304
def basescript(line): ''' >>> import pprint >>> input_line = '{"level": "warning", "timestamp": "2018-02-07T06:37:00.297610Z", "event": "exited via keyboard interrupt", "type": "log", "id": "20180207T063700_4d03fe800bd111e89ecb96000007bc65", "_": {"ln": 58, "file": "/usr/local/lib/python2.7/dist-packages/basescript/basescript.py", "name": "basescript.basescript", "fn": "start"}}' >>> output_line1 = basescript(input_line) >>> pprint.pprint(output_line1) {'data': {u'_': {u'file': u'/usr/local/lib/python2.7/dist-packages/basescript/basescript.py', u'fn': u'start', u'ln': 58, u'name': u'basescript.basescript'}, u'event': u'exited via keyboard interrupt', u'id': u'20180207T063700_4d03fe800bd111e89ecb96000007bc65', u'level': u'warning', u'timestamp': u'2018-02-07T06:37:00.297610Z', u'type': u'log'}, 'event': u'exited via keyboard interrupt', 'id': u'20180207T063700_4d03fe800bd111e89ecb96000007bc65', 'level': u'warning', 'timestamp': u'2018-02-07T06:37:00.297610Z', 'type': u'log'} ''' log = json.loads(line) return dict( timestamp=log['timestamp'], data=log, id=log['id'], type=log['type'], level=log['level'], event=log['event'] )
[ "def", "basescript", "(", "line", ")", ":", "log", "=", "json", ".", "loads", "(", "line", ")", "return", "dict", "(", "timestamp", "=", "log", "[", "'timestamp'", "]", ",", "data", "=", "log", ",", "id", "=", "log", "[", "'id'", "]", ",", "type"...
>>> import pprint >>> input_line = '{"level": "warning", "timestamp": "2018-02-07T06:37:00.297610Z", "event": "exited via keyboard interrupt", "type": "log", "id": "20180207T063700_4d03fe800bd111e89ecb96000007bc65", "_": {"ln": 58, "file": "/usr/local/lib/python2.7/dist-packages/basescript/basescript.py", "name": "basescript.basescript", "fn": "start"}}' >>> output_line1 = basescript(input_line) >>> pprint.pprint(output_line1) {'data': {u'_': {u'file': u'/usr/local/lib/python2.7/dist-packages/basescript/basescript.py', u'fn': u'start', u'ln': 58, u'name': u'basescript.basescript'}, u'event': u'exited via keyboard interrupt', u'id': u'20180207T063700_4d03fe800bd111e89ecb96000007bc65', u'level': u'warning', u'timestamp': u'2018-02-07T06:37:00.297610Z', u'type': u'log'}, 'event': u'exited via keyboard interrupt', 'id': u'20180207T063700_4d03fe800bd111e89ecb96000007bc65', 'level': u'warning', 'timestamp': u'2018-02-07T06:37:00.297610Z', 'type': u'log'}
[ ">>>", "import", "pprint", ">>>", "input_line", "=", "{", "level", ":", "warning", "timestamp", ":", "2018", "-", "02", "-", "07T06", ":", "37", ":", "00", ".", "297610Z", "event", ":", "exited", "via", "keyboard", "interrupt", "type", ":", "log", "id"...
python
train
pyblish/pyblish-nuke
pyblish_nuke/lib.py
https://github.com/pyblish/pyblish-nuke/blob/5fbd766774e999e5e3015201094a07a92d800c4f/pyblish_nuke/lib.py#L55-L72
def show(): """Try showing the most desirable GUI This function cycles through the currently registered graphical user interfaces, if any, and presents it to the user. """ parent = None current = QtWidgets.QApplication.activeWindow() while current: parent = current current = parent.parent() window = (_discover_gui() or _show_no_gui)(parent) return window
[ "def", "show", "(", ")", ":", "parent", "=", "None", "current", "=", "QtWidgets", ".", "QApplication", ".", "activeWindow", "(", ")", "while", "current", ":", "parent", "=", "current", "current", "=", "parent", ".", "parent", "(", ")", "window", "=", "...
Try showing the most desirable GUI This function cycles through the currently registered graphical user interfaces, if any, and presents it to the user.
[ "Try", "showing", "the", "most", "desirable", "GUI" ]
python
train
cloudera/cm_api
python/src/cm_api/endpoints/events.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/events.py#L23-L33
def query_events(resource_root, query_str=None): """ Search for events. @param query_str: Query string. @return: A list of ApiEvent. """ params = None if query_str: params = dict(query=query_str) return call(resource_root.get, EVENTS_PATH, ApiEventQueryResult, params=params)
[ "def", "query_events", "(", "resource_root", ",", "query_str", "=", "None", ")", ":", "params", "=", "None", "if", "query_str", ":", "params", "=", "dict", "(", "query", "=", "query_str", ")", "return", "call", "(", "resource_root", ".", "get", ",", "EVE...
Search for events. @param query_str: Query string. @return: A list of ApiEvent.
[ "Search", "for", "events", "." ]
python
train
avelino/bottle-auth
bottle_auth/core/auth.py
https://github.com/avelino/bottle-auth/blob/db07e526864aeac05ee68444b47e5db29540ce18/bottle_auth/core/auth.py#L150-L166
def get_argument(self, name, default=_ARG_DEFAULT, strip=True): """Returns the value of the argument with the given name. If default is not provided, the argument is considered to be required, and we throw an HTTP 400 exception if it is missing. If the argument appears in the url more than once, we return the last value. The returned value is always unicode. """ args = self.get_arguments(name, strip=strip) if not args: if default is self._ARG_DEFAULT: raise HTTPError(400, "Missing argument %s" % name) return default return args[-1]
[ "def", "get_argument", "(", "self", ",", "name", ",", "default", "=", "_ARG_DEFAULT", ",", "strip", "=", "True", ")", ":", "args", "=", "self", ".", "get_arguments", "(", "name", ",", "strip", "=", "strip", ")", "if", "not", "args", ":", "if", "defau...
Returns the value of the argument with the given name. If default is not provided, the argument is considered to be required, and we throw an HTTP 400 exception if it is missing. If the argument appears in the url more than once, we return the last value. The returned value is always unicode.
[ "Returns", "the", "value", "of", "the", "argument", "with", "the", "given", "name", "." ]
python
test
SBRG/ssbio
ssbio/core/protein.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L1438-L1455
def _get_seqprop_to_seqprop_alignment(self, seqprop1, seqprop2): """Return the alignment stored in self.sequence_alignments given a seqprop + another seqprop""" if isinstance(seqprop1, str): seqprop1_id = seqprop1 else: seqprop1_id = seqprop1.id if isinstance(seqprop2, str): seqprop2_id = seqprop2 else: seqprop2_id = seqprop2.id aln_id = '{}_{}'.format(seqprop1_id, seqprop2_id) if self.sequence_alignments.has_id(aln_id): alignment = self.sequence_alignments.get_by_id(aln_id) return alignment else: raise ValueError('{}: sequence alignment not found, please run the alignment first'.format(aln_id))
[ "def", "_get_seqprop_to_seqprop_alignment", "(", "self", ",", "seqprop1", ",", "seqprop2", ")", ":", "if", "isinstance", "(", "seqprop1", ",", "str", ")", ":", "seqprop1_id", "=", "seqprop1", "else", ":", "seqprop1_id", "=", "seqprop1", ".", "id", "if", "isi...
Return the alignment stored in self.sequence_alignments given a seqprop + another seqprop
[ "Return", "the", "alignment", "stored", "in", "self", ".", "sequence_alignments", "given", "a", "seqprop", "+", "another", "seqprop" ]
python
train
klahnakoski/pyLibrary
jx_python/containers/cube.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_python/containers/cube.py#L260-L279
def forall(self, method): """ TODO: I AM NOT HAPPY THAT THIS WILL NOT WORK WELL WITH WINDOW FUNCTIONS THE parts GIVE NO INDICATION OF NEXT ITEM OR PREVIOUS ITEM LIKE rownum DOES. MAYBE ALGEBRAIC EDGES SHOULD BE LOOPED DIFFERENTLY? ON THE OTHER HAND, MAYBE WINDOW FUNCTIONS ARE RESPONSIBLE FOR THIS COMPLICATION MAR 2015: THE ISSUE IS parts, IT SHOULD BE coord INSTEAD IT IS EXPECTED THE method ACCEPTS (value, coord, cube), WHERE value - VALUE FOUND AT ELEMENT parts - THE ONE PART CORRESPONDING TO EACH EDGE cube - THE WHOLE CUBE, FOR USE IN WINDOW FUNCTIONS """ if not self.is_value: Log.error("Not dealing with this case yet") matrix = self.data.values()[0] parts = [e.domain.partitions for e in self.edges] for c in matrix._all_combos(): method(matrix[c], [parts[i][cc] for i, cc in enumerate(c)], self)
[ "def", "forall", "(", "self", ",", "method", ")", ":", "if", "not", "self", ".", "is_value", ":", "Log", ".", "error", "(", "\"Not dealing with this case yet\"", ")", "matrix", "=", "self", ".", "data", ".", "values", "(", ")", "[", "0", "]", "parts", ...
TODO: I AM NOT HAPPY THAT THIS WILL NOT WORK WELL WITH WINDOW FUNCTIONS THE parts GIVE NO INDICATION OF NEXT ITEM OR PREVIOUS ITEM LIKE rownum DOES. MAYBE ALGEBRAIC EDGES SHOULD BE LOOPED DIFFERENTLY? ON THE OTHER HAND, MAYBE WINDOW FUNCTIONS ARE RESPONSIBLE FOR THIS COMPLICATION MAR 2015: THE ISSUE IS parts, IT SHOULD BE coord INSTEAD IT IS EXPECTED THE method ACCEPTS (value, coord, cube), WHERE value - VALUE FOUND AT ELEMENT parts - THE ONE PART CORRESPONDING TO EACH EDGE cube - THE WHOLE CUBE, FOR USE IN WINDOW FUNCTIONS
[ "TODO", ":", "I", "AM", "NOT", "HAPPY", "THAT", "THIS", "WILL", "NOT", "WORK", "WELL", "WITH", "WINDOW", "FUNCTIONS", "THE", "parts", "GIVE", "NO", "INDICATION", "OF", "NEXT", "ITEM", "OR", "PREVIOUS", "ITEM", "LIKE", "rownum", "DOES", ".", "MAYBE", "ALG...
python
train
totalgood/pugnlp
src/pugnlp/plots.py
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/plots.py#L291-L293
def save(self, filename): """ save colormap to file""" plt.savefig(filename, fig=self.fig, facecolor='black', edgecolor='black')
[ "def", "save", "(", "self", ",", "filename", ")", ":", "plt", ".", "savefig", "(", "filename", ",", "fig", "=", "self", ".", "fig", ",", "facecolor", "=", "'black'", ",", "edgecolor", "=", "'black'", ")" ]
save colormap to file
[ "save", "colormap", "to", "file" ]
python
train
cytoscape/py2cytoscape
py2cytoscape/cyrest/ui.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/ui.py#L69-L81
def updateLodState(self, verbose=None): """ Switch between full graphics details <---> fast rendering mode. Returns a success message. :param verbose: print more :returns: 200: successful operation """ response=api(url=self.___url+'ui/lod', method="PUT", verbose=verbose) return response
[ "def", "updateLodState", "(", "self", ",", "verbose", "=", "None", ")", ":", "response", "=", "api", "(", "url", "=", "self", ".", "___url", "+", "'ui/lod'", ",", "method", "=", "\"PUT\"", ",", "verbose", "=", "verbose", ")", "return", "response" ]
Switch between full graphics details <---> fast rendering mode. Returns a success message. :param verbose: print more :returns: 200: successful operation
[ "Switch", "between", "full", "graphics", "details", "<", "---", ">", "fast", "rendering", "mode", ".", "Returns", "a", "success", "message", "." ]
python
train
KarchinLab/probabilistic2020
prob2020/python/gene_sequence.py
https://github.com/KarchinLab/probabilistic2020/blob/5d70583b0a7c07cfe32e95f3a70e05df412acb84/prob2020/python/gene_sequence.py#L34-L60
def add_germline_variants(self, germline_nucs, coding_pos): """Add potential germline variants into the nucleotide sequence. Sequenced individuals may potentially have a SNP at a somatic mutation position. Therefore they may differ from the reference genome. This method updates the gene germline gene sequence to match the actual individual. Parameters ---------- germline_nucs : list of str list of DNA nucleotides containing the germline letter coding_pos : int 0-based nucleotide position in coding sequence NOTE: the self.exon_seq attribute is updated, no return value """ if len(germline_nucs) != len(coding_pos): raise ValueError('Each germline nucleotide should have a coding position') es = list(self.exon_seq) for i in range(len(germline_nucs)): gl_nuc, cpos = germline_nucs[i].upper(), coding_pos[i] if not utils.is_valid_nuc(gl_nuc): raise ValueError('{0} is not a valid nucleotide'.format(gl_nuc)) if cpos >= 0: es[cpos] = gl_nuc self.exon_seq = ''.join(es)
[ "def", "add_germline_variants", "(", "self", ",", "germline_nucs", ",", "coding_pos", ")", ":", "if", "len", "(", "germline_nucs", ")", "!=", "len", "(", "coding_pos", ")", ":", "raise", "ValueError", "(", "'Each germline nucleotide should have a coding position'", ...
Add potential germline variants into the nucleotide sequence. Sequenced individuals may potentially have a SNP at a somatic mutation position. Therefore they may differ from the reference genome. This method updates the gene germline gene sequence to match the actual individual. Parameters ---------- germline_nucs : list of str list of DNA nucleotides containing the germline letter coding_pos : int 0-based nucleotide position in coding sequence NOTE: the self.exon_seq attribute is updated, no return value
[ "Add", "potential", "germline", "variants", "into", "the", "nucleotide", "sequence", "." ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L4060-L4083
def ekfind(query, lenout=_default_len_out): """ Find E-kernel data that satisfy a set of constraints. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekfind_c.html :param query: Query specifying data to be found. :type query: str :param lenout: Declared length of output error message string. :type lenout: int :return: Number of matching rows, Flag indicating whether query parsed correctly, Parse error description. :rtype: tuple """ query = stypes.stringToCharP(query) lenout = ctypes.c_int(lenout) nmrows = ctypes.c_int() error = ctypes.c_int() errmsg = stypes.stringToCharP(lenout) libspice.ekfind_c(query, lenout, ctypes.byref(nmrows), ctypes.byref(error), errmsg) return nmrows.value, error.value, stypes.toPythonString(errmsg)
[ "def", "ekfind", "(", "query", ",", "lenout", "=", "_default_len_out", ")", ":", "query", "=", "stypes", ".", "stringToCharP", "(", "query", ")", "lenout", "=", "ctypes", ".", "c_int", "(", "lenout", ")", "nmrows", "=", "ctypes", ".", "c_int", "(", ")"...
Find E-kernel data that satisfy a set of constraints. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekfind_c.html :param query: Query specifying data to be found. :type query: str :param lenout: Declared length of output error message string. :type lenout: int :return: Number of matching rows, Flag indicating whether query parsed correctly, Parse error description. :rtype: tuple
[ "Find", "E", "-", "kernel", "data", "that", "satisfy", "a", "set", "of", "constraints", "." ]
python
train
mwickert/scikit-dsp-comm
sk_dsp_comm/digitalcom.py
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/digitalcom.py#L619-L686
def QPSK_BEP(tx_data,rx_data,Ncorr = 1024,Ntransient = 0): """ Count bit errors between a transmitted and received QPSK signal. Time delay between streams is detected as well as ambiquity resolution due to carrier phase lock offsets of :math:`k*\\frac{\\pi}{4}`, k=0,1,2,3. The ndarray sdata is Tx +/-1 symbols as complex numbers I + j*Q. The ndarray data is Rx +/-1 symbols as complex numbers I + j*Q. Note: Ncorr needs to be even """ #Remove Ntransient symbols tx_data = tx_data[Ntransient:] rx_data = rx_data[Ntransient:] #Correlate the first Ncorr symbols at four possible phase rotations R0 = np.fft.ifft(np.fft.fft(rx_data,Ncorr)* np.conj(np.fft.fft(tx_data,Ncorr))) R1 = np.fft.ifft(np.fft.fft(1j*rx_data,Ncorr)* np.conj(np.fft.fft(tx_data,Ncorr))) R2 = np.fft.ifft(np.fft.fft(-1*rx_data,Ncorr)* np.conj(np.fft.fft(tx_data,Ncorr))) R3 = np.fft.ifft(np.fft.fft(-1j*rx_data,Ncorr)* np.conj(np.fft.fft(tx_data,Ncorr))) #Place the zero lag value in the center of the array R0 = np.fft.fftshift(R0) R1 = np.fft.fftshift(R1) R2 = np.fft.fftshift(R2) R3 = np.fft.fftshift(R3) R0max = np.max(R0.real) R1max = np.max(R1.real) R2max = np.max(R2.real) R3max = np.max(R3.real) R = np.array([R0max,R1max,R2max,R3max]) Rmax = np.max(R) kphase_max = np.where(R == Rmax)[0] kmax = kphase_max[0] #Correlation lag value is zero at the center of the array if kmax == 0: lagmax = np.where(R0.real == Rmax)[0] - Ncorr/2 elif kmax == 1: lagmax = np.where(R1.real == Rmax)[0] - Ncorr/2 elif kmax == 2: lagmax = np.where(R2.real == Rmax)[0] - Ncorr/2 elif kmax == 3: lagmax = np.where(R3.real == Rmax)[0] - Ncorr/2 taumax = lagmax[0] print('kmax = %d, taumax = %d' % (kmax, taumax)) # Count bit and symbol errors over the entire input ndarrays # Begin by making tx and rx length equal and apply phase rotation to rx if taumax < 0: tx_data = tx_data[-taumax:] tx_data = tx_data[:min(len(tx_data),len(rx_data))] rx_data = 1j**kmax*rx_data[:len(tx_data)] else: rx_data = 1j**kmax*rx_data[taumax:] rx_data = 
rx_data[:min(len(tx_data),len(rx_data))] tx_data = tx_data[:len(rx_data)] #Convert to 0's and 1's S_count = len(tx_data) tx_I = np.int16((tx_data.real + 1)/2) tx_Q = np.int16((tx_data.imag + 1)/2) rx_I = np.int16((rx_data.real + 1)/2) rx_Q = np.int16((rx_data.imag + 1)/2) I_errors = tx_I ^ rx_I Q_errors = tx_Q ^ rx_Q #A symbol errors occurs when I or Q or both are in error S_errors = I_errors | Q_errors #return 0 return S_count,np.sum(I_errors),np.sum(Q_errors),np.sum(S_errors)
[ "def", "QPSK_BEP", "(", "tx_data", ",", "rx_data", ",", "Ncorr", "=", "1024", ",", "Ntransient", "=", "0", ")", ":", "#Remove Ntransient symbols", "tx_data", "=", "tx_data", "[", "Ntransient", ":", "]", "rx_data", "=", "rx_data", "[", "Ntransient", ":", "]...
Count bit errors between a transmitted and received QPSK signal. Time delay between streams is detected as well as ambiquity resolution due to carrier phase lock offsets of :math:`k*\\frac{\\pi}{4}`, k=0,1,2,3. The ndarray sdata is Tx +/-1 symbols as complex numbers I + j*Q. The ndarray data is Rx +/-1 symbols as complex numbers I + j*Q. Note: Ncorr needs to be even
[ "Count", "bit", "errors", "between", "a", "transmitted", "and", "received", "QPSK", "signal", ".", "Time", "delay", "between", "streams", "is", "detected", "as", "well", "as", "ambiquity", "resolution", "due", "to", "carrier", "phase", "lock", "offsets", "of",...
python
valid
raphaelm/python-fints
fints/parser.py
https://github.com/raphaelm/python-fints/blob/fee55ae37d3182d0adb40507d4acb98b06057e4a/fints/parser.py#L145-L154
def parse_message(self, data: bytes) -> SegmentSequence: """Takes a FinTS 3.0 message as byte array, and returns a parsed segment sequence""" if isinstance(data, bytes): data = self.explode_segments(data) message = SegmentSequence() for segment in data: seg = self.parse_segment(segment) message.segments.append(seg) return message
[ "def", "parse_message", "(", "self", ",", "data", ":", "bytes", ")", "->", "SegmentSequence", ":", "if", "isinstance", "(", "data", ",", "bytes", ")", ":", "data", "=", "self", ".", "explode_segments", "(", "data", ")", "message", "=", "SegmentSequence", ...
Takes a FinTS 3.0 message as byte array, and returns a parsed segment sequence
[ "Takes", "a", "FinTS", "3", ".", "0", "message", "as", "byte", "array", "and", "returns", "a", "parsed", "segment", "sequence" ]
python
train
django-fluent/fluentcms-emailtemplates
fluentcms_emailtemplates/rendering.py
https://github.com/django-fluent/fluentcms-emailtemplates/blob/29f032dab9f60d05db852d2a1adcbd16e18017d1/fluentcms_emailtemplates/rendering.py#L65-L157
def replace_fields(text, context, autoescape=None, errors='inline'): """ Allow simple field replacements, using the python str.format() syntax. When a string is passed that is tagged with :func:`~django.utils.safestring.mark_safe`, the context variables will be escaped before replacement. This function is used instead of lazily using Django templates, which can also the {% load %} stuff and {% include %} things. """ raise_errors = errors == 'raise' ignore_errors = errors == 'ignore' inline_errors = errors == 'inline' if autoescape is None: # When passing a real template context, use it's autoescape setting. # Otherwise, default to true. autoescape = getattr(context, 'autoescape', True) is_safe_string = isinstance(text, SafeData) if is_safe_string and autoescape: escape_function = conditional_escape escape_error = lambda x: u"<span style='color:red;'>{0}</span>".format(x) else: escape_function = force_text escape_error = six.text_type # Using str.format() may raise a KeyError when some fields are not provided. # Instead, simulate its' behavior to make sure all items that were found will be replaced. start = 0 new_text = [] for match in RE_FORMAT.finditer(text): new_text.append(text[start:match.start()]) start = match.end() # See if the element was found key = match.group('var') try: value = context[key] except KeyError: logger.debug("Missing key %s in email template %s!", key, match.group(0)) if raise_errors: raise elif ignore_errors: new_text.append(match.group(0)) elif inline_errors: new_text.append(escape_error("!!missing {0}!!".format(key))) continue # See if further processing is needed. 
attr = match.group('attr') if attr: try: value = getattr(value, attr) except AttributeError: logger.debug("Missing attribute %s in email template %s!", attr, match.group(0)) if raise_errors: raise elif ignore_errors: new_text.append(match.group(0)) elif inline_errors: new_text.append(escape_error("!!invalid attribute {0}.{1}!!".format(key, attr))) continue format = match.group('format') if format: try: template = u"{0" + format + "}" value = template.format(value) except ValueError: logger.debug("Invalid format %s in email template %s!", format, match.group(0)) if raise_errors: raise elif ignore_errors: new_text.append(match.group(0)) elif inline_errors: new_text.append(escape_error("!!invalid format {0}!!".format(format))) continue else: value = escape_function(value) # Add the value new_text.append(value) # Add remainder, and join new_text.append(text[start:]) new_text = u"".join(new_text) # Convert back to safestring if it was passed that way if is_safe_string: return mark_safe(new_text) else: return new_text
[ "def", "replace_fields", "(", "text", ",", "context", ",", "autoescape", "=", "None", ",", "errors", "=", "'inline'", ")", ":", "raise_errors", "=", "errors", "==", "'raise'", "ignore_errors", "=", "errors", "==", "'ignore'", "inline_errors", "=", "errors", ...
Allow simple field replacements, using the python str.format() syntax. When a string is passed that is tagged with :func:`~django.utils.safestring.mark_safe`, the context variables will be escaped before replacement. This function is used instead of lazily using Django templates, which can also the {% load %} stuff and {% include %} things.
[ "Allow", "simple", "field", "replacements", "using", "the", "python", "str", ".", "format", "()", "syntax", "." ]
python
train
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L412-L434
def validate_value_string (f, value_string): """ Checks that value-string is a valid value-string for the given feature. """ assert isinstance(f, Feature) assert isinstance(value_string, basestring) if f.free or value_string in f.values: return values = [value_string] if f.subfeatures: if not value_string in f.values and \ not value_string in f.subfeatures: values = value_string.split('-') # An empty value is allowed for optional features if not values[0] in f.values and \ (values[0] or not f.optional): raise InvalidValue ("'%s' is not a known value of feature '%s'\nlegal values: '%s'" % (values [0], f.name, f.values)) for v in values [1:]: # this will validate any subfeature values in value-string implied_subfeature(f, v, values[0])
[ "def", "validate_value_string", "(", "f", ",", "value_string", ")", ":", "assert", "isinstance", "(", "f", ",", "Feature", ")", "assert", "isinstance", "(", "value_string", ",", "basestring", ")", "if", "f", ".", "free", "or", "value_string", "in", "f", "....
Checks that value-string is a valid value-string for the given feature.
[ "Checks", "that", "value", "-", "string", "is", "a", "valid", "value", "-", "string", "for", "the", "given", "feature", "." ]
python
train
Diaoul/subliminal
subliminal/core.py
https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/core.py#L90-L122
def list_subtitles_provider(self, provider, video, languages): """List subtitles with a single provider. The video and languages are checked against the provider. :param str provider: name of the provider. :param video: video to list subtitles for. :type video: :class:`~subliminal.video.Video` :param languages: languages to search for. :type languages: set of :class:`~babelfish.language.Language` :return: found subtitles. :rtype: list of :class:`~subliminal.subtitle.Subtitle` or None """ # check video validity if not provider_manager[provider].plugin.check(video): logger.info('Skipping provider %r: not a valid video', provider) return [] # check supported languages provider_languages = provider_manager[provider].plugin.languages & languages if not provider_languages: logger.info('Skipping provider %r: no language to search for', provider) return [] # list subtitles logger.info('Listing subtitles with provider %r and languages %r', provider, provider_languages) try: return self[provider].list_subtitles(video, provider_languages) except (requests.Timeout, socket.timeout): logger.error('Provider %r timed out', provider) except: logger.exception('Unexpected error in provider %r', provider)
[ "def", "list_subtitles_provider", "(", "self", ",", "provider", ",", "video", ",", "languages", ")", ":", "# check video validity", "if", "not", "provider_manager", "[", "provider", "]", ".", "plugin", ".", "check", "(", "video", ")", ":", "logger", ".", "in...
List subtitles with a single provider. The video and languages are checked against the provider. :param str provider: name of the provider. :param video: video to list subtitles for. :type video: :class:`~subliminal.video.Video` :param languages: languages to search for. :type languages: set of :class:`~babelfish.language.Language` :return: found subtitles. :rtype: list of :class:`~subliminal.subtitle.Subtitle` or None
[ "List", "subtitles", "with", "a", "single", "provider", "." ]
python
train
MIT-LCP/wfdb-python
wfdb/io/_signal.py
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L1798-L1824
def _infer_sig_len(file_name, fmt, n_sig, dir_name, pb_dir=None): """ Infer the length of a signal from a dat file. Parameters ---------- file_name : str Name of the dat file fmt : str WFDB fmt of the dat file n_sig : int Number of signals contained in the dat file Notes ----- sig_len * n_sig * bytes_per_sample == file_size """ if pb_dir is None: file_size = os.path.getsize(os.path.join(dir_name, file_name)) else: file_size = download._remote_file_size(file_name=file_name, pb_dir=pb_dir) sig_len = int(file_size / (BYTES_PER_SAMPLE[fmt] * n_sig)) return sig_len
[ "def", "_infer_sig_len", "(", "file_name", ",", "fmt", ",", "n_sig", ",", "dir_name", ",", "pb_dir", "=", "None", ")", ":", "if", "pb_dir", "is", "None", ":", "file_size", "=", "os", ".", "path", ".", "getsize", "(", "os", ".", "path", ".", "join", ...
Infer the length of a signal from a dat file. Parameters ---------- file_name : str Name of the dat file fmt : str WFDB fmt of the dat file n_sig : int Number of signals contained in the dat file Notes ----- sig_len * n_sig * bytes_per_sample == file_size
[ "Infer", "the", "length", "of", "a", "signal", "from", "a", "dat", "file", "." ]
python
train
fermiPy/fermipy
fermipy/scripts/coadd.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/scripts/coadd.py#L12-L37
def main(): """ Main function for command line usage """ usage = "usage: %(prog)s [options] " description = "Merge a set of Fermi-LAT files." parser = argparse.ArgumentParser(usage=usage, description=description) parser.add_argument('-o', '--output', default=None, type=str, help='Output file.') parser.add_argument('--clobber', default=False, action='store_true', help='Overwrite output file.') parser.add_argument('files', nargs='+', default=None, help='List of input files.') args = parser.parse_args() proj, f, hdu = fits_utils.read_projection_from_fits(args.files[0]) if isinstance(proj, WCS): hdulist = merge_utils.merge_wcs_counts_cubes(args.files) elif isinstance(proj, HPX): hdulist = merge_utils.merge_hpx_counts_cubes(args.files) else: raise TypeError("Could not read projection from file %s" % args.files[0]) if args.output: hdulist.writeto(args.output, clobber=args.clobber, output_verify='silentfix')
[ "def", "main", "(", ")", ":", "usage", "=", "\"usage: %(prog)s [options] \"", "description", "=", "\"Merge a set of Fermi-LAT files.\"", "parser", "=", "argparse", ".", "ArgumentParser", "(", "usage", "=", "usage", ",", "description", "=", "description", ")", "parse...
Main function for command line usage
[ "Main", "function", "for", "command", "line", "usage" ]
python
train
proycon/pynlpl
pynlpl/common.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/common.py#L98-L136
def log(msg, **kwargs): """Generic log method. Will prepend timestamp. Keyword arguments: system - Name of the system/module indent - Integer denoting the desired level of indentation streams - List of streams to output to stream - Stream to output to (singleton version of streams) """ if 'debug' in kwargs: if 'currentdebug' in kwargs: if kwargs['currentdebug'] < kwargs['debug']: return False else: return False #no currentdebug passed, assuming no debug mode and thus skipping message s = "[" + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "] " if 'system' in kwargs: s += "[" + system + "] " if 'indent' in kwargs: s += ("\t" * int(kwargs['indent'])) s += u(msg) if s[-1] != '\n': s += '\n' if 'streams' in kwargs: streams = kwargs['streams'] elif 'stream' in kwargs: streams = [kwargs['stream']] else: streams = [stderr] for stream in streams: stream.write(s) return s
[ "def", "log", "(", "msg", ",", "*", "*", "kwargs", ")", ":", "if", "'debug'", "in", "kwargs", ":", "if", "'currentdebug'", "in", "kwargs", ":", "if", "kwargs", "[", "'currentdebug'", "]", "<", "kwargs", "[", "'debug'", "]", ":", "return", "False", "e...
Generic log method. Will prepend timestamp. Keyword arguments: system - Name of the system/module indent - Integer denoting the desired level of indentation streams - List of streams to output to stream - Stream to output to (singleton version of streams)
[ "Generic", "log", "method", ".", "Will", "prepend", "timestamp", "." ]
python
train
TheHive-Project/Cortex-Analyzers
analyzers/MaxMind/ipaddr.py
https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/MaxMind/ipaddr.py#L1532-L1557
def _string_from_ip_int(self, ip_int=None): """Turns a 128-bit integer into hexadecimal notation. Args: ip_int: An integer, the IP address. Returns: A string, the hexadecimal representation of the address. Raises: ValueError: The address is bigger than 128 bits of all ones. """ if not ip_int and ip_int != 0: ip_int = int(self._ip) if ip_int > self._ALL_ONES: raise ValueError('IPv6 address is too large') hex_str = '%032x' % ip_int hextets = [] for x in range(0, 32, 4): hextets.append('%x' % int(hex_str[x:x+4], 16)) hextets = self._compress_hextets(hextets) return ':'.join(hextets)
[ "def", "_string_from_ip_int", "(", "self", ",", "ip_int", "=", "None", ")", ":", "if", "not", "ip_int", "and", "ip_int", "!=", "0", ":", "ip_int", "=", "int", "(", "self", ".", "_ip", ")", "if", "ip_int", ">", "self", ".", "_ALL_ONES", ":", "raise", ...
Turns a 128-bit integer into hexadecimal notation. Args: ip_int: An integer, the IP address. Returns: A string, the hexadecimal representation of the address. Raises: ValueError: The address is bigger than 128 bits of all ones.
[ "Turns", "a", "128", "-", "bit", "integer", "into", "hexadecimal", "notation", "." ]
python
train
ArchiveTeam/wpull
wpull/network/dns.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/network/dns.py#L156-L238
def resolve(self, host: str) -> ResolveResult: '''Resolve hostname. Args: host: Hostname. Returns: Resolved IP addresses. Raises: DNSNotFound if the hostname could not be resolved or NetworkError if there was an error connecting to DNS servers. Coroutine. ''' _logger.debug(__('Lookup address {0}.', host)) try: host = self.hook_dispatcher.call(PluginFunctions.resolve_dns, host ) or host except HookDisconnected: pass cache_key = (host, self._family) if self._cache and cache_key in self._cache: resolve_result = self._cache[cache_key] _logger.debug(__('Return by cache {0}.', resolve_result)) if self._rotate: resolve_result.rotate() return resolve_result address_infos = [] dns_infos = [] if not self.dns_python_enabled: families = () elif self._family == IPFamilyPreference.any: families = (socket.AF_INET, socket.AF_INET6) elif self._family == IPFamilyPreference.ipv4_only: families = (socket.AF_INET, ) else: families = (socket.AF_INET6, ) for family in families: datetime_now = datetime.datetime.utcnow() try: answer = yield from self._query_dns(host, family) except DNSNotFound: continue else: dns_infos.append(DNSInfo(datetime_now, answer.response.answer)) address_infos.extend(self._convert_dns_answer(answer)) if not address_infos: # Maybe the address is defined in hosts file or mDNS if self._family == IPFamilyPreference.any: family = socket.AF_UNSPEC elif self._family == IPFamilyPreference.ipv4_only: family = socket.AF_INET else: family = socket.AF_INET6 results = yield from self._getaddrinfo(host, family) address_infos.extend(self._convert_addrinfo(results)) _logger.debug(__('Resolved addresses: {0}.', address_infos)) resolve_result = ResolveResult(address_infos, dns_infos) if self._cache: self._cache[cache_key] = resolve_result self.event_dispatcher.notify(PluginFunctions.resolve_dns_result, host, resolve_result) if self._rotate: resolve_result.shuffle() return resolve_result
[ "def", "resolve", "(", "self", ",", "host", ":", "str", ")", "->", "ResolveResult", ":", "_logger", ".", "debug", "(", "__", "(", "'Lookup address {0}.'", ",", "host", ")", ")", "try", ":", "host", "=", "self", ".", "hook_dispatcher", ".", "call", "(",...
Resolve hostname. Args: host: Hostname. Returns: Resolved IP addresses. Raises: DNSNotFound if the hostname could not be resolved or NetworkError if there was an error connecting to DNS servers. Coroutine.
[ "Resolve", "hostname", "." ]
python
train
ffalcinelli/pydivert
pydivert/packet/__init__.py
https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/packet/__init__.py#L186-L193
def icmpv6(self): """ - An ICMPv6Header instance, if the packet is valid ICMPv6. - None, otherwise. """ ipproto, proto_start = self.protocol if ipproto == Protocol.ICMPV6: return ICMPv6Header(self, proto_start)
[ "def", "icmpv6", "(", "self", ")", ":", "ipproto", ",", "proto_start", "=", "self", ".", "protocol", "if", "ipproto", "==", "Protocol", ".", "ICMPV6", ":", "return", "ICMPv6Header", "(", "self", ",", "proto_start", ")" ]
- An ICMPv6Header instance, if the packet is valid ICMPv6. - None, otherwise.
[ "-", "An", "ICMPv6Header", "instance", "if", "the", "packet", "is", "valid", "ICMPv6", ".", "-", "None", "otherwise", "." ]
python
train
numenta/htmresearch
htmresearch/algorithms/TM.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/TM.py#L2363-L2413
def freeNSynapses(self, numToFree, inactiveSynapseIndices, verbosity= 0): """Free up some synapses in this segment. We always free up inactive synapses (lowest permanence freed up first) before we start to free up active ones. @param numToFree number of synapses to free up @param inactiveSynapseIndices list of the inactive synapse indices. """ # Make sure numToFree isn't larger than the total number of syns we have assert (numToFree <= len(self.syns)) if (verbosity >= 4): print "\nIn PY freeNSynapses with numToFree =", numToFree, print "inactiveSynapseIndices =", for i in inactiveSynapseIndices: print self.syns[i][0:2], print # Remove the lowest perm inactive synapses first if len(inactiveSynapseIndices) > 0: perms = numpy.array([self.syns[i][2] for i in inactiveSynapseIndices]) candidates = numpy.array(inactiveSynapseIndices)[ perms.argsort()[0:numToFree]] candidates = list(candidates) else: candidates = [] # Do we need more? if so, remove the lowest perm active synapses too if len(candidates) < numToFree: activeSynIndices = [i for i in xrange(len(self.syns)) if i not in inactiveSynapseIndices] perms = numpy.array([self.syns[i][2] for i in activeSynIndices]) moreToFree = numToFree - len(candidates) moreCandidates = numpy.array(activeSynIndices)[ perms.argsort()[0:moreToFree]] candidates += list(moreCandidates) if verbosity >= 4: print "Deleting %d synapses from segment to make room for new ones:" % ( len(candidates)), candidates print "BEFORE:", self.printSegment() # Free up all the candidates now synsToDelete = [self.syns[i] for i in candidates] for syn in synsToDelete: self.syns.remove(syn) if verbosity >= 4: print "AFTER:", self.printSegment()
[ "def", "freeNSynapses", "(", "self", ",", "numToFree", ",", "inactiveSynapseIndices", ",", "verbosity", "=", "0", ")", ":", "# Make sure numToFree isn't larger than the total number of syns we have", "assert", "(", "numToFree", "<=", "len", "(", "self", ".", "syns", "...
Free up some synapses in this segment. We always free up inactive synapses (lowest permanence freed up first) before we start to free up active ones. @param numToFree number of synapses to free up @param inactiveSynapseIndices list of the inactive synapse indices.
[ "Free", "up", "some", "synapses", "in", "this", "segment", ".", "We", "always", "free", "up", "inactive", "synapses", "(", "lowest", "permanence", "freed", "up", "first", ")", "before", "we", "start", "to", "free", "up", "active", "ones", "." ]
python
train
ucbvislab/radiotool
radiotool/composer/song.py
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/song.py#L25-L43
def analysis(self): """Get musical analysis of the song using the librosa library """ if self._analysis is not None: return self._analysis if self.cache_dir is not None: path = os.path.join(self.cache_dir, self.checksum) try: if self.refresh_cache: raise IOError with open(path + '.pickle', 'rb') as pickle_file: self._analysis = pickle.load(pickle_file) except IOError: self._analysis = librosa_analysis.analyze_frames(self.all_as_mono(), self.samplerate) with open(path + '.pickle', 'wb') as pickle_file: pickle.dump(self._analysis, pickle_file, pickle.HIGHEST_PROTOCOL) else: self._analysis = librosa_analysis.analyze_frames(self.all_as_mono(), self.samplerate) return self._analysis
[ "def", "analysis", "(", "self", ")", ":", "if", "self", ".", "_analysis", "is", "not", "None", ":", "return", "self", ".", "_analysis", "if", "self", ".", "cache_dir", "is", "not", "None", ":", "path", "=", "os", ".", "path", ".", "join", "(", "sel...
Get musical analysis of the song using the librosa library
[ "Get", "musical", "analysis", "of", "the", "song", "using", "the", "librosa", "library" ]
python
train
mwgielen/jackal
jackal/scripts/relaying.py
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/relaying.py#L179-L192
def get_interface_name(): """ Returns the interface name of the first not link_local and not loopback interface. """ interface_name = '' interfaces = psutil.net_if_addrs() for name, details in interfaces.items(): for detail in details: if detail.family == socket.AF_INET: ip_address = ipaddress.ip_address(detail.address) if not (ip_address.is_link_local or ip_address.is_loopback): interface_name = name break return interface_name
[ "def", "get_interface_name", "(", ")", ":", "interface_name", "=", "''", "interfaces", "=", "psutil", ".", "net_if_addrs", "(", ")", "for", "name", ",", "details", "in", "interfaces", ".", "items", "(", ")", ":", "for", "detail", "in", "details", ":", "i...
Returns the interface name of the first not link_local and not loopback interface.
[ "Returns", "the", "interface", "name", "of", "the", "first", "not", "link_local", "and", "not", "loopback", "interface", "." ]
python
valid
nschloe/matplotlib2tikz
matplotlib2tikz/quadmesh.py
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/quadmesh.py#L8-L66
def draw_quadmesh(data, obj): """Returns the PGFPlots code for an graphics environment holding a rendering of the object. """ content = [] # Generate file name for current object filename, rel_filepath = files.new_filename(data, "img", ".png") # Get the dpi for rendering and store the original dpi of the figure dpi = data["dpi"] fig_dpi = obj.figure.get_dpi() obj.figure.set_dpi(dpi) # Render the object and save as png file from matplotlib.backends.backend_agg import RendererAgg cbox = obj.get_clip_box() width = int(round(cbox.extents[2])) height = int(round(cbox.extents[3])) ren = RendererAgg(width, height, dpi) obj.draw(ren) # Generate a image from the render buffer image = Image.frombuffer( "RGBA", ren.get_canvas_width_height(), ren.buffer_rgba(), "raw", "RGBA", 0, 1 ) # Crop the image to the actual content (removing the the regions otherwise # used for axes, etc.) # 'image.crop' expects the crop box to specify the left, upper, right, and # lower pixel. 'cbox.extents' gives the left, lower, right, and upper # pixel. box = ( int(round(cbox.extents[0])), 0, int(round(cbox.extents[2])), int(round(cbox.extents[3] - cbox.extents[1])), ) cropped = image.crop(box) cropped.save(filename) # Restore the original dpi of the figure obj.figure.set_dpi(fig_dpi) # write the corresponding information to the TikZ file extent = obj.axes.get_xlim() + obj.axes.get_ylim() # Explicitly use \pgfimage as includegrapics command, as the default # \includegraphics fails unexpectedly in some cases ff = data["float format"] content.append( ( "\\addplot graphics [includegraphics cmd=\\pgfimage," "xmin=" + ff + ", xmax=" + ff + ", " "ymin=" + ff + ", ymax=" + ff + "] {{{}}};\n" ).format(*(extent + (rel_filepath,))) ) return data, content
[ "def", "draw_quadmesh", "(", "data", ",", "obj", ")", ":", "content", "=", "[", "]", "# Generate file name for current object", "filename", ",", "rel_filepath", "=", "files", ".", "new_filename", "(", "data", ",", "\"img\"", ",", "\".png\"", ")", "# Get the dpi ...
Returns the PGFPlots code for an graphics environment holding a rendering of the object.
[ "Returns", "the", "PGFPlots", "code", "for", "an", "graphics", "environment", "holding", "a", "rendering", "of", "the", "object", "." ]
python
train
bcb/jsonrpcserver
jsonrpcserver/dispatcher.py
https://github.com/bcb/jsonrpcserver/blob/26bb70e868f81691816cabfc4b60a83428842b2f/jsonrpcserver/dispatcher.py#L79-L81
def log_response(response: str, trim_log_values: bool = False, **kwargs: Any) -> None: """Log a response""" return log_(response, response_logger, logging.INFO, trim=trim_log_values, **kwargs)
[ "def", "log_response", "(", "response", ":", "str", ",", "trim_log_values", ":", "bool", "=", "False", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "None", ":", "return", "log_", "(", "response", ",", "response_logger", ",", "logging", ".", "INFO", ...
Log a response
[ "Log", "a", "response" ]
python
train
numenta/htmresearch
htmresearch/frameworks/sp_paper/sp_metrics.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L152-L167
def generateRandomSDR(numSDR, numDims, numActiveInputBits, seed=42): """ Generate a set of random SDR's @param numSDR: @param nDim: @param numActiveInputBits: """ randomSDRs = np.zeros((numSDR, numDims), dtype=uintType) indices = np.array(range(numDims)) np.random.seed(seed) for i in range(numSDR): randomIndices = np.random.permutation(indices) activeBits = randomIndices[:numActiveInputBits] randomSDRs[i, activeBits] = 1 return randomSDRs
[ "def", "generateRandomSDR", "(", "numSDR", ",", "numDims", ",", "numActiveInputBits", ",", "seed", "=", "42", ")", ":", "randomSDRs", "=", "np", ".", "zeros", "(", "(", "numSDR", ",", "numDims", ")", ",", "dtype", "=", "uintType", ")", "indices", "=", ...
Generate a set of random SDR's @param numSDR: @param nDim: @param numActiveInputBits:
[ "Generate", "a", "set", "of", "random", "SDR", "s" ]
python
train
ArchiveTeam/wpull
wpull/stats.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/stats.py#L56-L66
def increment(self, size: int): '''Increment the number of files downloaded. Args: size: The size of the file ''' assert size >= 0, size self.files += 1 self.size += size self.bandwidth_meter.feed(size)
[ "def", "increment", "(", "self", ",", "size", ":", "int", ")", ":", "assert", "size", ">=", "0", ",", "size", "self", ".", "files", "+=", "1", "self", ".", "size", "+=", "size", "self", ".", "bandwidth_meter", ".", "feed", "(", "size", ")" ]
Increment the number of files downloaded. Args: size: The size of the file
[ "Increment", "the", "number", "of", "files", "downloaded", "." ]
python
train
neherlab/treetime
treetime/treeanc.py
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeanc.py#L1621-L1632
def _store_compressed_sequence_pairs(self): """ Traverse the tree, and for each node store the compressed sequence pair. **Note** sequence reconstruction should be performed prior to calling this method. """ self.logger("TreeAnc._store_compressed_sequence_pairs...",2) for node in self.tree.find_clades(): if node.up is None: continue self._store_compressed_sequence_to_node(node) self.logger("TreeAnc._store_compressed_sequence_pairs...done",3)
[ "def", "_store_compressed_sequence_pairs", "(", "self", ")", ":", "self", ".", "logger", "(", "\"TreeAnc._store_compressed_sequence_pairs...\"", ",", "2", ")", "for", "node", "in", "self", ".", "tree", ".", "find_clades", "(", ")", ":", "if", "node", ".", "up"...
Traverse the tree, and for each node store the compressed sequence pair. **Note** sequence reconstruction should be performed prior to calling this method.
[ "Traverse", "the", "tree", "and", "for", "each", "node", "store", "the", "compressed", "sequence", "pair", ".", "**", "Note", "**", "sequence", "reconstruction", "should", "be", "performed", "prior", "to", "calling", "this", "method", "." ]
python
test
ibis-project/ibis
ibis/expr/api.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/api.py#L1300-L1330
def quantile(arg, quantile, interpolation='linear'): """ Return value at the given quantile, a la numpy.percentile. Parameters ---------- quantile : float/int or array-like 0 <= quantile <= 1, the quantile(s) to compute interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- quantile if scalar input, scalar type, same as input if array input, list of scalar type """ if isinstance(quantile, collections.abc.Sequence): op = ops.MultiQuantile(arg, quantile, interpolation) else: op = ops.Quantile(arg, quantile, interpolation) return op.to_expr()
[ "def", "quantile", "(", "arg", ",", "quantile", ",", "interpolation", "=", "'linear'", ")", ":", "if", "isinstance", "(", "quantile", ",", "collections", ".", "abc", ".", "Sequence", ")", ":", "op", "=", "ops", ".", "MultiQuantile", "(", "arg", ",", "q...
Return value at the given quantile, a la numpy.percentile. Parameters ---------- quantile : float/int or array-like 0 <= quantile <= 1, the quantile(s) to compute interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- quantile if scalar input, scalar type, same as input if array input, list of scalar type
[ "Return", "value", "at", "the", "given", "quantile", "a", "la", "numpy", ".", "percentile", "." ]
python
train
stevearc/dynamo3
dynamo3/connection.py
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L1099-L1178
def query(self, tablename, attributes=None, consistent=False, count=False, index=None, limit=None, desc=False, return_capacity=None, filter=None, filter_or=False, exclusive_start_key=None, **kwargs): """ Perform an index query on a table This uses the older version of the DynamoDB API. See also: :meth:`~.query2`. Parameters ---------- tablename : str Name of the table to query attributes : list If present, only fetch these attributes from the item consistent : bool, optional Perform a strongly consistent read of the data (default False) count : bool, optional If True, return a count of matched items instead of the items themselves (default False) index : str, optional The name of the index to query limit : int, optional Maximum number of items to return desc : bool, optional If True, return items in descending order (default False) return_capacity : {NONE, INDEXES, TOTAL}, optional INDEXES will return the consumed capacity for indexes, TOTAL will return the consumed capacity for the table and the indexes. (default NONE) filter : dict, optional Query arguments. Same format as **kwargs, but these arguments filter the results on the server before they are returned. They will NOT use an index, as that is what the **kwargs are for. filter_or : bool, optional If True, multiple filter args will be OR'd together. If False, they will be AND'd together. (default False) exclusive_start_key : dict, optional The ExclusiveStartKey to resume a previous query **kwargs : dict, optional Query arguments (examples below) Examples -------- You may pass in constraints using the Django-style '__' syntax. For example: .. 
code-block:: python connection.query('mytable', foo__eq=5) connection.query('mytable', foo__eq=5, bar__lt=22) connection.query('mytable', foo__eq=5, bar__between=(1, 10)) """ keywords = { 'TableName': tablename, 'ReturnConsumedCapacity': self._default_capacity(return_capacity), 'ConsistentRead': consistent, 'ScanIndexForward': not desc, 'KeyConditions': encode_query_kwargs(self.dynamizer, kwargs), } if attributes is not None: keywords['AttributesToGet'] = attributes if index is not None: keywords['IndexName'] = index if filter is not None: if len(filter) > 1: keywords['ConditionalOperator'] = 'OR' if filter_or else 'AND' keywords['QueryFilter'] = encode_query_kwargs(self.dynamizer, filter) if exclusive_start_key is not None: keywords['ExclusiveStartKey'] = \ self.dynamizer.maybe_encode_keys(exclusive_start_key) if not isinstance(limit, Limit): limit = Limit(limit) if count: keywords['Select'] = COUNT return self._count('query', limit, keywords) else: return ResultSet(self, limit, 'query', **keywords)
[ "def", "query", "(", "self", ",", "tablename", ",", "attributes", "=", "None", ",", "consistent", "=", "False", ",", "count", "=", "False", ",", "index", "=", "None", ",", "limit", "=", "None", ",", "desc", "=", "False", ",", "return_capacity", "=", ...
Perform an index query on a table This uses the older version of the DynamoDB API. See also: :meth:`~.query2`. Parameters ---------- tablename : str Name of the table to query attributes : list If present, only fetch these attributes from the item consistent : bool, optional Perform a strongly consistent read of the data (default False) count : bool, optional If True, return a count of matched items instead of the items themselves (default False) index : str, optional The name of the index to query limit : int, optional Maximum number of items to return desc : bool, optional If True, return items in descending order (default False) return_capacity : {NONE, INDEXES, TOTAL}, optional INDEXES will return the consumed capacity for indexes, TOTAL will return the consumed capacity for the table and the indexes. (default NONE) filter : dict, optional Query arguments. Same format as **kwargs, but these arguments filter the results on the server before they are returned. They will NOT use an index, as that is what the **kwargs are for. filter_or : bool, optional If True, multiple filter args will be OR'd together. If False, they will be AND'd together. (default False) exclusive_start_key : dict, optional The ExclusiveStartKey to resume a previous query **kwargs : dict, optional Query arguments (examples below) Examples -------- You may pass in constraints using the Django-style '__' syntax. For example: .. code-block:: python connection.query('mytable', foo__eq=5) connection.query('mytable', foo__eq=5, bar__lt=22) connection.query('mytable', foo__eq=5, bar__between=(1, 10))
[ "Perform", "an", "index", "query", "on", "a", "table" ]
python
train
cloud-custodian/cloud-custodian
tools/c7n_policystream/policystream.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_policystream/policystream.py#L493-L498
def flush(self): """flush any buffered messages""" buf = self.buf self.buf = [] if buf: self._flush(buf)
[ "def", "flush", "(", "self", ")", ":", "buf", "=", "self", ".", "buf", "self", ".", "buf", "=", "[", "]", "if", "buf", ":", "self", ".", "_flush", "(", "buf", ")" ]
flush any buffered messages
[ "flush", "any", "buffered", "messages" ]
python
train
wtsi-hgi/python-hgijson
hgijson/json_converters/automatic.py
https://github.com/wtsi-hgi/python-hgijson/blob/6e8ccb562eabcaa816a136268a16504c2e0d4664/hgijson/json_converters/automatic.py#L83-L91
def get_json_encoders_for_type(self, type_to_encode: type) -> Optional[Iterable[JSONEncoder]]: """ Gets the registered JSON encoder for the given type. :param type_to_encode: the type of object that is to be encoded :return: the encoder for the given object else `None` if unknown """ if type_to_encode not in self._json_encoders: return None return self._json_encoders[type_to_encode]
[ "def", "get_json_encoders_for_type", "(", "self", ",", "type_to_encode", ":", "type", ")", "->", "Optional", "[", "Iterable", "[", "JSONEncoder", "]", "]", ":", "if", "type_to_encode", "not", "in", "self", ".", "_json_encoders", ":", "return", "None", "return"...
Gets the registered JSON encoder for the given type. :param type_to_encode: the type of object that is to be encoded :return: the encoder for the given object else `None` if unknown
[ "Gets", "the", "registered", "JSON", "encoder", "for", "the", "given", "type", ".", ":", "param", "type_to_encode", ":", "the", "type", "of", "object", "that", "is", "to", "be", "encoded", ":", "return", ":", "the", "encoder", "for", "the", "given", "obj...
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_db.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L1006-L1018
def full_datatype_to_mysql(d: str) -> str: """Converts a full datatype, e.g. INT, VARCHAR(10), VARCHAR(MAX), to a MySQL equivalent.""" d = d.upper() (s, length) = split_long_sqltype(d) if d in ["VARCHAR(MAX)", "NVARCHAR(MAX)"]: # http://wiki.ispirer.com/sqlways/mysql/data-types/longtext return "LONGTEXT" elif d in ["VARBINARY(MAX)"] or s in ["IMAGE"]: # http://wiki.ispirer.com/sqlways/mysql/data-types/varbinary return "LONGBLOB" else: return d
[ "def", "full_datatype_to_mysql", "(", "d", ":", "str", ")", "->", "str", ":", "d", "=", "d", ".", "upper", "(", ")", "(", "s", ",", "length", ")", "=", "split_long_sqltype", "(", "d", ")", "if", "d", "in", "[", "\"VARCHAR(MAX)\"", ",", "\"NVARCHAR(MA...
Converts a full datatype, e.g. INT, VARCHAR(10), VARCHAR(MAX), to a MySQL equivalent.
[ "Converts", "a", "full", "datatype", "e", ".", "g", ".", "INT", "VARCHAR", "(", "10", ")", "VARCHAR", "(", "MAX", ")", "to", "a", "MySQL", "equivalent", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/structural/shared.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/shared.py#L243-L257
def find_existing_split_discordants(data): """Check for pre-calculated split reads and discordants done as part of alignment streaming. """ in_bam = dd.get_align_bam(data) sr_file = "%s-sr.bam" % os.path.splitext(in_bam)[0] disc_file = "%s-disc.bam" % os.path.splitext(in_bam)[0] if utils.file_exists(sr_file) and utils.file_exists(disc_file): return sr_file, disc_file else: sr_file = dd.get_sr_bam(data) disc_file = dd.get_disc_bam(data) if sr_file and utils.file_exists(sr_file) and disc_file and utils.file_exists(disc_file): return sr_file, disc_file else: return None, None
[ "def", "find_existing_split_discordants", "(", "data", ")", ":", "in_bam", "=", "dd", ".", "get_align_bam", "(", "data", ")", "sr_file", "=", "\"%s-sr.bam\"", "%", "os", ".", "path", ".", "splitext", "(", "in_bam", ")", "[", "0", "]", "disc_file", "=", "...
Check for pre-calculated split reads and discordants done as part of alignment streaming.
[ "Check", "for", "pre", "-", "calculated", "split", "reads", "and", "discordants", "done", "as", "part", "of", "alignment", "streaming", "." ]
python
train
chaoss/grimoirelab-perceval
perceval/backends/core/askbot.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/askbot.py#L307-L320
def get_html_question(self, question_id, page=1): """Retrieve a raw HTML question and all it's information. :param question_id: question identifier :param page: page to retrieve """ path = urijoin(self.base_url, self.HTML_QUESTION, question_id) params = { 'page': page, 'sort': self.ORDER_HTML } response = self.fetch(path, payload=params) return response.text
[ "def", "get_html_question", "(", "self", ",", "question_id", ",", "page", "=", "1", ")", ":", "path", "=", "urijoin", "(", "self", ".", "base_url", ",", "self", ".", "HTML_QUESTION", ",", "question_id", ")", "params", "=", "{", "'page'", ":", "page", "...
Retrieve a raw HTML question and all it's information. :param question_id: question identifier :param page: page to retrieve
[ "Retrieve", "a", "raw", "HTML", "question", "and", "all", "it", "s", "information", "." ]
python
test
zyga/guacamole
guacamole/recipes/__init__.py
https://github.com/zyga/guacamole/blob/105c10a798144e3b89659b500d7c2b84b0c76546/guacamole/recipes/__init__.py#L89-L131
def main(self, argv=None, exit=True): """ Shortcut to prepare a bowl of guacamole and eat it. :param argv: Command line arguments or None. None means that sys.argv is used :param exit: Raise SystemExit after finishing execution :returns: Whatever is returned by the eating the guacamole. :raises: Whatever is raised by eating the guacamole. .. note:: This method always either raises and exception or returns an object. The way it behaves depends on the value of the `exit` argument. This method can be used to quickly take a recipe, prepare the guacamole and eat it. It is named main as it is applicable as the main method of an application. The `exit` argument controls if main returns normally or raises SystemExit. By default it will raise SystemExit (it will either wrap the return value with SystemExit or re-raise the SystemExit exception again). If SystemExit is raised but `exit` is False the argument to SystemExit is unwrapped and returned instead. """ bowl = self.prepare() try: retval = bowl.eat(argv) except SystemExit as exc: if exit: raise else: return exc.args[0] else: if retval is None: retval = 0 if exit: raise SystemExit(retval) else: return retval
[ "def", "main", "(", "self", ",", "argv", "=", "None", ",", "exit", "=", "True", ")", ":", "bowl", "=", "self", ".", "prepare", "(", ")", "try", ":", "retval", "=", "bowl", ".", "eat", "(", "argv", ")", "except", "SystemExit", "as", "exc", ":", ...
Shortcut to prepare a bowl of guacamole and eat it. :param argv: Command line arguments or None. None means that sys.argv is used :param exit: Raise SystemExit after finishing execution :returns: Whatever is returned by the eating the guacamole. :raises: Whatever is raised by eating the guacamole. .. note:: This method always either raises and exception or returns an object. The way it behaves depends on the value of the `exit` argument. This method can be used to quickly take a recipe, prepare the guacamole and eat it. It is named main as it is applicable as the main method of an application. The `exit` argument controls if main returns normally or raises SystemExit. By default it will raise SystemExit (it will either wrap the return value with SystemExit or re-raise the SystemExit exception again). If SystemExit is raised but `exit` is False the argument to SystemExit is unwrapped and returned instead.
[ "Shortcut", "to", "prepare", "a", "bowl", "of", "guacamole", "and", "eat", "it", "." ]
python
train
gem/oq-engine
openquake/hmtk/seismicity/selector.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/seismicity/selector.py#L263-L288
def within_joyner_boore_distance(self, surface, distance, **kwargs): ''' Select events within a Joyner-Boore distance of a fault :param surface: Fault surface as instance of nhlib.geo.surface.base.SimpleFaultSurface or as instance of nhlib.geo.surface.ComplexFaultSurface :param float distance: Rupture distance (km) :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` containing only selected events ''' upper_depth, lower_depth = _check_depth_limits(kwargs) rjb = surface.get_joyner_boore_distance( self.catalogue.hypocentres_as_mesh()) is_valid = np.logical_and( rjb <= distance, np.logical_and(self.catalogue.data['depth'] >= upper_depth, self.catalogue.data['depth'] < lower_depth)) return self.select_catalogue(is_valid)
[ "def", "within_joyner_boore_distance", "(", "self", ",", "surface", ",", "distance", ",", "*", "*", "kwargs", ")", ":", "upper_depth", ",", "lower_depth", "=", "_check_depth_limits", "(", "kwargs", ")", "rjb", "=", "surface", ".", "get_joyner_boore_distance", "(...
Select events within a Joyner-Boore distance of a fault :param surface: Fault surface as instance of nhlib.geo.surface.base.SimpleFaultSurface or as instance of nhlib.geo.surface.ComplexFaultSurface :param float distance: Rupture distance (km) :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` containing only selected events
[ "Select", "events", "within", "a", "Joyner", "-", "Boore", "distance", "of", "a", "fault" ]
python
train
jdoda/sdl2hl
sdl2hl/video.py
https://github.com/jdoda/sdl2hl/blob/3b477e1e01cea5d8e15e9e5ef3a302ea460f5946/sdl2hl/video.py#L74-L78
def size(self): """Tuple[int, int]: The width and height of the window.""" size = ffi.new('int[]', 2) lib.SDL_GetWindowSize(self._ptr, size + 0, size + 1) return (size[0], size[1])
[ "def", "size", "(", "self", ")", ":", "size", "=", "ffi", ".", "new", "(", "'int[]'", ",", "2", ")", "lib", ".", "SDL_GetWindowSize", "(", "self", ".", "_ptr", ",", "size", "+", "0", ",", "size", "+", "1", ")", "return", "(", "size", "[", "0", ...
Tuple[int, int]: The width and height of the window.
[ "Tuple", "[", "int", "int", "]", ":", "The", "width", "and", "height", "of", "the", "window", "." ]
python
train
rbarrois/mpdlcd
mpdlcd/vendor/lcdproc/screen.py
https://github.com/rbarrois/mpdlcd/blob/85f16c8cc0883f8abb4c2cc7f69729c3e2f857da/mpdlcd/vendor/lcdproc/screen.py#L87-L92
def set_duration(self, duration): """ Set Screen Change Interval Duration """ if duration > 0: self.duration = duration self.server.request("screen_set %s duration %i" % (self.ref, (self.duration * 8)))
[ "def", "set_duration", "(", "self", ",", "duration", ")", ":", "if", "duration", ">", "0", ":", "self", ".", "duration", "=", "duration", "self", ".", "server", ".", "request", "(", "\"screen_set %s duration %i\"", "%", "(", "self", ".", "ref", ",", "(",...
Set Screen Change Interval Duration
[ "Set", "Screen", "Change", "Interval", "Duration" ]
python
train
marrow/cinje
cinje/inline/text.py
https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/inline/text.py#L112-L162
def process(self, context, lines): """Chop up individual lines into static and dynamic parts. Applies light optimizations, such as empty chunk removal, and calls out to other methods to process different chunk types. The processor protocol here requires the method to accept values by yielding resulting lines while accepting sent chunks. Deferral of multiple chunks is possible by yielding None. The processor will be sent None to be given a chance to yield a final line and perform any clean-up. """ handler = None for line in lines: for chunk in chunk_(line): if 'strip' in context.flag: chunk.line = chunk.stripped if not chunk.line: continue # Eliminate empty chunks, i.e. trailing text segments, ${}, etc. if not handler or handler[0] != chunk.kind: if handler: try: result = next(handler[1]) except StopIteration: result = None if result: yield result handler = getattr(self, 'process_' + chunk.kind, self.process_generic)(chunk.kind, context) handler = (chunk.kind, handler) try: next(handler[1]) # We fast-forward to the first yield. except StopIteration: return result = handler[1].send(chunk) # Send the handler the next contiguous chunk. if result: yield result if __debug__: # In development mode we skip the contiguous chunk compaction optimization. handler = (None, handler[1]) # Clean up the final iteration. if handler: try: result = next(handler[1]) except StopIteration: return if result: yield result
[ "def", "process", "(", "self", ",", "context", ",", "lines", ")", ":", "handler", "=", "None", "for", "line", "in", "lines", ":", "for", "chunk", "in", "chunk_", "(", "line", ")", ":", "if", "'strip'", "in", "context", ".", "flag", ":", "chunk", "....
Chop up individual lines into static and dynamic parts. Applies light optimizations, such as empty chunk removal, and calls out to other methods to process different chunk types. The processor protocol here requires the method to accept values by yielding resulting lines while accepting sent chunks. Deferral of multiple chunks is possible by yielding None. The processor will be sent None to be given a chance to yield a final line and perform any clean-up.
[ "Chop", "up", "individual", "lines", "into", "static", "and", "dynamic", "parts", ".", "Applies", "light", "optimizations", "such", "as", "empty", "chunk", "removal", "and", "calls", "out", "to", "other", "methods", "to", "process", "different", "chunk", "type...
python
train
log2timeline/plaso
plaso/cli/storage_media_tool.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/storage_media_tool.py#L255-L301
def _GetVSSStoreIdentifiers(self, scan_node): """Determines the VSS store identifiers. Args: scan_node (dfvfs.SourceScanNode): scan node. Returns: list[str]: VSS store identifiers. Raises: SourceScannerError: if the format of or within the source is not supported or the scan node is invalid. UserAbort: if the user requested to abort. """ if not scan_node or not scan_node.path_spec: raise errors.SourceScannerError('Invalid scan node.') volume_system = vshadow_volume_system.VShadowVolumeSystem() volume_system.Open(scan_node.path_spec) volume_identifiers = self._source_scanner.GetVolumeIdentifiers( volume_system) if not volume_identifiers: return [] # TODO: refactor to use scan options. if self._vss_stores: if self._vss_stores == 'all': vss_stores = range(1, volume_system.number_of_volumes + 1) else: vss_stores = self._vss_stores selected_volume_identifiers = self._NormalizedVolumeIdentifiers( volume_system, vss_stores, prefix='vss') if not set(selected_volume_identifiers).difference(volume_identifiers): return selected_volume_identifiers try: volume_identifiers = self._PromptUserForVSSStoreIdentifiers( volume_system, volume_identifiers) except KeyboardInterrupt: raise errors.UserAbort('File system scan aborted.') return self._NormalizedVolumeIdentifiers( volume_system, volume_identifiers, prefix='vss')
[ "def", "_GetVSSStoreIdentifiers", "(", "self", ",", "scan_node", ")", ":", "if", "not", "scan_node", "or", "not", "scan_node", ".", "path_spec", ":", "raise", "errors", ".", "SourceScannerError", "(", "'Invalid scan node.'", ")", "volume_system", "=", "vshadow_vol...
Determines the VSS store identifiers. Args: scan_node (dfvfs.SourceScanNode): scan node. Returns: list[str]: VSS store identifiers. Raises: SourceScannerError: if the format of or within the source is not supported or the scan node is invalid. UserAbort: if the user requested to abort.
[ "Determines", "the", "VSS", "store", "identifiers", "." ]
python
train
pypa/pipenv
pipenv/vendor/cerberus/validator.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/cerberus/validator.py#L1140-L1143
def _validate_minlength(self, min_length, field, value): """ {'type': 'integer'} """ if isinstance(value, Iterable) and len(value) < min_length: self._error(field, errors.MIN_LENGTH, len(value))
[ "def", "_validate_minlength", "(", "self", ",", "min_length", ",", "field", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "Iterable", ")", "and", "len", "(", "value", ")", "<", "min_length", ":", "self", ".", "_error", "(", "field", ",...
{'type': 'integer'}
[ "{", "type", ":", "integer", "}" ]
python
train
intiocean/pyinter
pyinter/interval_set.py
https://github.com/intiocean/pyinter/blob/fb6e904307477fa43123cc9ab326680aa1a8cd62/pyinter/interval_set.py#L69-L79
def union(self, other): """Returns a new IntervalSet which represents the union of each of the intervals in this IntervalSet with each of the intervals in the other IntervalSet :param other: An IntervalSet to union with this one. """ result = IntervalSet() for el in self: result.add(el) for el in other: result.add(el) return result
[ "def", "union", "(", "self", ",", "other", ")", ":", "result", "=", "IntervalSet", "(", ")", "for", "el", "in", "self", ":", "result", ".", "add", "(", "el", ")", "for", "el", "in", "other", ":", "result", ".", "add", "(", "el", ")", "return", ...
Returns a new IntervalSet which represents the union of each of the intervals in this IntervalSet with each of the intervals in the other IntervalSet :param other: An IntervalSet to union with this one.
[ "Returns", "a", "new", "IntervalSet", "which", "represents", "the", "union", "of", "each", "of", "the", "intervals", "in", "this", "IntervalSet", "with", "each", "of", "the", "intervals", "in", "the", "other", "IntervalSet", ":", "param", "other", ":", "An",...
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L2348-L2365
def deltet(epoch, eptype): """ Return the value of Delta ET (ET-UTC) for an input epoch. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/deltet_c.html :param epoch: Input epoch (seconds past J2000). :type epoch: float :param eptype: Type of input epoch ("UTC" or "ET"). :type eptype: str :return: Delta ET (ET-UTC) at input epoch. :rtype: float """ epoch = ctypes.c_double(epoch) eptype = stypes.stringToCharP(eptype) delta = ctypes.c_double() libspice.deltet_c(epoch, eptype, ctypes.byref(delta)) return delta.value
[ "def", "deltet", "(", "epoch", ",", "eptype", ")", ":", "epoch", "=", "ctypes", ".", "c_double", "(", "epoch", ")", "eptype", "=", "stypes", ".", "stringToCharP", "(", "eptype", ")", "delta", "=", "ctypes", ".", "c_double", "(", ")", "libspice", ".", ...
Return the value of Delta ET (ET-UTC) for an input epoch. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/deltet_c.html :param epoch: Input epoch (seconds past J2000). :type epoch: float :param eptype: Type of input epoch ("UTC" or "ET"). :type eptype: str :return: Delta ET (ET-UTC) at input epoch. :rtype: float
[ "Return", "the", "value", "of", "Delta", "ET", "(", "ET", "-", "UTC", ")", "for", "an", "input", "epoch", "." ]
python
train
proycon/pynlpl
pynlpl/formats/folia.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L1862-L1874
def ancestor(self, *Classes): """Find the most immediate ancestor of the specified type, multiple classes may be specified. Arguments: *Classes: The possible classes (:class:`AbstractElement` or subclasses) to select from. Not instances! Example:: paragraph = word.ancestor(folia.Paragraph) """ for e in self.ancestors(tuple(Classes)): return e raise NoSuchAnnotation
[ "def", "ancestor", "(", "self", ",", "*", "Classes", ")", ":", "for", "e", "in", "self", ".", "ancestors", "(", "tuple", "(", "Classes", ")", ")", ":", "return", "e", "raise", "NoSuchAnnotation" ]
Find the most immediate ancestor of the specified type, multiple classes may be specified. Arguments: *Classes: The possible classes (:class:`AbstractElement` or subclasses) to select from. Not instances! Example:: paragraph = word.ancestor(folia.Paragraph)
[ "Find", "the", "most", "immediate", "ancestor", "of", "the", "specified", "type", "multiple", "classes", "may", "be", "specified", "." ]
python
train
CZ-NIC/yangson
yangson/schpattern.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/schpattern.py#L214-L217
def deriv(self, x: str, ctype: ContentType) -> SchemaPattern: """Return derivative of the receiver.""" return Alternative.combine(self.left.deriv(x, ctype), self.right.deriv(x, ctype))
[ "def", "deriv", "(", "self", ",", "x", ":", "str", ",", "ctype", ":", "ContentType", ")", "->", "SchemaPattern", ":", "return", "Alternative", ".", "combine", "(", "self", ".", "left", ".", "deriv", "(", "x", ",", "ctype", ")", ",", "self", ".", "r...
Return derivative of the receiver.
[ "Return", "derivative", "of", "the", "receiver", "." ]
python
train
buzzfeed/caliendo
caliendo/patch.py
https://github.com/buzzfeed/caliendo/blob/1628a10f7782ad67c0422b5cbc9bf4979ac40abc/caliendo/patch.py#L94-L116
def execute_side_effect(side_effect=UNDEFINED, args=UNDEFINED, kwargs=UNDEFINED): """ Executes a side effect if one is defined. :param side_effect: The side effect to execute :type side_effect: Mixed. If it's an exception it's raised. If it's callable it's called with teh parameters. :param tuple args: The arguments passed to the stubbed out method :param dict kwargs: The kwargs passed to the subbed out method. :rtype: mixed :returns: Whatever the passed side_effect returns :raises: Whatever error is defined as the side_effect """ if args == UNDEFINED: args = tuple() if kwargs == UNDEFINED: kwargs = {} if isinstance(side_effect, (BaseException, Exception, StandardError)): raise side_effect elif hasattr(side_effect, '__call__'): # If it's callable... return side_effect(*args, **kwargs) else: raise Exception("Caliendo doesn't know what to do with your side effect. {0}".format(side_effect))
[ "def", "execute_side_effect", "(", "side_effect", "=", "UNDEFINED", ",", "args", "=", "UNDEFINED", ",", "kwargs", "=", "UNDEFINED", ")", ":", "if", "args", "==", "UNDEFINED", ":", "args", "=", "tuple", "(", ")", "if", "kwargs", "==", "UNDEFINED", ":", "k...
Executes a side effect if one is defined. :param side_effect: The side effect to execute :type side_effect: Mixed. If it's an exception it's raised. If it's callable it's called with teh parameters. :param tuple args: The arguments passed to the stubbed out method :param dict kwargs: The kwargs passed to the subbed out method. :rtype: mixed :returns: Whatever the passed side_effect returns :raises: Whatever error is defined as the side_effect
[ "Executes", "a", "side", "effect", "if", "one", "is", "defined", "." ]
python
train
ANTsX/ANTsPy
ants/segmentation/otsu.py
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/segmentation/otsu.py#L7-L43
def otsu_segmentation(image, k, mask=None): """ Otsu image segmentation This is a very fast segmentation algorithm good for quick explortation, but does not return probability maps. ANTsR function: `thresholdImage(image, 'Otsu', k)` Arguments --------- image : ANTsImage input image k : integer integer number of classes. Note that a background class will be added to this, so the resulting segmentation will have k+1 unique values. mask : ANTsImage segment inside this mask Returns ------- ANTsImage Example ------- >>> import ants >>> mni = ants.image_read(ants.get_data('mni')) >>> seg = mni.otsu_segmentation(k=3) #0=bg,1=csf,2=gm,3=wm """ if mask is not None: image = image.mask_image(mask) seg = image.threshold_image('Otsu', k) return seg
[ "def", "otsu_segmentation", "(", "image", ",", "k", ",", "mask", "=", "None", ")", ":", "if", "mask", "is", "not", "None", ":", "image", "=", "image", ".", "mask_image", "(", "mask", ")", "seg", "=", "image", ".", "threshold_image", "(", "'Otsu'", ",...
Otsu image segmentation This is a very fast segmentation algorithm good for quick explortation, but does not return probability maps. ANTsR function: `thresholdImage(image, 'Otsu', k)` Arguments --------- image : ANTsImage input image k : integer integer number of classes. Note that a background class will be added to this, so the resulting segmentation will have k+1 unique values. mask : ANTsImage segment inside this mask Returns ------- ANTsImage Example ------- >>> import ants >>> mni = ants.image_read(ants.get_data('mni')) >>> seg = mni.otsu_segmentation(k=3) #0=bg,1=csf,2=gm,3=wm
[ "Otsu", "image", "segmentation" ]
python
train
twilio/twilio-python
twilio/rest/messaging/v1/session/webhook.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/messaging/v1/session/webhook.py#L392-L406
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: WebhookContext for this WebhookInstance :rtype: twilio.rest.messaging.v1.session.webhook.WebhookContext """ if self._context is None: self._context = WebhookContext( self._version, session_sid=self._solution['session_sid'], sid=self._solution['sid'], ) return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "WebhookContext", "(", "self", ".", "_version", ",", "session_sid", "=", "self", ".", "_solution", "[", "'session_sid'", "]", ",", "...
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: WebhookContext for this WebhookInstance :rtype: twilio.rest.messaging.v1.session.webhook.WebhookContext
[ "Generate", "an", "instance", "context", "for", "the", "instance", "the", "context", "is", "capable", "of", "performing", "various", "actions", ".", "All", "instance", "actions", "are", "proxied", "to", "the", "context" ]
python
train
salesking/salesking_python_sdk
salesking/collection.py
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/collection.py#L212-L244
def _build_query_url(self, page = None, verbose = False): """ builds the url to call """ query = [] # # build the filters # for afilter in self.filters.keys(): # value = self.filters[afilter] # print"filter:%s value:%s" % (afilter,value) # value = urlencode(value) # query_str = u"%s=%s" % (afilter, value) if len(self.filters) > 0: query.append(urlencode(self.filters)) if self.sort: query_str = u"%s=%s" % (u"sort", self.sort) query.append(query_str) if self.sort_by: query_str = u"%s=%s" % (u"sort_by", self.sort_by) query.append(query_str) if self.per_page: query_str = u"%s=%s" % (u"per_page", self.per_page) query.append(query_str) if page: query_str = u"%s=%s" % (u"page", page) query.append(query_str) query = u"?%s" % (u"&".join(query)) url = u"%s%s" % (self.get_list_endpoint()['href'],query) url = u"%s%s%s" % (self.__api__.base_url, API_BASE_PATH, url) msg = "_build_query_url: url:%s" % url log.debug(msg) if verbose: print msg return url
[ "def", "_build_query_url", "(", "self", ",", "page", "=", "None", ",", "verbose", "=", "False", ")", ":", "query", "=", "[", "]", "# # build the filters", "# for afilter in self.filters.keys():", "# value = self.filters[afilter]", "# prin...
builds the url to call
[ "builds", "the", "url", "to", "call" ]
python
train
PGower/PyCanvas
pycanvas/apis/pages.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/pages.py#L541-L560
def list_revisions_courses(self, url, course_id): """ List revisions. List the revisions of a page. Callers must have update rights on the page in order to see page history. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - url """ID""" path["url"] = url self.logger.debug("GET /api/v1/courses/{course_id}/pages/{url}/revisions with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/pages/{url}/revisions".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_revisions_courses", "(", "self", ",", "url", ",", "course_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"course_id\"", "]", "=", "course_...
List revisions. List the revisions of a page. Callers must have update rights on the page in order to see page history.
[ "List", "revisions", ".", "List", "the", "revisions", "of", "a", "page", ".", "Callers", "must", "have", "update", "rights", "on", "the", "page", "in", "order", "to", "see", "page", "history", "." ]
python
train
jgillick/LendingClub
lendingclub/__init__.py
https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/__init__.py#L104-L115
def version(self): """ Return the version number of the Lending Club Investor tool Returns ------- string The version number string """ this_path = os.path.dirname(os.path.realpath(__file__)) version_file = os.path.join(this_path, 'VERSION') return open(version_file).read().strip()
[ "def", "version", "(", "self", ")", ":", "this_path", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", "version_file", "=", "os", ".", "path", ".", "join", "(", "this_path", ",", "'VERSION'",...
Return the version number of the Lending Club Investor tool Returns ------- string The version number string
[ "Return", "the", "version", "number", "of", "the", "Lending", "Club", "Investor", "tool" ]
python
train
saltstack/salt
salt/modules/rabbitmq.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rabbitmq.py#L681-L702
def join_cluster(host, user='rabbit', ram_node=None, runas=None): ''' Join a rabbit cluster CLI Example: .. code-block:: bash salt '*' rabbitmq.join_cluster rabbit.example.com rabbit ''' cmd = [RABBITMQCTL, 'join_cluster'] if ram_node: cmd.append('--ram') cmd.append('{0}@{1}'.format(user, host)) if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.user.get_user() stop_app(runas) res = __salt__['cmd.run_all'](cmd, reset_system_locale=False, runas=runas, python_shell=False) start_app(runas) return _format_response(res, 'Join')
[ "def", "join_cluster", "(", "host", ",", "user", "=", "'rabbit'", ",", "ram_node", "=", "None", ",", "runas", "=", "None", ")", ":", "cmd", "=", "[", "RABBITMQCTL", ",", "'join_cluster'", "]", "if", "ram_node", ":", "cmd", ".", "append", "(", "'--ram'"...
Join a rabbit cluster CLI Example: .. code-block:: bash salt '*' rabbitmq.join_cluster rabbit.example.com rabbit
[ "Join", "a", "rabbit", "cluster" ]
python
train
CodeReclaimers/neat-python
neat/distributed.py
https://github.com/CodeReclaimers/neat-python/blob/e3dbe77c0d776eae41d598e6439e6ac02ab90b18/neat/distributed.py#L469-L473
def _reset_em(self): """Resets self.em and the shared instances.""" self.em = _ExtendedManager(self.addr, self.authkey, mode=self.mode, start=False) self.em.start() self._set_shared_instances()
[ "def", "_reset_em", "(", "self", ")", ":", "self", ".", "em", "=", "_ExtendedManager", "(", "self", ".", "addr", ",", "self", ".", "authkey", ",", "mode", "=", "self", ".", "mode", ",", "start", "=", "False", ")", "self", ".", "em", ".", "start", ...
Resets self.em and the shared instances.
[ "Resets", "self", ".", "em", "and", "the", "shared", "instances", "." ]
python
train
jason-weirather/py-seq-tools
seqtools/errors.py
https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/errors.py#L87-L106
def get_target_context_error_report(self): """Get a report on context-specific errors relative to what is expected on the target strand. :returns: Object with a 'header' and a 'data' where data describes context: before,after ,reference, query. A total is kept for each reference base, and individual errors are finally checked :rtype: dict() """ report = {} report['header'] = ['before','after','reference','query','fraction'] report['data'] = [] r = self.get_target_context_errors() for b in sorted(r.keys()): for a in sorted(r[b].keys()): for t in sorted(r[b][a]): for q in sorted(r[b][a]): v = 0 if r[b][a][t]['total'] > 0: v = float(r[b][a][t]['types'][q])/float(r[b][a][t]['total']) report['data'].append([b,a,t,q,v]) return report
[ "def", "get_target_context_error_report", "(", "self", ")", ":", "report", "=", "{", "}", "report", "[", "'header'", "]", "=", "[", "'before'", ",", "'after'", ",", "'reference'", ",", "'query'", ",", "'fraction'", "]", "report", "[", "'data'", "]", "=", ...
Get a report on context-specific errors relative to what is expected on the target strand. :returns: Object with a 'header' and a 'data' where data describes context: before,after ,reference, query. A total is kept for each reference base, and individual errors are finally checked :rtype: dict()
[ "Get", "a", "report", "on", "context", "-", "specific", "errors", "relative", "to", "what", "is", "expected", "on", "the", "target", "strand", "." ]
python
train
openvax/varlens
varlens/read_evidence/pileup.py
https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/read_evidence/pileup.py#L65-L70
def update(self, other): ''' Add all pileup elements from other into self. ''' assert self.locus == other.locus self.elements.update(other.elements)
[ "def", "update", "(", "self", ",", "other", ")", ":", "assert", "self", ".", "locus", "==", "other", ".", "locus", "self", ".", "elements", ".", "update", "(", "other", ".", "elements", ")" ]
Add all pileup elements from other into self.
[ "Add", "all", "pileup", "elements", "from", "other", "into", "self", "." ]
python
train
google/grr
grr/server/grr_response_server/databases/mem_cronjobs.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_cronjobs.py#L118-L137
def ReturnLeasedCronJobs(self, jobs): """Makes leased cron jobs available for leasing again.""" errored_jobs = [] for returned_job in jobs: existing_lease = self.cronjob_leases.get(returned_job.cron_job_id) if existing_lease is None: errored_jobs.append(returned_job) continue if (returned_job.leased_until != existing_lease[0] or returned_job.leased_by != existing_lease[1]): errored_jobs.append(returned_job) continue del self.cronjob_leases[returned_job.cron_job_id] if errored_jobs: raise ValueError("Some jobs could not be returned: %s" % ",".join(job.cron_job_id for job in errored_jobs))
[ "def", "ReturnLeasedCronJobs", "(", "self", ",", "jobs", ")", ":", "errored_jobs", "=", "[", "]", "for", "returned_job", "in", "jobs", ":", "existing_lease", "=", "self", ".", "cronjob_leases", ".", "get", "(", "returned_job", ".", "cron_job_id", ")", "if", ...
Makes leased cron jobs available for leasing again.
[ "Makes", "leased", "cron", "jobs", "available", "for", "leasing", "again", "." ]
python
train
docker/docker-py
docker/api/plugin.py
https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/api/plugin.py#L30-L53
def create_plugin(self, name, plugin_data_dir, gzip=False): """ Create a new plugin. Args: name (string): The name of the plugin. The ``:latest`` tag is optional, and is the default if omitted. plugin_data_dir (string): Path to the plugin data directory. Plugin data directory must contain the ``config.json`` manifest file and the ``rootfs`` directory. gzip (bool): Compress the context using gzip. Default: False Returns: ``True`` if successful """ url = self._url('/plugins/create') with utils.create_archive( root=plugin_data_dir, gzip=gzip, files=set(utils.build.walk(plugin_data_dir, [])) ) as archv: res = self._post(url, params={'name': name}, data=archv) self._raise_for_status(res) return True
[ "def", "create_plugin", "(", "self", ",", "name", ",", "plugin_data_dir", ",", "gzip", "=", "False", ")", ":", "url", "=", "self", ".", "_url", "(", "'/plugins/create'", ")", "with", "utils", ".", "create_archive", "(", "root", "=", "plugin_data_dir", ",",...
Create a new plugin. Args: name (string): The name of the plugin. The ``:latest`` tag is optional, and is the default if omitted. plugin_data_dir (string): Path to the plugin data directory. Plugin data directory must contain the ``config.json`` manifest file and the ``rootfs`` directory. gzip (bool): Compress the context using gzip. Default: False Returns: ``True`` if successful
[ "Create", "a", "new", "plugin", "." ]
python
train
pricingassistant/mrq
mrq/job.py
https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/job.py#L371-L390
def kill(self, block=False, reason="unknown"): """ Forcefully kill all greenlets associated with this job """ current_greenletid = id(gevent.getcurrent()) trace = "Job killed: %s" % reason for greenlet, job in context._GLOBAL_CONTEXT["greenlets"].values(): greenletid = id(greenlet) if job and job.id == self.id and greenletid != current_greenletid: greenlet.kill(block=block) trace += "\n\n--- Greenlet %s ---\n" % greenletid trace += "".join(traceback.format_stack(greenlet.gr_frame)) context._GLOBAL_CONTEXT["greenlets"].pop(greenletid, None) if reason == "timeout" and self.data["status"] != "timeout": updates = { "exceptiontype": "TimeoutInterrupt", "traceback": trace } self._save_status("timeout", updates=updates, exception=False)
[ "def", "kill", "(", "self", ",", "block", "=", "False", ",", "reason", "=", "\"unknown\"", ")", ":", "current_greenletid", "=", "id", "(", "gevent", ".", "getcurrent", "(", ")", ")", "trace", "=", "\"Job killed: %s\"", "%", "reason", "for", "greenlet", "...
Forcefully kill all greenlets associated with this job
[ "Forcefully", "kill", "all", "greenlets", "associated", "with", "this", "job" ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/isis_state/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/isis_state/__init__.py#L110-L133
def _set_global_isis_info(self, v, load=False): """ Setter method for global_isis_info, mapped from YANG variable /isis_state/global_isis_info (container) If this variable is read-only (config: false) in the source YANG file, then _set_global_isis_info is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_global_isis_info() directly. YANG Description: ISIS Global """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=global_isis_info.global_isis_info, is_container='container', presence=False, yang_name="global-isis-info", rest_name="global-isis-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-global', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """global_isis_info must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=global_isis_info.global_isis_info, is_container='container', presence=False, yang_name="global-isis-info", rest_name="global-isis-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-global', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""", }) self.__global_isis_info = t if hasattr(self, '_set'): self._set()
[ "def", "_set_global_isis_info", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", ...
Setter method for global_isis_info, mapped from YANG variable /isis_state/global_isis_info (container) If this variable is read-only (config: false) in the source YANG file, then _set_global_isis_info is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_global_isis_info() directly. YANG Description: ISIS Global
[ "Setter", "method", "for", "global_isis_info", "mapped", "from", "YANG", "variable", "/", "isis_state", "/", "global_isis_info", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "sourc...
python
train
maxalbert/tohu
tohu/v2/custom_generator_NEW.py
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator_NEW.py#L139-L192
def _add_new_init_method(cls):
    """
    Replace the existing cls.__init__() method with a new one which
    calls the original one and in addition performs the following actions:

    (1) Finds all field generator templates in the namespace and spawns
        them into field generators, collected in `self.field_gens`
        (also attached as individual instance attributes).
    (2) Creates the item class produced by this generator and stores it
        on `cls.item_cls`.

    Fixes over the previous version: the `origs` dict was populated but
    never read (dead local), and commented-out seed-generator code was
    removed.

    :param cls: the custom generator class to patch in place.
    """
    orig_init = cls.__init__

    def new_init_method(self, *args, **kwargs):
        logger.debug(f"Initialising new {self} (type: {type(self)})")

        # Call original __init__ function to ensure we pick up
        # any tohu generators that are defined there.
        orig_init(self, *args, **kwargs)

        # Find field generator templates and spawn them to create
        # field generators for the new custom generator instance.
        field_gens_templates = find_field_generator_templates(self)
        logger.debug(f'Found {len(field_gens_templates)} field generator template(s):')
        debug_print_dict(field_gens_templates)

        logger.debug('Spawning field generator templates...')
        # Shared mapping so spawned generators can resolve dependencies
        # on each other.
        dependency_mapping = {}
        spawned = {}
        for (name, gen) in field_gens_templates.items():
            spawned[name] = gen.spawn(dependency_mapping)
            logger.debug(f'Adding dependency mapping: {gen} -> {spawned[name]}')

        self.field_gens = spawned
        # Expose each spawned generator as an instance attribute as well.
        self.__dict__.update(self.field_gens)
        logger.debug(f'Spawned field generators attached to custom generator instance:')
        debug_print_dict(self.field_gens)

        # Create class for the items produced by this generator
        self.__class__.item_cls = make_item_class_for_custom_generator_class(self)

    cls.__init__ = new_init_method
[ "def", "_add_new_init_method", "(", "cls", ")", ":", "orig_init", "=", "cls", ".", "__init__", "def", "new_init_method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "debug", "(", "f\"Initialising new {self} (type: {type(sel...
Replace the existing cls.__init__() method with a new one which calls the original one and in addition performs the following actions: (1) Finds all instances of tohu.BaseGenerator in the namespace and collects them in the dictionary `self.field_gens`. (2) ..to do..
[ "Replace", "the", "existing", "cls", ".", "__init__", "()", "method", "with", "a", "new", "one", "which", "calls", "the", "original", "one", "and", "in", "addition", "performs", "the", "following", "actions", ":" ]
python
train
rsheftel/raccoon
raccoon/series.py
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/series.py#L674-L681
def reset_index(self): """ Resets the index of the Series to simple integer list and the index name to 'index'. :return: nothing """ self.index = list(range(self.__len__())) self.index_name = 'index'
[ "def", "reset_index", "(", "self", ")", ":", "self", ".", "index", "=", "list", "(", "range", "(", "self", ".", "__len__", "(", ")", ")", ")", "self", ".", "index_name", "=", "'index'" ]
Resets the index of the Series to simple integer list and the index name to 'index'. :return: nothing
[ "Resets", "the", "index", "of", "the", "Series", "to", "simple", "integer", "list", "and", "the", "index", "name", "to", "index", "." ]
python
train
rene-aguirre/pywinusb
pywinusb/hid/winapi.py
https://github.com/rene-aguirre/pywinusb/blob/954c4b2105d9f01cb0c50e24500bb747d4ecdc43/pywinusb/hid/winapi.py#L484-L508
def get_device_path(h_info, interface_data, ptr_info_data = None):
    """Returns Hardware device path.

    Parameters:
        h_info:         interface set info handler
        interface_data: device interface enumeration data
        ptr_info_data:  pointer to SP_DEVINFO_DATA() instance to receive details
    """
    required_size = c_ulong(0)
    dev_inter_detail_data = SP_DEVICE_INTERFACE_DETAIL_DATA()
    # cb_size must be the size of the *fixed* part of the structure,
    # per the SetupDiGetDeviceInterfaceDetail contract.
    dev_inter_detail_data.cb_size = sizeof(SP_DEVICE_INTERFACE_DETAIL_DATA)

    # First call with a NULL buffer: Windows reports the number of bytes
    # actually needed in `required_size` (the expected "insufficient
    # buffer" result is ignored here).
    SetupDiGetDeviceInterfaceDetail(h_info, byref(interface_data), None, 0,
            byref(required_size), None)
    # Grow the variable-length structure in place to the reported size.
    ctypes.resize(dev_inter_detail_data, required_size.value)

    # read value: second call fills in the detail data (and optionally
    # the caller-supplied SP_DEVINFO_DATA).
    SetupDiGetDeviceInterfaceDetail(h_info, byref(interface_data),
            byref(dev_inter_detail_data), required_size, None, ptr_info_data)

    # extract string only (the device path member of the detail struct)
    return dev_inter_detail_data.get_string()
[ "def", "get_device_path", "(", "h_info", ",", "interface_data", ",", "ptr_info_data", "=", "None", ")", ":", "required_size", "=", "c_ulong", "(", "0", ")", "dev_inter_detail_data", "=", "SP_DEVICE_INTERFACE_DETAIL_DATA", "(", ")", "dev_inter_detail_data", ".", "cb_...
Returns Hardware device path Parameters: h_info, interface set info handler interface_data, device interface enumeration data ptr_info_data, pointer to SP_DEVINFO_DATA() instance to receive details
[ "Returns", "Hardware", "device", "path", "Parameters", ":", "h_info", "interface", "set", "info", "handler", "interface_data", "device", "interface", "enumeration", "data", "ptr_info_data", "pointer", "to", "SP_DEVINFO_DATA", "()", "instance", "to", "receive", "detail...
python
train
anomaly/vishnu
vishnu/backend/client/google_cloud_datastore.py
https://github.com/anomaly/vishnu/blob/5b3a6a69beedc8554cc506ddfab273760d61dc65/vishnu/backend/client/google_cloud_datastore.py#L43-L57
def save(self, sync_only=False):
    """
    Persist this session to Google Cloud Datastore.

    :param sync_only:
    :type: bool
    """
    record = datastore.Entity(key=self._key)
    record["last_accessed"] = self.last_accessed

    # todo: restore sync only
    record["data"] = self._data
    if self.expires:
        record["expires"] = self.expires

    self._client.put(record)
[ "def", "save", "(", "self", ",", "sync_only", "=", "False", ")", ":", "entity", "=", "datastore", ".", "Entity", "(", "key", "=", "self", ".", "_key", ")", "entity", "[", "\"last_accessed\"", "]", "=", "self", ".", "last_accessed", "# todo: restore sync on...
:param sync_only: :type: bool
[ ":", "param", "sync_only", ":", ":", "type", ":", "bool" ]
python
train
bitesofcode/projexui
projexui/widgets/xpopupbutton.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xpopupbutton.py#L95-L102
def setCentralWidget(self, widget): """ Sets the central widget for this button. :param widget | <QWidget> """ self.setEnabled(widget is not None) self._popupWidget.setCentralWidget(widget)
[ "def", "setCentralWidget", "(", "self", ",", "widget", ")", ":", "self", ".", "setEnabled", "(", "widget", "is", "not", "None", ")", "self", ".", "_popupWidget", ".", "setCentralWidget", "(", "widget", ")" ]
Sets the central widget for this button. :param widget | <QWidget>
[ "Sets", "the", "central", "widget", "for", "this", "button", ".", ":", "param", "widget", "|", "<QWidget", ">" ]
python
train
ttroy50/pyephember
pyephember/pyephember.py
https://github.com/ttroy50/pyephember/blob/3ee159ee82b926b957dae8dcbc7a4bfb6807a9b4/pyephember/pyephember.py#L164-L178
def get_zones(self):
    """
    Get all zones across every receiver of the home; empty list when the
    home query was not successful.
    """
    home_data = self.get_home()
    if not home_data['isSuccess']:
        return []

    return [
        zone
        for receiver in home_data['data']['receivers']
        for zone in receiver['zones']
    ]
[ "def", "get_zones", "(", "self", ")", ":", "home_data", "=", "self", ".", "get_home", "(", ")", "if", "not", "home_data", "[", "'isSuccess'", "]", ":", "return", "[", "]", "zones", "=", "[", "]", "for", "receiver", "in", "home_data", "[", "'data'", "...
Get all zones
[ "Get", "all", "zones" ]
python
train
aconrad/pycobertura
pycobertura/cobertura.py
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L351-L363
def diff_missed_lines(self, filename):
    """
    Return a list of 2-element tuples `(lineno, is_new)` for the given
    file `filename` where `lineno` is a missed line number and `is_new`
    indicates whether the missed line was introduced (True) or removed
    (False). Lines with no status are skipped.
    """
    changes = []
    for source_line in self.file_source(filename):
        if source_line.status is None:
            continue
        changes.append((source_line.number, not source_line.status))
    return changes
[ "def", "diff_missed_lines", "(", "self", ",", "filename", ")", ":", "line_changed", "=", "[", "]", "for", "line", "in", "self", ".", "file_source", "(", "filename", ")", ":", "if", "line", ".", "status", "is", "not", "None", ":", "is_new", "=", "not", ...
Return a list of 2-element tuples `(lineno, is_new)` for the given file `filename` where `lineno` is a missed line number and `is_new` indicates whether the missed line was introduced (True) or removed (False).
[ "Return", "a", "list", "of", "2", "-", "element", "tuples", "(", "lineno", "is_new", ")", "for", "the", "given", "file", "filename", "where", "lineno", "is", "a", "missed", "line", "number", "and", "is_new", "indicates", "whether", "the", "missed", "line",...
python
train
DiamondLightSource/python-procrunner
procrunner/__init__.py
https://github.com/DiamondLightSource/python-procrunner/blob/e11c446f97f28abceb507d21403259757f08be0a/procrunner/__init__.py#L173-L202
def get_output(self):
    """
    Retrieve the stored data in full.
    This call may block if the reading thread has not yet terminated.

    :return: the complete buffered stream contents.
    :raises Exception: if the reader thread does not terminate after a
        join, or if this method is called twice (double close).
    """
    # Signal the reader thread that no more data is expected.
    self._closing = True
    if not self.has_finished():
        if self._debug:
            # Main thread overtook stream reading thread.
            underrun_debug_timer = timeit.default_timer()
            logger.warning("NBSR underrun")
        # Block until the reader thread has drained the stream.
        self._thread.join()
        if not self.has_finished():
            if self._debug:
                logger.debug(
                    "NBSR join after %f seconds, underrun not resolved"
                    % (timeit.default_timer() - underrun_debug_timer)
                )
            raise Exception("thread did not terminate")
        if self._debug:
            logger.debug(
                "NBSR underrun resolved after %f seconds"
                % (timeit.default_timer() - underrun_debug_timer)
            )
    # Guard against a second call: the buffer below is closed on exit.
    if self._closed:
        raise Exception("streamreader double-closed")
    self._closed = True
    data = self._buffer.getvalue()
    self._buffer.close()
    return data
[ "def", "get_output", "(", "self", ")", ":", "self", ".", "_closing", "=", "True", "if", "not", "self", ".", "has_finished", "(", ")", ":", "if", "self", ".", "_debug", ":", "# Main thread overtook stream reading thread.", "underrun_debug_timer", "=", "timeit", ...
Retrieve the stored data in full. This call may block if the reading thread has not yet terminated.
[ "Retrieve", "the", "stored", "data", "in", "full", ".", "This", "call", "may", "block", "if", "the", "reading", "thread", "has", "not", "yet", "terminated", "." ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_ti_batch.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti_batch.py#L907-L923
def indicator(self, indicator_type, summary, **kwargs):
    """Add Indicator data to Batch object.

    Args:
        indicator_type (str): The ThreatConnect define Indicator type.
        summary (str): The value for this Indicator.
        confidence (str, kwargs): The threat confidence for this Indicator.
        date_added (str, kwargs): The date timestamp the Indicator was created.
        last_modified (str, kwargs): The date timestamp the Indicator was last modified.
        rating (str, kwargs): The threat rating for this Indicator.
        xid (str, kwargs): The external id for this Indicator.

    Returns:
        obj: An instance of Indicator.
    """
    return self._indicator(Indicator(indicator_type, summary, **kwargs))
[ "def", "indicator", "(", "self", ",", "indicator_type", ",", "summary", ",", "*", "*", "kwargs", ")", ":", "indicator_obj", "=", "Indicator", "(", "indicator_type", ",", "summary", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_indicator", "(", "...
Add Indicator data to Batch object. Args: indicator_type (str): The ThreatConnect define Indicator type. summary (str): The value for this Indicator. confidence (str, kwargs): The threat confidence for this Indicator. date_added (str, kwargs): The date timestamp the Indicator was created. last_modified (str, kwargs): The date timestamp the Indicator was last modified. rating (str, kwargs): The threat rating for this Indicator. xid (str, kwargs): The external id for this Indicator. Returns: obj: An instance of Indicator.
[ "Add", "Indicator", "data", "to", "Batch", "object", "." ]
python
train
python-openxml/python-docx
docx/oxml/xmlchemy.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/oxml/xmlchemy.py#L495-L502
def populate_class_members(self, element_cls, prop_name):
    """
    Add the appropriate methods to *element_cls*: the shared members from
    the base class plus the getter for this one-and-only-one child.
    """
    parent = super(OneAndOnlyOne, self)
    parent.populate_class_members(element_cls, prop_name)
    self._add_getter()
[ "def", "populate_class_members", "(", "self", ",", "element_cls", ",", "prop_name", ")", ":", "super", "(", "OneAndOnlyOne", ",", "self", ")", ".", "populate_class_members", "(", "element_cls", ",", "prop_name", ")", "self", ".", "_add_getter", "(", ")" ]
Add the appropriate methods to *element_cls*.
[ "Add", "the", "appropriate", "methods", "to", "*", "element_cls", "*", "." ]
python
train
pingali/dgit
dgitcore/datasets/common.py
https://github.com/pingali/dgit/blob/ecde01f40b98f0719dbcfb54452270ed2f86686d/dgitcore/datasets/common.py#L297-L348
def bootstrap_datapackage(repo, force=False, options=None, noinput=False):
    """
    Create the datapackage file..

    :param repo: repository object; must expose `username`, `reponame`,
        `remoteurl` and a str() form, and receives the generated
        metadata on `repo.package`.
    :param force: unused; kept for interface compatibility.
    :param options: optional dict with 'title' and 'description'. When
        None, the values are prompted for interactively.
    :param noinput: when True and `options` is None, raise
        IncompleteParameters instead of prompting.
    :return: path of a temporary JSON file holding the package metadata.
    :raises IncompleteParameters: see `noinput`.

    Fixes over the previous version: the raw file descriptor from
    `tempfile.mkstemp()` was never closed (fd leak), the unused
    `tsprefix` local was removed, and duplicated 'title'/'description'
    keys in the OrderedDict literal were deduplicated.
    """
    print("Bootstrapping datapackage")

    # Initial data package json
    package = OrderedDict([
        ('title', ''),
        ('description', ''),
        ('username', repo.username),
        ('reponame', repo.reponame),
        ('name', str(repo)),
        ('keywords', []),
        ('resources', []),
        ('creator', getpass.getuser()),
        ('createdat', datetime.now().isoformat()),
        ('remote-url', repo.remoteurl)
    ])

    if options is not None:
        package['title'] = options['title']
        package['description'] = options['description']
    else:
        if noinput:
            raise IncompleteParameters("Option field with title and description")

        # Prompt until a non-empty value is supplied for each field.
        for var in ['title', 'description']:
            value = ''
            while value in ['', None]:
                value = input('Your Repo ' + var.title() + ": ")
                if len(value) == 0:
                    print("{} cannot be empty. Please re-enter.".format(var.title()))
            package[var] = value

    # Now store the package... NamedTemporaryFile(delete=False) creates
    # AND closes the handle, unlike mkstemp() whose raw descriptor was
    # previously leaked.
    with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as fd:
        fd.write(json.dumps(package, indent=4))
        filename = fd.name

    repo.package = package

    return filename
[ "def", "bootstrap_datapackage", "(", "repo", ",", "force", "=", "False", ",", "options", "=", "None", ",", "noinput", "=", "False", ")", ":", "print", "(", "\"Bootstrapping datapackage\"", ")", "# get the directory", "tsprefix", "=", "datetime", ".", "now", "(...
Create the datapackage file..
[ "Create", "the", "datapackage", "file", ".." ]
python
valid
rochacbruno/flasgger
flasgger/utils.py
https://github.com/rochacbruno/flasgger/blob/fef154f61d7afca548067be0c758c3dd71cc4c97/flasgger/utils.py#L400-L412
def ordered_dict_to_dict(d):
    """
    Converts inner OrderedDict to bare dict, recursively; values are
    deep-copied so the result shares nothing with the input.
    """
    converted = {}
    for key, value in deepcopy(d).items():
        if isinstance(value, OrderedDict):
            value = dict(value)
        if isinstance(value, dict):
            value = ordered_dict_to_dict(value)
        converted[key] = value
    return converted
[ "def", "ordered_dict_to_dict", "(", "d", ")", ":", "ret", "=", "{", "}", "new_d", "=", "deepcopy", "(", "d", ")", "for", "k", ",", "v", "in", "new_d", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "OrderedDict", ")", ":", "v", ...
Converts inner OrderedDict to bare dict
[ "Converts", "inner", "OrderedDict", "to", "bare", "dict" ]
python
train
magrathealabs/feito
feito/messages.py
https://github.com/magrathealabs/feito/blob/4179e40233ccf6e5a6c9892e528595690ce9ef43/feito/messages.py#L10-L24
def commit_format(self):
    """
    Formats the analysis into a simpler dictionary with the line, file and message values
    to be commented on a commit. Returns a list of dictionaries
    """
    return [
        {
            'message': f"{analyze['source']}: {analyze['message']}. Code: {analyze['code']}",
            'file': analyze['location']['path'],
            'line': analyze['location']['line'],
        }
        for analyze in self.analysis['messages']
    ]
[ "def", "commit_format", "(", "self", ")", ":", "formatted_analyses", "=", "[", "]", "for", "analyze", "in", "self", ".", "analysis", "[", "'messages'", "]", ":", "formatted_analyses", ".", "append", "(", "{", "'message'", ":", "f\"{analyze['source']}: {analyze['...
Formats the analysis into a simpler dictionary with the line, file and message values to be commented on a commit. Returns a list of dictionaries
[ "Formats", "the", "analysis", "into", "a", "simpler", "dictionary", "with", "the", "line", "file", "and", "message", "values", "to", "be", "commented", "on", "a", "commit", ".", "Returns", "a", "list", "of", "dictionaries" ]
python
train
FactoryBoy/factory_boy
factory/builder.py
https://github.com/FactoryBoy/factory_boy/blob/edaa7c7f5a14065b229927903bd7989cc93cd069/factory/builder.py#L132-L137
def _items(self):
    """Extract a list of (key, value) pairs, suitable for our __init__."""
    for name in self.declarations:
        yield name, self.declarations[name]
        context = self.contexts[name]
        for subkey in context:
            yield self.join(name, subkey), context[subkey]
[ "def", "_items", "(", "self", ")", ":", "for", "name", "in", "self", ".", "declarations", ":", "yield", "name", ",", "self", ".", "declarations", "[", "name", "]", "for", "subkey", ",", "value", "in", "self", ".", "contexts", "[", "name", "]", ".", ...
Extract a list of (key, value) pairs, suitable for our __init__.
[ "Extract", "a", "list", "of", "(", "key", "value", ")", "pairs", "suitable", "for", "our", "__init__", "." ]
python
train
swilson/aqualogic
aqualogic/core.py
https://github.com/swilson/aqualogic/blob/b6e904363efc4f64c70aae127d040079587ecbc6/aqualogic/core.py#L104-L109
def connect(self, host, port):
    """Connects via a RS-485 to Ethernet adapter.

    Opens a TCP connection and keeps buffered binary reader/writer
    file objects on the instance.
    """
    adapter = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    adapter.connect((host, port))
    self._reader = adapter.makefile(mode='rb')
    self._writer = adapter.makefile(mode='wb')
[ "def", "connect", "(", "self", ",", "host", ",", "port", ")", ":", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "sock", ".", "connect", "(", "(", "host", ",", "port", ")", ")", "self",...
Connects via a RS-485 to Ethernet adapter.
[ "Connects", "via", "a", "RS", "-", "485", "to", "Ethernet", "adapter", "." ]
python
train
MillionIntegrals/vel
vel/rl/models/stochastic_policy_model_separate.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/models/stochastic_policy_model_separate.py#L78-L83
def value(self, observations):
    """ Calculate only value head for given state """
    features = self.input_block(observations)
    hidden = self.value_backbone(features)
    return self.value_head(hidden)
[ "def", "value", "(", "self", ",", "observations", ")", ":", "input_data", "=", "self", ".", "input_block", "(", "observations", ")", "base_output", "=", "self", ".", "value_backbone", "(", "input_data", ")", "value_output", "=", "self", ".", "value_head", "(...
Calculate only value head for given state
[ "Calculate", "only", "value", "head", "for", "given", "state" ]
python
train
tanghaibao/jcvi
jcvi/apps/uclust.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/uclust.py#L771-L781
def alignfast(names, seqs):
    """
    Performs MUSCLE alignments on cluster and returns output as string

    :param names: sequence identifiers, paired positionally with `seqs`.
    :param seqs: the sequences to align.
    :return: stdout of the external `poa` process.
    """
    # Scoring matrix shipped alongside the package data.
    matfile = op.join(datadir, "blosum80.mat")
    # poa reads FASTA on stdin and writes PIR-format alignment to stdout.
    cmd = "poa -read_fasta - -pir stdout {0} -tolower -silent -hb -fuse_all".format(matfile)
    p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
    # Build the FASTA payload: one "name\nseq\n" pair per record.
    s = ""
    for i, j in zip(names, seqs):
        s += "\n".join((i, j)) + "\n"
    # NOTE(review): communicate() is handed a str — on Python 3 this
    # requires the pipes to be in text mode; looks like Python 2-era
    # code, confirm before running under Python 3.
    return p.communicate(s)[0]
[ "def", "alignfast", "(", "names", ",", "seqs", ")", ":", "matfile", "=", "op", ".", "join", "(", "datadir", ",", "\"blosum80.mat\"", ")", "cmd", "=", "\"poa -read_fasta - -pir stdout {0} -tolower -silent -hb -fuse_all\"", ".", "format", "(", "matfile", ")", "p", ...
Performs MUSCLE alignments on cluster and returns output as string
[ "Performs", "MUSCLE", "alignments", "on", "cluster", "and", "returns", "output", "as", "string" ]
python
train
jtpaasch/simplygithub
simplygithub/authentication/profile.py
https://github.com/jtpaasch/simplygithub/blob/b77506275ec276ce90879bf1ea9299a79448b903/simplygithub/authentication/profile.py#L80-L110
def write_profile(name, repo, token):
    """Save a profile to the CONFIG_FILE.

    After you use this method to save a profile,
    you can load it anytime later with the ``read_profile()``
    function defined above.

    Args:

        name
            The name of the profile to save.

        repo
            The Github repo you want to connect to. For instance,
            this repo is ``jtpaasch/simplygithub``.

        token
            A personal access token to connect to the repo. It is
            a hash that looks something like ``ff20ae42dc...``

    Returns:
        A dictionary with the profile's ``repo`` and ``token`` values.

    """
    profile = {"repo": repo, "token": token}

    make_sure_folder_exists(CONFIG_FOLDER)
    parser = configparser.ConfigParser()
    parser.read(CONFIG_FILE)
    parser[name] = profile
    with open(CONFIG_FILE, "w") as config_file:
        parser.write(config_file)

    return profile
[ "def", "write_profile", "(", "name", ",", "repo", ",", "token", ")", ":", "make_sure_folder_exists", "(", "CONFIG_FOLDER", ")", "config", "=", "configparser", ".", "ConfigParser", "(", ")", "config", ".", "read", "(", "CONFIG_FILE", ")", "profile", "=", "{",...
Save a profile to the CONFIG_FILE. After you use this method to save a profile, you can load it anytime later with the ``read_profile()`` function defined above. Args: name The name of the profile to save. repo The Github repo you want to connect to. For instance, this repo is ``jtpaasch/simplygithub``. token A personal access token to connect to the repo. It is a hash that looks something like ``ff20ae42dc...`` Returns: A dictionary with the profile's ``repo`` and ``token`` values.
[ "Save", "a", "profile", "to", "the", "CONFIG_FILE", "." ]
python
train
emc-openstack/storops
storops/unity/resource/filesystem.py
https://github.com/emc-openstack/storops/blob/24b4b13bf065c0ef0538dd0b5ebb8f25d24176bd/storops/unity/resource/filesystem.py#L155-L161
def has_snap(self):
    """
    This method won't count the snaps in "destroying" state!

    :return: false if no snaps or all snaps are destroying.
    """
    return any(snap.state != SnapStateEnum.DESTROYING
               for snap in self.snapshots)
[ "def", "has_snap", "(", "self", ")", ":", "return", "len", "(", "list", "(", "filter", "(", "lambda", "s", ":", "s", ".", "state", "!=", "SnapStateEnum", ".", "DESTROYING", ",", "self", ".", "snapshots", ")", ")", ")", ">", "0" ]
This method won't count the snaps in "destroying" state! :return: false if no snaps or all snaps are destroying.
[ "This", "method", "won", "t", "count", "the", "snaps", "in", "destroying", "state!" ]
python
train