nwo
stringlengths
5
106
sha
stringlengths
40
40
path
stringlengths
4
174
language
stringclasses
1 value
identifier
stringlengths
1
140
parameters
stringlengths
0
87.7k
argument_list
stringclasses
1 value
return_statement
stringlengths
0
426k
docstring
stringlengths
0
64.3k
docstring_summary
stringlengths
0
26.3k
docstring_tokens
list
function
stringlengths
18
4.83M
function_tokens
list
url
stringlengths
83
304
HymanLiuTS/flaskTs
286648286976e85d9b9a5873632331efcafe0b21
flasky/lib/python2.7/site-packages/pygments/cmdline.py
python
main
(args=sys.argv)
Main command line entry point.
Main command line entry point.
[ "Main", "command", "line", "entry", "point", "." ]
def main(args=sys.argv): """ Main command line entry point. """ usage = USAGE % ((args[0],) * 6) try: popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:vhVHgs") except getopt.GetoptError: print(usage, file=sys.stderr) return 2 try: return main_inner(popts, args, usage) except Exception: if '-v' in dict(popts): print(file=sys.stderr) print('*' * 65, file=sys.stderr) print('An unhandled exception occurred while highlighting.', file=sys.stderr) print('Please report the whole traceback to the issue tracker at', file=sys.stderr) print('<https://bitbucket.org/birkenfeld/pygments-main/issues>.', file=sys.stderr) print('*' * 65, file=sys.stderr) print(file=sys.stderr) raise import traceback info = traceback.format_exception(*sys.exc_info()) msg = info[-1].strip() if len(info) >= 3: # extract relevant file and position info msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:] print(file=sys.stderr) print('*** Error while highlighting:', file=sys.stderr) print(msg, file=sys.stderr) print('*** If this is a bug you want to report, please rerun with -v.', file=sys.stderr) return 1
[ "def", "main", "(", "args", "=", "sys", ".", "argv", ")", ":", "usage", "=", "USAGE", "%", "(", "(", "args", "[", "0", "]", ",", ")", "*", "6", ")", "try", ":", "popts", ",", "args", "=", "getopt", ".", "getopt", "(", "args", "[", "1", ":",...
https://github.com/HymanLiuTS/flaskTs/blob/286648286976e85d9b9a5873632331efcafe0b21/flasky/lib/python2.7/site-packages/pygments/cmdline.py#L491-L529
caiiiac/Machine-Learning-with-Python
1a26c4467da41ca4ebc3d5bd789ea942ef79422f
MachineLearning/venv/lib/python3.5/site-packages/scipy/sparse/base.py
python
spmatrix.tobsr
(self, blocksize=None, copy=False)
return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy)
Convert this matrix to Block Sparse Row format. With copy=False, the data/indices may be shared between this matrix and the resultant bsr_matrix. When blocksize=(R, C) is provided, it will be used for construction of the bsr_matrix.
Convert this matrix to Block Sparse Row format.
[ "Convert", "this", "matrix", "to", "Block", "Sparse", "Row", "format", "." ]
def tobsr(self, blocksize=None, copy=False): """Convert this matrix to Block Sparse Row format. With copy=False, the data/indices may be shared between this matrix and the resultant bsr_matrix. When blocksize=(R, C) is provided, it will be used for construction of the bsr_matrix. """ return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy)
[ "def", "tobsr", "(", "self", ",", "blocksize", "=", "None", ",", "copy", "=", "False", ")", ":", "return", "self", ".", "tocsr", "(", "copy", "=", "False", ")", ".", "tobsr", "(", "blocksize", "=", "blocksize", ",", "copy", "=", "copy", ")" ]
https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/scipy/sparse/base.py#L798-L807
SanPen/GridCal
d3f4566d2d72c11c7e910c9d162538ef0e60df31
src/GridCal/Engine/Simulations/ATC/available_transfer_capacity_driver.py
python
AvailableTransferCapacityOptions.__init__
(self, distributed_slack=True, correct_values=True, use_provided_flows=False, bus_idx_from=list(), bus_idx_to=list(), idx_br=list(), sense_br=list(), Pf=None, idx_hvdc_br=list(), sense_hvdc_br=list(), Pf_hvdc=None, dT=100.0, threshold=0.02, mode: AvailableTransferMode = AvailableTransferMode.Generation, max_report_elements=-1, use_clustering=False, cluster_number=100)
:param distributed_slack: :param correct_values: :param use_provided_flows: :param bus_idx_from: :param bus_idx_to: :param idx_br: :param sense_br: :param Pf: :param idx_hvdc_br: :param sense_hvdc_br: :param Pf_hvdc: :param dT: :param threshold: :param mode: :param max_report_elements: maximum number of elements to show in the report (-1 for all) :param use_clustering: :param n_clusters:
[]
def __init__(self, distributed_slack=True, correct_values=True, use_provided_flows=False, bus_idx_from=list(), bus_idx_to=list(), idx_br=list(), sense_br=list(), Pf=None, idx_hvdc_br=list(), sense_hvdc_br=list(), Pf_hvdc=None, dT=100.0, threshold=0.02, mode: AvailableTransferMode = AvailableTransferMode.Generation, max_report_elements=-1, use_clustering=False, cluster_number=100): """ :param distributed_slack: :param correct_values: :param use_provided_flows: :param bus_idx_from: :param bus_idx_to: :param idx_br: :param sense_br: :param Pf: :param idx_hvdc_br: :param sense_hvdc_br: :param Pf_hvdc: :param dT: :param threshold: :param mode: :param max_report_elements: maximum number of elements to show in the report (-1 for all) :param use_clustering: :param n_clusters: """ self.distributed_slack = distributed_slack self.correct_values = correct_values self.use_provided_flows = use_provided_flows self.bus_idx_from = bus_idx_from self.bus_idx_to = bus_idx_to self.inter_area_branch_idx = idx_br self.inter_area_branch_sense = sense_br self.Pf = Pf self.idx_hvdc_br = idx_hvdc_br self.inter_area_hvdc_branch_sense = sense_hvdc_br self.Pf_hvdc = Pf_hvdc self.dT = dT self.threshold = threshold self.mode = mode self.max_report_elements = max_report_elements self.use_clustering = use_clustering self.cluster_number = cluster_number
[ "def", "__init__", "(", "self", ",", "distributed_slack", "=", "True", ",", "correct_values", "=", "True", ",", "use_provided_flows", "=", "False", ",", "bus_idx_from", "=", "list", "(", ")", ",", "bus_idx_to", "=", "list", "(", ")", ",", "idx_br", "=", ...
https://github.com/SanPen/GridCal/blob/d3f4566d2d72c11c7e910c9d162538ef0e60df31/src/GridCal/Engine/Simulations/ATC/available_transfer_capacity_driver.py#L483-L528
makerbot/ReplicatorG
d6f2b07785a5a5f1e172fb87cb4303b17c575d5d
skein_engines/skeinforge-47/fabmetheus_utilities/geometry/solids/triangle_mesh.py
python
ZoneArrangement.getEmptyZ
(self, z)
Get the first z which is not in the zone table.
Get the first z which is not in the zone table.
[ "Get", "the", "first", "z", "which", "is", "not", "in", "the", "zone", "table", "." ]
def getEmptyZ(self, z): 'Get the first z which is not in the zone table.' zoneIndex = round(z / self.zoneInterval) if zoneIndex not in self.zZoneSet: return z zoneAround = 1 while 1: zoneDown = zoneIndex - zoneAround if zoneDown not in self.zZoneSet: return zoneDown * self.zoneInterval zoneUp = zoneIndex + zoneAround if zoneUp not in self.zZoneSet: return zoneUp * self.zoneInterval zoneAround += 1
[ "def", "getEmptyZ", "(", "self", ",", "z", ")", ":", "zoneIndex", "=", "round", "(", "z", "/", "self", ".", "zoneInterval", ")", "if", "zoneIndex", "not", "in", "self", ".", "zZoneSet", ":", "return", "z", "zoneAround", "=", "1", "while", "1", ":", ...
https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-47/fabmetheus_utilities/geometry/solids/triangle_mesh.py#L926-L939
aws-quickstart/quickstart-redhat-openshift
2b87dd38b72e7e4c439a606c5a9ea458d72da612
functions/source/DeleteBucketContents/requests/models.py
python
PreparedRequest.prepare_headers
(self, headers)
Prepares the given HTTP headers.
Prepares the given HTTP headers.
[ "Prepares", "the", "given", "HTTP", "headers", "." ]
def prepare_headers(self, headers): """Prepares the given HTTP headers.""" self.headers = CaseInsensitiveDict() if headers: for header in headers.items(): # Raise exception on invalid header value. check_header_validity(header) name, value = header self.headers[to_native_string(name)] = value
[ "def", "prepare_headers", "(", "self", ",", "headers", ")", ":", "self", ".", "headers", "=", "CaseInsensitiveDict", "(", ")", "if", "headers", ":", "for", "header", "in", "headers", ".", "items", "(", ")", ":", "# Raise exception on invalid header value.", "c...
https://github.com/aws-quickstart/quickstart-redhat-openshift/blob/2b87dd38b72e7e4c439a606c5a9ea458d72da612/functions/source/DeleteBucketContents/requests/models.py#L441-L450
biopython/biopython
2dd97e71762af7b046d7f7f8a4f1e38db6b06c86
Bio/Phylo/PAML/_parse_yn00.py
python
parse_ng86
(lines, results)
return (results, sequences)
Parse the Nei & Gojobori (1986) section of the results. Nei_Gojobori results are organized in a lower triangular matrix, with the sequence names labeling the rows and statistics in the format: w (dN dS) per column Example row (2 columns): 0.0000 (0.0000 0.0207) 0.0000 (0.0000 0.0421)
Parse the Nei & Gojobori (1986) section of the results.
[ "Parse", "the", "Nei", "&", "Gojobori", "(", "1986", ")", "section", "of", "the", "results", "." ]
def parse_ng86(lines, results): """Parse the Nei & Gojobori (1986) section of the results. Nei_Gojobori results are organized in a lower triangular matrix, with the sequence names labeling the rows and statistics in the format: w (dN dS) per column Example row (2 columns): 0.0000 (0.0000 0.0207) 0.0000 (0.0000 0.0421) """ sequences = [] for line in lines: # The purpose of this complex regex is to parse the NG86 section for # valid lines of data that are mixed in with citations and comments. # The data lines begin with a taxon name, followed by zero or more # fields containing numeric values, sometimes enclosed in parens. # Taxon names are from 1-30 characters and are usually separated from # the numeric portion of the line by space(s). Long taxon names to are # truncated to 30 characters, and may run into the data fields without # any separator., e.g. some_long_name-1.0000 # This regex is an attempt to cover more pathological cases while also # parsing all existing versions of yn00 output with shorter names. matrix_row_res = re.match( r"^([^\s]+?)(\s+-?\d+\.\d+.*$|\s*$|-1.0000\s*\(.*$)", line ) if matrix_row_res is not None: # Find all floating point numbers in this line, accounting # for the fact that the sequence IDs might have bits that # look like floating point values. line_floats_res = re.findall(r"-*\d+\.\d+", matrix_row_res.group(2)) line_floats = [float(val) for val in line_floats_res] seq_name = matrix_row_res.group(1).strip() sequences.append(seq_name) results[seq_name] = {} for i in range(0, len(line_floats), 3): NG86 = {} NG86["omega"] = line_floats[i] NG86["dN"] = line_floats[i + 1] NG86["dS"] = line_floats[i + 2] results[seq_name][sequences[i // 3]] = {"NG86": NG86} results[sequences[i // 3]][seq_name] = {"NG86": NG86} return (results, sequences)
[ "def", "parse_ng86", "(", "lines", ",", "results", ")", ":", "sequences", "=", "[", "]", "for", "line", "in", "lines", ":", "# The purpose of this complex regex is to parse the NG86 section for", "# valid lines of data that are mixed in with citations and comments.", "# The dat...
https://github.com/biopython/biopython/blob/2dd97e71762af7b046d7f7f8a4f1e38db6b06c86/Bio/Phylo/PAML/_parse_yn00.py#L13-L56
rconradharris/envparse
e67e70307af19d925e194b2a163e0608dae7eb55
envparse.py
python
Env.__init__
(self, **schema)
[]
def __init__(self, **schema): self.schema = schema
[ "def", "__init__", "(", "self", ",", "*", "*", "schema", ")", ":", "self", ".", "schema", "=", "schema" ]
https://github.com/rconradharris/envparse/blob/e67e70307af19d925e194b2a163e0608dae7eb55/envparse.py#L56-L57
taowen/es-monitor
c4deceb4964857f495d13bfaf2d92f36734c9e1c
es_sql/executors/translators/case_when_translator.py
python
CaseWhenFiltersTranslator.build
(self)
return agg
[]
def build(self): if not self.filters: raise Exception('internal error') agg = {'filters': {'filters': self.filters}} if self.other_bucket_key: agg['filters']['other_bucket_key'] = self.other_bucket_key return agg
[ "def", "build", "(", "self", ")", ":", "if", "not", "self", ".", "filters", ":", "raise", "Exception", "(", "'internal error'", ")", "agg", "=", "{", "'filters'", ":", "{", "'filters'", ":", "self", ".", "filters", "}", "}", "if", "self", ".", "other...
https://github.com/taowen/es-monitor/blob/c4deceb4964857f495d13bfaf2d92f36734c9e1c/es_sql/executors/translators/case_when_translator.py#L150-L156
pallets/flask
660994efc761efdfd49ca442b73f6712dc77b6cf
src/flask/scaffold.py
python
Scaffold.route
(self, rule: str, **options: t.Any)
return decorator
Decorate a view function to register it with the given URL rule and options. Calls :meth:`add_url_rule`, which has more details about the implementation. .. code-block:: python @app.route("/") def index(): return "Hello, World!" See :ref:`url-route-registrations`. The endpoint name for the route defaults to the name of the view function if the ``endpoint`` parameter isn't passed. The ``methods`` parameter defaults to ``["GET"]``. ``HEAD`` and ``OPTIONS`` are added automatically. :param rule: The URL rule string. :param options: Extra options passed to the :class:`~werkzeug.routing.Rule` object.
Decorate a view function to register it with the given URL rule and options. Calls :meth:`add_url_rule`, which has more details about the implementation.
[ "Decorate", "a", "view", "function", "to", "register", "it", "with", "the", "given", "URL", "rule", "and", "options", ".", "Calls", ":", "meth", ":", "add_url_rule", "which", "has", "more", "details", "about", "the", "implementation", "." ]
def route(self, rule: str, **options: t.Any) -> t.Callable: """Decorate a view function to register it with the given URL rule and options. Calls :meth:`add_url_rule`, which has more details about the implementation. .. code-block:: python @app.route("/") def index(): return "Hello, World!" See :ref:`url-route-registrations`. The endpoint name for the route defaults to the name of the view function if the ``endpoint`` parameter isn't passed. The ``methods`` parameter defaults to ``["GET"]``. ``HEAD`` and ``OPTIONS`` are added automatically. :param rule: The URL rule string. :param options: Extra options passed to the :class:`~werkzeug.routing.Rule` object. """ def decorator(f: t.Callable) -> t.Callable: endpoint = options.pop("endpoint", None) self.add_url_rule(rule, endpoint, f, **options) return f return decorator
[ "def", "route", "(", "self", ",", "rule", ":", "str", ",", "*", "*", "options", ":", "t", ".", "Any", ")", "->", "t", ".", "Callable", ":", "def", "decorator", "(", "f", ":", "t", ".", "Callable", ")", "->", "t", ".", "Callable", ":", "endpoint...
https://github.com/pallets/flask/blob/660994efc761efdfd49ca442b73f6712dc77b6cf/src/flask/scaffold.py#L413-L442
replit-archive/empythoned
977ec10ced29a3541a4973dc2b59910805695752
cpython/Lib/urllib.py
python
localhost
()
return _localhost
Return the IP address of the magic hostname 'localhost'.
Return the IP address of the magic hostname 'localhost'.
[ "Return", "the", "IP", "address", "of", "the", "magic", "hostname", "localhost", "." ]
def localhost(): """Return the IP address of the magic hostname 'localhost'.""" global _localhost if _localhost is None: _localhost = socket.gethostbyname('localhost') return _localhost
[ "def", "localhost", "(", ")", ":", "global", "_localhost", "if", "_localhost", "is", "None", ":", "_localhost", "=", "socket", ".", "gethostbyname", "(", "'localhost'", ")", "return", "_localhost" ]
https://github.com/replit-archive/empythoned/blob/977ec10ced29a3541a4973dc2b59910805695752/cpython/Lib/urllib.py#L808-L813
chribsen/simple-machine-learning-examples
dc94e52a4cebdc8bb959ff88b81ff8cfeca25022
venv/lib/python2.7/site-packages/numpy/ma/core.py
python
masked_inside
(x, v1, v2, copy=True)
return masked_where(condition, x, copy=copy)
Mask an array inside a given interval. Shortcut to ``masked_where``, where `condition` is True for `x` inside the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2` can be given in either order. See Also -------- masked_where : Mask where a condition is met. Notes ----- The array `x` is prefilled with its filling value. Examples -------- >>> import numpy.ma as ma >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] >>> ma.masked_inside(x, -0.3, 0.3) masked_array(data = [0.31 1.2 -- -- -0.4 -1.1], mask = [False False True True False False], fill_value=1e+20) The order of `v1` and `v2` doesn't matter. >>> ma.masked_inside(x, 0.3, -0.3) masked_array(data = [0.31 1.2 -- -- -0.4 -1.1], mask = [False False True True False False], fill_value=1e+20)
Mask an array inside a given interval.
[ "Mask", "an", "array", "inside", "a", "given", "interval", "." ]
def masked_inside(x, v1, v2, copy=True): """ Mask an array inside a given interval. Shortcut to ``masked_where``, where `condition` is True for `x` inside the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2` can be given in either order. See Also -------- masked_where : Mask where a condition is met. Notes ----- The array `x` is prefilled with its filling value. Examples -------- >>> import numpy.ma as ma >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] >>> ma.masked_inside(x, -0.3, 0.3) masked_array(data = [0.31 1.2 -- -- -0.4 -1.1], mask = [False False True True False False], fill_value=1e+20) The order of `v1` and `v2` doesn't matter. >>> ma.masked_inside(x, 0.3, -0.3) masked_array(data = [0.31 1.2 -- -- -0.4 -1.1], mask = [False False True True False False], fill_value=1e+20) """ if v2 < v1: (v1, v2) = (v2, v1) xf = filled(x) condition = (xf >= v1) & (xf <= v2) return masked_where(condition, x, copy=copy)
[ "def", "masked_inside", "(", "x", ",", "v1", ",", "v2", ",", "copy", "=", "True", ")", ":", "if", "v2", "<", "v1", ":", "(", "v1", ",", "v2", ")", "=", "(", "v2", ",", "v1", ")", "xf", "=", "filled", "(", "x", ")", "condition", "=", "(", ...
https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/numpy/ma/core.py#L2082-L2119
wbond/oscrypto
d40c62577706682a0f6da5616ad09964f1c9137d
oscrypto/_win/symmetric.py
python
aes_cbc_no_padding_encrypt
(key, data, iv)
return (iv, _encrypt('aes', key, data, iv, False))
Encrypts plaintext using AES in CBC mode with a 128, 192 or 256 bit key and no padding. This means the ciphertext must be an exact multiple of 16 bytes long. :param key: The encryption key - a byte string either 16, 24 or 32 bytes long :param data: The plaintext - a byte string :param iv: The initialization vector - either a byte string 16-bytes long or None to generate an IV :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A tuple of two byte strings (iv, ciphertext)
Encrypts plaintext using AES in CBC mode with a 128, 192 or 256 bit key and no padding. This means the ciphertext must be an exact multiple of 16 bytes long.
[ "Encrypts", "plaintext", "using", "AES", "in", "CBC", "mode", "with", "a", "128", "192", "or", "256", "bit", "key", "and", "no", "padding", ".", "This", "means", "the", "ciphertext", "must", "be", "an", "exact", "multiple", "of", "16", "bytes", "long", ...
def aes_cbc_no_padding_encrypt(key, data, iv): """ Encrypts plaintext using AES in CBC mode with a 128, 192 or 256 bit key and no padding. This means the ciphertext must be an exact multiple of 16 bytes long. :param key: The encryption key - a byte string either 16, 24 or 32 bytes long :param data: The plaintext - a byte string :param iv: The initialization vector - either a byte string 16-bytes long or None to generate an IV :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A tuple of two byte strings (iv, ciphertext) """ if len(key) not in [16, 24, 32]: raise ValueError(pretty_message( ''' key must be either 16, 24 or 32 bytes (128, 192 or 256 bits) long - is %s ''', len(key) )) if not iv: iv = rand_bytes(16) elif len(iv) != 16: raise ValueError(pretty_message( ''' iv must be 16 bytes long - is %s ''', len(iv) )) if len(data) % 16 != 0: raise ValueError(pretty_message( ''' data must be a multiple of 16 bytes long - is %s ''', len(data) )) return (iv, _encrypt('aes', key, data, iv, False))
[ "def", "aes_cbc_no_padding_encrypt", "(", "key", ",", "data", ",", "iv", ")", ":", "if", "len", "(", "key", ")", "not", "in", "[", "16", ",", "24", ",", "32", "]", ":", "raise", "ValueError", "(", "pretty_message", "(", "'''\n key must be either...
https://github.com/wbond/oscrypto/blob/d40c62577706682a0f6da5616ad09964f1c9137d/oscrypto/_win/symmetric.py#L45-L97
alanhamlett/pip-update-requirements
ce875601ef278c8ce00ad586434a978731525561
pur/packages/pip/_internal/vcs/__init__.py
python
VersionControl.obtain
(self, dest)
Install or update in editable mode the package represented by this VersionControl object. Args: dest: the repository directory in which to install or update.
Install or update in editable mode the package represented by this VersionControl object.
[ "Install", "or", "update", "in", "editable", "mode", "the", "package", "represented", "by", "this", "VersionControl", "object", "." ]
def obtain(self, dest): # type: (str) -> None """ Install or update in editable mode the package represented by this VersionControl object. Args: dest: the repository directory in which to install or update. """ url, rev_options = self.get_url_rev_options(self.url) if not os.path.exists(dest): self.fetch_new(dest, url, rev_options) return rev_display = rev_options.to_display() if self.is_repository_directory(dest): existing_url = self.get_remote_url(dest) if self.compare_urls(existing_url, url): logger.debug( '%s in %s exists, and has correct URL (%s)', self.repo_name.title(), display_path(dest), url, ) if not self.is_commit_id_equal(dest, rev_options.rev): logger.info( 'Updating %s %s%s', display_path(dest), self.repo_name, rev_display, ) self.update(dest, url, rev_options) else: logger.info('Skipping because already up-to-date.') return logger.warning( '%s %s in %s exists with URL %s', self.name, self.repo_name, display_path(dest), existing_url, ) prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', ('s', 'i', 'w', 'b')) else: logger.warning( 'Directory %s already exists, and is not a %s %s.', dest, self.name, self.repo_name, ) # https://github.com/python/mypy/issues/1174 prompt = ('(i)gnore, (w)ipe, (b)ackup ', # type: ignore ('i', 'w', 'b')) logger.warning( 'The plan is to install the %s repository %s', self.name, url, ) response = ask_path_exists('What to do? %s' % prompt[0], prompt[1]) if response == 'a': sys.exit(-1) if response == 'w': logger.warning('Deleting %s', display_path(dest)) rmtree(dest) self.fetch_new(dest, url, rev_options) return if response == 'b': dest_dir = backup_dir(dest) logger.warning( 'Backing up %s to %s', display_path(dest), dest_dir, ) shutil.move(dest, dest_dir) self.fetch_new(dest, url, rev_options) return # Do nothing if the response is "i". if response == 's': logger.info( 'Switching %s %s to %s%s', self.repo_name, display_path(dest), url, rev_display, ) self.switch(dest, url, rev_options)
[ "def", "obtain", "(", "self", ",", "dest", ")", ":", "# type: (str) -> None", "url", ",", "rev_options", "=", "self", ".", "get_url_rev_options", "(", "self", ".", "url", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dest", ")", ":", "self"...
https://github.com/alanhamlett/pip-update-requirements/blob/ce875601ef278c8ce00ad586434a978731525561/pur/packages/pip/_internal/vcs/__init__.py#L345-L436
xybu/onedrived-dev
7189214d311a1f863c68c7fa808ce41d96dae958
onedrived/od_tasks/merge_dir.py
python
MergeDirectoryTask._handle_local_file
(self, item_name, item_record, item_stat, item_local_abspath)
:param str item_name: :param onedrived.od_repo.ItemRecord | None item_record: :param posix.stat_result | None item_stat: :param str item_local_abspath:
:param str item_name: :param onedrived.od_repo.ItemRecord | None item_record: :param posix.stat_result | None item_stat: :param str item_local_abspath:
[ ":", "param", "str", "item_name", ":", ":", "param", "onedrived", ".", "od_repo", ".", "ItemRecord", "|", "None", "item_record", ":", ":", "param", "posix", ".", "stat_result", "|", "None", "item_stat", ":", ":", "param", "str", "item_local_abspath", ":" ]
def _handle_local_file(self, item_name, item_record, item_stat, item_local_abspath): """ :param str item_name: :param onedrived.od_repo.ItemRecord | None item_record: :param posix.stat_result | None item_stat: :param str item_local_abspath: """ if self.repo.path_filter.should_ignore(self.rel_path + '/' + item_name, False): logging.debug('Ignored local file "%s/%s".', self.rel_path, item_name) return if item_stat is None: logging.info('Local-only file "%s" existed when scanning but is now gone. Skip it.', item_local_abspath) if item_record is not None: self.repo.delete_item(item_record.item_name, item_record.parent_path, False) if self.assume_remote_unchanged: self.task_pool.add_task(delete_item.DeleteRemoteItemTask( repo=self.repo, task_pool=self.task_pool, parent_relpath=self.rel_path, item_name=item_name, item_id=item_record.item_id, is_folder=False)) return if item_record is not None and item_record.type == ItemRecordType.FILE: record_ts = datetime_to_timestamp(item_record.modified_time) equal_ts = diff_timestamps(item_stat.st_mtime, record_ts) == 0 if item_stat.st_size == item_record.size_local and \ (equal_ts or item_record.sha1_hash and item_record.sha1_hash == sha1_value(item_local_abspath)): # Local file matches record. if self.assume_remote_unchanged: if not equal_ts: fix_owner_and_timestamp(item_local_abspath, self.repo.context.user_uid, record_ts) else: logging.debug('Local file "%s" used to exist remotely but not found. Delete it.', item_local_abspath) send2trash(item_local_abspath) self.repo.delete_item(item_record.item_name, item_record.parent_path, False) return logging.debug('Local file "%s" is different from when it was last synced. Upload it.', item_local_abspath) elif item_record is not None: # Record is a dir but local entry is a file. if self.assume_remote_unchanged: logging.info('Remote item for local file "%s" is a directory that has been deleted locally. 
' 'Delete the remote item and upload the file.', item_local_abspath) if not delete_item.DeleteRemoteItemTask( repo=self.repo, task_pool=self.task_pool, parent_relpath=self.rel_path, item_name=item_name, item_id=item_record.item_id, is_folder=True).handle(): logging.error('Failed to delete outdated remote directory "%s/%s" of Drive %s.', self.rel_path, item_name, self.repo.drive.id) # Keep the record so that the branch can be revisited next time. return logging.debug('Local file "%s" is new to OneDrive. Upload it.', item_local_abspath) self.task_pool.add_task(upload_file.UploadFileTask( self.repo, self.task_pool, self.item_request, self.rel_path, item_name))
[ "def", "_handle_local_file", "(", "self", ",", "item_name", ",", "item_record", ",", "item_stat", ",", "item_local_abspath", ")", ":", "if", "self", ".", "repo", ".", "path_filter", ".", "should_ignore", "(", "self", ".", "rel_path", "+", "'/'", "+", "item_n...
https://github.com/xybu/onedrived-dev/blob/7189214d311a1f863c68c7fa808ce41d96dae958/onedrived/od_tasks/merge_dir.py#L469-L521
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
ansible/roles/lib_oa_openshift/library/oc_secret.py
python
OpenShiftCLI._create
(self, fname)
return self.openshift_cmd(['create', '-f', fname])
call oc create on a filename
call oc create on a filename
[ "call", "oc", "create", "on", "a", "filename" ]
def _create(self, fname): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname])
[ "def", "_create", "(", "self", ",", "fname", ")", ":", "return", "self", ".", "openshift_cmd", "(", "[", "'create'", ",", "'-f'", ",", "fname", "]", ")" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/ansible/roles/lib_oa_openshift/library/oc_secret.py#L1001-L1003
nteract/papermill
6a942a1b930ea051520e45abf6d55fe868a8614f
papermill/parameterize.py
python
add_builtin_parameters
(parameters)
return with_builtin_parameters
Add built-in parameters to a dictionary of parameters Parameters ---------- parameters : dict Dictionary of parameters provided by the user
Add built-in parameters to a dictionary of parameters
[ "Add", "built", "-", "in", "parameters", "to", "a", "dictionary", "of", "parameters" ]
def add_builtin_parameters(parameters): """Add built-in parameters to a dictionary of parameters Parameters ---------- parameters : dict Dictionary of parameters provided by the user """ with_builtin_parameters = { "pm": { "run_uuid": str(uuid4()), "current_datetime_local": datetime.now(), "current_datetime_utc": datetime.utcnow(), } } if parameters is not None: with_builtin_parameters.update(parameters) return with_builtin_parameters
[ "def", "add_builtin_parameters", "(", "parameters", ")", ":", "with_builtin_parameters", "=", "{", "\"pm\"", ":", "{", "\"run_uuid\"", ":", "str", "(", "uuid4", "(", ")", ")", ",", "\"current_datetime_local\"", ":", "datetime", ".", "now", "(", ")", ",", "\"...
https://github.com/nteract/papermill/blob/6a942a1b930ea051520e45abf6d55fe868a8614f/papermill/parameterize.py#L14-L33
Mendeley/mrec
d299e3b9490703843b041e6585643b7e42e229f0
mrec/sparse.py
python
loadz
(file)
return coo_matrix((y['data'],(y['row'],y['col'])),shape=y['shape'])
Load a sparse matrix saved to file with savez. Parameters ---------- file : str The open file or filepath to read from. Returns ------- mat : scipy.sparse.coo_matrix The sparse matrix.
Load a sparse matrix saved to file with savez.
[ "Load", "a", "sparse", "matrix", "saved", "to", "file", "with", "savez", "." ]
def loadz(file): """ Load a sparse matrix saved to file with savez. Parameters ---------- file : str The open file or filepath to read from. Returns ------- mat : scipy.sparse.coo_matrix The sparse matrix. """ y = np.load(file) return coo_matrix((y['data'],(y['row'],y['col'])),shape=y['shape'])
[ "def", "loadz", "(", "file", ")", ":", "y", "=", "np", ".", "load", "(", "file", ")", "return", "coo_matrix", "(", "(", "y", "[", "'data'", "]", ",", "(", "y", "[", "'row'", "]", ",", "y", "[", "'col'", "]", ")", ")", ",", "shape", "=", "y"...
https://github.com/Mendeley/mrec/blob/d299e3b9490703843b041e6585643b7e42e229f0/mrec/sparse.py#L63-L78
pyparallel/pyparallel
11e8c6072d48c8f13641925d17b147bf36ee0ba3
Lib/socketserver.py
python
BaseServer.verify_request
(self, request, client_address)
return True
Verify the request. May be overridden. Return True if we should proceed with this request.
Verify the request. May be overridden.
[ "Verify", "the", "request", ".", "May", "be", "overridden", "." ]
def verify_request(self, request, client_address): """Verify the request. May be overridden. Return True if we should proceed with this request. """ return True
[ "def", "verify_request", "(", "self", ",", "request", ",", "client_address", ")", ":", "return", "True" ]
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/socketserver.py#L318-L324
quantopian/qdb
c25018d2f0979589a38a07667478cb6022d57ed9
qdb/comm.py
python
TerminalCommandManager.do_unwatch
(self, arg, tracer)
return self.next_command.tailcall(tracer)
unw(atch) EXPR Removes an expression from the watchlist if it is already being watched, otherwise does nothing.
unw(atch) EXPR Removes an expression from the watchlist if it is already being watched, otherwise does nothing.
[ "unw", "(", "atch", ")", "EXPR", "Removes", "an", "expression", "from", "the", "watchlist", "if", "it", "is", "already", "being", "watched", "otherwise", "does", "nothing", "." ]
def do_unwatch(self, arg, tracer): """ unw(atch) EXPR Removes an expression from the watchlist if it is already being watched, otherwise does nothing. """ if not arg: return self.missing_argument('unw(atch)') tracer.watchlist.pop(arg, None) return self.next_command.tailcall(tracer)
[ "def", "do_unwatch", "(", "self", ",", "arg", ",", "tracer", ")", ":", "if", "not", "arg", ":", "return", "self", ".", "missing_argument", "(", "'unw(atch)'", ")", "tracer", ".", "watchlist", ".", "pop", "(", "arg", ",", "None", ")", "return", "self", ...
https://github.com/quantopian/qdb/blob/c25018d2f0979589a38a07667478cb6022d57ed9/qdb/comm.py#L968-L977
DXsmiley/mathbot
9c0719f430470e522101603f64aeda0c69b5fffe
mathbot/calculator/interpereter.py
python
Interpereter.push
(self, item)
Push an item to the stop of the stack
Push an item to the stop of the stack
[ "Push", "an", "item", "to", "the", "stop", "of", "the", "stack" ]
def push(self, item): '''Push an item to the stop of the stack''' self.stack.append(item)
[ "def", "push", "(", "self", ",", "item", ")", ":", "self", ".", "stack", ".", "append", "(", "item", ")" ]
https://github.com/DXsmiley/mathbot/blob/9c0719f430470e522101603f64aeda0c69b5fffe/mathbot/calculator/interpereter.py#L306-L308
UniShared/videonotes
803cdd97b90823fb17f50dd55999aa7d1fec6c3a
lib/oauth2client/multistore_file.py
python
_MultiStore._write
(self)
Write the cached data back out. The multistore must be locked.
Write the cached data back out.
[ "Write", "the", "cached", "data", "back", "out", "." ]
def _write(self): """Write the cached data back out. The multistore must be locked. """ raw_data = {'file_version': 1} raw_creds = [] raw_data['data'] = raw_creds for (cred_key, cred) in self._data.items(): raw_key = dict(cred_key) raw_cred = simplejson.loads(cred.to_json()) raw_creds.append({'key': raw_key, 'credential': raw_cred}) self._locked_json_write(raw_data)
[ "def", "_write", "(", "self", ")", ":", "raw_data", "=", "{", "'file_version'", ":", "1", "}", "raw_creds", "=", "[", "]", "raw_data", "[", "'data'", "]", "=", "raw_creds", "for", "(", "cred_key", ",", "cred", ")", "in", "self", ".", "_data", ".", ...
https://github.com/UniShared/videonotes/blob/803cdd97b90823fb17f50dd55999aa7d1fec6c3a/lib/oauth2client/multistore_file.py#L345-L357
bububa/MongoHub
7f761c424741d88bb234426b74b4092c40fb4987
Resources/pymongo/cursor.py
python
Cursor.skip
(self, skip)
return self
Skips the first `skip` results of this cursor. Raises TypeError if skip is not an instance of int. Raises InvalidOperation if this cursor has already been used. The last `skip` applied to this cursor takes precedence. :Parameters: - `skip`: the number of results to skip
Skips the first `skip` results of this cursor.
[ "Skips", "the", "first", "skip", "results", "of", "this", "cursor", "." ]
def skip(self, skip): """Skips the first `skip` results of this cursor. Raises TypeError if skip is not an instance of int. Raises InvalidOperation if this cursor has already been used. The last `skip` applied to this cursor takes precedence. :Parameters: - `skip`: the number of results to skip """ if not isinstance(skip, (types.IntType, types.LongType)): raise TypeError("skip must be an int") self.__check_okay_to_chain() self.__skip = skip return self
[ "def", "skip", "(", "self", ",", "skip", ")", ":", "if", "not", "isinstance", "(", "skip", ",", "(", "types", ".", "IntType", ",", "types", ".", "LongType", ")", ")", ":", "raise", "TypeError", "(", "\"skip must be an int\"", ")", "self", ".", "__check...
https://github.com/bububa/MongoHub/blob/7f761c424741d88bb234426b74b4092c40fb4987/Resources/pymongo/cursor.py#L171-L186
privacyidea/privacyidea
9490c12ddbf77a34ac935b082d09eb583dfafa2c
privacyidea/lib/decorators.py
python
check_token_locked
(func)
return token_locked_wrapper
Decorator to check if a token is locked or not. The decorator is to be used in token class methods. It can be used to avoid performing an action on a locked token. If the token is locked, a TokenAdminError is raised.
Decorator to check if a token is locked or not. The decorator is to be used in token class methods. It can be used to avoid performing an action on a locked token.
[ "Decorator", "to", "check", "if", "a", "token", "is", "locked", "or", "not", ".", "The", "decorator", "is", "to", "be", "used", "in", "token", "class", "methods", ".", "It", "can", "be", "used", "to", "avoid", "performing", "an", "action", "on", "a", ...
def check_token_locked(func): """ Decorator to check if a token is locked or not. The decorator is to be used in token class methods. It can be used to avoid performing an action on a locked token. If the token is locked, a TokenAdminError is raised. """ @functools.wraps(func) def token_locked_wrapper(*args, **kwds): # The token object token = args[0] if token.is_locked(): raise TokenAdminError(_("This action is not possible, since the " "token is locked"), id=1007) f_result = func(*args, **kwds) return f_result return token_locked_wrapper
[ "def", "check_token_locked", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "token_locked_wrapper", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "# The token object", "token", "=", "args", "[", "0", "]", "if", "to...
https://github.com/privacyidea/privacyidea/blob/9490c12ddbf77a34ac935b082d09eb583dfafa2c/privacyidea/lib/decorators.py#L30-L48
man-group/mdf
4b2c78084467791ad883c0b4c53832ad70fc96ef
mdf/regression/differs.py
python
DataFrameDiffer.__write_xls
(self, rhs_differ, different_nodes, lhs_data, rhs_data, filename, lhs_ctx, rhs_ctx)
write the diffs to a spreadsheet
write the diffs to a spreadsheet
[ "write", "the", "diffs", "to", "a", "spreadsheet" ]
def __write_xls(self, rhs_differ, different_nodes, lhs_data, rhs_data, filename, lhs_ctx, rhs_ctx): """write the diffs to a spreadsheet""" wb = xlwt.Workbook() date_style = xlwt.easyxf(num_format_str='YYYY-MM-DD') nsheets = 0 for node in different_nodes: lhs_columns = sorted(self.get_columns(node, lhs_ctx)) lhs_df = lhs_data[lhs_columns] rhs_columns = sorted(rhs_differ.get_columns(node, rhs_ctx)) rhs_df = rhs_data[rhs_columns] if len(lhs_df.columns) > 255 or len(rhs_df.columns) > 255: # xlwt has a limit of 256 columns # just dump data into two separate CSV if its too big for a nice XLS report fname = "%s__%s" % (node.short_name, os.path.splitext(os.path.basename(filename))[0]) csv_fpath = os.path.join(os.path.dirname(filename), fname) _log.info("Node %s has mare than 255 columns, can't use xlwt, writing CSV to " "%s[_LHS|_RHS].csv" % (node.name, csv_fpath)) lhs_df.to_csv(csv_fpath+"_LHS.csv") rhs_df.to_csv(csv_fpath+"_RHS.csv") else: _log.info("Writing Excel sheet for %s" % node.name) nsheets += 1 diffs_ws = wb.add_sheet(("%s_DIFFS" % node.short_name)[-31:]) lhs_ws = wb.add_sheet(("%s_LHS" % node.short_name)[-31:]) rhs_ws = wb.add_sheet(("%s_RHS" % node.short_name)[-31:]) for ws, df in ((lhs_ws, lhs_df), (rhs_ws, rhs_df)): for row, value in enumerate(df.index): ws.write(row + 1, 0, value, date_style) for col_i, col_name in enumerate(df.columns): ws.write(0, col_i + 1, str(col_name)) col = df[col_name] for row_i, value in enumerate(col): if np.isnan(value): ws.row(row_i + 1).set_cell_error(col_i + 1, "#NUM!") else: ws.write(row_i + 1, col_i + 1, value) max_cols = max(len(lhs_columns), len(rhs_columns)) max_rows = max(len(lhs_df.index), len(rhs_df.index)) tolerance, is_abs = self.get_tolerance(node) for row, value in enumerate(lhs_df.index): diffs_ws.write(row + 1, 0, xlwt.Formula("IF(EXACT(%(l)s,%(r)s),%(l)s,\"ERROR\")" % { "l" : _to_range(row + 1, 0, lhs_ws), "r" : _to_range(row + 1, 0, rhs_ws)}), date_style) for col_i, col_name in enumerate(lhs_df.columns): 
diffs_ws.write(0, col_i + 1, xlwt.Formula("IF(EXACT(%(l)s,%(r)s),%(l)s,\"ERROR\")" % { "l" : _to_range(0, col_i + 1, lhs_ws), "r" : _to_range(0, col_i + 1, rhs_ws)})) for col_i in xrange(1, max_cols + 1): for row_i in xrange(1, max_rows + 1): if is_abs: diffs_ws.write(row_i, col_i, xlwt.Formula("ABS(%s-%s)" % (_to_range(row_i, col_i, lhs_ws), _to_range(row_i, col_i, rhs_ws)))) else: diffs_ws.write(row_i, col_i, xlwt.Formula("ABS((%s/%s)-1)" % (_to_range(row_i, col_i, lhs_ws), _to_range(row_i, col_i, rhs_ws)))) if nsheets: wb.save(filename)
[ "def", "__write_xls", "(", "self", ",", "rhs_differ", ",", "different_nodes", ",", "lhs_data", ",", "rhs_data", ",", "filename", ",", "lhs_ctx", ",", "rhs_ctx", ")", ":", "wb", "=", "xlwt", ".", "Workbook", "(", ")", "date_style", "=", "xlwt", ".", "easy...
https://github.com/man-group/mdf/blob/4b2c78084467791ad883c0b4c53832ad70fc96ef/mdf/regression/differs.py#L364-L437
etetoolkit/ete
2b207357dc2a40ccad7bfd8f54964472c72e4726
ete3/phyloxml/_phyloxml.py
python
Taxonomy.export
(self, outfile, level, namespace_='phy:', name_='Taxonomy', namespacedef_='')
[]
def export(self, outfile, level, namespace_='phy:', name_='Taxonomy', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = [] self.exportAttributes(outfile, level, already_processed, namespace_, name_='Taxonomy') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write('/>\n')
[ "def", "export", "(", "self", ",", "outfile", ",", "level", ",", "namespace_", "=", "'phy:'", ",", "name_", "=", "'Taxonomy'", ",", "namespacedef_", "=", "''", ")", ":", "showIndent", "(", "outfile", ",", "level", ")", "outfile", ".", "write", "(", "'<...
https://github.com/etetoolkit/ete/blob/2b207357dc2a40ccad7bfd8f54964472c72e4726/ete3/phyloxml/_phyloxml.py#L1256-L1267
maurosoria/dirsearch
b83e68c8fdf360ab06be670d7b92b263262ee5b1
thirdparty/pyparsing/diagram/__init__.py
python
ConverterState.extract_into_diagram
(self, el_id: int)
Used when we encounter the same token twice in the same tree. When this happens, we replace all instances of that token with a terminal, and create a new subdiagram for the token
Used when we encounter the same token twice in the same tree. When this happens, we replace all instances of that token with a terminal, and create a new subdiagram for the token
[ "Used", "when", "we", "encounter", "the", "same", "token", "twice", "in", "the", "same", "tree", ".", "When", "this", "happens", "we", "replace", "all", "instances", "of", "that", "token", "with", "a", "terminal", "and", "create", "a", "new", "subdiagram",...
def extract_into_diagram(self, el_id: int): """ Used when we encounter the same token twice in the same tree. When this happens, we replace all instances of that token with a terminal, and create a new subdiagram for the token """ position = self.first[el_id] # Replace the original definition of this element with a regular block if position.parent: ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name) if "item" in position.parent.kwargs: position.parent.kwargs["item"] = ret else: position.parent.kwargs["items"][position.parent_index] = ret # If the element we're extracting is a group, skip to its content but keep the title if position.converted.func == railroad.Group: content = position.converted.kwargs["item"] else: content = position.converted self.diagrams[el_id] = EditablePartial.from_call( NamedDiagram, name=position.name, diagram=EditablePartial.from_call( railroad.Diagram, content, **self.diagram_kwargs ), index=position.number, ) del self.first[el_id]
[ "def", "extract_into_diagram", "(", "self", ",", "el_id", ":", "int", ")", ":", "position", "=", "self", ".", "first", "[", "el_id", "]", "# Replace the original definition of this element with a regular block", "if", "position", ".", "parent", ":", "ret", "=", "E...
https://github.com/maurosoria/dirsearch/blob/b83e68c8fdf360ab06be670d7b92b263262ee5b1/thirdparty/pyparsing/diagram/__init__.py#L229-L258
readthedocs/sphinx-autoapi
71c6ceebe0b02c34027fcd3d56c8641e9b94c7af
autoapi/mappers/go.py
python
GoMethod.__init__
(self, obj, **kwargs)
[]
def __init__(self, obj, **kwargs): super(GoMethod, self).__init__(obj, **kwargs) self.receiver = obj.get("recv")
[ "def", "__init__", "(", "self", ",", "obj", ",", "*", "*", "kwargs", ")", ":", "super", "(", "GoMethod", ",", "self", ")", ".", "__init__", "(", "obj", ",", "*", "*", "kwargs", ")", "self", ".", "receiver", "=", "obj", ".", "get", "(", "\"recv\""...
https://github.com/readthedocs/sphinx-autoapi/blob/71c6ceebe0b02c34027fcd3d56c8641e9b94c7af/autoapi/mappers/go.py#L188-L190
pytorch/botorch
f85fb8ff36d21e21bdb881d107982fb6d5d78704
botorch/acquisition/multi_objective/monte_carlo.py
python
qNoisyExpectedHypervolumeImprovement._cache_root_decomposition
( self, posterior: GPyTorchPosterior, )
r"""Cache the root decomposition of the covariance of `f(X_baseline)`. Args: posterior: The posterior over f(X_baseline).
r"""Cache the root decomposition of the covariance of `f(X_baseline)`.
[ "r", "Cache", "the", "root", "decomposition", "of", "the", "covariance", "of", "f", "(", "X_baseline", ")", "." ]
def _cache_root_decomposition( self, posterior: GPyTorchPosterior, ) -> None: r"""Cache the root decomposition of the covariance of `f(X_baseline)`. Args: posterior: The posterior over f(X_baseline). """ lazy_covar = extract_batch_covar(posterior.mvn) with gpt_settings.fast_computations.covar_root_decomposition(False): lazy_covar_root = lazy_covar.root_decomposition() baseline_L = lazy_covar_root.root.evaluate() self.register_buffer("_baseline_L", baseline_L)
[ "def", "_cache_root_decomposition", "(", "self", ",", "posterior", ":", "GPyTorchPosterior", ",", ")", "->", "None", ":", "lazy_covar", "=", "extract_batch_covar", "(", "posterior", ".", "mvn", ")", "with", "gpt_settings", ".", "fast_computations", ".", "covar_roo...
https://github.com/pytorch/botorch/blob/f85fb8ff36d21e21bdb881d107982fb6d5d78704/botorch/acquisition/multi_objective/monte_carlo.py#L509-L522
mozillazg/pypy
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
py/_io/saferepr.py
python
saferepr
(obj, maxsize=240)
return srepr.repr(obj)
return a size-limited safe repr-string for the given object. Failing __repr__ functions of user instances will be represented with a short exception info and 'saferepr' generally takes care to never raise exceptions itself. This function is a wrapper around the Repr/reprlib functionality of the standard 2.6 lib.
return a size-limited safe repr-string for the given object. Failing __repr__ functions of user instances will be represented with a short exception info and 'saferepr' generally takes care to never raise exceptions itself. This function is a wrapper around the Repr/reprlib functionality of the standard 2.6 lib.
[ "return", "a", "size", "-", "limited", "safe", "repr", "-", "string", "for", "the", "given", "object", ".", "Failing", "__repr__", "functions", "of", "user", "instances", "will", "be", "represented", "with", "a", "short", "exception", "info", "and", "saferep...
def saferepr(obj, maxsize=240): """ return a size-limited safe repr-string for the given object. Failing __repr__ functions of user instances will be represented with a short exception info and 'saferepr' generally takes care to never raise exceptions itself. This function is a wrapper around the Repr/reprlib functionality of the standard 2.6 lib. """ # review exception handling srepr = SafeRepr() srepr.maxstring = maxsize srepr.maxsize = maxsize srepr.maxother = 160 return srepr.repr(obj)
[ "def", "saferepr", "(", "obj", ",", "maxsize", "=", "240", ")", ":", "# review exception handling", "srepr", "=", "SafeRepr", "(", ")", "srepr", ".", "maxstring", "=", "maxsize", "srepr", ".", "maxsize", "=", "maxsize", "srepr", ".", "maxother", "=", "160"...
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/py/_io/saferepr.py#L64-L76
Arachnid/bloggart
ba2b60417102fe14a77b1bcd809b9b801d3a96e2
lib/docutils/writers/latex2e/__init__.py
python
LaTeXTranslator.visit_term
(self, node)
definition list term
definition list term
[ "definition", "list", "term" ]
def visit_term(self, node): """definition list term""" # Commands with optional args inside an optional arg must be put # in a group, e.g. ``\item[{\hyperref[label]{text}}]``. self.out.append('\\item[{')
[ "def", "visit_term", "(", "self", ",", "node", ")", ":", "# Commands with optional args inside an optional arg must be put", "# in a group, e.g. ``\\item[{\\hyperref[label]{text}}]``.", "self", ".", "out", ".", "append", "(", "'\\\\item[{'", ")" ]
https://github.com/Arachnid/bloggart/blob/ba2b60417102fe14a77b1bcd809b9b801d3a96e2/lib/docutils/writers/latex2e/__init__.py#L2467-L2471
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/wagtail/wagtailsearch/backends/base.py
python
BaseSearchResults.results
(self)
return self._results_cache
[]
def results(self): if self._results_cache is None: self._results_cache = self._do_search() return self._results_cache
[ "def", "results", "(", "self", ")", ":", "if", "self", ".", "_results_cache", "is", "None", ":", "self", ".", "_results_cache", "=", "self", ".", "_do_search", "(", ")", "return", "self", ".", "_results_cache" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/wagtail/wagtailsearch/backends/base.py#L136-L139
saltstack/salt
fae5bc757ad0f1716483ce7ae180b451545c2058
salt/modules/freebsdservice.py
python
get_disabled
(jail=None)
return sorted(set(all_) - set(en_))
Return what services are available but not enabled to start at boot .. versionchanged:: 2016.3.4 Support for jail (representing jid or jail name) keyword argument in kwargs CLI Example: .. code-block:: bash salt '*' service.get_disabled
Return what services are available but not enabled to start at boot
[ "Return", "what", "services", "are", "available", "but", "not", "enabled", "to", "start", "at", "boot" ]
def get_disabled(jail=None): """ Return what services are available but not enabled to start at boot .. versionchanged:: 2016.3.4 Support for jail (representing jid or jail name) keyword argument in kwargs CLI Example: .. code-block:: bash salt '*' service.get_disabled """ en_ = get_enabled(jail) all_ = get_all(jail) return sorted(set(all_) - set(en_))
[ "def", "get_disabled", "(", "jail", "=", "None", ")", ":", "en_", "=", "get_enabled", "(", "jail", ")", "all_", "=", "get_all", "(", "jail", ")", "return", "sorted", "(", "set", "(", "all_", ")", "-", "set", "(", "en_", ")", ")" ]
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/freebsdservice.py#L155-L171
PokemonGoF/PokemonGo-Bot-Desktop
4bfa94f0183406c6a86f93645eff7abd3ad4ced8
build/pywin/Lib/nntplib.py
python
NNTP.next
(self)
return self.statcmd('NEXT')
Process a NEXT command. No arguments. Return as for STAT.
Process a NEXT command. No arguments. Return as for STAT.
[ "Process", "a", "NEXT", "command", ".", "No", "arguments", ".", "Return", "as", "for", "STAT", "." ]
def next(self): """Process a NEXT command. No arguments. Return as for STAT.""" return self.statcmd('NEXT')
[ "def", "next", "(", "self", ")", ":", "return", "self", ".", "statcmd", "(", "'NEXT'", ")" ]
https://github.com/PokemonGoF/PokemonGo-Bot-Desktop/blob/4bfa94f0183406c6a86f93645eff7abd3ad4ced8/build/pywin/Lib/nntplib.py#L406-L408
JaniceWuo/MovieRecommend
4c86db64ca45598917d304f535413df3bc9fea65
movierecommend/venv1/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/requests/packages/urllib3/contrib/pyopenssl.py
python
WrappedSocket.fileno
(self)
return self.socket.fileno()
[]
def fileno(self): return self.socket.fileno()
[ "def", "fileno", "(", "self", ")", ":", "return", "self", ".", "socket", ".", "fileno", "(", ")" ]
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/requests/packages/urllib3/contrib/pyopenssl.py#L182-L183
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/algebras/lie_algebras/rank_two_heisenberg_virasoro.py
python
RankTwoHeisenbergVirasoro.t
(self, a, b)
return self.monomial( ('t', self._v(a,b)) )
r""" Return the basis element `t^{(a,b)}` of ``self``. EXAMPLES:: sage: L = lie_algebras.RankTwoHeisenbergVirasoro(QQ) sage: L.t(1,-2) t(1, -2)
r""" Return the basis element `t^{(a,b)}` of ``self``.
[ "r", "Return", "the", "basis", "element", "t^", "{", "(", "a", "b", ")", "}", "of", "self", "." ]
def t(self, a, b): r""" Return the basis element `t^{(a,b)}` of ``self``. EXAMPLES:: sage: L = lie_algebras.RankTwoHeisenbergVirasoro(QQ) sage: L.t(1,-2) t(1, -2) """ if a == b == 0: raise ValueError("no t(0, 0) element") return self.monomial( ('t', self._v(a,b)) )
[ "def", "t", "(", "self", ",", "a", ",", "b", ")", ":", "if", "a", "==", "b", "==", "0", ":", "raise", "ValueError", "(", "\"no t(0, 0) element\"", ")", "return", "self", ".", "monomial", "(", "(", "'t'", ",", "self", ".", "_v", "(", "a", ",", "...
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/algebras/lie_algebras/rank_two_heisenberg_virasoro.py#L216-L228
mailgun/talon
a8c7e6a97281ba506f741f7d8a81f886ffdb44b2
talon/signature/learning/helpers.py
python
flatten_list
(list_to_flatten)
return [e for sublist in list_to_flatten for e in sublist]
Simple list comprehension to flatten list. >>> flatten_list([[1, 2], [3, 4, 5]]) [1, 2, 3, 4, 5] >>> flatten_list([[1], [[2]]]) [1, [2]] >>> flatten_list([1, [2]]) Traceback (most recent call last): ... TypeError: 'int' object is not iterable
Simple list comprehension to flatten list.
[ "Simple", "list", "comprehension", "to", "flatten", "list", "." ]
def flatten_list(list_to_flatten): """Simple list comprehension to flatten list. >>> flatten_list([[1, 2], [3, 4, 5]]) [1, 2, 3, 4, 5] >>> flatten_list([[1], [[2]]]) [1, [2]] >>> flatten_list([1, [2]]) Traceback (most recent call last): ... TypeError: 'int' object is not iterable """ return [e for sublist in list_to_flatten for e in sublist]
[ "def", "flatten_list", "(", "list_to_flatten", ")", ":", "return", "[", "e", "for", "sublist", "in", "list_to_flatten", "for", "e", "in", "sublist", "]" ]
https://github.com/mailgun/talon/blob/a8c7e6a97281ba506f741f7d8a81f886ffdb44b2/talon/signature/learning/helpers.py#L89-L101
spack/spack
675210bd8bd1c5d32ad1cc83d898fb43b569ed74
lib/spack/spack/stage.py
python
ResourceStage._add_to_root_stage
(self)
Move the extracted resource to the root stage (according to placement).
Move the extracted resource to the root stage (according to placement).
[ "Move", "the", "extracted", "resource", "to", "the", "root", "stage", "(", "according", "to", "placement", ")", "." ]
def _add_to_root_stage(self): """ Move the extracted resource to the root stage (according to placement). """ root_stage = self.root_stage resource = self.resource if resource.placement: placement = resource.placement elif self.srcdir: placement = self.srcdir else: placement = self.source_path if not isinstance(placement, dict): placement = {'': placement} target_path = os.path.join( root_stage.source_path, resource.destination) try: os.makedirs(target_path) except OSError as err: tty.debug(err) if err.errno == errno.EEXIST and os.path.isdir(target_path): pass else: raise for key, value in iteritems(placement): destination_path = os.path.join(target_path, value) source_path = os.path.join(self.source_path, key) if not os.path.exists(destination_path): tty.info('Moving resource stage\n\tsource : ' '{stage}\n\tdestination : {destination}'.format( stage=source_path, destination=destination_path )) src = os.path.realpath(source_path) if os.path.isdir(src): install_tree(src, destination_path) else: install(src, destination_path)
[ "def", "_add_to_root_stage", "(", "self", ")", ":", "root_stage", "=", "self", ".", "root_stage", "resource", "=", "self", ".", "resource", "if", "resource", ".", "placement", ":", "placement", "=", "resource", ".", "placement", "elif", "self", ".", "srcdir"...
https://github.com/spack/spack/blob/675210bd8bd1c5d32ad1cc83d898fb43b569ed74/lib/spack/spack/stage.py#L673-L717
owid/covid-19-data
936aeae6cfbdc0163939ed7bd8ecdbb2582c0a92
scripts/src/cowidev/vax/incremental/singapore.py
python
Singapore._parse_text_national
(self, soup)
return national_doses, national_boosters, national_people_vaccinated
[]
def _parse_text_national(self, soup): national_program = ( r"We have administered a total of ([\d,]+) doses of COVID-19 vaccines under the.*" r"In total, ([\d,]+) individuals have received at least one dose of vaccine under the national vaccination" r" programme,.* ([\d,]+) (?:individuals )?have (?:received|taken) their booster shots" ) data = re.search(national_program, soup.text).groups() national_doses = clean_count(data[0]) national_people_vaccinated = clean_count(data[1]) national_boosters = clean_count(data[2]) return national_doses, national_boosters, national_people_vaccinated
[ "def", "_parse_text_national", "(", "self", ",", "soup", ")", ":", "national_program", "=", "(", "r\"We have administered a total of ([\\d,]+) doses of COVID-19 vaccines under the.*\"", "r\"In total, ([\\d,]+) individuals have received at least one dose of vaccine under the national vaccinat...
https://github.com/owid/covid-19-data/blob/936aeae6cfbdc0163939ed7bd8ecdbb2582c0a92/scripts/src/cowidev/vax/incremental/singapore.py#L62-L72
mesalock-linux/mesapy
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
pypy/module/sys/initpath.py
python
find_stdlib
(state, executable)
Find and compute the stdlib path, starting from the directory where ``executable`` is and going one level up until we find it. Return a tuple (path, prefix), where ``prefix`` is the root directory which contains the stdlib. If it cannot be found, return (None, None).
Find and compute the stdlib path, starting from the directory where ``executable`` is and going one level up until we find it. Return a tuple (path, prefix), where ``prefix`` is the root directory which contains the stdlib. If it cannot be found, return (None, None).
[ "Find", "and", "compute", "the", "stdlib", "path", "starting", "from", "the", "directory", "where", "executable", "is", "and", "going", "one", "level", "up", "until", "we", "find", "it", ".", "Return", "a", "tuple", "(", "path", "prefix", ")", "where", "...
def find_stdlib(state, executable): """ Find and compute the stdlib path, starting from the directory where ``executable`` is and going one level up until we find it. Return a tuple (path, prefix), where ``prefix`` is the root directory which contains the stdlib. If it cannot be found, return (None, None). """ search = 'pypy-c' if executable == '' else executable while True: dirname = resolvedirof(search) if dirname == search: return None, None # not found :-( newpath = compute_stdlib_path_maybe(state, dirname) if newpath is not None: return newpath, dirname search = dirname
[ "def", "find_stdlib", "(", "state", ",", "executable", ")", ":", "search", "=", "'pypy-c'", "if", "executable", "==", "''", "else", "executable", "while", "True", ":", "dirname", "=", "resolvedirof", "(", "search", ")", "if", "dirname", "==", "search", ":"...
https://github.com/mesalock-linux/mesapy/blob/ed546d59a21b36feb93e2309d5c6b75aa0ad95c9/pypy/module/sys/initpath.py#L75-L90
mpastell/Pweave
45b56ec60c5badb4e40796178397a4eae5bdddcb
pweave/pweb.py
python
Pweb.run
(self, Processor = None)
Execute code in the document
Execute code in the document
[ "Execute", "code", "in", "the", "document" ]
def run(self, Processor = None): """Execute code in the document""" if Processor is None: Processor = PwebProcessors.getprocessor(self.kernel) proc = Processor(copy.deepcopy(self.parsed), self.kernel, self.source, self.documentationmode, self.figdir, self.wd ) proc.run() self.executed = proc.getresults()
[ "def", "run", "(", "self", ",", "Processor", "=", "None", ")", ":", "if", "Processor", "is", "None", ":", "Processor", "=", "PwebProcessors", ".", "getprocessor", "(", "self", ".", "kernel", ")", "proc", "=", "Processor", "(", "copy", ".", "deepcopy", ...
https://github.com/mpastell/Pweave/blob/45b56ec60c5badb4e40796178397a4eae5bdddcb/pweave/pweb.py#L117-L130
saltstack/salt
fae5bc757ad0f1716483ce7ae180b451545c2058
salt/spm/__init__.py
python
SPMClient._get_info
(self, formula_def)
return ( "Name: {name}\n" "Version: {version}\n" "Release: {release}\n" "Install Date: {installed}\n" "Supported OSes: {os}\n" "Supported OS families: {os_family}\n" "Dependencies: {dependencies}\n" "OS Dependencies: {os_dependencies}\n" "OS Family Dependencies: {os_family_dependencies}\n" "Summary: {summary}\n" "Description:\n" "{description}".format(**formula_def) )
Get package info
Get package info
[ "Get", "package", "info" ]
def _get_info(self, formula_def): """ Get package info """ fields = ( "name", "os", "os_family", "release", "version", "dependencies", "os_dependencies", "os_family_dependencies", "summary", "description", ) for item in fields: if item not in formula_def: formula_def[item] = "None" if "installed" not in formula_def: formula_def["installed"] = "Not installed" return ( "Name: {name}\n" "Version: {version}\n" "Release: {release}\n" "Install Date: {installed}\n" "Supported OSes: {os}\n" "Supported OS families: {os_family}\n" "Dependencies: {dependencies}\n" "OS Dependencies: {os_dependencies}\n" "OS Family Dependencies: {os_family_dependencies}\n" "Summary: {summary}\n" "Description:\n" "{description}".format(**formula_def) )
[ "def", "_get_info", "(", "self", ",", "formula_def", ")", ":", "fields", "=", "(", "\"name\"", ",", "\"os\"", ",", "\"os_family\"", ",", "\"release\"", ",", "\"version\"", ",", "\"dependencies\"", ",", "\"os_dependencies\"", ",", "\"os_family_dependencies\"", ",",...
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/spm/__init__.py#L963-L999
renxingkai/BERT_Chinese_Classification
8ed7b0ae09c81d0a40f76fdebc986bf3b6ebcded
run_squad.py
python
_improve_answer_span
(doc_tokens, input_start, input_end, tokenizer, orig_answer_text)
return (input_start, input_end)
Returns tokenized answer spans that better match the annotated answer.
Returns tokenized answer spans that better match the annotated answer.
[ "Returns", "tokenized", "answer", "spans", "that", "better", "match", "the", "annotated", "answer", "." ]
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer.""" # The SQuAD annotations are character based. We first project them to # whitespace-tokenized words. But then after WordPiece tokenization, we can # often find a "better match". For example: # # Question: What year was John Smith born? # Context: The leader was John Smith (1895-1943). # Answer: 1895 # # The original whitespace-tokenized answer will be "(1895-1943).". However # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match # the exact answer, 1895. # # However, this is not always possible. Consider the following: # # Question: What country is the top exporter of electornics? # Context: The Japanese electronics industry is the lagest in the world. # Answer: Japan # # In this case, the annotator chose "Japan" as a character sub-span of # the word "Japanese". Since our WordPiece tokenizer does not split # "Japanese", we just use "Japanese" as the annotation. This is fairly rare # in SQuAD, but does happen. tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end)
[ "def", "_improve_answer_span", "(", "doc_tokens", ",", "input_start", ",", "input_end", ",", "tokenizer", ",", "orig_answer_text", ")", ":", "# The SQuAD annotations are character based. We first project them to", "# whitespace-tokenized words. But then after WordPiece tokenization, we...
https://github.com/renxingkai/BERT_Chinese_Classification/blob/8ed7b0ae09c81d0a40f76fdebc986bf3b6ebcded/run_squad.py#L476-L510
aws-samples/aws-kube-codesuite
ab4e5ce45416b83bffb947ab8d234df5437f4fca
src/kubernetes/client/models/v1_git_repo_volume_source.py
python
V1GitRepoVolumeSource.repository
(self)
return self._repository
Gets the repository of this V1GitRepoVolumeSource. Repository URL :return: The repository of this V1GitRepoVolumeSource. :rtype: str
Gets the repository of this V1GitRepoVolumeSource. Repository URL
[ "Gets", "the", "repository", "of", "this", "V1GitRepoVolumeSource", ".", "Repository", "URL" ]
def repository(self):
    """
    Gets the repository of this V1GitRepoVolumeSource.
    Repository URL

    :return: The repository of this V1GitRepoVolumeSource.
    :rtype: str
    """
    # Plain accessor over the backing attribute.
    repo_url = self._repository
    return repo_url
[ "def", "repository", "(", "self", ")", ":", "return", "self", ".", "_repository" ]
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/kubernetes/client/models/v1_git_repo_volume_source.py#L73-L81
wxWidgets/Phoenix
b2199e299a6ca6d866aa6f3d0888499136ead9d6
wx/lib/agw/ultimatelistctrl.py
python
UltimateListMainWindow.ScrollList
(self, dx, dy)
return True
Scrolls the :class:`UltimateListCtrl`. :param `dx`: if in icon, small icon or report view mode, specifies the number of pixels to scroll. If in list view mode, `dx` specifies the number of columns to scroll. :param `dy`: always specifies the number of pixels to scroll vertically.
Scrolls the :class:`UltimateListCtrl`.
[ "Scrolls", "the", ":", "class", ":", "UltimateListCtrl", "." ]
def ScrollList(self, dx, dy):
    """
    Scrolls the :class:`UltimateListCtrl`.

    :param `dx`: if in icon, small icon or report view mode, specifies the
     number of pixels to scroll. If in list view mode, `dx` specifies the
     number of columns to scroll.
    :param `dy`: always specifies the number of pixels to scroll vertically.
    """

    # NOTE(review): `dx` is accepted but never used below -- horizontal
    # scrolling appears to be unimplemented here; confirm against callers.
    if not self.InReportView():
        # TODO: this should work in all views but is not implemented now
        return False

    top, bottom = self.GetVisibleLinesRange()

    # NOTE(review): this path returns 0 while the others return True/False;
    # falsy either way, but inconsistent -- presumably intentional upstream.
    if bottom == -1:
        return 0

    self.ResetVisibleLinesRange()

    if not self.HasAGWFlag(ULC_HAS_VARIABLE_ROW_HEIGHT):
        # Fixed row height: convert pixel delta to whole-line scroll units.
        hLine = self.GetLineHeight()
        # NOTE(review): under Python 3 `dy/hLine` is true (float) division;
        # assumes Scroll accepts/truncates a float position -- confirm.
        self.Scroll(-1, top + dy/hLine)
    else:
        # Variable row height: scroll in fixed SCROLL_UNIT_Y pixel units.
        self.Scroll(-1, top + dy/SCROLL_UNIT_Y)

    if wx.Platform == "__WXMAC__":
        # see comment in MoveToItem() for why we do this
        self.ResetVisibleLinesRange()

    return True
[ "def", "ScrollList", "(", "self", ",", "dx", ",", "dy", ")", ":", "if", "not", "self", ".", "InReportView", "(", ")", ":", "# TODO: this should work in all views but is not implemented now", "return", "False", "top", ",", "bottom", "=", "self", ".", "GetVisibleL...
https://github.com/wxWidgets/Phoenix/blob/b2199e299a6ca6d866aa6f3d0888499136ead9d6/wx/lib/agw/ultimatelistctrl.py#L10753-L10784
seasonSH/WarpGAN
794e24d9c3abce08c0e95f975ce5914ccaa2e1bb
align/mtcnntf/detect_face.py
python
Network.load
(self, data_path, session, ignore_missing=False)
Load network weights. data_path: The path to the numpy-serialized network weights session: The current TensorFlow session ignore_missing: If true, serialized weights for missing layers are ignored.
Load network weights. data_path: The path to the numpy-serialized network weights session: The current TensorFlow session ignore_missing: If true, serialized weights for missing layers are ignored.
[ "Load", "network", "weights", ".", "data_path", ":", "The", "path", "to", "the", "numpy", "-", "serialized", "network", "weights", "session", ":", "The", "current", "TensorFlow", "session", "ignore_missing", ":", "If", "true", "serialized", "weights", "for", "...
def load(self, data_path, session, ignore_missing=False):
    '''Load network weights.
    data_path: The path to the numpy-serialized network weights
    session: The current TensorFlow session
    ignore_missing: If true, serialized weights for missing layers are ignored.
    '''
    weights = np.load(data_path, encoding='latin1').item() #pylint: disable=no-member

    for op_name in weights:
        # Re-enter the layer's variable scope so get_variable resolves
        # the already-created variables instead of making new ones.
        with tf.variable_scope(op_name, reuse=True):
            for param_name, value in iteritems(weights[op_name]):
                try:
                    # get_variable raises ValueError when the layer does
                    # not exist in the current graph.
                    var = tf.get_variable(param_name)
                    session.run(var.assign(value))
                except ValueError:
                    if not ignore_missing:
                        raise
[ "def", "load", "(", "self", ",", "data_path", ",", "session", ",", "ignore_missing", "=", "False", ")", ":", "data_dict", "=", "np", ".", "load", "(", "data_path", ",", "encoding", "=", "'latin1'", ")", ".", "item", "(", ")", "#pylint: disable=no-member", ...
https://github.com/seasonSH/WarpGAN/blob/794e24d9c3abce08c0e95f975ce5914ccaa2e1bb/align/mtcnntf/detect_face.py#L79-L95
jgagneastro/coffeegrindsize
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/numpy/lib/function_base.py
python
angle
(z, deg=False)
return a
Return the angle of the complex argument. Parameters ---------- z : array_like A complex number or sequence of complex numbers. deg : bool, optional Return angle in degrees if True, radians if False (default). Returns ------- angle : ndarray or scalar The counterclockwise angle from the positive real axis on the complex plane, with dtype as numpy.float64. ..versionchanged:: 1.16.0 This function works on subclasses of ndarray like `ma.array`. See Also -------- arctan2 absolute Examples -------- >>> np.angle([1.0, 1.0j, 1+1j]) # in radians array([ 0. , 1.57079633, 0.78539816]) >>> np.angle(1+1j, deg=True) # in degrees 45.0
Return the angle of the complex argument.
[ "Return", "the", "angle", "of", "the", "complex", "argument", "." ]
def angle(z, deg=False):
    """
    Return the angle of the complex argument.

    Parameters
    ----------
    z : array_like
        A complex number or sequence of complex numbers.
    deg : bool, optional
        Return angle in degrees if True, radians if False (default).

    Returns
    -------
    angle : ndarray or scalar
        The counterclockwise angle from the positive real axis on
        the complex plane, with dtype as numpy.float64.

    Examples
    --------
    >>> np.angle([1.0, 1.0j, 1+1j])               # in radians
    array([ 0.        ,  1.57079633,  0.78539816])
    >>> np.angle(1+1j, deg=True)                  # in degrees
    45.0
    """
    z = asanyarray(z)
    # Real inputs have a zero imaginary component, so the angle is just
    # arctan2(0, real) -- 0 or pi depending on sign.
    if issubclass(z.dtype.type, _nx.complexfloating):
        result = arctan2(z.imag, z.real)
    else:
        result = arctan2(0, z)
    if deg:
        result = result * (180/pi)
    return result
[ "def", "angle", "(", "z", ",", "deg", "=", "False", ")", ":", "z", "=", "asanyarray", "(", "z", ")", "if", "issubclass", "(", "z", ".", "dtype", ".", "type", ",", "_nx", ".", "complexfloating", ")", ":", "zimag", "=", "z", ".", "imag", "zreal", ...
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/numpy/lib/function_base.py#L1420-L1464
jeetsukumaran/DendroPy
29fd294bf05d890ebf6a8d576c501e471db27ca1
src/dendropy/datamodel/treemodel.py
python
Bipartition.compile_bipartition
(self, is_mutable=None)
Updates the values of the various masks specified and calculates the normalized bipartition bitmask. If a rooted bipartition, then this is set to the value of the leafset bitmask. If an unrooted bipartition, then the leafset bitmask is normalized such that the lowest-significant bit (i.e., the group to which the first taxon belongs) is set to '0'. Also makes this bipartition immutable (unless ``is_mutable`` is |False|), which facilitates it being used in dictionaries and sets. Note that this requires full population of the following fields: - self._leafset_bitmask - self._tree_leafset_bitmask
Updates the values of the various masks specified and calculates the normalized bipartition bitmask.
[ "Updates", "the", "values", "of", "the", "various", "masks", "specified", "and", "calculates", "the", "normalized", "bipartition", "bitmask", "." ]
def compile_bipartition(self, is_mutable=None):
    """
    Updates the values of the various masks specified and calculates the
    normalized bipartition bitmask.

    If a rooted bipartition, then this is set to the value of the leafset
    bitmask.
    If an unrooted bipartition, then the leafset bitmask is normalized such
    that the lowest-significant bit (i.e., the group to which the first
    taxon belongs) is set to '0'.

    Also makes this bipartition immutable (unless ``is_mutable`` is
    |False|), which facilitates it being used in dictionaries and sets.

    Note that this requires full population of the following fields:

        - self._leafset_bitmask
        - self._tree_leafset_bitmask
    """
    # Delegate to compile_split_bitmask using the masks cached on this
    # instance.
    # NOTE(review): ``self`` is also passed explicitly to what looks like a
    # *bound* method, so the callee receives it twice (once implicitly,
    # once as the first positional argument) -- confirm this matches
    # compile_split_bitmask's signature before relying on this path.
    self.compile_split_bitmask(self,
            leafset_bitmask=self._leafset_bitmask,
            tree_leafset_bitmask=self._tree_leafset_bitmask,
            is_rooted=self._is_rooted,
            is_mutable=is_mutable)
[ "def", "compile_bipartition", "(", "self", ",", "is_mutable", "=", "None", ")", ":", "self", ".", "compile_split_bitmask", "(", "self", ",", "leafset_bitmask", "=", "self", ".", "_leafset_bitmask", ",", "tree_leafset_bitmask", "=", "self", ".", "_tree_leafset_bitm...
https://github.com/jeetsukumaran/DendroPy/blob/29fd294bf05d890ebf6a8d576c501e471db27ca1/src/dendropy/datamodel/treemodel.py#L489-L511
mozman/ezdxf
59d0fc2ea63f5cf82293428f5931da7e9f9718e9
src/ezdxf/sections/header.py
python
HeaderSection.__setitem__
(self, key: str, value: Any)
Set header variable `key` to `value` by index operator like: :code:`drawing.header['$ANGDIR'] = 1`
Set header variable `key` to `value` by index operator like: :code:`drawing.header['$ANGDIR'] = 1`
[ "Set", "header", "variable", "key", "to", "value", "by", "index", "operator", "like", ":", ":", "code", ":", "drawing", ".", "header", "[", "$ANGDIR", "]", "=", "1" ]
def __setitem__(self, key: str, value: Any) -> None:
    """Set header variable `key` to `value` by index operator like:
    :code:`drawing.header['$ANGDIR'] = 1`

    """
    # The factory validates the key/value pair; it signals malformed
    # input with IndexError or ValueError, which we surface as a DXF
    # error carrying the rejected value.
    try:
        new_tags = self._headervar_factory(key, value)
    except (IndexError, ValueError):
        raise const.DXFValueError(str(value))
    self.hdrvars[key] = HeaderVar(new_tags)
[ "def", "__setitem__", "(", "self", ",", "key", ":", "str", ",", "value", ":", "Any", ")", "->", "None", ":", "try", ":", "tags", "=", "self", ".", "_headervar_factory", "(", "key", ",", "value", ")", "except", "(", "IndexError", ",", "ValueError", ")...
https://github.com/mozman/ezdxf/blob/59d0fc2ea63f5cf82293428f5931da7e9f9718e9/src/ezdxf/sections/header.py#L295-L304
Huangying-Zhan/DF-VO
6a2ec43fc6209d9058ae1709d779c5ada68a31f3
libs/flowlib/png.py
python
write_chunks
(out, chunks)
Create a PNG file by writing out the chunks.
Create a PNG file by writing out the chunks.
[ "Create", "a", "PNG", "file", "by", "writing", "out", "the", "chunks", "." ]
def write_chunks(out, chunks):
    """Create a PNG file by writing out the chunks."""
    # PNG signature first, then every chunk in the order given.
    out.write(_signature)
    for tag_and_data in chunks:
        write_chunk(out, *tag_and_data)
[ "def", "write_chunks", "(", "out", ",", "chunks", ")", ":", "out", ".", "write", "(", "_signature", ")", "for", "chunk", "in", "chunks", ":", "write_chunk", "(", "out", ",", "*", "chunk", ")" ]
https://github.com/Huangying-Zhan/DF-VO/blob/6a2ec43fc6209d9058ae1709d779c5ada68a31f3/libs/flowlib/png.py#L969-L974
snarfed/granary
ab085de2aef0cff8ac31a99b5e21443a249e8419
granary/facebook.py
python
Facebook.parse_id
(id, is_comment=False)
return fbid
Parses a Facebook post or comment id. Facebook ids come in different formats: * Simple number, usually a user or post: 12 * Two numbers with underscore, usually POST_COMMENT or USER_POST: 12_34 * Three numbers with underscores, USER_POST_COMMENT: 12_34_56 * Three numbers with colons, USER:POST:SHARD: 12:34:63 (We're guessing that the third part is a shard in some FB internal system. In our experience so far, it's always either 63 or the app-scoped user id for 63.) * Two numbers with colon, POST:SHARD: 12:34 (We've seen 0 as shard in this format.) * Four numbers with colons/underscore, USER:POST:SHARD_COMMENT: 12:34:63_56 * Five numbers with colons/underscore, USER:EVENT:UNKNOWN:UNKNOWN_UNKNOWN Not currently supported! Examples: 111599105530674:998145346924699:10102446236688861:10207188792305341_998153510257216 111599105530674:195181727490727:10102446236688861:10205257726909910_195198790822354 Background: * https://github.com/snarfed/bridgy/issues/305 * https://developers.facebook.com/bugs/786903278061433/ Args: id: string or integer is_comment: boolean Returns: FacebookId: Some or all fields may be None.
Parses a Facebook post or comment id.
[ "Parses", "a", "Facebook", "post", "or", "comment", "id", "." ]
def parse_id(id, is_comment=False):
  """Parses a Facebook post or comment id.

  Facebook ids come in different formats:

  * Simple number, usually a user or post: 12
  * Two numbers with underscore, usually POST_COMMENT or USER_POST: 12_34
  * Three numbers with underscores, USER_POST_COMMENT: 12_34_56
  * Three numbers with colons, USER:POST:SHARD: 12:34:63
    (We're guessing that the third part is a shard in some FB internal system.
    In our experience so far, it's always either 63 or the app-scoped user id
    for 63.)
  * Two numbers with colon, POST:SHARD: 12:34
    (We've seen 0 as shard in this format.)
  * Four numbers with colons/underscore, USER:POST:SHARD_COMMENT: 12:34:63_56
  * Five numbers with colons/underscore, USER:EVENT:UNKNOWN:UNKNOWN_UNKNOWN
    Not currently supported!

  Background:

  * https://github.com/snarfed/bridgy/issues/305
  * https://developers.facebook.com/bugs/786903278061433/

  Args:
    id: string or integer
    is_comment: boolean

  Returns:
    FacebookId: Some or all fields may be None.
  """
  assert is_comment in (True, False), is_comment

  # "Unknown format" sentinel returned for anything we cannot parse.
  blank = FacebookId(None, None, None)
  if id in (None, '', 'login.php'):
    # some FB permalinks redirect to login.php, e.g. group and non-public posts
    return blank

  # NOTE: `id` shadows the builtin; kept as-is to preserve the interface.
  id = str(id)
  user = None
  post = None
  comment = None

  by_colon = id.split(':')
  by_underscore = id.split('_')

  # colon id?
  if len(by_colon) in (2, 3) and all(by_colon):
    if len(by_colon) == 3:
      # USER:POST:SHARD -- peel off the user, leaving [post, shard].
      user = by_colon.pop(0)
      post, shard = by_colon
      # SHARD_COMMENT carries the comment id after the underscore.
      parts = shard.split('_')
      if len(parts) >= 2 and parts[-1]:
        comment = parts[-1]
    elif len(by_colon) == 2 and all(by_colon):
      # POST:SHARD -- the shard is discarded.
      post = by_colon[0]
  # underscore id?
  elif len(by_underscore) == 3 and all(by_underscore):
    user, post, comment = by_underscore
  elif len(by_underscore) == 2 and all(by_underscore):
    # Ambiguous two-part form: the caller's is_comment flag disambiguates
    # POST_COMMENT from USER_POST.
    if is_comment:
      post, comment = by_underscore
    else:
      user, post = by_underscore
  # plain number?
  elif util.is_int(id):
    if is_comment:
      comment = id
    else:
      post = id

  fbid = FacebookId(user, post, comment)

  # Reject the whole id if any populated part is not alphanumeric.
  for sub_id in user, post, comment:
    if sub_id and not re.match(r'^[0-9a-zA-Z]+$', sub_id):
      fbid = blank

  if fbid == blank:
    logging.error(f'Cowardly refusing Facebook id with unknown format: {id}')

  return fbid
[ "def", "parse_id", "(", "id", ",", "is_comment", "=", "False", ")", ":", "assert", "is_comment", "in", "(", "True", ",", "False", ")", ",", "is_comment", "blank", "=", "FacebookId", "(", "None", ",", "None", ",", "None", ")", "if", "id", "in", "(", ...
https://github.com/snarfed/granary/blob/ab085de2aef0cff8ac31a99b5e21443a249e8419/granary/facebook.py#L2367-L2448
respeaker/get_started_with_respeaker
ec859759fcec7e683a5e09328a8ea307046f353d
files/usr/lib/python2.7/site-packages/tornado/httpserver.py
python
HTTPRequest.cookies
(self)
return self._cookies
A dictionary of Cookie.Morsel objects.
A dictionary of Cookie.Morsel objects.
[ "A", "dictionary", "of", "Cookie", ".", "Morsel", "objects", "." ]
def cookies(self):
    """A dictionary of Cookie.Morsel objects."""
    # Lazily parse and cache the Cookie header on first access.
    if hasattr(self, "_cookies"):
        return self._cookies
    self._cookies = Cookie.SimpleCookie()
    if "Cookie" in self.headers:
        try:
            self._cookies.load(native_str(self.headers["Cookie"]))
        except Exception:
            # Malformed cookie header: degrade to an empty plain dict,
            # matching the original fallback behavior.
            self._cookies = {}
    return self._cookies
[ "def", "cookies", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"_cookies\"", ")", ":", "self", ".", "_cookies", "=", "Cookie", ".", "SimpleCookie", "(", ")", "if", "\"Cookie\"", "in", "self", ".", "headers", ":", "try", ":", "se...
https://github.com/respeaker/get_started_with_respeaker/blob/ec859759fcec7e683a5e09328a8ea307046f353d/files/usr/lib/python2.7/site-packages/tornado/httpserver.py#L455-L465
py2neo-org/py2neo
2e46bbf4d622f53282e796ffc521fc4bc6d0b60d
py2neo/vendor/bottle.py
python
_re_flatten
(p)
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)
Turn all capturing groups in a regular expression pattern into non-capturing groups.
Turn all capturing groups in a regular expression pattern into non-capturing groups.
[ "Turn", "all", "capturing", "groups", "in", "a", "regular", "expression", "pattern", "into", "non", "-", "capturing", "groups", "." ]
def _re_flatten(p): ''' Turn all capturing groups in a regular expression pattern into non-capturing groups. ''' if '(' not in p: return p return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)
[ "def", "_re_flatten", "(", "p", ")", ":", "if", "'('", "not", "in", "p", ":", "return", "p", "return", "re", ".", "sub", "(", "r'(\\\\*)(\\(\\?P<[^>]+>|\\((?!\\?))'", ",", "lambda", "m", ":", "m", ".", "group", "(", "0", ")", "if", "len", "(", "m", ...
https://github.com/py2neo-org/py2neo/blob/2e46bbf4d622f53282e796ffc521fc4bc6d0b60d/py2neo/vendor/bottle.py#L253-L258
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/twisted/twisted/mail/imap4.py
python
MessageSet.clean
(self)
Clean ranges list, combining adjacent ranges
Clean ranges list, combining adjacent ranges
[ "Clean", "ranges", "list", "combining", "adjacent", "ranges" ]
def clean(self):
    """
    Clean ranges list, combining adjacent ranges
    """
    self.ranges.sort()

    prev_low, prev_high = None, None
    for idx, pair in enumerate(self.ranges):
        low, high = pair
        if low is None:
            continue
        # After the sort, low >= prev_low and high >= prev_high.
        if prev_low is not None and low <= prev_high + 1:
            # Overlapping or adjacent: fold into the previous range and
            # blank out the earlier slot for the filter() pass below.
            low = prev_low
            high = max(prev_high, high)
            self.ranges[idx - 1] = None
        self.ranges[idx] = (low, high)
        prev_low, prev_high = low, high

    # Drop the slots blanked out during merging (kept as filter() to
    # preserve the original's return type on this attribute).
    self.ranges = filter(None, self.ranges)
[ "def", "clean", "(", "self", ")", ":", "self", ".", "ranges", ".", "sort", "(", ")", "oldl", ",", "oldh", "=", "None", ",", "None", "for", "i", ",", "(", "l", ",", "h", ")", "in", "enumerate", "(", "self", ".", "ranges", ")", ":", "if", "l", ...
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/twisted/twisted/mail/imap4.py#L181-L201
terrencepreilly/darglint
abc26b768cd7135d848223ba53f68323593c33d5
darglint/parse/identifiers.py
python
Continuation.of
(self, path)
return self
[]
def of(self, path):
    # type: (str) -> Continuation
    """Append *path* to the end of this continuation chain; returns self."""
    Assert(
        not self._sealed,
        'Sealed continuations shouldn\'t be extended!',
    )
    child = self.child
    if isinstance(child, Continuation):
        # Already have a continuation child: delegate to the chain's tail.
        child.of(path)
    elif child is None:
        # Empty slot: attach a fresh always-matching continuation.
        self.child = Continuation(path, lambda _: True, None)
    return self
[ "def", "of", "(", "self", ",", "path", ")", ":", "# type: (str) -> Continuation", "Assert", "(", "not", "self", ".", "_sealed", ",", "'Sealed continuations shouldn\\'t be extended!'", ",", ")", "if", "isinstance", "(", "self", ".", "child", ",", "Continuation", ...
https://github.com/terrencepreilly/darglint/blob/abc26b768cd7135d848223ba53f68323593c33d5/darglint/parse/identifiers.py#L33-L43
prody/ProDy
b24bbf58aa8fffe463c8548ae50e3955910e5b7f
prody/atomic/atomgroup.py
python
AtomGroup.setImpropers
(self, impropers)
Set covalent impropers between atoms. *impropers* must be a list or an array of triplets of indices. All impropers must be set at once. Improper information can be used to make atom selections, e.g. ``"improper to index 1"``. See :mod:`.select` module documentation for details. Also, a data array with number of impropers will be generated and stored with label *numimpropers*. This can be used in atom selections, e.g. ``'numimpropers 0'`` can be used to select ions in a system.
Set covalent impropers between atoms. *impropers* must be a list or an array of triplets of indices. All impropers must be set at once. Improper information can be used to make atom selections, e.g. ``"improper to index 1"``. See :mod:`.select` module documentation for details. Also, a data array with number of impropers will be generated and stored with label *numimpropers*. This can be used in atom selections, e.g. ``'numimpropers 0'`` can be used to select ions in a system.
[ "Set", "covalent", "impropers", "between", "atoms", ".", "*", "impropers", "*", "must", "be", "a", "list", "or", "an", "array", "of", "triplets", "of", "indices", ".", "All", "impropers", "must", "be", "set", "at", "once", ".", "Improper", "information", ...
def setImpropers(self, impropers):
    """Set covalent impropers between atoms.  *impropers* must be a list or
    an array of triplets of indices.  All impropers must be set at once.
    Improper information can be used to make atom selections, e.g.
    ``"improper to index 1"``.  See :mod:`.select` module documentation for
    details.

    Also, a data array with number of impropers will be generated and
    stored with label *numimpropers*.  This can be used in atom selections,
    e.g. ``'numimpropers 0'`` can be used to select ions in a system."""

    if isinstance(impropers, list):
        impropers = np.array(impropers, int)
    if impropers.ndim != 2:
        raise ValueError('impropers.ndim must be 2')
    if impropers.shape[1] != 4:
        raise ValueError('impropers.shape must be (n_impropers, 4)')
    if impropers.min() < 0:
        raise ValueError('negative atom indices are not valid')
    n_atoms = self._n_atoms
    if impropers.max() >= n_atoms:
        raise ValueError('atom indices are out of range')

    # Canonicalize each improper by sorting its four atom indices in place.
    impropers.sort(1)

    # Order rows by columns 3, 2, 1, 0 via successive argsorts.
    # NOTE(review): this chain only yields a true lexicographic order when
    # argsort is stable; numpy's default quicksort is not -- presumably
    # good enough for the downstream use, but confirm before relying on
    # strict row order.
    impropers = impropers[impropers[:, 3].argsort(), ]
    impropers = impropers[impropers[:, 2].argsort(), ]
    impropers = impropers[impropers[:, 1].argsort(), ]
    impropers = impropers[impropers[:, 0].argsort(), ]

    # Build the improper map and the per-atom 'numimpropers' data array.
    self._imap, self._data['numimpropers'] = evalImpropers(
        impropers, n_atoms)
    self._impropers = impropers
[ "def", "setImpropers", "(", "self", ",", "impropers", ")", ":", "if", "isinstance", "(", "impropers", ",", "list", ")", ":", "impropers", "=", "np", ".", "array", "(", "impropers", ",", "int", ")", "if", "impropers", ".", "ndim", "!=", "2", ":", "rai...
https://github.com/prody/ProDy/blob/b24bbf58aa8fffe463c8548ae50e3955910e5b7f/prody/atomic/atomgroup.py#L1301-L1329
Chaffelson/nipyapi
d3b186fd701ce308c2812746d98af9120955e810
nipyapi/registry/rest.py
python
RESTResponse.getheader
(self, name, default=None)
return self.urllib3_response.getheader(name, default)
Returns a given response header.
Returns a given response header.
[ "Returns", "a", "given", "response", "header", "." ]
def getheader(self, name, default=None):
    """
    Returns a given response header.
    """
    # Delegate straight to the wrapped urllib3 response object.
    response = self.urllib3_response
    return response.getheader(name, default)
[ "def", "getheader", "(", "self", ",", "name", ",", "default", "=", "None", ")", ":", "return", "self", ".", "urllib3_response", ".", "getheader", "(", "name", ",", "default", ")" ]
https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/registry/rest.py#L52-L56
GoogleCloudPlatform/gsutil
5be882803e76608e2fd29cf8c504ccd1fe0a7746
gslib/commands/perfdiag.py
python
PerfDiagCommand.PerformSlicedUpload
(self, file_name, object_name, use_file, gsutil_api, gzip_encoded=False)
Performs a parallel upload of a file using the slice strategy. The metadata for file_name should be present in temp_file_dict prior to calling. Also, the data from for file_name should be present in temp_file_dict if use_file is specified as False. Args: file_name: The name of the file to upload. object_name: The name of the object to upload to. use_file: If true, use disk I/O, otherwise read upload data from memory. gsutil_api: CloudApi instance to use for operations in this thread. gzip_encoded: Flag for if the file will be uploaded with the gzip transport encoding. If true, a semaphore is used to limit resource usage.
Performs a parallel upload of a file using the slice strategy.
[ "Performs", "a", "parallel", "upload", "of", "a", "file", "using", "the", "slice", "strategy", "." ]
def PerformSlicedUpload(self, file_name, object_name, use_file, gsutil_api,
                        gzip_encoded=False):
  """Performs a parallel upload of a file using the slice strategy.

  The metadata for file_name should be present in temp_file_dict prior
  to calling. Also, the data from for file_name should be present in
  temp_file_dict if use_file is specified as False.

  Args:
    file_name: The name of the file to upload.
    object_name: The name of the object to upload to.
    use_file: If true, use disk I/O, otherwise read upload data from memory.
    gsutil_api: CloudApi instance to use for operations in this thread.
    gzip_encoded: Flag for if the file will be uploaded with the gzip
        transport encoding. If true, a semaphore is used to limit resource
        usage.
  """
  # Divide the file into components.
  component_size = DivideAndCeil(self.thru_filesize, self.num_slices)
  component_object_names = ([
      object_name + str(i) for i in range(self.num_slices)
  ])

  args = []
  for i in range(self.num_slices):
    component_start = i * component_size
    # The final slice may be shorter than the nominal component size;
    # min() clamps it to whatever bytes remain in the file.
    component_size = min(component_size,
                         temp_file_dict[file_name].size - component_start)
    args.append(
        SliceUploadTuple(file_name, component_object_names[i], use_file,
                         component_start, component_size, gzip_encoded))

  # Upload the components in parallel.
  try:
    self.Apply(
        _UploadSlice,
        args,
        _PerfdiagExceptionHandler, ('total_requests', 'request_errors'),
        arg_checker=DummyArgChecker,
        parallel_operations_override=self.ParallelOverrideReason.PERFDIAG,
        process_count=self.processes,
        thread_count=self.threads)

    # Compose the components into an object.
    request_components = []
    for i in range(self.num_slices):
      src_obj_metadata = (
          apitools_messages.ComposeRequest.SourceObjectsValueListEntry(
              name=component_object_names[i]))
      request_components.append(src_obj_metadata)

    def _Compose():
      # Runs (and is retried) via _RunOperation below.
      dst_obj_metadata = apitools_messages.Object()
      dst_obj_metadata.name = object_name
      dst_obj_metadata.bucket = self.bucket_url.bucket_name
      gsutil_api.ComposeObject(request_components, dst_obj_metadata,
                               provider=self.provider)

    self._RunOperation(_Compose)
  finally:
    # Delete the temporary components, even if upload/compose failed.
    self.Apply(
        _DeleteWrapper,
        component_object_names,
        _PerfdiagExceptionHandler, ('total_requests', 'request_errors'),
        arg_checker=DummyArgChecker,
        parallel_operations_override=self.ParallelOverrideReason.PERFDIAG,
        process_count=self.processes,
        thread_count=self.threads)
[ "def", "PerformSlicedUpload", "(", "self", ",", "file_name", ",", "object_name", ",", "use_file", ",", "gsutil_api", ",", "gzip_encoded", "=", "False", ")", ":", "# Divide the file into components.", "component_size", "=", "DivideAndCeil", "(", "self", ".", "thru_fi...
https://github.com/GoogleCloudPlatform/gsutil/blob/5be882803e76608e2fd29cf8c504ccd1fe0a7746/gslib/commands/perfdiag.py#L970-L1043
maas/maas
db2f89970c640758a51247c59bf1ec6f60cf4ab5
src/maasserver/views/rpc.py
python
info
(request)
return HttpResponse(json.dumps(info), content_type="application/json")
View returning a JSON document with information about RPC endpoints. Currently the only information returned is a list of `(host, port)` tuples on which the region has listening RPC endpoints. When the `rpc-advertise` service is not running this returns `None` instead of the list of event-loop endpoints. This denotes something along the lines of "I don't know". The cluster should not act on this, and instead ask again later.
View returning a JSON document with information about RPC endpoints.
[ "View", "returning", "a", "JSON", "document", "with", "information", "about", "RPC", "endpoints", "." ]
def info(request):
    """View returning a JSON document with information about RPC endpoints.

    Currently the only information returned is a list of `(host, port)`
    tuples on which the region has listening RPC endpoints.

    When the `rpc-advertise` service is not running this returns `None`
    instead of the list of event-loop endpoints. This denotes something
    along the lines of "I don't know". The cluster should not act on this,
    and instead ask again later.

    :param request: The incoming HTTP request (unused beyond dispatch).
    :return: ``HttpResponse`` with a JSON body mapping event-loop names to
        lists of ``(addr, port)`` pairs under the ``"eventloops"`` key.
    """
    endpoints = {}
    for name, addr, port in get_endpoints():
        # setdefault replaces the original membership-test-then-append
        # grouping with a single idiomatic call.
        endpoints.setdefault(name, []).append((addr, port))

    # Each endpoint is an entry point into this event-loop.
    # (Renamed from `info` to avoid shadowing this view function's name.)
    payload = {"eventloops": endpoints}
    return HttpResponse(json.dumps(payload), content_type="application/json")
[ "def", "info", "(", "request", ")", ":", "endpoints", "=", "{", "}", "for", "name", ",", "addr", ",", "port", "in", "get_endpoints", "(", ")", ":", "if", "name", "in", "endpoints", ":", "endpoints", "[", "name", "]", ".", "append", "(", "(", "addr"...
https://github.com/maas/maas/blob/db2f89970c640758a51247c59bf1ec6f60cf4ab5/src/maasserver/views/rpc.py#L55-L77
reddit-archive/reddit
753b17407e9a9dca09558526805922de24133d53
r2/r2/models/traffic.py
python
decrement_month
(date)
return date.replace(day=1)
Given a truncated datetime, return a new one one month in the past.
Given a truncated datetime, return a new one one month in the past.
[ "Given", "a", "truncated", "datetime", "return", "a", "new", "one", "one", "month", "in", "the", "past", "." ]
def decrement_month(date):
    """Given a truncated datetime, return a new one one month in the past."""
    if date.day != 1:
        raise ValueError("Input must be truncated to the 1st of the month.")

    # Step back one day to land on the last day of the previous month,
    # then re-truncate to that month's first day.
    last_of_previous = date - datetime.timedelta(days=1)
    return last_of_previous.replace(day=1)
[ "def", "decrement_month", "(", "date", ")", ":", "if", "date", ".", "day", "!=", "1", ":", "raise", "ValueError", "(", "\"Input must be truncated to the 1st of the month.\"", ")", "date", "-=", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "return"...
https://github.com/reddit-archive/reddit/blob/753b17407e9a9dca09558526805922de24133d53/r2/r2/models/traffic.py#L146-L153
jaseg/python-mpv
1f59cfa07246c993737b25857fd01421b2da8bbd
mpv.py
python
MPV.mouse
(x, y, button=None, mode='single')
Mapped mpv mouse command, see man mpv(1).
Mapped mpv mouse command, see man mpv(1).
[ "Mapped", "mpv", "mouse", "command", "see", "man", "mpv", "(", "1", ")", "." ]
def mouse(x, y, button=None, mode='single'): """Mapped mpv mouse command, see man mpv(1).""" if button is None: self.command('mouse', x, y, mode) else: self.command('mouse', x, y, button, mode)
[ "def", "mouse", "(", "x", ",", "y", ",", "button", "=", "None", ",", "mode", "=", "'single'", ")", ":", "if", "button", "is", "None", ":", "self", ".", "command", "(", "'mouse'", ",", "x", ",", "y", ",", "mode", ")", "else", ":", "self", ".", ...
https://github.com/jaseg/python-mpv/blob/1f59cfa07246c993737b25857fd01421b2da8bbd/mpv.py#L1273-L1278
liu-nlper/SLTK
b3edc58ef1d317eefc21e65d306932dedc6a8525
sltk/nn/modules/crf.py
python
CRF._score_sentence
(self, scores, mask, tags)
return gold_score
Args: scores: size=(seq_len, batch_size, tag_size, tag_size) mask: size=(batch_size, seq_len) tags: size=(batch_size, seq_len) Returns: score:
Args: scores: size=(seq_len, batch_size, tag_size, tag_size) mask: size=(batch_size, seq_len) tags: size=(batch_size, seq_len)
[ "Args", ":", "scores", ":", "size", "=", "(", "seq_len", "batch_size", "tag_size", "tag_size", ")", "mask", ":", "size", "=", "(", "batch_size", "seq_len", ")", "tags", ":", "size", "=", "(", "batch_size", "seq_len", ")" ]
def _score_sentence(self, scores, mask, tags): """ Args: scores: size=(seq_len, batch_size, tag_size, tag_size) mask: size=(batch_size, seq_len) tags: size=(batch_size, seq_len) Returns: score: """ batch_size = scores.size(1) seq_len = scores.size(0) tag_size = scores.size(-1) new_tags = Variable(torch.LongTensor(batch_size, seq_len)) if self.use_cuda: new_tags = new_tags.cuda() for idx in range(seq_len): if idx == 0: new_tags[:, 0] = (tag_size - 2) * tag_size + tags[:, 0] else: new_tags[:, idx] = tags[:, idx-1] * tag_size + tags[:, idx] end_transition = self.transitions[:, self.END_TAG_IDX].contiguous().view( 1, tag_size).expand(batch_size, tag_size) length_mask = torch.sum(mask, dim=1).view(batch_size, 1).long() end_ids = torch.gather(tags, 1, length_mask-1) end_energy = torch.gather(end_transition, 1, end_ids) new_tags = new_tags.transpose(1, 0).contiguous().view(seq_len, batch_size, 1) tg_energy = torch.gather(scores.view(seq_len, batch_size, -1), 2, new_tags).view( seq_len, batch_size) tg_energy = tg_energy.masked_select(mask.transpose(1, 0)) gold_score = tg_energy.sum() + end_energy.sum() return gold_score
[ "def", "_score_sentence", "(", "self", ",", "scores", ",", "mask", ",", "tags", ")", ":", "batch_size", "=", "scores", ".", "size", "(", "1", ")", "seq_len", "=", "scores", ".", "size", "(", "0", ")", "tag_size", "=", "scores", ".", "size", "(", "-...
https://github.com/liu-nlper/SLTK/blob/b3edc58ef1d317eefc21e65d306932dedc6a8525/sltk/nn/modules/crf.py#L191-L228
Robot-Will/Stino
a94831cd1bf40a59587a7b6cc2e9b5c4306b1bf2
libs/base_utils/file.py
python
File.write
(self, text, append=False)
Method Docs.
Method Docs.
[ "Method", "Docs", "." ]
def write(self, text, append=False): """Method Docs.""" if self._is_readonly: return mode = 'w' if append: mode = 'a' if not os.path.isdir(self._dir): os.makedirs(self._dir) try: with codecs.open(self._path, mode, self._encoding, 'ignore') as f: f.write(text) except (IOError, UnicodeError): pass
[ "def", "write", "(", "self", ",", "text", ",", "append", "=", "False", ")", ":", "if", "self", ".", "_is_readonly", ":", "return", "mode", "=", "'w'", "if", "append", ":", "mode", "=", "'a'", "if", "not", "os", ".", "path", ".", "isdir", "(", "se...
https://github.com/Robot-Will/Stino/blob/a94831cd1bf40a59587a7b6cc2e9b5c4306b1bf2/libs/base_utils/file.py#L139-L154
trezor/trezor-core
18c3a6a5bd45923380312b064be96155f5a7377d
mocks/generated/trezorcrypto/ed25519.py
python
generate_secret
()
Generate secret key.
Generate secret key.
[ "Generate", "secret", "key", "." ]
def generate_secret() -> bytes: ''' Generate secret key. '''
[ "def", "generate_secret", "(", ")", "->", "bytes", ":" ]
https://github.com/trezor/trezor-core/blob/18c3a6a5bd45923380312b064be96155f5a7377d/mocks/generated/trezorcrypto/ed25519.py#L4-L7
hardbyte/python-can
e7a2b040ee1f0cdd7fd77fbfef0454353166b333
can/interfaces/systec/ucanbus.py
python
UcanBus.state
(self, new_state)
[]
def state(self, new_state): if self._state is not BusState.ERROR and ( new_state is BusState.ACTIVE or new_state is BusState.PASSIVE ): try: # close the CAN channel self._ucan.shutdown(self.channel, False) # set mode if new_state is BusState.ACTIVE: self._params["mode"] &= ~Mode.MODE_LISTEN_ONLY else: self._params["mode"] |= Mode.MODE_LISTEN_ONLY # reinitialize CAN channel self._ucan.init_can(self.channel, **self._params) except UcanException as exception: raise CanOperationError() from exception
[ "def", "state", "(", "self", ",", "new_state", ")", ":", "if", "self", ".", "_state", "is", "not", "BusState", ".", "ERROR", "and", "(", "new_state", "is", "BusState", ".", "ACTIVE", "or", "new_state", "is", "BusState", ".", "PASSIVE", ")", ":", "try",...
https://github.com/hardbyte/python-can/blob/e7a2b040ee1f0cdd7fd77fbfef0454353166b333/can/interfaces/systec/ucanbus.py#L294-L309
pypa/pip
7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4
src/pip/_internal/req/req_install.py
python
InstallRequirement.has_hash_options
(self)
return bool(self.hash_options)
Return whether any known-good hashes are specified as options. These activate --require-hashes mode; hashes specified as part of a URL do not.
Return whether any known-good hashes are specified as options.
[ "Return", "whether", "any", "known", "-", "good", "hashes", "are", "specified", "as", "options", "." ]
def has_hash_options(self) -> bool: """Return whether any known-good hashes are specified as options. These activate --require-hashes mode; hashes specified as part of a URL do not. """ return bool(self.hash_options)
[ "def", "has_hash_options", "(", "self", ")", "->", "bool", ":", "return", "bool", "(", "self", ".", "hash_options", ")" ]
https://github.com/pypa/pip/blob/7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4/src/pip/_internal/req/req_install.py#L258-L265
numenta/nupic
b9ebedaf54f49a33de22d8d44dff7c765cdb5548
src/nupic/algorithms/backtracking_tm_shim.py
python
MonitoredTMShim.topDownCompute
(self, topDownIn=None)
return output
(From `backtracking_tm.py`) Top-down compute - generate expected input given output of the TM @param topDownIn top down input from the level above us @returns best estimate of the TM input that would have generated bottomUpOut.
(From `backtracking_tm.py`) Top-down compute - generate expected input given output of the TM
[ "(", "From", "backtracking_tm", ".", "py", ")", "Top", "-", "down", "compute", "-", "generate", "expected", "input", "given", "output", "of", "the", "TM" ]
def topDownCompute(self, topDownIn=None): """ (From `backtracking_tm.py`) Top-down compute - generate expected input given output of the TM @param topDownIn top down input from the level above us @returns best estimate of the TM input that would have generated bottomUpOut. """ output = numpy.zeros(self.numberOfColumns()) columns = [self.columnForCell(idx) for idx in self.getPredictiveCells()] output[columns] = 1 return output
[ "def", "topDownCompute", "(", "self", ",", "topDownIn", "=", "None", ")", ":", "output", "=", "numpy", ".", "zeros", "(", "self", ".", "numberOfColumns", "(", ")", ")", "columns", "=", "[", "self", ".", "columnForCell", "(", "idx", ")", "for", "idx", ...
https://github.com/numenta/nupic/blob/b9ebedaf54f49a33de22d8d44dff7c765cdb5548/src/nupic/algorithms/backtracking_tm_shim.py#L281-L293
fedora-infra/bodhi
2b1df12d85eb2e575d8e481a3936c4f92d1fe29a
bodhi/client/__init__.py
python
releases
()
Interact with releases.
Interact with releases.
[ "Interact", "with", "releases", "." ]
def releases(): # Docs that show in the --help """Interact with releases.""" # Developer Docs """Manage the releases.""" pass
[ "def", "releases", "(", ")", ":", "# Docs that show in the --help", "# Developer Docs", "\"\"\"Manage the releases.\"\"\"", "pass" ]
https://github.com/fedora-infra/bodhi/blob/2b1df12d85eb2e575d8e481a3936c4f92d1fe29a/bodhi/client/__init__.py#L1176-L1181
JaniceWuo/MovieRecommend
4c86db64ca45598917d304f535413df3bc9fea65
movierecommend/venv1/Lib/site-packages/django/contrib/gis/forms/fields.py
python
GeometryField.to_python
(self, value)
return value
Transforms the value to a Geometry object.
Transforms the value to a Geometry object.
[ "Transforms", "the", "value", "to", "a", "Geometry", "object", "." ]
def to_python(self, value): """ Transforms the value to a Geometry object. """ if value in self.empty_values: return None if not isinstance(value, GEOSGeometry): try: value = GEOSGeometry(value) except (GEOSException, ValueError, TypeError): raise forms.ValidationError(self.error_messages['invalid_geom'], code='invalid_geom') # Try to set the srid if not value.srid: try: value.srid = self.widget.map_srid except AttributeError: if self.srid: value.srid = self.srid return value
[ "def", "to_python", "(", "self", ",", "value", ")", ":", "if", "value", "in", "self", ".", "empty_values", ":", "return", "None", "if", "not", "isinstance", "(", "value", ",", "GEOSGeometry", ")", ":", "try", ":", "value", "=", "GEOSGeometry", "(", "va...
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/django/contrib/gis/forms/fields.py#L35-L55
Teichlab/cellphonedb
dca1c26555b012df30af0f2521cd4df317cf4600
cellphonedb/src/core/methods/cpdb_analysis_method.py
python
call
(meta: pd.DataFrame, counts: pd.DataFrame, counts_data: str, interactions: pd.DataFrame, genes: pd.DataFrame, complexes: pd.DataFrame, complex_compositions: pd.DataFrame, separator: str, threshold: float = 0.1, result_precision: int = 3)
return means, significant_means, deconvoluted
[]
def call(meta: pd.DataFrame, counts: pd.DataFrame, counts_data: str, interactions: pd.DataFrame, genes: pd.DataFrame, complexes: pd.DataFrame, complex_compositions: pd.DataFrame, separator: str, threshold: float = 0.1, result_precision: int = 3) -> (pd.DataFrame, pd.DataFrame, pd.DataFrame): means, significant_means, deconvoluted = \ cpdb_analysis_complex_method.call(meta.copy(), counts.copy(), counts_data, interactions.copy(), genes, complexes, complex_compositions, separator, threshold, result_precision) max_rank = significant_means['rank'].max() significant_means['rank'] = significant_means['rank'].apply(lambda rank: rank if rank != 0 else (1 + max_rank)) significant_means.sort_values('rank', inplace=True) return means, significant_means, deconvoluted
[ "def", "call", "(", "meta", ":", "pd", ".", "DataFrame", ",", "counts", ":", "pd", ".", "DataFrame", ",", "counts_data", ":", "str", ",", "interactions", ":", "pd", ".", "DataFrame", ",", "genes", ":", "pd", ".", "DataFrame", ",", "complexes", ":", "...
https://github.com/Teichlab/cellphonedb/blob/dca1c26555b012df30af0f2521cd4df317cf4600/cellphonedb/src/core/methods/cpdb_analysis_method.py#L6-L31
biolab/orange2
db40a9449cb45b507d63dcd5739b223f9cffb8e6
Orange/OrangeCanvas/preview/previewdialog.py
python
PreviewDialog.setHeading
(self, heading)
Set `heading` as the heading string ('<h3>Preview</h3>' by default).
Set `heading` as the heading string ('<h3>Preview</h3>' by default).
[ "Set", "heading", "as", "the", "heading", "string", "(", "<h3", ">", "Preview<", "/", "h3", ">", "by", "default", ")", "." ]
def setHeading(self, heading): """Set `heading` as the heading string ('<h3>Preview</h3>' by default). """ self.__heading.setText(heading)
[ "def", "setHeading", "(", "self", ",", "heading", ")", ":", "self", ".", "__heading", ".", "setText", "(", "heading", ")" ]
https://github.com/biolab/orange2/blob/db40a9449cb45b507d63dcd5739b223f9cffb8e6/Orange/OrangeCanvas/preview/previewdialog.py#L99-L104
galaxyproject/galaxy
4c03520f05062e0f4a1b3655dc0b7452fda69943
lib/galaxy/webapps/galaxy/api/jobs.py
python
JobController.inputs
(self, trans: ProvidesUserContext, id, **kwd)
return self.__dictify_associations(trans, job.input_datasets, job.input_library_datasets)
GET /api/jobs/{id}/inputs returns input datasets created by job :type id: string :param id: Encoded job id :rtype: dictionary :returns: dictionary containing input dataset associations
GET /api/jobs/{id}/inputs
[ "GET", "/", "api", "/", "jobs", "/", "{", "id", "}", "/", "inputs" ]
def inputs(self, trans: ProvidesUserContext, id, **kwd): """ GET /api/jobs/{id}/inputs returns input datasets created by job :type id: string :param id: Encoded job id :rtype: dictionary :returns: dictionary containing input dataset associations """ job = self.__get_job(trans, id) return self.__dictify_associations(trans, job.input_datasets, job.input_library_datasets)
[ "def", "inputs", "(", "self", ",", "trans", ":", "ProvidesUserContext", ",", "id", ",", "*", "*", "kwd", ")", ":", "job", "=", "self", ".", "__get_job", "(", "trans", ",", "id", ")", "return", "self", ".", "__dictify_associations", "(", "trans", ",", ...
https://github.com/galaxyproject/galaxy/blob/4c03520f05062e0f4a1b3655dc0b7452fda69943/lib/galaxy/webapps/galaxy/api/jobs.py#L265-L278
Azure/azure-devops-cli-extension
11334cd55806bef0b99c3bee5a438eed71e44037
azure-devops/azext_devops/devops_sdk/v5_1/settings/settings_client.py
python
SettingsClient.set_entries
(self, entries, user_scope)
SetEntries. [Preview API] Set the specified setting entry values for the given user/all-users scope :param {object} entries: The entries to set :param str user_scope: User-Scope at which to set the values. Should be "me" for the current user or "host" for all users.
SetEntries. [Preview API] Set the specified setting entry values for the given user/all-users scope :param {object} entries: The entries to set :param str user_scope: User-Scope at which to set the values. Should be "me" for the current user or "host" for all users.
[ "SetEntries", ".", "[", "Preview", "API", "]", "Set", "the", "specified", "setting", "entry", "values", "for", "the", "given", "user", "/", "all", "-", "users", "scope", ":", "param", "{", "object", "}", "entries", ":", "The", "entries", "to", "set", "...
def set_entries(self, entries, user_scope): """SetEntries. [Preview API] Set the specified setting entry values for the given user/all-users scope :param {object} entries: The entries to set :param str user_scope: User-Scope at which to set the values. Should be "me" for the current user or "host" for all users. """ route_values = {} if user_scope is not None: route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str') content = self._serialize.body(entries, '{object}') self._send(http_method='PATCH', location_id='cd006711-163d-4cd4-a597-b05bad2556ff', version='5.1-preview.1', route_values=route_values, content=content)
[ "def", "set_entries", "(", "self", ",", "entries", ",", "user_scope", ")", ":", "route_values", "=", "{", "}", "if", "user_scope", "is", "not", "None", ":", "route_values", "[", "'userScope'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'user_...
https://github.com/Azure/azure-devops-cli-extension/blob/11334cd55806bef0b99c3bee5a438eed71e44037/azure-devops/azext_devops/devops_sdk/v5_1/settings/settings_client.py#L60-L74
skelsec/msldap
ed3134135ddf9e13c74a4d7208ef3e48ec898192
msldap/client.py
python
MSLDAPClient.get_tree_plot
(self, root_dn, level = 2)
return {root_dn : tree}
Returns a dictionary representing a tree starting from 'dn' containing all subtrees. :param root_dn: The start DN of the tree :type root_dn: str :param level: Recursion level :type level: int :return: A dictionary representing the LDAP tree :rtype: dict
Returns a dictionary representing a tree starting from 'dn' containing all subtrees.
[ "Returns", "a", "dictionary", "representing", "a", "tree", "starting", "from", "dn", "containing", "all", "subtrees", "." ]
async def get_tree_plot(self, root_dn, level = 2): """ Returns a dictionary representing a tree starting from 'dn' containing all subtrees. :param root_dn: The start DN of the tree :type root_dn: str :param level: Recursion level :type level: int :return: A dictionary representing the LDAP tree :rtype: dict """ logger.debug('Tree, dn: %s level: %s' % (root_dn, level)) tree = {} async for entry, err in self._con.pagedsearch( root_dn, '(distinguishedName=*)', attributes = [b'distinguishedName'], size_limit = self.ldap_query_page_size, search_scope=LEVEL, controls = None, rate_limit=self.target.ldap_query_ratelimit ): if err is not None: raise err if level == 0: return {} #print(entry) #print(entry['attributes']['distinguishedName']) if 'distinguishedName' not in entry['attributes'] or entry['attributes']['distinguishedName'] is None or entry['attributes']['distinguishedName'] == []: continue subtree = await self.get_tree_plot(entry['attributes']['distinguishedName'], level = level -1) tree[entry['attributes']['distinguishedName']] = subtree return {root_dn : tree}
[ "async", "def", "get_tree_plot", "(", "self", ",", "root_dn", ",", "level", "=", "2", ")", ":", "logger", ".", "debug", "(", "'Tree, dn: %s level: %s'", "%", "(", "root_dn", ",", "level", ")", ")", "tree", "=", "{", "}", "async", "for", "entry", ",", ...
https://github.com/skelsec/msldap/blob/ed3134135ddf9e13c74a4d7208ef3e48ec898192/msldap/client.py#L151-L186
GoSecure/pyrdp
abd8b8762b6d7fd0e49d4a927b529f892b412743
pyrdp/convert/ExportedPDUStream.py
python
ExportedPDUStream.__len__
(self)
return len(self.packets)
[]
def __len__(self): return len(self.packets)
[ "def", "__len__", "(", "self", ")", ":", "return", "len", "(", "self", ".", "packets", ")" ]
https://github.com/GoSecure/pyrdp/blob/abd8b8762b6d7fd0e49d4a927b529f892b412743/pyrdp/convert/ExportedPDUStream.py#L21-L22
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Lib/UserDict.py
python
DictMixin.itervalues
(self)
[]
def itervalues(self): for _, v in self.iteritems(): yield v
[ "def", "itervalues", "(", "self", ")", ":", "for", "_", ",", "v", "in", "self", ".", "iteritems", "(", ")", ":", "yield", "v" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/UserDict.py#L116-L118
frappe/frappe
b64cab6867dfd860f10ccaf41a4ec04bc890b583
frappe/website/doctype/website_theme/website_theme.py
python
get_scss
(website_theme)
return frappe.render_template('frappe/website/doctype/website_theme/website_theme_template.scss', context)
Render `website_theme_template.scss` with the values defined in Website Theme. params: website_theme - instance of a Website Theme
Render `website_theme_template.scss` with the values defined in Website Theme.
[ "Render", "website_theme_template", ".", "scss", "with", "the", "values", "defined", "in", "Website", "Theme", "." ]
def get_scss(website_theme): """ Render `website_theme_template.scss` with the values defined in Website Theme. params: website_theme - instance of a Website Theme """ apps_to_ignore = tuple((d.app + '/') for d in website_theme.ignored_apps) available_imports = get_scss_paths() imports_to_include = [d for d in available_imports if not d.startswith(apps_to_ignore)] context = website_theme.as_dict() context['website_theme_scss'] = imports_to_include return frappe.render_template('frappe/website/doctype/website_theme/website_theme_template.scss', context)
[ "def", "get_scss", "(", "website_theme", ")", ":", "apps_to_ignore", "=", "tuple", "(", "(", "d", ".", "app", "+", "'/'", ")", "for", "d", "in", "website_theme", ".", "ignored_apps", ")", "available_imports", "=", "get_scss_paths", "(", ")", "imports_to_incl...
https://github.com/frappe/frappe/blob/b64cab6867dfd860f10ccaf41a4ec04bc890b583/frappe/website/doctype/website_theme/website_theme.py#L139-L151
plotly/plotly.py
cfad7862594b35965c0e000813bd7805e8494a5b
packages/python/plotly/plotly/graph_objs/_cone.py
python
Cone.hovertemplatesrc
(self)
return self["hovertemplatesrc"]
Sets the source reference on Chart Studio Cloud for `hovertemplate`. The 'hovertemplatesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str
Sets the source reference on Chart Studio Cloud for `hovertemplate`. The 'hovertemplatesrc' property must be specified as a string or as a plotly.grid_objs.Column object
[ "Sets", "the", "source", "reference", "on", "Chart", "Studio", "Cloud", "for", "hovertemplate", ".", "The", "hovertemplatesrc", "property", "must", "be", "specified", "as", "a", "string", "or", "as", "a", "plotly", ".", "grid_objs", ".", "Column", "object" ]
def hovertemplatesrc(self): """ Sets the source reference on Chart Studio Cloud for `hovertemplate`. The 'hovertemplatesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["hovertemplatesrc"]
[ "def", "hovertemplatesrc", "(", "self", ")", ":", "return", "self", "[", "\"hovertemplatesrc\"", "]" ]
https://github.com/plotly/plotly.py/blob/cfad7862594b35965c0e000813bd7805e8494a5b/packages/python/plotly/plotly/graph_objs/_cone.py#L746-L758
JaniceWuo/MovieRecommend
4c86db64ca45598917d304f535413df3bc9fea65
movierecommend/venv1/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/compat/__init__.py
python
get_path_uid
(path)
return file_uid
Return path's uid. Does not follow symlinks: https://github.com/pypa/pip/pull/935#discussion_r5307003 Placed this function in compat due to differences on AIX and Jython, that should eventually go away. :raises OSError: When path is a symlink or can't be read.
Return path's uid.
[ "Return", "path", "s", "uid", "." ]
def get_path_uid(path): """ Return path's uid. Does not follow symlinks: https://github.com/pypa/pip/pull/935#discussion_r5307003 Placed this function in compat due to differences on AIX and Jython, that should eventually go away. :raises OSError: When path is a symlink or can't be read. """ if hasattr(os, 'O_NOFOLLOW'): fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW) file_uid = os.fstat(fd).st_uid os.close(fd) else: # AIX and Jython # WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW if not os.path.islink(path): # older versions of Jython don't have `os.fstat` file_uid = os.stat(path).st_uid else: # raise OSError for parity with os.O_NOFOLLOW above raise OSError( "%s is a symlink; Will not return uid for symlinks" % path ) return file_uid
[ "def", "get_path_uid", "(", "path", ")", ":", "if", "hasattr", "(", "os", ",", "'O_NOFOLLOW'", ")", ":", "fd", "=", "os", ".", "open", "(", "path", ",", "os", ".", "O_RDONLY", "|", "os", ".", "O_NOFOLLOW", ")", "file_uid", "=", "os", ".", "fstat", ...
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/compat/__init__.py#L101-L127
tanghaibao/jcvi
5e720870c0928996f8b77a38208106ff0447ccb6
jcvi/formats/fasta.py
python
simulate
(args)
%prog simulate idsfile Simulate random FASTA file based on idsfile, which is a two-column tab-separated file with sequence name and size.
%prog simulate idsfile
[ "%prog", "simulate", "idsfile" ]
def simulate(args): """ %prog simulate idsfile Simulate random FASTA file based on idsfile, which is a two-column tab-separated file with sequence name and size. """ p = OptionParser(simulate.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) (idsfile,) = args fp = open(idsfile) fw = must_open(opts.outfile, "w") for row in fp: name, size = row.split() size = int(size) simulate_one(fw, name, size) fp.close()
[ "def", "simulate", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "simulate", ".", "__doc__", ")", "p", ".", "set_outfile", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", ...
https://github.com/tanghaibao/jcvi/blob/5e720870c0928996f8b77a38208106ff0447ccb6/jcvi/formats/fasta.py#L419-L440
FSecureLABS/Jandroid
e31d0dab58a2bfd6ed8e0a387172b8bd7c893436
libs/platform-tools/platform-tools_windows/systrace/catapult/third_party/pyserial/serial/urlhandler/protocol_loop.py
python
LoopbackSerial.sendBreak
(self, duration=0.25)
Send break condition. Timed, returns to idle state after given duration.
Send break condition. Timed, returns to idle state after given duration.
[ "Send", "break", "condition", ".", "Timed", "returns", "to", "idle", "state", "after", "given", "duration", "." ]
def sendBreak(self, duration=0.25): """Send break condition. Timed, returns to idle state after given duration.""" if not self._isOpen: raise portNotOpenError
[ "def", "sendBreak", "(", "self", ",", "duration", "=", "0.25", ")", ":", "if", "not", "self", ".", "_isOpen", ":", "raise", "portNotOpenError" ]
https://github.com/FSecureLABS/Jandroid/blob/e31d0dab58a2bfd6ed8e0a387172b8bd7c893436/libs/platform-tools/platform-tools_windows/systrace/catapult/third_party/pyserial/serial/urlhandler/protocol_loop.py#L181-L184
khanhnamle1994/natural-language-processing
01d450d5ac002b0156ef4cf93a07cb508c1bcdc5
assignment1/.env/lib/python2.7/site-packages/IPython/nbconvert/writers/files.py
python
FilesWriter._makedir
(self, path)
Make a directory if it doesn't already exist
Make a directory if it doesn't already exist
[ "Make", "a", "directory", "if", "it", "doesn", "t", "already", "exist" ]
def _makedir(self, path): """Make a directory if it doesn't already exist""" if path and not os.path.isdir(path): self.log.info("Making directory %s", path) os.makedirs(path)
[ "def", "_makedir", "(", "self", ",", "path", ")", ":", "if", "path", "and", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "self", ".", "log", ".", "info", "(", "\"Making directory %s\"", ",", "path", ")", "os", ".", "makedirs", "("...
https://github.com/khanhnamle1994/natural-language-processing/blob/01d450d5ac002b0156ef4cf93a07cb508c1bcdc5/assignment1/.env/lib/python2.7/site-packages/IPython/nbconvert/writers/files.py#L49-L53
jython/jython3
def4f8ec47cb7a9c799ea4c745f12badf92c5769
lib-python/3.5.1/rlcompleter.py
python
Completer.complete
(self, text, state)
Return the next possible completion for 'text'. This is called successively with state == 0, 1, 2, ... until it returns None. The completion should begin with 'text'.
Return the next possible completion for 'text'.
[ "Return", "the", "next", "possible", "completion", "for", "text", "." ]
def complete(self, text, state): """Return the next possible completion for 'text'. This is called successively with state == 0, 1, 2, ... until it returns None. The completion should begin with 'text'. """ if self.use_main_ns: self.namespace = __main__.__dict__ if not text.strip(): if state == 0: return '\t' else: return None if state == 0: if "." in text: self.matches = self.attr_matches(text) else: self.matches = self.global_matches(text) try: return self.matches[state] except IndexError: return None
[ "def", "complete", "(", "self", ",", "text", ",", "state", ")", ":", "if", "self", ".", "use_main_ns", ":", "self", ".", "namespace", "=", "__main__", ".", "__dict__", "if", "not", "text", ".", "strip", "(", ")", ":", "if", "state", "==", "0", ":",...
https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/rlcompleter.py#L66-L90
facelessuser/ColorHelper
cfed17c35dbae4db49a14165ef222407c48a3014
lib/coloraide/distance/delta_e_ok.py
python
DEOK.distance
(cls, color: 'Color', sample: 'Color', scalar: float = 1, **kwargs: Any)
return scalar * super().distance(color, sample)
Delta E OK color distance formula. This just uses simple Euclidean distance in the Oklab color space.
Delta E OK color distance formula.
[ "Delta", "E", "OK", "color", "distance", "formula", "." ]
def distance(cls, color: 'Color', sample: 'Color', scalar: float = 1, **kwargs: Any) -> float: """ Delta E OK color distance formula. This just uses simple Euclidean distance in the Oklab color space. """ # Equation (1) return scalar * super().distance(color, sample)
[ "def", "distance", "(", "cls", ",", "color", ":", "'Color'", ",", "sample", ":", "'Color'", ",", "scalar", ":", "float", "=", "1", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "float", ":", "# Equation (1)", "return", "scalar", "*", "super", "(", ...
https://github.com/facelessuser/ColorHelper/blob/cfed17c35dbae4db49a14165ef222407c48a3014/lib/coloraide/distance/delta_e_ok.py#L16-L24
nosmokingbandit/watcher
dadacd21a5790ee609058a98a17fcc8954d24439
lib/infi/pkg_resources/_vendor/pyparsing.py
python
ParseResults.haskeys
( self )
return bool(self.__tokdict)
Since keys() returns an iterator, this method is helpful in bypassing code that looks for the existence of any defined results names.
Since keys() returns an iterator, this method is helpful in bypassing code that looks for the existence of any defined results names.
[ "Since", "keys", "()", "returns", "an", "iterator", "this", "method", "is", "helpful", "in", "bypassing", "code", "that", "looks", "for", "the", "existence", "of", "any", "defined", "results", "names", "." ]
def haskeys( self ): """Since keys() returns an iterator, this method is helpful in bypassing code that looks for the existence of any defined results names.""" return bool(self.__tokdict)
[ "def", "haskeys", "(", "self", ")", ":", "return", "bool", "(", "self", ".", "__tokdict", ")" ]
https://github.com/nosmokingbandit/watcher/blob/dadacd21a5790ee609058a98a17fcc8954d24439/lib/infi/pkg_resources/_vendor/pyparsing.py#L483-L486
TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials
5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e
tensorflow_dl_models/research/capsules/models/layers/layers.py
python
capsule
(input_tensor, input_dim, output_dim, layer_name, input_atoms=8, output_atoms=8, **routing_args)
Builds a fully connected capsule layer. Given an input tensor of shape `[batch, input_dim, input_atoms]`, this op performs the following: 1. For each input capsule, multiples it with the weight variable to get votes of shape `[batch, input_dim, output_dim, output_atoms]`. 2. Scales the votes for each output capsule by iterative routing. 3. Squashes the output of each capsule to have norm less than one. Each capsule of this layer has one weight tensor for each capsules of layer below. Therefore, this layer has the following number of trainable variables: w: [input_dim * num_in_atoms, output_dim * num_out_atoms] b: [output_dim * num_out_atoms] Args: input_tensor: tensor, activation output of the layer below. input_dim: scalar, number of capsules in the layer below. output_dim: scalar, number of capsules in this layer. layer_name: string, Name of this layer. input_atoms: scalar, number of units in each capsule of input layer. output_atoms: scalar, number of units in each capsule of output layer. **routing_args: dictionary {leaky, num_routing}, args for routing function. Returns: Tensor of activations for this layer of shape `[batch, output_dim, output_atoms]`.
Builds a fully connected capsule layer.
[ "Builds", "a", "fully", "connected", "capsule", "layer", "." ]
def capsule(input_tensor, input_dim, output_dim, layer_name, input_atoms=8, output_atoms=8, **routing_args): """Builds a fully connected capsule layer. Given an input tensor of shape `[batch, input_dim, input_atoms]`, this op performs the following: 1. For each input capsule, multiples it with the weight variable to get votes of shape `[batch, input_dim, output_dim, output_atoms]`. 2. Scales the votes for each output capsule by iterative routing. 3. Squashes the output of each capsule to have norm less than one. Each capsule of this layer has one weight tensor for each capsules of layer below. Therefore, this layer has the following number of trainable variables: w: [input_dim * num_in_atoms, output_dim * num_out_atoms] b: [output_dim * num_out_atoms] Args: input_tensor: tensor, activation output of the layer below. input_dim: scalar, number of capsules in the layer below. output_dim: scalar, number of capsules in this layer. layer_name: string, Name of this layer. input_atoms: scalar, number of units in each capsule of input layer. output_atoms: scalar, number of units in each capsule of output layer. **routing_args: dictionary {leaky, num_routing}, args for routing function. Returns: Tensor of activations for this layer of shape `[batch, output_dim, output_atoms]`. """ with tf.variable_scope(layer_name): # weights variable will hold the state of the weights for the layer weights = variables.weight_variable( [input_dim, input_atoms, output_dim * output_atoms]) biases = variables.bias_variable([output_dim, output_atoms]) with tf.name_scope('Wx_plus_b'): # Depthwise matmul: [b, d, c] ** [d, c, o_c] = [b, d, o_c] # To do this: tile input, do element-wise multiplication and reduce # sum over input_atoms dimmension. 
input_tiled = tf.tile( tf.expand_dims(input_tensor, -1), [1, 1, 1, output_dim * output_atoms]) votes = tf.reduce_sum(input_tiled * weights, axis=2) votes_reshaped = tf.reshape(votes, [-1, input_dim, output_dim, output_atoms]) with tf.name_scope('routing'): input_shape = tf.shape(input_tensor) logit_shape = tf.stack([input_shape[0], input_dim, output_dim]) activations = _update_routing( votes=votes_reshaped, biases=biases, logit_shape=logit_shape, num_dims=4, input_dim=input_dim, output_dim=output_dim, **routing_args) return activations
[ "def", "capsule", "(", "input_tensor", ",", "input_dim", ",", "output_dim", ",", "layer_name", ",", "input_atoms", "=", "8", ",", "output_atoms", "=", "8", ",", "*", "*", "routing_args", ")", ":", "with", "tf", ".", "variable_scope", "(", "layer_name", ")"...
https://github.com/TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials/blob/5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e/tensorflow_dl_models/research/capsules/models/layers/layers.py#L138-L199
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/lib/antlr3/antlr3/streams.py
python
CommonTokenStream.__init__
(self, tokenSource=None, channel=DEFAULT_CHANNEL)
@param tokenSource A TokenSource instance (usually a Lexer) to pull the tokens from. @param channel Skip tokens on any channel but this one; this is how we skip whitespace...
@param tokenSource A TokenSource instance (usually a Lexer) to pull the tokens from.
[ "@param", "tokenSource", "A", "TokenSource", "instance", "(", "usually", "a", "Lexer", ")", "to", "pull", "the", "tokens", "from", "." ]
def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL): """ @param tokenSource A TokenSource instance (usually a Lexer) to pull the tokens from. @param channel Skip tokens on any channel but this one; this is how we skip whitespace... """ TokenStream.__init__(self) self.tokenSource = tokenSource # Record every single token pulled from the source so we can reproduce # chunks of it later. self.tokens = [] # Map<tokentype, channel> to override some Tokens' channel numbers self.channelOverrideMap = {} # Set<tokentype>; discard any tokens with this type self.discardSet = set() # Skip tokens on any channel but this one; this is how we skip whitespace... self.channel = channel # By default, track all incoming tokens self.discardOffChannelTokens = False # The index into the tokens list of the current token (next token # to consume). p==-1 indicates that the tokens list is empty self.p = -1 # Remember last marked position self.lastMarker = None
[ "def", "__init__", "(", "self", ",", "tokenSource", "=", "None", ",", "channel", "=", "DEFAULT_CHANNEL", ")", ":", "TokenStream", ".", "__init__", "(", "self", ")", "self", ".", "tokenSource", "=", "tokenSource", "# Record every single token pulled from the source s...
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/lib/antlr3/antlr3/streams.py#L608-L643
mayank93/Twitter-Sentiment-Analysis
f095c6ca6bf69787582b5dabb140fefaf278eb37
front-end/web2py/gluon/custom_import.py
python
_DateTrackerImporter.__call__
(self, name, globals=None, locals=None, fromlist=None, level=-1)
The import method itself.
The import method itself.
[ "The", "import", "method", "itself", "." ]
def __call__(self, name, globals=None, locals=None, fromlist=None, level=-1): """ The import method itself. """ globals = globals or {} locals = locals or {} fromlist = fromlist or [] call_begin_end = self._tl._modules_loaded is None if call_begin_end: self.begin() try: self._tl.globals = globals self._tl.locals = locals self._tl.level = level # Check the date and reload if needed: self._update_dates(name, fromlist) # Try to load the module and update the dates if it works: result = super(_DateTrackerImporter, self) \ .__call__(name, globals, locals, fromlist, level) # Module maybe loaded for the 1st time so we need to set the date self._update_dates(name, fromlist) return result except Exception: raise # Don't hide something that went wrong finally: if call_begin_end: self.end()
[ "def", "__call__", "(", "self", ",", "name", ",", "globals", "=", "None", ",", "locals", "=", "None", ",", "fromlist", "=", "None", ",", "level", "=", "-", "1", ")", ":", "globals", "=", "globals", "or", "{", "}", "locals", "=", "locals", "or", "...
https://github.com/mayank93/Twitter-Sentiment-Analysis/blob/f095c6ca6bf69787582b5dabb140fefaf278eb37/front-end/web2py/gluon/custom_import.py#L106-L137
PINTO0309/PINTO_model_zoo
2924acda7a7d541d8712efd7cc4fd1c61ef5bddd
171_Fast-SRGAN/demo/demo_fast_srgan_onnx.py
python
main
()
[]
def main(): parser = argparse.ArgumentParser() parser.add_argument( "--model", type=str, default='model_128x128/model_128x128.onnx', ) parser.add_argument( "--input_size", type=str, default='128,128', ) args = parser.parse_args() model_path = args.model input_size = args.input_size input_size = [int(i) for i in input_size.split(',')] input_width, input_height = input_size[1], input_size[0] # Initialize video capture cap = cv.VideoCapture(0) # Load model onnx_session = onnxruntime.InferenceSession(model_path) while True: start_time = time.time() # Capture read ret, frame = cap.read() if not ret: break debug_image = copy.deepcopy(frame) debug_image = cv.resize(debug_image, dsize=(input_width, input_height)) # Inference execution hr_image = run_inference( onnx_session, input_size, frame, ) elapsed_time = time.time() - start_time # Draw original_image, concat_image, _, _ = draw_debug( debug_image, elapsed_time, hr_image, ) key = cv.waitKey(1) if key == 27: # ESC break cv.imshow('Fast-SRGAN Demo : Original', original_image) cv.imshow('Fast-SRGAN Demo : HR', concat_image) cap.release() cv.destroyAllWindows()
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "\"--model\"", ",", "type", "=", "str", ",", "default", "=", "'model_128x128/model_128x128.onnx'", ",", ")", "parser", ".", "add_arg...
https://github.com/PINTO0309/PINTO_model_zoo/blob/2924acda7a7d541d8712efd7cc4fd1c61ef5bddd/171_Fast-SRGAN/demo/demo_fast_srgan_onnx.py#L36-L97
wwqgtxx/wwqLyParse
33136508e52821babd9294fdecffbdf02d73a6fc
wwqLyParse/lib/python-3.7.2-embed-win32/Crypto/Hash/SHA1.py
python
SHA1Hash.digest
(self)
return get_raw_buffer(bfr)
Return the **binary** (non-printable) digest of the message that has been hashed so far. :return: The hash digest, computed over the data processed so far. Binary form. :rtype: byte string
Return the **binary** (non-printable) digest of the message that has been hashed so far.
[ "Return", "the", "**", "binary", "**", "(", "non", "-", "printable", ")", "digest", "of", "the", "message", "that", "has", "been", "hashed", "so", "far", "." ]
def digest(self): """Return the **binary** (non-printable) digest of the message that has been hashed so far. :return: The hash digest, computed over the data processed so far. Binary form. :rtype: byte string """ bfr = create_string_buffer(self.digest_size) result = _raw_sha1_lib.SHA1_digest(self._state.get(), bfr) if result: raise ValueError("Error %d while instantiating SHA1" % result) return get_raw_buffer(bfr)
[ "def", "digest", "(", "self", ")", ":", "bfr", "=", "create_string_buffer", "(", "self", ".", "digest_size", ")", "result", "=", "_raw_sha1_lib", ".", "SHA1_digest", "(", "self", ".", "_state", ".", "get", "(", ")", ",", "bfr", ")", "if", "result", ":"...
https://github.com/wwqgtxx/wwqLyParse/blob/33136508e52821babd9294fdecffbdf02d73a6fc/wwqLyParse/lib/python-3.7.2-embed-win32/Crypto/Hash/SHA1.py#L97-L112
amymcgovern/pyparrot
bf4775ec1199b282e4edde1e4a8e018dcc8725e0
pyparrot/utils/vlc.py
python
MediaPlayer.get_xwindow
(self)
return libvlc_media_player_get_xwindow(self)
Get the X Window System window identifier previously set with L{set_xwindow}(). Note that this will return the identifier even if VLC is not currently using it (for instance if it is playing an audio-only input). @return: an X window ID, or 0 if none where set.
Get the X Window System window identifier previously set with L{set_xwindow}(). Note that this will return the identifier even if VLC is not currently using it (for instance if it is playing an audio-only input).
[ "Get", "the", "X", "Window", "System", "window", "identifier", "previously", "set", "with", "L", "{", "set_xwindow", "}", "()", ".", "Note", "that", "this", "will", "return", "the", "identifier", "even", "if", "VLC", "is", "not", "currently", "using", "it"...
def get_xwindow(self): '''Get the X Window System window identifier previously set with L{set_xwindow}(). Note that this will return the identifier even if VLC is not currently using it (for instance if it is playing an audio-only input). @return: an X window ID, or 0 if none where set. ''' return libvlc_media_player_get_xwindow(self)
[ "def", "get_xwindow", "(", "self", ")", ":", "return", "libvlc_media_player_get_xwindow", "(", "self", ")" ]
https://github.com/amymcgovern/pyparrot/blob/bf4775ec1199b282e4edde1e4a8e018dcc8725e0/pyparrot/utils/vlc.py#L3552-L3559
ShuangLI59/person_search
ef7d77a58a581825611e575010d9a3653b1ddf98
lib/datasets/imdb.py
python
imdb.set_proposal_method
(self, method)
[]
def set_proposal_method(self, method): method = eval('self.' + method + '_roidb') self.roidb_handler = method
[ "def", "set_proposal_method", "(", "self", ",", "method", ")", ":", "method", "=", "eval", "(", "'self.'", "+", "method", "+", "'_roidb'", ")", "self", ".", "roidb_handler", "=", "method" ]
https://github.com/ShuangLI59/person_search/blob/ef7d77a58a581825611e575010d9a3653b1ddf98/lib/datasets/imdb.py#L55-L57
robhagemans/pcbasic
c3a043b46af66623a801e18a38175be077251ada
pcbasic/basic/interpreter.py
python
Interpreter.set_parse_mode
(self, on)
Enter or exit parse mode.
Enter or exit parse mode.
[ "Enter", "or", "exit", "parse", "mode", "." ]
def set_parse_mode(self, on): """Enter or exit parse mode.""" self._parse_mode = on self._cursor.set_direct(not on)
[ "def", "set_parse_mode", "(", "self", ",", "on", ")", ":", "self", ".", "_parse_mode", "=", "on", "self", ".", "_cursor", ".", "set_direct", "(", "not", "on", ")" ]
https://github.com/robhagemans/pcbasic/blob/c3a043b46af66623a801e18a38175be077251ada/pcbasic/basic/interpreter.py#L132-L135
stopstalk/stopstalk-deployment
10c3ab44c4ece33ae515f6888c15033db2004bb1
aws_lambda/spoj_aws_lambda_function/lambda_code/setuptools/msvc.py
python
SystemInfo.FSharpInstallDir
(self)
return self.ri.lookup(path, 'productdir') or ''
Microsoft Visual F# directory.
Microsoft Visual F# directory.
[ "Microsoft", "Visual", "F#", "directory", "." ]
def FSharpInstallDir(self): """ Microsoft Visual F# directory. """ path = r'%0.1f\Setup\F#' % self.vc_ver path = os.path.join(self.ri.visualstudio, path) return self.ri.lookup(path, 'productdir') or ''
[ "def", "FSharpInstallDir", "(", "self", ")", ":", "path", "=", "r'%0.1f\\Setup\\F#'", "%", "self", ".", "vc_ver", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "ri", ".", "visualstudio", ",", "path", ")", "return", "self", ".", "ri", ...
https://github.com/stopstalk/stopstalk-deployment/blob/10c3ab44c4ece33ae515f6888c15033db2004bb1/aws_lambda/spoj_aws_lambda_function/lambda_code/setuptools/msvc.py#L676-L682
zim-desktop-wiki/zim-desktop-wiki
fe717d7ee64e5c06d90df90eb87758e5e72d25c5
zim/formats/__init__.py
python
ParseTree.cleanup_headings
(self, offset=0, max=6)
Change the heading levels throughout the tree. This makes sure that al headings are nested directly under their parent (no gaps in the levels of the headings). Also you can set an offset for the top level and a max depth.
Change the heading levels throughout the tree. This makes sure that al headings are nested directly under their parent (no gaps in the levels of the headings). Also you can set an offset for the top level and a max depth.
[ "Change", "the", "heading", "levels", "throughout", "the", "tree", ".", "This", "makes", "sure", "that", "al", "headings", "are", "nested", "directly", "under", "their", "parent", "(", "no", "gaps", "in", "the", "levels", "of", "the", "headings", ")", ".",...
def cleanup_headings(self, offset=0, max=6): '''Change the heading levels throughout the tree. This makes sure that al headings are nested directly under their parent (no gaps in the levels of the headings). Also you can set an offset for the top level and a max depth. ''' path = [] for heading in self._etree.iter('h'): level = int(heading.attrib['level']) # find parent header in path using old level while path and path[-1][0] >= level: path.pop() if not path: newlevel = offset + 1 else: newlevel = path[-1][1] + 1 if newlevel > max: newlevel = max heading.attrib['level'] = newlevel path.append((level, newlevel))
[ "def", "cleanup_headings", "(", "self", ",", "offset", "=", "0", ",", "max", "=", "6", ")", ":", "path", "=", "[", "]", "for", "heading", "in", "self", ".", "_etree", ".", "iter", "(", "'h'", ")", ":", "level", "=", "int", "(", "heading", ".", ...
https://github.com/zim-desktop-wiki/zim-desktop-wiki/blob/fe717d7ee64e5c06d90df90eb87758e5e72d25c5/zim/formats/__init__.py#L493-L512
PaulSonOfLars/tgbot
0ece72778b7772725ab214fe0929daaa2fc7d2d1
tg_bot/modules/helper_funcs/misc.py
python
revert_buttons
(buttons)
return res
[]
def revert_buttons(buttons): res = "" for btn in buttons: if btn.same_line: res += "\n[{}](buttonurl://{}:same)".format(btn.name, btn.url) else: res += "\n[{}](buttonurl://{})".format(btn.name, btn.url) return res
[ "def", "revert_buttons", "(", "buttons", ")", ":", "res", "=", "\"\"", "for", "btn", "in", "buttons", ":", "if", "btn", ".", "same_line", ":", "res", "+=", "\"\\n[{}](buttonurl://{}:same)\"", ".", "format", "(", "btn", ".", "name", ",", "btn", ".", "url"...
https://github.com/PaulSonOfLars/tgbot/blob/0ece72778b7772725ab214fe0929daaa2fc7d2d1/tg_bot/modules/helper_funcs/misc.py#L97-L105
IronLanguages/ironpython3
7a7bb2a872eeab0d1009fc8a6e24dca43f65b693
Src/StdLib/Lib/msilib/__init__.py
python
CAB.append
(self, full, file, logical)
return self.index, logical
[]
def append(self, full, file, logical): if os.path.isdir(full): return if not logical: logical = self.gen_id(file) self.index += 1 self.files.append((full, logical)) return self.index, logical
[ "def", "append", "(", "self", ",", "full", ",", "file", ",", "logical", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "full", ")", ":", "return", "if", "not", "logical", ":", "logical", "=", "self", ".", "gen_id", "(", "file", ")", "self"...
https://github.com/IronLanguages/ironpython3/blob/7a7bb2a872eeab0d1009fc8a6e24dca43f65b693/Src/StdLib/Lib/msilib/__init__.py#L201-L208
replit-archive/empythoned
977ec10ced29a3541a4973dc2b59910805695752
dist/lib/python2.7/decimal.py
python
Context.is_normal
(self, a)
return a.is_normal(context=self)
Return True if the operand is a normal number; otherwise return False. >>> c = ExtendedContext.copy() >>> c.Emin = -999 >>> c.Emax = 999 >>> c.is_normal(Decimal('2.50')) True >>> c.is_normal(Decimal('0.1E-999')) False >>> c.is_normal(Decimal('0.00')) False >>> c.is_normal(Decimal('-Inf')) False >>> c.is_normal(Decimal('NaN')) False >>> c.is_normal(1) True
Return True if the operand is a normal number; otherwise return False.
[ "Return", "True", "if", "the", "operand", "is", "a", "normal", "number", ";", "otherwise", "return", "False", "." ]
def is_normal(self, a): """Return True if the operand is a normal number; otherwise return False. >>> c = ExtendedContext.copy() >>> c.Emin = -999 >>> c.Emax = 999 >>> c.is_normal(Decimal('2.50')) True >>> c.is_normal(Decimal('0.1E-999')) False >>> c.is_normal(Decimal('0.00')) False >>> c.is_normal(Decimal('-Inf')) False >>> c.is_normal(Decimal('NaN')) False >>> c.is_normal(1) True """ a = _convert_other(a, raiseit=True) return a.is_normal(context=self)
[ "def", "is_normal", "(", "self", ",", "a", ")", ":", "a", "=", "_convert_other", "(", "a", ",", "raiseit", "=", "True", ")", "return", "a", ".", "is_normal", "(", "context", "=", "self", ")" ]
https://github.com/replit-archive/empythoned/blob/977ec10ced29a3541a4973dc2b59910805695752/dist/lib/python2.7/decimal.py#L4348-L4369
santhoshkolloju/Abstractive-Summarization-With-Transfer-Learning
97ff2ae3ba9f2d478e174444c4e0f5349f28c319
texar_repo/texar/data/data/scalar_data.py
python
ScalarData.dataset_size
(self)
return self._dataset_size
Returns the number of data instances in the dataset. Note that this is the total data count in the raw files, before any filtering and truncation.
Returns the number of data instances in the dataset.
[ "Returns", "the", "number", "of", "data", "instances", "in", "the", "dataset", "." ]
def dataset_size(self): """Returns the number of data instances in the dataset. Note that this is the total data count in the raw files, before any filtering and truncation. """ if not self._dataset_size: # pylint: disable=attribute-defined-outside-init self._dataset_size = count_file_lines( self._hparams.dataset.files) return self._dataset_size
[ "def", "dataset_size", "(", "self", ")", ":", "if", "not", "self", ".", "_dataset_size", ":", "# pylint: disable=attribute-defined-outside-init", "self", ".", "_dataset_size", "=", "count_file_lines", "(", "self", ".", "_hparams", ".", "dataset", ".", "files", ")"...
https://github.com/santhoshkolloju/Abstractive-Summarization-With-Transfer-Learning/blob/97ff2ae3ba9f2d478e174444c4e0f5349f28c319/texar_repo/texar/data/data/scalar_data.py#L245-L255
hellock/icrawler
3506401984f4a2fe04db8a5608f54905c42bb6a6
icrawler/utils/signal.py
python
Signal.reset
(self)
Reset signals with their initial values
Reset signals with their initial values
[ "Reset", "signals", "with", "their", "initial", "values" ]
def reset(self): """Reset signals with their initial values""" self._signals = self._init_status.copy()
[ "def", "reset", "(", "self", ")", ":", "self", ".", "_signals", "=", "self", ".", "_init_status", ".", "copy", "(", ")" ]
https://github.com/hellock/icrawler/blob/3506401984f4a2fe04db8a5608f54905c42bb6a6/icrawler/utils/signal.py#L31-L33
Calysto/metakernel
9815c0e8b3f9c427105b5d094e9041a303302469
metakernel/magics/run_magic.py
python
RunMagic.line_run
(self, filename, language=None)
%run [--language LANG] FILENAME - run code in filename by kernel This magic will take the code in FILENAME and run it. The exact details of how the code runs are deterimined by your language. The --language LANG option will prefix the file contents with "%%LANG". You may also put information in the cell which will appear before the contents of the file. Examples: %run filename.ss %run -l python filename.py %kx calysto_scheme.kernel CalystoScheme %run --language kx filename.ss %run --language "kx default" filename.ss Note: not all languages may support %run.
%run [--language LANG] FILENAME - run code in filename by kernel
[ "%run", "[", "--", "language", "LANG", "]", "FILENAME", "-", "run", "code", "in", "filename", "by", "kernel" ]
def line_run(self, filename, language=None): """ %run [--language LANG] FILENAME - run code in filename by kernel This magic will take the code in FILENAME and run it. The exact details of how the code runs are deterimined by your language. The --language LANG option will prefix the file contents with "%%LANG". You may also put information in the cell which will appear before the contents of the file. Examples: %run filename.ss %run -l python filename.py %kx calysto_scheme.kernel CalystoScheme %run --language kx filename.ss %run --language "kx default" filename.ss Note: not all languages may support %run. """ if filename.startswith("~"): filename = os.path.expanduser(filename) filename = os.path.abspath(filename) if language is None: self.kernel.do_execute_file(filename) else: self.code = "%%" + language + "\n" + self.code with open(filename) as f: self.code += "".join(f.readlines())
[ "def", "line_run", "(", "self", ",", "filename", ",", "language", "=", "None", ")", ":", "if", "filename", ".", "startswith", "(", "\"~\"", ")", ":", "filename", "=", "os", ".", "path", ".", "expanduser", "(", "filename", ")", "filename", "=", "os", ...
https://github.com/Calysto/metakernel/blob/9815c0e8b3f9c427105b5d094e9041a303302469/metakernel/magics/run_magic.py#L13-L43