repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
DataDog/integrations-core
ceph/datadog_checks/ceph/ceph.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/ceph/datadog_checks/ceph/ceph.py#L249-L258
def _osd_pct_used(self, health): """Take a single health check string, return (OSD name, percentage used)""" # Full string looks like: osd.2 is full at 95% # Near full string: osd.1 is near full at 94% pct = re.compile(r'\d+%').findall(health) osd = re.compile(r'osd.\d+').findall(health) if len(pct) > 0 and len(osd) > 0: return osd[0], int(pct[0][:-1]) else: return None, None
[ "def", "_osd_pct_used", "(", "self", ",", "health", ")", ":", "# Full string looks like: osd.2 is full at 95%", "# Near full string: osd.1 is near full at 94%", "pct", "=", "re", ".", "compile", "(", "r'\\d+%'", ")", ".", "findall", "(", "health", ")", "osd", "=", "...
Take a single health check string, return (OSD name, percentage used)
[ "Take", "a", "single", "health", "check", "string", "return", "(", "OSD", "name", "percentage", "used", ")" ]
python
train
HPAC/matchpy
matchpy/expressions/substitution.py
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/substitution.py#L79-L101
def union_with_variable(self, variable: str, replacement: VariableReplacement) -> 'Substitution': """Try to create a new substitution with the given variable added. See :meth:`try_add_variable` for a version of this method that modifies the substitution in place. Args: variable_name: The name of the variable to add. replacement: The substitution for the variable. Returns: The new substitution with the variable_name added or merged. Raises: ValueError: if the variable cannot be merged because it conflicts with the existing substitution for the variable. """ new_subst = Substitution(self) new_subst.try_add_variable(variable, replacement) return new_subst
[ "def", "union_with_variable", "(", "self", ",", "variable", ":", "str", ",", "replacement", ":", "VariableReplacement", ")", "->", "'Substitution'", ":", "new_subst", "=", "Substitution", "(", "self", ")", "new_subst", ".", "try_add_variable", "(", "variable", "...
Try to create a new substitution with the given variable added. See :meth:`try_add_variable` for a version of this method that modifies the substitution in place. Args: variable_name: The name of the variable to add. replacement: The substitution for the variable. Returns: The new substitution with the variable_name added or merged. Raises: ValueError: if the variable cannot be merged because it conflicts with the existing substitution for the variable.
[ "Try", "to", "create", "a", "new", "substitution", "with", "the", "given", "variable", "added", "." ]
python
train
Kortemme-Lab/klab
klab/deprecated/rosettadb.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/deprecated/rosettadb.py#L625-L654
def callproc(self, procname, parameters = (), cursorClass = DictCursor, quiet = False): """Calls a MySQL stored procedure procname. This uses DictCursor by default.""" i = 0 errcode = 0 caughte = None while i < self.numTries: i += 1 try: cursor = self.connection.cursor(cursorClass) if type(parameters) != type(()): parameters = (parameters,) errcode = cursor.callproc(procname, parameters) results = cursor.fetchall() self.lastrowid = int(cursor.lastrowid) cursor.close() return results except MySQLdb.OperationalError, e: errcode = e[0] self.connection.ping() caughte = e continue except: traceback.print_exc() break if not quiet: sys.stderr.write("\nSQL execution error call stored procedure %s at %s:" % (procname, datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" % (errcode, str(caughte))) sys.stderr.flush() raise MySQLdb.OperationalError(caughte)
[ "def", "callproc", "(", "self", ",", "procname", ",", "parameters", "=", "(", ")", ",", "cursorClass", "=", "DictCursor", ",", "quiet", "=", "False", ")", ":", "i", "=", "0", "errcode", "=", "0", "caughte", "=", "None", "while", "i", "<", "self", "...
Calls a MySQL stored procedure procname. This uses DictCursor by default.
[ "Calls", "a", "MySQL", "stored", "procedure", "procname", ".", "This", "uses", "DictCursor", "by", "default", "." ]
python
train
Enteee/pdml2flow
pdml2flow/utils.py
https://github.com/Enteee/pdml2flow/blob/bc9efe379b0b2406bfbbbd8e0f678b1f63805c66/pdml2flow/utils.py#L11-L18
def autoconvert(string): """Try to convert variables into datatypes.""" for fn in (boolify, int, float): try: return fn(string) except ValueError: pass return string
[ "def", "autoconvert", "(", "string", ")", ":", "for", "fn", "in", "(", "boolify", ",", "int", ",", "float", ")", ":", "try", ":", "return", "fn", "(", "string", ")", "except", "ValueError", ":", "pass", "return", "string" ]
Try to convert variables into datatypes.
[ "Try", "to", "convert", "variables", "into", "datatypes", "." ]
python
train
rigetti/pyquil
pyquil/device.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/device.py#L265-L324
def to_dict(self): """ Create a JSON-serializable representation of the device Specs. The dictionary representation is of the form:: { '1Q': { "0": { "f1QRB": 0.99, "T1": 20e-6, ... }, "1": { "f1QRB": 0.989, "T1": 19e-6, ... }, ... }, '2Q': { "1-4": { "fBellState": 0.93, "fCZ": 0.92, "fCZ_std_err": 0.03, "fCPHASE": 0.91 }, "1-5": { "fBellState": 0.9, "fCZ": 0.89, "fCZ_std_err": 0.05, "fCPHASE": 0.88 }, ... }, ... } :return: A dctionary representation of self. :rtype: Dict[str, Any] """ return { '1Q': { "{}".format(qs.id): { 'f1QRB': qs.f1QRB, 'fRO': qs.fRO, 'T1': qs.T1, 'T2': qs.T2, 'fActiveReset': qs.fActiveReset } for qs in self.qubits_specs }, '2Q': { "{}-{}".format(*es.targets): { 'fBellState': es.fBellState, 'fCZ': es.fCZ, 'fCZ_std_err': es.fCZ_std_err, 'fCPHASE': es.fCPHASE } for es in self.edges_specs } }
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "'1Q'", ":", "{", "\"{}\"", ".", "format", "(", "qs", ".", "id", ")", ":", "{", "'f1QRB'", ":", "qs", ".", "f1QRB", ",", "'fRO'", ":", "qs", ".", "fRO", ",", "'T1'", ":", "qs", ".", "T1"...
Create a JSON-serializable representation of the device Specs. The dictionary representation is of the form:: { '1Q': { "0": { "f1QRB": 0.99, "T1": 20e-6, ... }, "1": { "f1QRB": 0.989, "T1": 19e-6, ... }, ... }, '2Q': { "1-4": { "fBellState": 0.93, "fCZ": 0.92, "fCZ_std_err": 0.03, "fCPHASE": 0.91 }, "1-5": { "fBellState": 0.9, "fCZ": 0.89, "fCZ_std_err": 0.05, "fCPHASE": 0.88 }, ... }, ... } :return: A dctionary representation of self. :rtype: Dict[str, Any]
[ "Create", "a", "JSON", "-", "serializable", "representation", "of", "the", "device", "Specs", "." ]
python
train
twitterdev/tweet_parser
tweet_parser/getter_methods/tweet_entities.py
https://github.com/twitterdev/tweet_parser/blob/3435de8367d36b483a6cfd8d46cc28694ee8a42e/tweet_parser/getter_methods/tweet_entities.py#L215-L245
def get_hashtags(tweet): """ Get a list of hashtags in the Tweet Note that in the case of a quote-tweet, this does not return the hashtags in the quoted status. Args: tweet (Tweet or dict): A Tweet object or dictionary Returns: list (a list of strings): list of all of the hashtags in the Tweet Example: >>> from tweet_parser.getter_methods.tweet_entities import get_hashtags >>> original = {"created_at": "Wed May 24 20:17:19 +0000 2017", ... "entities": {"hashtags": [{"text":"1hashtag"}]}} >>> get_hashtags(original) ['1hashtag'] >>> activity = {"postedTime": "2017-05-24T20:17:19.000Z", ... "verb": "post", ... "twitter_entities": {"hashtags": [ ... {"text":"1hashtag"}, ... {"text": "moreHashtags"}]}} >>> get_hashtags(activity) ['1hashtag', 'moreHashtags'] """ entities = get_entities(tweet) hashtags = entities.get("hashtags") hashtags = [tag["text"] for tag in hashtags] if hashtags else [] return hashtags
[ "def", "get_hashtags", "(", "tweet", ")", ":", "entities", "=", "get_entities", "(", "tweet", ")", "hashtags", "=", "entities", ".", "get", "(", "\"hashtags\"", ")", "hashtags", "=", "[", "tag", "[", "\"text\"", "]", "for", "tag", "in", "hashtags", "]", ...
Get a list of hashtags in the Tweet Note that in the case of a quote-tweet, this does not return the hashtags in the quoted status. Args: tweet (Tweet or dict): A Tweet object or dictionary Returns: list (a list of strings): list of all of the hashtags in the Tweet Example: >>> from tweet_parser.getter_methods.tweet_entities import get_hashtags >>> original = {"created_at": "Wed May 24 20:17:19 +0000 2017", ... "entities": {"hashtags": [{"text":"1hashtag"}]}} >>> get_hashtags(original) ['1hashtag'] >>> activity = {"postedTime": "2017-05-24T20:17:19.000Z", ... "verb": "post", ... "twitter_entities": {"hashtags": [ ... {"text":"1hashtag"}, ... {"text": "moreHashtags"}]}} >>> get_hashtags(activity) ['1hashtag', 'moreHashtags']
[ "Get", "a", "list", "of", "hashtags", "in", "the", "Tweet", "Note", "that", "in", "the", "case", "of", "a", "quote", "-", "tweet", "this", "does", "not", "return", "the", "hashtags", "in", "the", "quoted", "status", "." ]
python
train
pandas-dev/pandas
pandas/core/indexes/base.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5393-L5400
def _trim_front(strings): """ Trims zeros and decimal points. """ trimmed = strings while len(strings) > 0 and all(x[0] == ' ' for x in trimmed): trimmed = [x[1:] for x in trimmed] return trimmed
[ "def", "_trim_front", "(", "strings", ")", ":", "trimmed", "=", "strings", "while", "len", "(", "strings", ")", ">", "0", "and", "all", "(", "x", "[", "0", "]", "==", "' '", "for", "x", "in", "trimmed", ")", ":", "trimmed", "=", "[", "x", "[", ...
Trims zeros and decimal points.
[ "Trims", "zeros", "and", "decimal", "points", "." ]
python
train
SteveMcGrath/pySecurityCenter
examples/sc5/download_scans/downloader.py
https://github.com/SteveMcGrath/pySecurityCenter/blob/f0b10b1bcd4fd23a8d4d09ca6774cdf5e1cfd880/examples/sc5/download_scans/downloader.py#L15-L79
def download_scans(sc, age=0, unzip=False, path='scans'): '''Scan Downloader Here we will attempt to download all of the scans that have completed between now and AGE days ago. sc = SecurityCenter5 object age = how many days back do we want to pull? (default: 0) unzip = Do we want to uncompress the nessus files? (default: False) path = Path where the resulting data will be placed. (default: scans) ''' # if the download path doesn't exist, we need to create it. if not os.path.exists(path): logger.debug('scan path didn\'t exist. creating it.') os.makedirs(path) # Now we will need to comuter the timestamp for the date that the age has # apecified. The API expects this in a unix timestamp format. findate = (date.today() - timedelta(days=age)) # Lets get the list of scans that had completed within the timefram that we # had specified. logger.debug('getting scan results for parsing') resp = sc.get('scanResult', params={ 'startTime': int(time.mktime(findate.timetuple())), 'fields': 'name,finishTime,downloadAvailable,repository', }) for scan in resp.json()['response']['usable']: # If this particular scan does not have any results (either it was a # partial, failed, or incomplete scan) then we have nothing further to # do and should simply ignore this scan. if scan['downloadAvailable'] == 'false': logger.debug('%s/"%s" not available for download' % (scan['id'], scan['name'])) else: # Well look, this scan actually has results, lets go ahead and pull # them down. logger.debug('%s/"%s" downloading' % (scan['id'], scan['name'])) scandata = sc.post('scanResult/%s/download' % scan['id'], json={'downloadType': 'v2'}) sfin = datetime.fromtimestamp(int(scan['finishTime'])) # The filename is being computed generically here. As this will be # used whether we extract the .nessus file out of the zipfile or # not. filename = '%s-%s.%s.%s' % (scan['id'], scan['name'].replace(' ', '_'), scan['repository']['id'], sfin.strftime('%Y.%m.%d-%H.%M')) if unzip: # Unzip that .nessus file! 
logger.debug('extracting %s/%s' % (scan['id'], scan['name'])) zfile = ZipFile(StringIO(buf=scandata.content)) scanfile = zfile.filelist[0] scanfile.filename = '%s.nessus' % filename zfile.extract(scanfile, path=path) else: # We want to keep it compressed, just dump to disk. logger.debug('writing zip for %s/%s' % (scan['id'], scan['name'])) with open('%s.zip' % filename, 'wb') as zfile: zfile.write(scandata.content) # Were done with this scan file!!! logger.info('%s/"%s" downloaded' % (scan['id'], scan['name']))
[ "def", "download_scans", "(", "sc", ",", "age", "=", "0", ",", "unzip", "=", "False", ",", "path", "=", "'scans'", ")", ":", "# if the download path doesn't exist, we need to create it.", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", ...
Scan Downloader Here we will attempt to download all of the scans that have completed between now and AGE days ago. sc = SecurityCenter5 object age = how many days back do we want to pull? (default: 0) unzip = Do we want to uncompress the nessus files? (default: False) path = Path where the resulting data will be placed. (default: scans)
[ "Scan", "Downloader", "Here", "we", "will", "attempt", "to", "download", "all", "of", "the", "scans", "that", "have", "completed", "between", "now", "and", "AGE", "days", "ago", "." ]
python
train
jfilter/text-classification-keras
texcla/libs/fastTextWikiTokenizer/subprocess_fix.py
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/libs/fastTextWikiTokenizer/subprocess_fix.py#L7-L63
def check_output_input(*popenargs, **kwargs): """Run command with arguments and return its output as a byte string. If the exit code was non-zero it raises a CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute and output in the output attribute. The arguments are the same as for the Popen constructor. Example: >>> check_output(["ls", "-l", "/dev/null"]) 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n' The stdout argument is not allowed as it is used internally. To capture standard error in the result, use stderr=STDOUT. >>> check_output(["/bin/sh", "-c", ... "ls -l non_existent_file ; exit 0"], ... stderr=STDOUT) 'ls: non_existent_file: No such file or directory\n' There is an additional optional argument, "input", allowing you to pass a string to the subprocess's stdin. If you use this argument you may not also use the Popen constructor's "stdin" argument, as it too will be used internally. Example: >>> check_output(["sed", "-e", "s/foo/bar/"], ... input=b"when in the course of fooman events\n") b'when in the course of barman events\n' If universal_newlines=True is passed, the return value will be a string rather than bytes. """ if 'stdout' in kwargs: raise ValueError('stdout argument not allowed, it will be overridden.') if 'input' in kwargs: if 'stdin' in kwargs: raise ValueError('stdin and input arguments may not both be used.') inputdata = kwargs['input'] del kwargs['input'] kwargs['stdin'] = PIPE else: inputdata = None process = Popen(*popenargs, stdout=PIPE, **kwargs) try: output, unused_err = process.communicate(inputdata) except: process.kill() process.wait() raise retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] raise CalledProcessError(retcode, cmd, output=output) return output
[ "def", "check_output_input", "(", "*", "popenargs", ",", "*", "*", "kwargs", ")", ":", "if", "'stdout'", "in", "kwargs", ":", "raise", "ValueError", "(", "'stdout argument not allowed, it will be overridden.'", ")", "if", "'input'", "in", "kwargs", ":", "if", "'...
Run command with arguments and return its output as a byte string. If the exit code was non-zero it raises a CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute and output in the output attribute. The arguments are the same as for the Popen constructor. Example: >>> check_output(["ls", "-l", "/dev/null"]) 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n' The stdout argument is not allowed as it is used internally. To capture standard error in the result, use stderr=STDOUT. >>> check_output(["/bin/sh", "-c", ... "ls -l non_existent_file ; exit 0"], ... stderr=STDOUT) 'ls: non_existent_file: No such file or directory\n' There is an additional optional argument, "input", allowing you to pass a string to the subprocess's stdin. If you use this argument you may not also use the Popen constructor's "stdin" argument, as it too will be used internally. Example: >>> check_output(["sed", "-e", "s/foo/bar/"], ... input=b"when in the course of fooman events\n") b'when in the course of barman events\n' If universal_newlines=True is passed, the return value will be a string rather than bytes.
[ "Run", "command", "with", "arguments", "and", "return", "its", "output", "as", "a", "byte", "string", "." ]
python
train
saltstack/salt
salt/modules/git.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/git.py#L1960-L2015
def discard_local_changes(cwd, path='.', user=None, password=None, ignore_retcode=False, output_encoding=None): ''' .. versionadded:: 2019.2.0 Runs a ``git checkout -- <path>`` from the directory specified by ``cwd``. cwd The path to the git checkout path path relative to cwd (defaults to ``.``) user User under which to run the git command. By default, the command is run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. ignore_retcode : False If ``True``, do not log an error to the minion log if the git command returns a nonzero exit status. output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. CLI Example: .. code-block:: bash salt myminion git.discard_local_changes /path/to/repo salt myminion git.discard_local_changes /path/to/repo path=foo ''' cwd = _expand_path(cwd, user) command = ['git', 'checkout', '--', path] # Checkout message goes to stderr return _git_run(command, cwd=cwd, user=user, password=password, ignore_retcode=ignore_retcode, redirect_stderr=True, output_encoding=output_encoding)['stdout']
[ "def", "discard_local_changes", "(", "cwd", ",", "path", "=", "'.'", ",", "user", "=", "None", ",", "password", "=", "None", ",", "ignore_retcode", "=", "False", ",", "output_encoding", "=", "None", ")", ":", "cwd", "=", "_expand_path", "(", "cwd", ",", ...
.. versionadded:: 2019.2.0 Runs a ``git checkout -- <path>`` from the directory specified by ``cwd``. cwd The path to the git checkout path path relative to cwd (defaults to ``.``) user User under which to run the git command. By default, the command is run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. ignore_retcode : False If ``True``, do not log an error to the minion log if the git command returns a nonzero exit status. output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. CLI Example: .. code-block:: bash salt myminion git.discard_local_changes /path/to/repo salt myminion git.discard_local_changes /path/to/repo path=foo
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
python
train
spacetelescope/stsci.tools
lib/stsci/tools/wcsutil.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/wcsutil.py#L542-L570
def scale_WCS(self,pixel_scale,retain=True): ''' Scale the WCS to a new pixel_scale. The 'retain' parameter [default value: True] controls whether or not to retain the original distortion solution in the CD matrix. ''' _ratio = pixel_scale / self.pscale # Correct the size of the image and CRPIX values for scaled WCS self.naxis1 /= _ratio self.naxis2 /= _ratio self.crpix1 = self.naxis1/2. self.crpix2 = self.naxis2/2. if retain: # Correct the WCS while retaining original distortion information self.cd11 *= _ratio self.cd12 *= _ratio self.cd21 *= _ratio self.cd22 *= _ratio else: pscale = pixel_scale / 3600. self.cd11 = -pscale * N.cos(pa) self.cd12 = pscale * N.sin(pa) self.cd21 = self.cd12 self.cd22 = -self.cd11 # Now make sure that all derived values are really up-to-date based # on these changes self.update()
[ "def", "scale_WCS", "(", "self", ",", "pixel_scale", ",", "retain", "=", "True", ")", ":", "_ratio", "=", "pixel_scale", "/", "self", ".", "pscale", "# Correct the size of the image and CRPIX values for scaled WCS", "self", ".", "naxis1", "/=", "_ratio", "self", "...
Scale the WCS to a new pixel_scale. The 'retain' parameter [default value: True] controls whether or not to retain the original distortion solution in the CD matrix.
[ "Scale", "the", "WCS", "to", "a", "new", "pixel_scale", ".", "The", "retain", "parameter", "[", "default", "value", ":", "True", "]", "controls", "whether", "or", "not", "to", "retain", "the", "original", "distortion", "solution", "in", "the", "CD", "matri...
python
train
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L345-L364
def resolve_base_href(self, handle_failures=None): """ Find any ``<base href>`` tag in the document, and apply its values to all links found in the document. Also remove the tag once it has been applied. If ``handle_failures`` is None (default), a failure to process a URL will abort the processing. If set to 'ignore', errors are ignored. If set to 'discard', failing URLs will be removed. """ base_href = None basetags = self.xpath('//base[@href]|//x:base[@href]', namespaces={'x': XHTML_NAMESPACE}) for b in basetags: base_href = b.get('href') b.drop_tree() if not base_href: return self.make_links_absolute(base_href, resolve_base_href=False, handle_failures=handle_failures)
[ "def", "resolve_base_href", "(", "self", ",", "handle_failures", "=", "None", ")", ":", "base_href", "=", "None", "basetags", "=", "self", ".", "xpath", "(", "'//base[@href]|//x:base[@href]'", ",", "namespaces", "=", "{", "'x'", ":", "XHTML_NAMESPACE", "}", ")...
Find any ``<base href>`` tag in the document, and apply its values to all links found in the document. Also remove the tag once it has been applied. If ``handle_failures`` is None (default), a failure to process a URL will abort the processing. If set to 'ignore', errors are ignored. If set to 'discard', failing URLs will be removed.
[ "Find", "any", "<base", "href", ">", "tag", "in", "the", "document", "and", "apply", "its", "values", "to", "all", "links", "found", "in", "the", "document", ".", "Also", "remove", "the", "tag", "once", "it", "has", "been", "applied", "." ]
python
test
Esri/ArcREST
src/arcresthelper/common.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcresthelper/common.py#L527-L550
def chunklist(l, n): """Yield successive n-sized chunks from l. Args: l (object): The object to chunk. n (int): The size of the chunks. Yields: The next chunk in the object. Raises: TypeError: if ``l`` has no :py:func:`len`. Examples: >>> for c in arcresthelper.common.chunklist(list(range(20)), 6): ... print(c) [0, 1, 2, 3, 4, 5] [6, 7, 8, 9, 10, 11] [12, 13, 14, 15, 16, 17] [18, 19] >>> list(arcresthelper.common.chunklist(string.ascii_uppercase, 7)) ['ABCDEFG', 'HIJKLMN', 'OPQRSTU', 'VWXYZ'] """ n = max(1, n) for i in range(0, len(l), n): yield l[i:i+n]
[ "def", "chunklist", "(", "l", ",", "n", ")", ":", "n", "=", "max", "(", "1", ",", "n", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "l", ")", ",", "n", ")", ":", "yield", "l", "[", "i", ":", "i", "+", "n", "]" ]
Yield successive n-sized chunks from l. Args: l (object): The object to chunk. n (int): The size of the chunks. Yields: The next chunk in the object. Raises: TypeError: if ``l`` has no :py:func:`len`. Examples: >>> for c in arcresthelper.common.chunklist(list(range(20)), 6): ... print(c) [0, 1, 2, 3, 4, 5] [6, 7, 8, 9, 10, 11] [12, 13, 14, 15, 16, 17] [18, 19] >>> list(arcresthelper.common.chunklist(string.ascii_uppercase, 7)) ['ABCDEFG', 'HIJKLMN', 'OPQRSTU', 'VWXYZ']
[ "Yield", "successive", "n", "-", "sized", "chunks", "from", "l", "." ]
python
train
Scoppio/RagnarokEngine3
RagnarokEngine3/RE3.py
https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/RagnarokEngine3/RE3.py#L222-L236
def lerp(vec1, vec2, time): """Lerp between vec1 to vec2 based on time. Time is clamped between 0 and 1.""" if isinstance(vec1, Vector2) \ and isinstance(vec2, Vector2): # Clamp the time value into the 0-1 range. if time < 0: time = 0 elif time > 1: time = 1 x_lerp = vec1[0] + time * (vec2[0] - vec1[0]) y_lerp = vec1[1] + time * (vec2[1] - vec1[1]) return Vector2(x_lerp, y_lerp) else: raise TypeError("Objects must be of type Vector2")
[ "def", "lerp", "(", "vec1", ",", "vec2", ",", "time", ")", ":", "if", "isinstance", "(", "vec1", ",", "Vector2", ")", "and", "isinstance", "(", "vec2", ",", "Vector2", ")", ":", "# Clamp the time value into the 0-1 range.", "if", "time", "<", "0", ":", "...
Lerp between vec1 to vec2 based on time. Time is clamped between 0 and 1.
[ "Lerp", "between", "vec1", "to", "vec2", "based", "on", "time", ".", "Time", "is", "clamped", "between", "0", "and", "1", "." ]
python
train
dmwm/DBS
Server/Python/src/dbs/business/DBSMigrate.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/business/DBSMigrate.py#L184-L238
def prepareBlockMigrationList(self, conn, request): """ Prepare the ordered lists of blocks based on input BLOCK 1. see if block already exists at dst (no need to migrate), raise "ALREADY EXISTS" 2. see if block exists at src & make sure the block's open_for_writing=0 3. see if block has parents 4. see if parent blocks are already at dst 5. add 'order' to parent and then this block (ascending) 6. return the ordered list """ ordered_dict = {} block_name = request["migration_input"] url = request["migration_url"] order_counter = 0 try: #1. dstblock = self.blocklist.execute(conn, block_name=block_name) for item in dstblock: if item: dbsExceptionHandler('dbsException-invalid-input', 'ALREADY EXISTS: \ Required block (%s) migration is already at destination' %block_name, self.logger.exception) #2. srcblock = self.getSrcBlocks(url, block=block_name) if len(srcblock) < 1: e = 'DBSMigration: Invalid input. Required Block %s not found at source %s.' %(block_name, url) dbsExceptionHandler('dbsException-invalid-input2', e, self.logger.exception, e) ##This block has to be migrated ordered_dict[order_counter] = [] ordered_dict[order_counter].append(block_name) parent_ordered_dict = self.getParentBlocksOrderedList(url, conn, block_name, order_counter+1) if parent_ordered_dict != {}: ordered_dict.update(parent_ordered_dict) #6. #check for duplicates return remove_duplicated_items(ordered_dict) except Exception as ex: if '500 Internal Server Error' in str(ex): #"Server Error" is the default in dbsExceptionHandler dbsExceptionHandler('Server Error', str(ex), self.logger.exception, "DBSMigrate/prepareBlockMigrationList: "+str(ex)) if isinstance(ex, pycurl.error): if ex.args[0] == 7: message = ex.args[1] dbsExceptionHandler('dbsException-failed-connect2host', message, self.logger.exception, message) if 'urlopen error' in str(ex): message='Connection to source DBS server refused. Check your source url.' 
elif 'Bad Request' in str(ex): message='cannot get data from the source DBS server. Check your migration input.' else: message='Failed to make a block migration list.' dbsExceptionHandler('dbsException-invalid-input2', \ """DBSMigrate/prepareBlockMigrationList failed to prepare ordered block list: %s""" %str(ex), self.logger.exception, message)
[ "def", "prepareBlockMigrationList", "(", "self", ",", "conn", ",", "request", ")", ":", "ordered_dict", "=", "{", "}", "block_name", "=", "request", "[", "\"migration_input\"", "]", "url", "=", "request", "[", "\"migration_url\"", "]", "order_counter", "=", "0...
Prepare the ordered lists of blocks based on input BLOCK 1. see if block already exists at dst (no need to migrate), raise "ALREADY EXISTS" 2. see if block exists at src & make sure the block's open_for_writing=0 3. see if block has parents 4. see if parent blocks are already at dst 5. add 'order' to parent and then this block (ascending) 6. return the ordered list
[ "Prepare", "the", "ordered", "lists", "of", "blocks", "based", "on", "input", "BLOCK", "1", ".", "see", "if", "block", "already", "exists", "at", "dst", "(", "no", "need", "to", "migrate", ")", "raise", "ALREADY", "EXISTS", "2", ".", "see", "if", "bloc...
python
train
DarkEnergySurvey/ugali
ugali/utils/config.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/config.py#L91-L124
def _validate(self): """ Enforce some structure to the config file """ # This could be done with a default config # Check that specific keys exist sections = odict([ ('catalog',['dirname','basename', 'lon_field','lat_field','objid_field', 'mag_1_band', 'mag_1_field', 'mag_err_1_field', 'mag_2_band', 'mag_2_field', 'mag_err_2_field', ]), ('mask',[]), ('coords',['nside_catalog','nside_mask','nside_likelihood', 'nside_pixel','roi_radius','roi_radius_annulus', 'roi_radius_interior','coordsys', ]), ('likelihood',[]), ('output',[]), ('batch',[]), ]) keys = np.array(list(sections.keys())) found = np.in1d(keys,list(self.keys())) if not np.all(found): msg = 'Missing sections: '+str(keys[~found]) raise Exception(msg) for section,keys in sections.items(): keys = np.array(keys) found = np.in1d(keys,list(self[section].keys())) if not np.all(found): msg = 'Missing keys in %s: '%(section)+str(keys[~found]) raise Exception(msg)
[ "def", "_validate", "(", "self", ")", ":", "# This could be done with a default config", "# Check that specific keys exist", "sections", "=", "odict", "(", "[", "(", "'catalog'", ",", "[", "'dirname'", ",", "'basename'", ",", "'lon_field'", ",", "'lat_field'", ",", ...
Enforce some structure to the config file
[ "Enforce", "some", "structure", "to", "the", "config", "file" ]
python
train
rigetti/quantumflow
quantumflow/qubits.py
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/qubits.py#L137-L143
def relabel(self, qubits: Qubits) -> 'QubitVector': """Return a copy of this vector with new qubits""" qubits = tuple(qubits) assert len(qubits) == self.qubit_nb vec = copy(self) vec.qubits = qubits return vec
[ "def", "relabel", "(", "self", ",", "qubits", ":", "Qubits", ")", "->", "'QubitVector'", ":", "qubits", "=", "tuple", "(", "qubits", ")", "assert", "len", "(", "qubits", ")", "==", "self", ".", "qubit_nb", "vec", "=", "copy", "(", "self", ")", "vec",...
Return a copy of this vector with new qubits
[ "Return", "a", "copy", "of", "this", "vector", "with", "new", "qubits" ]
python
train
mlperf/training
reinforcement/tensorflow/minigo/mask_flags.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/mask_flags.py#L112-L120
def checked_run(cmd): """Prepare and run a subprocess cmd, checking for successful completion.""" completed_process = run(cmd) if completed_process.returncode > 0: print("Command failed! Hanging around in case someone needs a " "docker connection. (Ctrl-C to quit now)") time.sleep(300) raise RuntimeError return completed_process
[ "def", "checked_run", "(", "cmd", ")", ":", "completed_process", "=", "run", "(", "cmd", ")", "if", "completed_process", ".", "returncode", ">", "0", ":", "print", "(", "\"Command failed! Hanging around in case someone needs a \"", "\"docker connection. (Ctrl-C to quit n...
Prepare and run a subprocess cmd, checking for successful completion.
[ "Prepare", "and", "run", "a", "subprocess", "cmd", "checking", "for", "successful", "completion", "." ]
python
train
saltstack/salt
salt/utils/files.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/files.py#L85-L100
def mkstemp(*args, **kwargs): ''' Helper function which does exactly what ``tempfile.mkstemp()`` does but accepts another argument, ``close_fd``, which, by default, is true and closes the fd before returning the file path. Something commonly done throughout Salt's code. ''' if 'prefix' not in kwargs: kwargs['prefix'] = '__salt.tmp.' close_fd = kwargs.pop('close_fd', True) fd_, f_path = tempfile.mkstemp(*args, **kwargs) if close_fd is False: return fd_, f_path os.close(fd_) del fd_ return f_path
[ "def", "mkstemp", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "'prefix'", "not", "in", "kwargs", ":", "kwargs", "[", "'prefix'", "]", "=", "'__salt.tmp.'", "close_fd", "=", "kwargs", ".", "pop", "(", "'close_fd'", ",", "True", ")", "fd...
Helper function which does exactly what ``tempfile.mkstemp()`` does but accepts another argument, ``close_fd``, which, by default, is true and closes the fd before returning the file path. Something commonly done throughout Salt's code.
[ "Helper", "function", "which", "does", "exactly", "what", "tempfile", ".", "mkstemp", "()", "does", "but", "accepts", "another", "argument", "close_fd", "which", "by", "default", "is", "true", "and", "closes", "the", "fd", "before", "returning", "the", "file",...
python
train
ejeschke/ginga
ginga/trcalc.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/trcalc.py#L268-L311
def get_scaled_cutout_wdht_view(shp, x1, y1, x2, y2, new_wd, new_ht): """ Like get_scaled_cutout_wdht, but returns the view/slice to extract from an image instead of the extraction itself. """ x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) new_wd, new_ht = int(new_wd), int(new_ht) # calculate dimensions of NON-scaled cutout old_wd = x2 - x1 + 1 old_ht = y2 - y1 + 1 max_x, max_y = shp[1] - 1, shp[0] - 1 if (new_wd != old_wd) or (new_ht != old_ht): # Make indexes and scale them # Is there a more efficient way to do this? yi = np.mgrid[0:new_ht].reshape(-1, 1) xi = np.mgrid[0:new_wd].reshape(1, -1) iscale_x = float(old_wd) / float(new_wd) iscale_y = float(old_ht) / float(new_ht) xi = (x1 + xi * iscale_x).clip(0, max_x).astype(np.int, copy=False) yi = (y1 + yi * iscale_y).clip(0, max_y).astype(np.int, copy=False) wd, ht = xi.shape[1], yi.shape[0] # bounds check against shape (to protect future data access) xi_max, yi_max = xi[0, -1], yi[-1, 0] assert xi_max <= max_x, ValueError("X index (%d) exceeds shape bounds (%d)" % (xi_max, max_x)) assert yi_max <= max_y, ValueError("Y index (%d) exceeds shape bounds (%d)" % (yi_max, max_y)) view = np.s_[yi, xi] else: # simple stepped view will do, because new view is same as old wd, ht = old_wd, old_ht view = np.s_[y1:y2 + 1, x1:x2 + 1] # Calculate actual scale used (vs. desired) old_wd, old_ht = max(old_wd, 1), max(old_ht, 1) scale_x = float(wd) / old_wd scale_y = float(ht) / old_ht # return view + actual scale factors used return (view, (scale_x, scale_y))
[ "def", "get_scaled_cutout_wdht_view", "(", "shp", ",", "x1", ",", "y1", ",", "x2", ",", "y2", ",", "new_wd", ",", "new_ht", ")", ":", "x1", ",", "y1", ",", "x2", ",", "y2", "=", "int", "(", "x1", ")", ",", "int", "(", "y1", ")", ",", "int", "...
Like get_scaled_cutout_wdht, but returns the view/slice to extract from an image instead of the extraction itself.
[ "Like", "get_scaled_cutout_wdht", "but", "returns", "the", "view", "/", "slice", "to", "extract", "from", "an", "image", "instead", "of", "the", "extraction", "itself", "." ]
python
train
GNS3/gns3-server
gns3server/compute/docker/docker_vm.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/docker/docker_vm.py#L916-L924
def _get_log(self): """ Return the log from the container :returns: string """ result = yield from self.manager.query("GET", "containers/{}/logs".format(self._cid), params={"stderr": 1, "stdout": 1}) return result
[ "def", "_get_log", "(", "self", ")", ":", "result", "=", "yield", "from", "self", ".", "manager", ".", "query", "(", "\"GET\"", ",", "\"containers/{}/logs\"", ".", "format", "(", "self", ".", "_cid", ")", ",", "params", "=", "{", "\"stderr\"", ":", "1"...
Return the log from the container :returns: string
[ "Return", "the", "log", "from", "the", "container" ]
python
train
chaoss/grimoirelab-perceval
perceval/backends/core/launchpad.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/launchpad.py#L390-L394
def __send_request(self, url, params=None): """Send request""" r = self.fetch(url, payload=params) return r.text
[ "def", "__send_request", "(", "self", ",", "url", ",", "params", "=", "None", ")", ":", "r", "=", "self", ".", "fetch", "(", "url", ",", "payload", "=", "params", ")", "return", "r", ".", "text" ]
Send request
[ "Send", "request" ]
python
test
CalebBell/fluids
fluids/pump.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/pump.py#L548-L600
def current_ideal(P, V, phase=3, PF=1): r'''Returns the current drawn by a motor of power `P` operating at voltage `V`, with line AC of phase `phase` and power factor `PF` according to [1]_. Single-phase power: .. math:: I = \frac{P}{V \cdot \text{PF}} 3-phase power: .. math:: I = \frac{P}{V \cdot \text{PF} \sqrt{3}} Parameters ---------- P : float Power, [W] V : float Voltage, [V] phase : int, optional Line AC phase, either 1 or 3 PF : float, optional Power factor of motor Returns ------- I : float Power drawn by motor, [A] Notes ----- Does not include power used by the motor's fan, or startor, or internal losses. These are all significant. Examples -------- >>> current_ideal(V=120, P=1E4, PF=1, phase=1) 83.33333333333333 References ---------- .. [1] Electrical Construction, and Maintenance. "Calculating Single- and 3-Phase Parameters." April 1, 2008. http://ecmweb.com/basics/calculating-single-and-3-phase-parameters. ''' if phase not in [1, 3]: raise Exception('Only 1 and 3 phase power supported') if phase == 3: return P/(V*3**0.5*PF) else: return P/(V*PF)
[ "def", "current_ideal", "(", "P", ",", "V", ",", "phase", "=", "3", ",", "PF", "=", "1", ")", ":", "if", "phase", "not", "in", "[", "1", ",", "3", "]", ":", "raise", "Exception", "(", "'Only 1 and 3 phase power supported'", ")", "if", "phase", "==", ...
r'''Returns the current drawn by a motor of power `P` operating at voltage `V`, with line AC of phase `phase` and power factor `PF` according to [1]_. Single-phase power: .. math:: I = \frac{P}{V \cdot \text{PF}} 3-phase power: .. math:: I = \frac{P}{V \cdot \text{PF} \sqrt{3}} Parameters ---------- P : float Power, [W] V : float Voltage, [V] phase : int, optional Line AC phase, either 1 or 3 PF : float, optional Power factor of motor Returns ------- I : float Power drawn by motor, [A] Notes ----- Does not include power used by the motor's fan, or startor, or internal losses. These are all significant. Examples -------- >>> current_ideal(V=120, P=1E4, PF=1, phase=1) 83.33333333333333 References ---------- .. [1] Electrical Construction, and Maintenance. "Calculating Single- and 3-Phase Parameters." April 1, 2008. http://ecmweb.com/basics/calculating-single-and-3-phase-parameters.
[ "r", "Returns", "the", "current", "drawn", "by", "a", "motor", "of", "power", "P", "operating", "at", "voltage", "V", "with", "line", "AC", "of", "phase", "phase", "and", "power", "factor", "PF", "according", "to", "[", "1", "]", "_", "." ]
python
train
hubo1016/vlcp
vlcp/event/core.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L568-L586
def syscall_clearremovequeue(queue, index): ''' Clear the subqueue `queue[index]` and remove it from queue. ''' def _syscall(scheduler, processor): qes, qees = queue[index].clear() events = scheduler.queue.unblockqueue(queue[index]) for e in events: scheduler.eventtree.remove(e) qes2, qees2 = queue.removeSubQueue(index) for e in qes: processor(e) for e in qes2: processor(e) for e in qees: processor(e) for e in qees2: processor(e) return _syscall
[ "def", "syscall_clearremovequeue", "(", "queue", ",", "index", ")", ":", "def", "_syscall", "(", "scheduler", ",", "processor", ")", ":", "qes", ",", "qees", "=", "queue", "[", "index", "]", ".", "clear", "(", ")", "events", "=", "scheduler", ".", "que...
Clear the subqueue `queue[index]` and remove it from queue.
[ "Clear", "the", "subqueue", "queue", "[", "index", "]", "and", "remove", "it", "from", "queue", "." ]
python
train
inveniosoftware/invenio-oaiserver
invenio_oaiserver/percolator.py
https://github.com/inveniosoftware/invenio-oaiserver/blob/eae765e32bd816ddc5612d4b281caf205518b512/invenio_oaiserver/percolator.py#L94-L104
def _delete_percolator(spec, search_pattern): """Delete percolator associated with the new oaiset.""" if spec: for index in current_search.mappings.keys(): # Create the percolator doc_type in the existing index for >= ES5 percolator_doc_type = _get_percolator_doc_type(index) _create_percolator_mapping(index, percolator_doc_type) current_search_client.delete( index=index, doc_type=percolator_doc_type, id='oaiset-{}'.format(spec), ignore=[404] )
[ "def", "_delete_percolator", "(", "spec", ",", "search_pattern", ")", ":", "if", "spec", ":", "for", "index", "in", "current_search", ".", "mappings", ".", "keys", "(", ")", ":", "# Create the percolator doc_type in the existing index for >= ES5", "percolator_doc_type",...
Delete percolator associated with the new oaiset.
[ "Delete", "percolator", "associated", "with", "the", "new", "oaiset", "." ]
python
train
ZELLMECHANIK-DRESDEN/dclab
dclab/polygon_filter.py
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/polygon_filter.py#L158-L171
def _set_unique_id(self, unique_id): """Define a unique id""" assert isinstance(unique_id, int), "unique_id must be an integer" if PolygonFilter.instace_exists(unique_id): newid = max(PolygonFilter._instance_counter, unique_id+1) msg = "PolygonFilter with unique_id '{}' exists.".format(unique_id) msg += " Using new unique id '{}'.".format(newid) warnings.warn(msg, FilterIdExistsWarning) unique_id = newid ic = max(PolygonFilter._instance_counter, unique_id+1) PolygonFilter._instance_counter = ic self.unique_id = unique_id
[ "def", "_set_unique_id", "(", "self", ",", "unique_id", ")", ":", "assert", "isinstance", "(", "unique_id", ",", "int", ")", ",", "\"unique_id must be an integer\"", "if", "PolygonFilter", ".", "instace_exists", "(", "unique_id", ")", ":", "newid", "=", "max", ...
Define a unique id
[ "Define", "a", "unique", "id" ]
python
train
TheHive-Project/Cortex-Analyzers
analyzers/MaxMind/ipaddr.py
https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/MaxMind/ipaddr.py#L1485-L1530
def _compress_hextets(self, hextets): """Compresses a list of hextets. Compresses a list of strings, replacing the longest continuous sequence of "0" in the list with "" and adding empty strings at the beginning or at the end of the string such that subsequently calling ":".join(hextets) will produce the compressed version of the IPv6 address. Args: hextets: A list of strings, the hextets to compress. Returns: A list of strings. """ best_doublecolon_start = -1 best_doublecolon_len = 0 doublecolon_start = -1 doublecolon_len = 0 for index in range(len(hextets)): if hextets[index] == '0': doublecolon_len += 1 if doublecolon_start == -1: # Start of a sequence of zeros. doublecolon_start = index if doublecolon_len > best_doublecolon_len: # This is the longest sequence of zeros so far. best_doublecolon_len = doublecolon_len best_doublecolon_start = doublecolon_start else: doublecolon_len = 0 doublecolon_start = -1 if best_doublecolon_len > 1: best_doublecolon_end = (best_doublecolon_start + best_doublecolon_len) # For zeros at the end of the address. if best_doublecolon_end == len(hextets): hextets += [''] hextets[best_doublecolon_start:best_doublecolon_end] = [''] # For zeros at the beginning of the address. if best_doublecolon_start == 0: hextets = [''] + hextets return hextets
[ "def", "_compress_hextets", "(", "self", ",", "hextets", ")", ":", "best_doublecolon_start", "=", "-", "1", "best_doublecolon_len", "=", "0", "doublecolon_start", "=", "-", "1", "doublecolon_len", "=", "0", "for", "index", "in", "range", "(", "len", "(", "he...
Compresses a list of hextets. Compresses a list of strings, replacing the longest continuous sequence of "0" in the list with "" and adding empty strings at the beginning or at the end of the string such that subsequently calling ":".join(hextets) will produce the compressed version of the IPv6 address. Args: hextets: A list of strings, the hextets to compress. Returns: A list of strings.
[ "Compresses", "a", "list", "of", "hextets", "." ]
python
train
materialsproject/pymatgen
pymatgen/analysis/defects/utils.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/defects/utils.py#L794-L802
def get_structure_with_nodes(self): """ Get the modified structure with the voronoi nodes inserted. The species is set as a DummySpecie X. """ new_s = Structure.from_sites(self.structure) for v in self.vnodes: new_s.append("X", v.frac_coords) return new_s
[ "def", "get_structure_with_nodes", "(", "self", ")", ":", "new_s", "=", "Structure", ".", "from_sites", "(", "self", ".", "structure", ")", "for", "v", "in", "self", ".", "vnodes", ":", "new_s", ".", "append", "(", "\"X\"", ",", "v", ".", "frac_coords", ...
Get the modified structure with the voronoi nodes inserted. The species is set as a DummySpecie X.
[ "Get", "the", "modified", "structure", "with", "the", "voronoi", "nodes", "inserted", ".", "The", "species", "is", "set", "as", "a", "DummySpecie", "X", "." ]
python
train
SamLau95/nbinteract
docs/convert_notebooks_to_html_partial.py
https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/docs/convert_notebooks_to_html_partial.py#L73-L135
def convert_notebooks_to_html_partial(notebook_paths, url_map): """ Converts notebooks in notebook_paths to HTML partials """ for notebook_path in notebook_paths: # Computes <name>.ipynb from notebooks/01/<name>.ipynb path, filename = os.path.split(notebook_path) # Computes examples from notebooks/examples chapter = os.path.split(path)[1] if os.sep in path else '' # Computes <name> from <name>.ipynb basename, _ = os.path.splitext(filename) # Computes <name>.html from notebooks/<name>.ipynb outfile_name = basename + '.html' # This results in images like AB_5_1.png for a notebook called AB.ipynb unique_image_key = basename # This sets the img tag URL in the rendered HTML. output_files_dir = '/' + NOTEBOOK_IMAGE_DIR # Path to output final HTML file outfile_path = os.path.join(chapter, outfile_name) if chapter: os.makedirs(chapter, exist_ok=True) extract_output_config = { 'unique_key': unique_image_key, 'output_files_dir': output_files_dir, } notebook = nbformat.read(notebook_path, 4) notebook.cells.insert(0, _preamble_cell(path)) html, resources = html_exporter.from_notebook_node( notebook, resources=extract_output_config, ) if outfile_path not in url_map: print( '[Warning]: {} not found in _data/toc.yml. This page will ' 'not appear in the textbook table of contents.' .format(outfile_path) ) prev_page = url_map.get(outfile_path, {}).get('prev', 'false') next_page = url_map.get(outfile_path, {}).get('next', 'false') final_output = wrapper.format( html=html, prev_page=prev_page, next_page=next_page, ) # Write out HTML with open(outfile_path, 'w', encoding='utf-8') as outfile: outfile.write(final_output) # Write out images for relative_path, image_data in resources['outputs'].items(): image_name = os.path.basename(relative_path) final_image_path = os.path.join(NOTEBOOK_IMAGE_DIR, image_name) with open(final_image_path, 'wb') as outimage: outimage.write(image_data) print(outfile_path + " written.")
[ "def", "convert_notebooks_to_html_partial", "(", "notebook_paths", ",", "url_map", ")", ":", "for", "notebook_path", "in", "notebook_paths", ":", "# Computes <name>.ipynb from notebooks/01/<name>.ipynb", "path", ",", "filename", "=", "os", ".", "path", ".", "split", "("...
Converts notebooks in notebook_paths to HTML partials
[ "Converts", "notebooks", "in", "notebook_paths", "to", "HTML", "partials" ]
python
train
pymoca/pymoca
src/pymoca/ast.py
https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/ast.py#L185-L199
def concatenate(cls, *args: List['ComponentRef']) -> 'ComponentRef': """ Helper function to append two component references to eachother, e.g. a "within" component ref and an "object type" component ref. :return: New component reference, with other appended to self. """ a = copy.deepcopy(args[0]) n = a for b in args[1:]: while n.child: n = n.child[0] b = copy.deepcopy(b) # Not strictly necessary n.child = [b] return a
[ "def", "concatenate", "(", "cls", ",", "*", "args", ":", "List", "[", "'ComponentRef'", "]", ")", "->", "'ComponentRef'", ":", "a", "=", "copy", ".", "deepcopy", "(", "args", "[", "0", "]", ")", "n", "=", "a", "for", "b", "in", "args", "[", "1", ...
Helper function to append two component references to eachother, e.g. a "within" component ref and an "object type" component ref. :return: New component reference, with other appended to self.
[ "Helper", "function", "to", "append", "two", "component", "references", "to", "eachother", "e", ".", "g", ".", "a", "within", "component", "ref", "and", "an", "object", "type", "component", "ref", ".", ":", "return", ":", "New", "component", "reference", "...
python
train
Parsl/parsl
parsl/app/app.py
https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/app/app.py#L78-L116
def App(apptype, data_flow_kernel=None, walltime=60, cache=False, executors='all'): """The App decorator function. Args: - apptype (string) : Apptype can be bash|python Kwargs: - data_flow_kernel (DataFlowKernel): The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. - walltime (int) : Walltime for app in seconds, default=60 - executors (str|list) : Labels of the executors that this app can execute over. Default is 'all'. - cache (Bool) : Enable caching of the app call default=False Returns: A PythonApp or BashApp object, which when called runs the apps through the executor. """ from parsl.app.python import PythonApp from parsl.app.bash import BashApp logger.warning("The 'App' decorator will be deprecated in Parsl 0.8. Please use 'python_app' or 'bash_app' instead.") if apptype == 'python': app_class = PythonApp elif apptype == 'bash': app_class = BashApp else: raise InvalidAppTypeError("Invalid apptype requested {}; must be 'python' or 'bash'".format(apptype)) def wrapper(f): return app_class(f, data_flow_kernel=data_flow_kernel, walltime=walltime, cache=cache, executors=executors) return wrapper
[ "def", "App", "(", "apptype", ",", "data_flow_kernel", "=", "None", ",", "walltime", "=", "60", ",", "cache", "=", "False", ",", "executors", "=", "'all'", ")", ":", "from", "parsl", ".", "app", ".", "python", "import", "PythonApp", "from", "parsl", "....
The App decorator function. Args: - apptype (string) : Apptype can be bash|python Kwargs: - data_flow_kernel (DataFlowKernel): The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. - walltime (int) : Walltime for app in seconds, default=60 - executors (str|list) : Labels of the executors that this app can execute over. Default is 'all'. - cache (Bool) : Enable caching of the app call default=False Returns: A PythonApp or BashApp object, which when called runs the apps through the executor.
[ "The", "App", "decorator", "function", "." ]
python
valid
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/waveforms.py
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/waveforms.py#L523-L532
def _dEndfr(self): """Eq. 4 from Orazio and Samsing (2018) Takes f in rest frame. """ Mc = self._chirp_mass() return (np.pi**(2./3.)*Mc**(5./3.)/(3.*(1.+self.z)**(1./3.) * (self.freqs_orb/(1.+self.z))**(1./3.))*(2./self.n)**(2./3.) * self._g_func()/self._f_func())
[ "def", "_dEndfr", "(", "self", ")", ":", "Mc", "=", "self", ".", "_chirp_mass", "(", ")", "return", "(", "np", ".", "pi", "**", "(", "2.", "/", "3.", ")", "*", "Mc", "**", "(", "5.", "/", "3.", ")", "/", "(", "3.", "*", "(", "1.", "+", "s...
Eq. 4 from Orazio and Samsing (2018) Takes f in rest frame.
[ "Eq", ".", "4", "from", "Orazio", "and", "Samsing", "(", "2018", ")" ]
python
train
OpenGov/carpenter
carpenter/blocks/cellanalyzer.py
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/cellanalyzer.py#L153-L242
def auto_convert_numeric_string_cell(flagable, cell_str, position, worksheet, flags, units): ''' Handles the string containing numeric case of cell and attempts auto-conversion for auto_convert_cell. ''' def numerify_str(cell_str, flag_level='minor', flag_text=""): ''' Differentiates between int and float strings. Expects a numeric string. ''' if re.search(allregex.integer_regex, cell_str): flagable.flag_change(flags, flag_level, position, worksheet) return int(cell_str) else: flagable.flag_change(flags, flag_level, worksheet, position) return float(cell_str) def numerify_percentage_str(cell_str, flag_level='minor', flag_text=""): flagable.flag_change(flags, flag_level, position, worksheet) return float(cell_str) / 100 def convert_to_int_or_float(cell_str, flag_level='minor', flag_text=""): if not cell_str: conversion = 0 flagable.flag_change(flags, 'warning', position, worksheet, flagable.FLAGS['empty-to-zero-string']) if re.search(allregex.numerical_regex, cell_str): conversion = numerify_str(cell_str, flag_level, flag_text) # Comma separated? elif re.search(allregex.comma_sep_numerical_regex, cell_str): smashed_cell = ''.join(cell_str.split(',')) conversion = numerify_str(smashed_cell, flag_level, flag_text) # Ends in percentage sign elif re.search(allregex.percent_numerical_regex, cell_str): cell_str = allregex.percent_numerical_regex.search(cell_str).group(1) conversion = numerify_percentage_str(cell_str, flag_level, flag_text) # Ends in + or - sign (estimate)? elif re.search(allregex.estimate_numerical_regex, cell_str): cell_str = cell_str[:-1].replace(",","") conversion = numerify_str(cell_str, flag_level, flag_text) # Begins with money symbol? 
elif re.search(allregex.begins_with_monetary_symbol_regex, cell_str): symbol = cell_str[0] cell_str = cell_str[1:] try: conversion = convert_to_int_or_float(cell_str, 'interpreted', flagable.FLAGS['monetary-removal']) if re.search(allregex.contains_dollar_symbol_regex, symbol): units[position] = UNITS_DOLLAR elif re.search(allregex.contains_pound_symbol_regex, symbol): units[position] = UNITS_POUND elif re.search(allregex.contains_euro_symbol_regex, symbol): units[position] = UNITS_EURO except ValueError: conversion = cell_str flagable.flag_change(flags, 'warning', position, worksheet, flagable.FLAGS['failed-monetary-convert']) # Number ending in 'k'? elif re.search(allregex.ends_with_thousands_scaling_regex, cell_str): cell_str = cell_str.rstrip()[:-1] try: conversion = 1000*convert_to_int_or_float(cell_str, 'interpreted', flagable.FLAGS['thousands-convert']) except ValueError: flagable.flag_change(flags, 'warning', position, worksheet, flagable.FLAGS['failed-thousands-convert']) # Number ending in 'M' or 'MM'? elif re.search(allregex.ends_with_millions_scaling_regex, cell_str): if cell_str[-2] == "M": cell_str = cell_str[:-2] else: cell_str = cell_str[:-1] try: conversion = 1000000*convert_to_int_or_float(cell_str, 'interpreted', flagable.FLAGS['millions-convert']) except ValueError: flagable.flag_change(flags, 'warning', position, worksheet, flagable.FLAGS['failed-millions-convert']) else: raise ValueError("Cannot convert cell") return conversion # Try converting try: return convert_to_int_or_float(cell_str) # Couldn't convert? except ValueError: flagable.flag_change(flags, 'minor', position, worksheet, flagable.FLAGS['failed-convert-numeric-string']) return cell_str
[ "def", "auto_convert_numeric_string_cell", "(", "flagable", ",", "cell_str", ",", "position", ",", "worksheet", ",", "flags", ",", "units", ")", ":", "def", "numerify_str", "(", "cell_str", ",", "flag_level", "=", "'minor'", ",", "flag_text", "=", "\"\"", ")",...
Handles the string containing numeric case of cell and attempts auto-conversion for auto_convert_cell.
[ "Handles", "the", "string", "containing", "numeric", "case", "of", "cell", "and", "attempts", "auto", "-", "conversion", "for", "auto_convert_cell", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/variation/validate.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L253-L303
def _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, data, validate_method): """Run evaluation of a caller against the truth set using rtg vcfeval. """ out_dir = os.path.join(base_dir, "rtg") if not utils.file_exists(os.path.join(out_dir, "done")): if os.path.exists(out_dir): shutil.rmtree(out_dir) vrn_file, rm_file, interval_bed = _prepare_inputs(vrn_file, rm_file, rm_interval_file, base_dir, data) rtg_ref = tz.get_in(["reference", "rtg"], data) if isinstance(rtg_ref, dict) and "base" in rtg_ref: rtg_ref = os.path.dirname(rtg_ref["base"]) assert rtg_ref and os.path.exists(rtg_ref), ("Did not find rtg indexed reference file for validation:\n%s\n" "Run bcbio_nextgen.py upgrade --data --aligners rtg" % rtg_ref) # handle CWL where we have a reference to a single file in the RTG directory if os.path.isfile(rtg_ref): rtg_ref = os.path.dirname(rtg_ref) # get core and memory usage from standard configuration threads = min(dd.get_num_cores(data), 6) resources = config_utils.get_resources("rtg", data["config"]) memory = config_utils.adjust_opts(resources.get("jvm_opts", ["-Xms500m", "-Xmx1500m"]), {"algorithm": {"memory_adjust": {"magnitude": threads, "direction": "increase"}}}) jvm_stack = [x for x in memory if x.startswith("-Xms")] jvm_mem = [x for x in memory if x.startswith("-Xmx")] jvm_stack = jvm_stack[0] if len(jvm_stack) > 0 else "-Xms500m" jvm_mem = jvm_mem[0].replace("-Xmx", "") if len(jvm_mem) > 0 else "3g" cmd = ["rtg", "vcfeval", "--threads", str(threads), "-b", rm_file, "--bed-regions", interval_bed, "-c", vrn_file, "-t", rtg_ref, "-o", out_dir] if validate_method == "rtg-squash-ploidy": cmd += ["--squash-ploidy"] rm_samples = vcfutils.get_samples(rm_file) if len(rm_samples) > 1 and dd.get_sample_name(data) in rm_samples: cmd += ["--sample=%s" % dd.get_sample_name(data)] cmd += ["--vcf-score-field='%s'" % (_pick_best_quality_score(vrn_file))] mem_export = "%s export RTG_JAVA_OPTS='%s' && export RTG_MEM=%s" % (utils.local_path_export(), jvm_stack, 
jvm_mem) cmd = mem_export + " && " + " ".join(cmd) do.run(cmd, "Validate calls using rtg vcfeval", data) out = {"fp": os.path.join(out_dir, "fp.vcf.gz"), "fn": os.path.join(out_dir, "fn.vcf.gz")} tp_calls = os.path.join(out_dir, "tp.vcf.gz") tp_baseline = os.path.join(out_dir, "tp-baseline.vcf.gz") if os.path.exists(tp_baseline): out["tp"] = tp_baseline out["tp-calls"] = tp_calls else: out["tp"] = tp_calls return out
[ "def", "_run_rtg_eval", "(", "vrn_file", ",", "rm_file", ",", "rm_interval_file", ",", "base_dir", ",", "data", ",", "validate_method", ")", ":", "out_dir", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "\"rtg\"", ")", "if", "not", "utils", ...
Run evaluation of a caller against the truth set using rtg vcfeval.
[ "Run", "evaluation", "of", "a", "caller", "against", "the", "truth", "set", "using", "rtg", "vcfeval", "." ]
python
train
cytoscape/py2cytoscape
py2cytoscape/cyrest/command.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/command.py#L37-L48
def pause(self, message=None, verbose=False): """ The pause command displays a dialog with the text provided in the message argument and waits for the user to click OK :param message: a message to display. default=None :param verbose: print more """ PARAMS=set_param(["message"],[message]) response=api(url=self.__url+"/pause", PARAMS=PARAMS, verbose=verbose) return response
[ "def", "pause", "(", "self", ",", "message", "=", "None", ",", "verbose", "=", "False", ")", ":", "PARAMS", "=", "set_param", "(", "[", "\"message\"", "]", ",", "[", "message", "]", ")", "response", "=", "api", "(", "url", "=", "self", ".", "__url"...
The pause command displays a dialog with the text provided in the message argument and waits for the user to click OK :param message: a message to display. default=None :param verbose: print more
[ "The", "pause", "command", "displays", "a", "dialog", "with", "the", "text", "provided", "in", "the", "message", "argument", "and", "waits", "for", "the", "user", "to", "click", "OK" ]
python
train
dossier/dossier.models
dossier/models/features/basic.py
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/features/basic.py#L78-L92
def host_names(urls): ''' Takes a StringCounter of normalized URL and parses their hostnames N.B. this assumes that absolute URLs will begin with http:// in order to accurately resolve the host name. Relative URLs will not have host names. ''' host_names = StringCounter() for url in urls: host_names[urlparse(url).netloc] += urls[url] return host_names
[ "def", "host_names", "(", "urls", ")", ":", "host_names", "=", "StringCounter", "(", ")", "for", "url", "in", "urls", ":", "host_names", "[", "urlparse", "(", "url", ")", ".", "netloc", "]", "+=", "urls", "[", "url", "]", "return", "host_names" ]
Takes a StringCounter of normalized URL and parses their hostnames N.B. this assumes that absolute URLs will begin with http:// in order to accurately resolve the host name. Relative URLs will not have host names.
[ "Takes", "a", "StringCounter", "of", "normalized", "URL", "and", "parses", "their", "hostnames" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/lstm.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/lstm.py#L177-L203
def lstm_seq2seq_internal(inputs, targets, hparams, train): """The basic LSTM seq2seq model, main step used for training.""" with tf.variable_scope("lstm_seq2seq"): if inputs is not None: inputs_length = common_layers.length_from_embedding(inputs) # Flatten inputs. inputs = common_layers.flatten4d3d(inputs) # LSTM encoder. inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1) _, final_encoder_state = lstm(inputs, inputs_length, hparams, train, "encoder") else: final_encoder_state = None # LSTM decoder. shifted_targets = common_layers.shift_right(targets) # Add 1 to account for the padding added to the left from shift_right targets_length = common_layers.length_from_embedding(shifted_targets) + 1 decoder_outputs, _ = lstm( common_layers.flatten4d3d(shifted_targets), targets_length, hparams, train, "decoder", initial_state=final_encoder_state) return tf.expand_dims(decoder_outputs, axis=2)
[ "def", "lstm_seq2seq_internal", "(", "inputs", ",", "targets", ",", "hparams", ",", "train", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"lstm_seq2seq\"", ")", ":", "if", "inputs", "is", "not", "None", ":", "inputs_length", "=", "common_layers", "....
The basic LSTM seq2seq model, main step used for training.
[ "The", "basic", "LSTM", "seq2seq", "model", "main", "step", "used", "for", "training", "." ]
python
train
mitsei/dlkit
dlkit/json_/repository/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L2370-L2388
def get_assets_by_repositories(self, repository_ids): """Gets the list of ``Assets`` corresponding to a list of ``Repository`` objects. arg: repository_ids (osid.id.IdList): list of repository ``Ids`` return: (osid.repository.AssetList) - list of assets raise: NullArgument - ``repository_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_resources_by_bins asset_list = [] for repository_id in repository_ids: asset_list += list( self.get_assets_by_repository(repository_id)) return objects.AssetList(asset_list)
[ "def", "get_assets_by_repositories", "(", "self", ",", "repository_ids", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinSession.get_resources_by_bins", "asset_list", "=", "[", "]", "for", "repository_id", "in", "repository_ids", ":", "asset_list", "+...
Gets the list of ``Assets`` corresponding to a list of ``Repository`` objects. arg: repository_ids (osid.id.IdList): list of repository ``Ids`` return: (osid.repository.AssetList) - list of assets raise: NullArgument - ``repository_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "list", "of", "Assets", "corresponding", "to", "a", "list", "of", "Repository", "objects", "." ]
python
train
theelous3/asks
asks/request_object.py
https://github.com/theelous3/asks/blob/ea522ea971ecb031d488a6301dc2718516cadcd6/asks/request_object.py#L616-L628
async def _send(self, request_bytes, body_bytes, h11_connection): ''' Takes a package and body, combines then, then shoots 'em off in to the ether. Args: package (list of str): The header package. body (str): The str representation of the body. ''' await self.sock.send_all(h11_connection.send(request_bytes)) if body_bytes is not None: await self.sock.send_all(h11_connection.send(body_bytes)) await self.sock.send_all(h11_connection.send(h11.EndOfMessage()))
[ "async", "def", "_send", "(", "self", ",", "request_bytes", ",", "body_bytes", ",", "h11_connection", ")", ":", "await", "self", ".", "sock", ".", "send_all", "(", "h11_connection", ".", "send", "(", "request_bytes", ")", ")", "if", "body_bytes", "is", "no...
Takes a package and body, combines then, then shoots 'em off in to the ether. Args: package (list of str): The header package. body (str): The str representation of the body.
[ "Takes", "a", "package", "and", "body", "combines", "then", "then", "shoots", "em", "off", "in", "to", "the", "ether", "." ]
python
train
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/widgets/browser.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/browser.py#L557-L571
def resizeEvent(self, event): """Schedules an item layout if resize mode is \"adjust\". Somehow this is needed for correctly scaling down items. The reason this was reimplemented was the CommentDelegate. :param event: the resize event :type event: QtCore.QEvent :returns: None :rtype: None :raises: None """ if self.resizeMode() == self.Adjust: self.scheduleDelayedItemsLayout() return super(ListLevel, self).resizeEvent(event)
[ "def", "resizeEvent", "(", "self", ",", "event", ")", ":", "if", "self", ".", "resizeMode", "(", ")", "==", "self", ".", "Adjust", ":", "self", ".", "scheduleDelayedItemsLayout", "(", ")", "return", "super", "(", "ListLevel", ",", "self", ")", ".", "re...
Schedules an item layout if resize mode is \"adjust\". Somehow this is needed for correctly scaling down items. The reason this was reimplemented was the CommentDelegate. :param event: the resize event :type event: QtCore.QEvent :returns: None :rtype: None :raises: None
[ "Schedules", "an", "item", "layout", "if", "resize", "mode", "is", "\\", "adjust", "\\", ".", "Somehow", "this", "is", "needed", "for", "correctly", "scaling", "down", "items", "." ]
python
train
ThreatResponse/margaritashotgun
margaritashotgun/cli.py
https://github.com/ThreatResponse/margaritashotgun/blob/6dee53ef267959b214953439968244cc46a19690/margaritashotgun/cli.py#L113-L141
def configure(self, arguments=None, config=None): """ Merge command line arguments, config files, and default configs :type arguments: argparse.Namespace :params arguments: Arguments produced by Cli.parse_args :type config: dict :params config: configuration dict to merge and validate """ if arguments is not None: args_config = self.configure_args(arguments) base_config = copy.deepcopy(default_config) working_config = self.merge_config(base_config, args_config) if config is not None: self.validate_config(config) base_config = copy.deepcopy(default_config) working_config = self.merge_config(base_config, config) # override configuration with environment variables repo = self.get_env_default('LIME_REPOSITORY', 'disabled') repo_url = self.get_env_default('LIME_REPOSITORY_URL', working_config['repository']['url']) if repo.lower() == 'enabled': working_config['repository']['enabled'] = True working_config['repository']['url'] = repo_url return working_config
[ "def", "configure", "(", "self", ",", "arguments", "=", "None", ",", "config", "=", "None", ")", ":", "if", "arguments", "is", "not", "None", ":", "args_config", "=", "self", ".", "configure_args", "(", "arguments", ")", "base_config", "=", "copy", ".", ...
Merge command line arguments, config files, and default configs :type arguments: argparse.Namespace :params arguments: Arguments produced by Cli.parse_args :type config: dict :params config: configuration dict to merge and validate
[ "Merge", "command", "line", "arguments", "config", "files", "and", "default", "configs" ]
python
train
sporsh/carnifex
carnifex/inductor.py
https://github.com/sporsh/carnifex/blob/82dd3bd2bc134dfb69a78f43171e227f2127060b/carnifex/inductor.py#L46-L58
def getExitCode(self, command, env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None): """Execute a command and get the return code of the finished process. """ deferred = defer.Deferred() processProtocol = _SummaryProcessProtocol(deferred) self.execute(processProtocol, command, env, path, uid, gid, usePTY, childFDs) @deferred.addCallback def getStdOut(tuple_): _stdout, _stderr, exitCode = tuple_ return exitCode return deferred
[ "def", "getExitCode", "(", "self", ",", "command", ",", "env", "=", "{", "}", ",", "path", "=", "None", ",", "uid", "=", "None", ",", "gid", "=", "None", ",", "usePTY", "=", "0", ",", "childFDs", "=", "None", ")", ":", "deferred", "=", "defer", ...
Execute a command and get the return code of the finished process.
[ "Execute", "a", "command", "and", "get", "the", "return", "code", "of", "the", "finished", "process", "." ]
python
train
QualiSystems/CloudShell-Traffic
cloudshell/traffic/quali_rest_api_helper.py
https://github.com/QualiSystems/CloudShell-Traffic/blob/4579d42e359fa9d5736dc4ceb8d86547f0e7120d/cloudshell/traffic/quali_rest_api_helper.py#L5-L24
def create_quali_api_instance(context, logger): """ Get needed attributes from context and create instance of QualiApiHelper :param context: :param logger: :return: """ if hasattr(context, 'reservation') and context.reservation: domain = context.reservation.domain elif hasattr(context, 'remote_reservation') and context.remote_reservation: domain = context.remote_reservation.domain else: domain = None address = context.connectivity.server_address token = context.connectivity.admin_auth_token if token: instance = QualiAPIHelper(address, logger, token=token, domain=domain) else: instance = QualiAPIHelper(address, logger, username='admin', password='admin', domain=domain) return instance
[ "def", "create_quali_api_instance", "(", "context", ",", "logger", ")", ":", "if", "hasattr", "(", "context", ",", "'reservation'", ")", "and", "context", ".", "reservation", ":", "domain", "=", "context", ".", "reservation", ".", "domain", "elif", "hasattr", ...
Get needed attributes from context and create instance of QualiApiHelper :param context: :param logger: :return:
[ "Get", "needed", "attributes", "from", "context", "and", "create", "instance", "of", "QualiApiHelper", ":", "param", "context", ":", ":", "param", "logger", ":", ":", "return", ":" ]
python
train
ethereum/lahja
lahja/endpoint.py
https://github.com/ethereum/lahja/blob/e3993c5892232887a11800ed3e66332febcee96b/lahja/endpoint.py#L475-L481
async def wait_for(self, event_type: Type[TWaitForEvent]) -> TWaitForEvent: # type: ignore """ Wait for a single instance of an event that matches the specified event type. """ # mypy thinks we are missing a return statement but this seems fair to do async for event in self.stream(event_type, num_events=1): return event
[ "async", "def", "wait_for", "(", "self", ",", "event_type", ":", "Type", "[", "TWaitForEvent", "]", ")", "->", "TWaitForEvent", ":", "# type: ignore", "# mypy thinks we are missing a return statement but this seems fair to do", "async", "for", "event", "in", "self", "."...
Wait for a single instance of an event that matches the specified event type.
[ "Wait", "for", "a", "single", "instance", "of", "an", "event", "that", "matches", "the", "specified", "event", "type", "." ]
python
train
acutesoftware/AIKIF
scripts/examples/document_AIKIF.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/document_AIKIF.py#L126-L143
def document_agents(p): """ Document agents in AIKIF (purpose and intent) """ p.comment('agent.py', 'base agent class') p.comment('run_agents.py', 'Top level function to run the agents') p.comment('agent_image_metadata.py', 'agent to collect file picture metadata') p.comment('agent_learn_aixi.py', '') p.comment('dummy_learn_1.py', 'sample (but stub only) learning algorithm to be called as test below') p.comment('agent_explore_grid.py', 'working prototype of agent to move through a grid world, using very simple path finding.') p.comment('agent_email.py', 'Agent that reads emails (currently only gmail)') p.comment('agent_filelist.py', 'TOK - correctly scans and logs filelists from an agent') p.comment('collect_Win_processes.py', 'script to collect windows processes. Currently not part of agent process, more an exercise on what can be logged') p.comment('log_PC_usage.py', 'script to read current window title to be used as part of context to see what user is doing') p.comment('log_browser_history.py', 'script to dump chrome browser history to CSV - not used') p.comment('agg_context.py', 'detects context of user and computer')
[ "def", "document_agents", "(", "p", ")", ":", "p", ".", "comment", "(", "'agent.py'", ",", "'base agent class'", ")", "p", ".", "comment", "(", "'run_agents.py'", ",", "'Top level function to run the agents'", ")", "p", ".", "comment", "(", "'agent_image_metadata....
Document agents in AIKIF (purpose and intent)
[ "Document", "agents", "in", "AIKIF", "(", "purpose", "and", "intent", ")" ]
python
train
awslabs/sockeye
sockeye/inference.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/inference.py#L1497-L1619
def translate(self, trans_inputs: List[TranslatorInput], fill_up_batches: bool = True) -> List[TranslatorOutput]: """ Batch-translates a list of TranslatorInputs, returns a list of TranslatorOutputs. Empty or bad inputs are skipped. Splits inputs longer than Translator.max_input_length into segments of size max_input_length, and then groups segments into batches of at most Translator.max_batch_size. Too-long segments that were split are reassembled into a single output after translation. If fill_up_batches is set to True, underfilled batches are padded to Translator.max_batch_size, otherwise dynamic batch sizing is used, which comes at increased memory usage. :param trans_inputs: List of TranslatorInputs as returned by make_input(). :param fill_up_batches: If True, underfilled batches are padded to Translator.max_batch_size. :return: List of translation results. """ num_inputs = len(trans_inputs) translated_chunks = [] # type: List[IndexedTranslation] # split into chunks input_chunks = [] # type: List[IndexedTranslatorInput] for trans_input_idx, trans_input in enumerate(trans_inputs): # bad input if isinstance(trans_input, BadTranslatorInput): translated_chunks.append(IndexedTranslation(input_idx=trans_input_idx, chunk_idx=0, translation=empty_translation(add_nbest=(self.nbest_size > 1)))) # empty input elif len(trans_input.tokens) == 0: translated_chunks.append(IndexedTranslation(input_idx=trans_input_idx, chunk_idx=0, translation=empty_translation(add_nbest=(self.nbest_size > 1)))) else: # TODO(tdomhan): Remove branch without EOS with next major version bump, as future models will always be trained with source side EOS symbols if self.source_with_eos: max_input_length_without_eos = self.max_input_length # oversized input if len(trans_input.tokens) > max_input_length_without_eos: logger.debug( "Input %s has length (%d) that exceeds max input length (%d). 
" "Splitting into chunks of size %d.", trans_input.sentence_id, len(trans_input.tokens), self.buckets_source[-1], max_input_length_without_eos) chunks = [trans_input_chunk.with_eos() for trans_input_chunk in trans_input.chunks(max_input_length_without_eos)] input_chunks.extend([IndexedTranslatorInput(trans_input_idx, chunk_idx, chunk_input) for chunk_idx, chunk_input in enumerate(chunks)]) # regular input else: input_chunks.append(IndexedTranslatorInput(trans_input_idx, chunk_idx=0, translator_input=trans_input.with_eos())) else: if len(trans_input.tokens) > self.max_input_length: # oversized input logger.debug( "Input %s has length (%d) that exceeds max input length (%d). " "Splitting into chunks of size %d.", trans_input.sentence_id, len(trans_input.tokens), self.buckets_source[-1], self.max_input_length) chunks = [trans_input_chunk for trans_input_chunk in trans_input.chunks(self.max_input_length)] input_chunks.extend([IndexedTranslatorInput(trans_input_idx, chunk_idx, chunk_input) for chunk_idx, chunk_input in enumerate(chunks)]) else: # regular input input_chunks.append(IndexedTranslatorInput(trans_input_idx, chunk_idx=0, translator_input=trans_input)) if trans_input.constraints is not None: logger.info("Input %s has %d %s: %s", trans_input.sentence_id, len(trans_input.constraints), "constraint" if len(trans_input.constraints) == 1 else "constraints", ", ".join(" ".join(x) for x in trans_input.constraints)) num_bad_empty = len(translated_chunks) # Sort longest to shortest (to rather fill batches of shorter than longer sequences) input_chunks = sorted(input_chunks, key=lambda chunk: len(chunk.translator_input.tokens), reverse=True) # translate in batch-sized blocks over input chunks batch_size = self.max_batch_size if fill_up_batches else min(len(input_chunks), self.max_batch_size) num_batches = 0 for batch_id, batch in enumerate(utils.grouper(input_chunks, batch_size)): logger.debug("Translating batch %d", batch_id) rest = batch_size - len(batch) if 
fill_up_batches and rest > 0: logger.debug("Padding batch of size %d to full batch size (%d)", len(batch), batch_size) batch = batch + [batch[0]] * rest translator_inputs = [indexed_translator_input.translator_input for indexed_translator_input in batch] batch_translations = self._translate_nd(*self._get_inference_input(translator_inputs)) # truncate to remove filler translations if fill_up_batches and rest > 0: batch_translations = batch_translations[:-rest] for chunk, translation in zip(batch, batch_translations): translated_chunks.append(IndexedTranslation(chunk.input_idx, chunk.chunk_idx, translation)) num_batches += 1 # Sort by input idx and then chunk id translated_chunks = sorted(translated_chunks) num_chunks = len(translated_chunks) # Concatenate results results = [] # type: List[TranslatorOutput] chunks_by_input_idx = itertools.groupby(translated_chunks, key=lambda translation: translation.input_idx) for trans_input, (input_idx, translations_for_input_idx) in zip(trans_inputs, chunks_by_input_idx): translations_for_input_idx = list(translations_for_input_idx) # type: ignore if len(translations_for_input_idx) == 1: # type: ignore translation = translations_for_input_idx[0].translation # type: ignore else: translations_to_concat = [translated_chunk.translation for translated_chunk in translations_for_input_idx] translation = self._concat_translations(translations_to_concat) results.append(self._make_result(trans_input, translation)) num_outputs = len(results) logger.debug("Translated %d inputs (%d chunks) in %d batches to %d outputs. %d empty/bad inputs.", num_inputs, num_chunks, num_batches, num_outputs, num_bad_empty) return results
[ "def", "translate", "(", "self", ",", "trans_inputs", ":", "List", "[", "TranslatorInput", "]", ",", "fill_up_batches", ":", "bool", "=", "True", ")", "->", "List", "[", "TranslatorOutput", "]", ":", "num_inputs", "=", "len", "(", "trans_inputs", ")", "tra...
Batch-translates a list of TranslatorInputs, returns a list of TranslatorOutputs. Empty or bad inputs are skipped. Splits inputs longer than Translator.max_input_length into segments of size max_input_length, and then groups segments into batches of at most Translator.max_batch_size. Too-long segments that were split are reassembled into a single output after translation. If fill_up_batches is set to True, underfilled batches are padded to Translator.max_batch_size, otherwise dynamic batch sizing is used, which comes at increased memory usage. :param trans_inputs: List of TranslatorInputs as returned by make_input(). :param fill_up_batches: If True, underfilled batches are padded to Translator.max_batch_size. :return: List of translation results.
[ "Batch", "-", "translates", "a", "list", "of", "TranslatorInputs", "returns", "a", "list", "of", "TranslatorOutputs", ".", "Empty", "or", "bad", "inputs", "are", "skipped", ".", "Splits", "inputs", "longer", "than", "Translator", ".", "max_input_length", "into",...
python
train
batiste/django-page-cms
pages/placeholders.py
https://github.com/batiste/django-page-cms/blob/3c72111eb7c3997a63c462c1776ffd8ce8c50a5d/pages/placeholders.py#L282-L303
def render(self, context): """Output the content of the `PlaceholdeNode` as a template.""" content = self.get_render_content(context) request = context.get('request') render_edit_tag = False if request and request.user.is_staff and request.COOKIES.get('enable_edit_mode'): render_edit_tag = True if not content: if not render_edit_tag: return '' return self.edit_tag() if self.parsed: content = self.render_parsed(context, content) if self.as_varname is None: if not render_edit_tag: return content return content + self.edit_tag() context[self.as_varname] = content return ''
[ "def", "render", "(", "self", ",", "context", ")", ":", "content", "=", "self", ".", "get_render_content", "(", "context", ")", "request", "=", "context", ".", "get", "(", "'request'", ")", "render_edit_tag", "=", "False", "if", "request", "and", "request"...
Output the content of the `PlaceholdeNode` as a template.
[ "Output", "the", "content", "of", "the", "PlaceholdeNode", "as", "a", "template", "." ]
python
train
quantumlib/Cirq
cirq/ops/qubit_order.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/ops/qubit_order.py#L117-L132
def as_qubit_order(val: 'qubit_order_or_list.QubitOrderOrList' ) -> 'QubitOrder': """Converts a value into a basis. Args: val: An iterable or a basis. Returns: The basis implied by the value. """ if isinstance(val, collections.Iterable): return QubitOrder.explicit(val) if isinstance(val, QubitOrder): return val raise ValueError( "Don't know how to interpret <{}> as a Basis.".format(val))
[ "def", "as_qubit_order", "(", "val", ":", "'qubit_order_or_list.QubitOrderOrList'", ")", "->", "'QubitOrder'", ":", "if", "isinstance", "(", "val", ",", "collections", ".", "Iterable", ")", ":", "return", "QubitOrder", ".", "explicit", "(", "val", ")", "if", "...
Converts a value into a basis. Args: val: An iterable or a basis. Returns: The basis implied by the value.
[ "Converts", "a", "value", "into", "a", "basis", "." ]
python
train
metagriffin/pysyncml
pysyncml/items/base.py
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/items/base.py#L70-L83
def dumps(self, contentType=None, version=None): ''' [OPTIONAL] Identical to :meth:`dump`, except the serialized form is returned as a string representation. As documented in :meth:`dump`, the return value can optionally be a three-element tuple of (contentType, version, data) if the provided content-type should be overridden or enhanced. The default implementation just wraps :meth:`dump`. ''' buf = six.StringIO() ret = self.dump(buf, contentType, version) if ret is None: return buf.getvalue() return (ret[0], ret[1], buf.getvalue())
[ "def", "dumps", "(", "self", ",", "contentType", "=", "None", ",", "version", "=", "None", ")", ":", "buf", "=", "six", ".", "StringIO", "(", ")", "ret", "=", "self", ".", "dump", "(", "buf", ",", "contentType", ",", "version", ")", "if", "ret", ...
[OPTIONAL] Identical to :meth:`dump`, except the serialized form is returned as a string representation. As documented in :meth:`dump`, the return value can optionally be a three-element tuple of (contentType, version, data) if the provided content-type should be overridden or enhanced. The default implementation just wraps :meth:`dump`.
[ "[", "OPTIONAL", "]", "Identical", "to", ":", "meth", ":", "dump", "except", "the", "serialized", "form", "is", "returned", "as", "a", "string", "representation", ".", "As", "documented", "in", ":", "meth", ":", "dump", "the", "return", "value", "can", "...
python
valid
ronaldguillen/wave
wave/views.py
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/views.py#L161-L167
def permission_denied(self, request, message=None): """ If request is not permitted, determine what kind of exception to raise. """ if not request.successful_authenticator: raise exceptions.NotAuthenticated() raise exceptions.PermissionDenied(detail=message)
[ "def", "permission_denied", "(", "self", ",", "request", ",", "message", "=", "None", ")", ":", "if", "not", "request", ".", "successful_authenticator", ":", "raise", "exceptions", ".", "NotAuthenticated", "(", ")", "raise", "exceptions", ".", "PermissionDenied"...
If request is not permitted, determine what kind of exception to raise.
[ "If", "request", "is", "not", "permitted", "determine", "what", "kind", "of", "exception", "to", "raise", "." ]
python
train
hubo1016/vlcp
vlcp/event/core.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L163-L173
def unregisterall(self, runnable): ''' Unregister all matches and detach the runnable. Automatically called when runnable returns StopIteration. ''' if runnable in self.registerIndex: for m in self.registerIndex[runnable]: self.matchtree.remove(m, runnable) if m.indices[0] == PollEvent._classname0 and len(m.indices) >= 2: self.polling.onmatch(m.indices[1], None if len(m.indices) <= 2 else m.indices[2], False) del self.registerIndex[runnable] self.daemons.discard(runnable)
[ "def", "unregisterall", "(", "self", ",", "runnable", ")", ":", "if", "runnable", "in", "self", ".", "registerIndex", ":", "for", "m", "in", "self", ".", "registerIndex", "[", "runnable", "]", ":", "self", ".", "matchtree", ".", "remove", "(", "m", ","...
Unregister all matches and detach the runnable. Automatically called when runnable returns StopIteration.
[ "Unregister", "all", "matches", "and", "detach", "the", "runnable", ".", "Automatically", "called", "when", "runnable", "returns", "StopIteration", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xganttwidget/xganttwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xganttwidget/xganttwidget.py#L600-L617
def setUpdatesEnabled(self, state): """ Sets whether or not updates will be enabled. :param state | <bool> """ super(XGanttWidget, self).setUpdatesEnabled(state) self.treeWidget().setUpdatesEnabled(state) self.viewWidget().setUpdatesEnabled(state) if state: self._updateViewRect() for i in range(self.topLevelItemCount()): item = self.topLevelItem(i) try: item.sync(recursive=True) except AttributeError: continue
[ "def", "setUpdatesEnabled", "(", "self", ",", "state", ")", ":", "super", "(", "XGanttWidget", ",", "self", ")", ".", "setUpdatesEnabled", "(", "state", ")", "self", ".", "treeWidget", "(", ")", ".", "setUpdatesEnabled", "(", "state", ")", "self", ".", "...
Sets whether or not updates will be enabled. :param state | <bool>
[ "Sets", "whether", "or", "not", "updates", "will", "be", "enabled", ".", ":", "param", "state", "|", "<bool", ">" ]
python
train
cbrand/vpnchooser
src/vpnchooser/resources/device.py
https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/resources/device.py#L114-L123
def put(self, device_id: int) -> Device: """ Updates the Device Resource with the name. """ device = self._get_or_abort(device_id) self.update(device) session.commit() session.add(device) return device
[ "def", "put", "(", "self", ",", "device_id", ":", "int", ")", "->", "Device", ":", "device", "=", "self", ".", "_get_or_abort", "(", "device_id", ")", "self", ".", "update", "(", "device", ")", "session", ".", "commit", "(", ")", "session", ".", "add...
Updates the Device Resource with the name.
[ "Updates", "the", "Device", "Resource", "with", "the", "name", "." ]
python
train
Projectplace/basepage
basepage/base_page.py
https://github.com/Projectplace/basepage/blob/735476877eb100db0981590a6d12140e68652167/basepage/base_page.py#L432-L448
def get_present_elements(self, locator, params=None, timeout=None, visible=False, parent=None): """ Get elements present in the DOM. If timeout is 0 (zero) return WebElement instance or None, else we wait and retry for timeout and raise TimeoutException should the element not be found. :param locator: element identifier :param params: (optional) locator parameters :param timeout: (optional) time to wait for element (default: self._explicit_wait) :param visible: (optional) if the element should also be visible (default: False) :param parent: internal (see #get_present_children) :return: WebElement instance """ error_msg = "Children were never present" if parent else "Elements were never present!" expected_condition = ec.visibility_of_all_elements_located if visible else ec.presence_of_all_elements_located return self._get(locator, expected_condition, params, timeout, error_msg, parent)
[ "def", "get_present_elements", "(", "self", ",", "locator", ",", "params", "=", "None", ",", "timeout", "=", "None", ",", "visible", "=", "False", ",", "parent", "=", "None", ")", ":", "error_msg", "=", "\"Children were never present\"", "if", "parent", "els...
Get elements present in the DOM. If timeout is 0 (zero) return WebElement instance or None, else we wait and retry for timeout and raise TimeoutException should the element not be found. :param locator: element identifier :param params: (optional) locator parameters :param timeout: (optional) time to wait for element (default: self._explicit_wait) :param visible: (optional) if the element should also be visible (default: False) :param parent: internal (see #get_present_children) :return: WebElement instance
[ "Get", "elements", "present", "in", "the", "DOM", "." ]
python
train
Skype4Py/Skype4Py
Skype4Py/client.py
https://github.com/Skype4Py/Skype4Py/blob/c48d83f7034109fe46315d45a066126002c6e0d4/Skype4Py/client.py#L150-L162
def OpenDialog(self, Name, *Params): """Open dialog. Use this method to open dialogs added in newer Skype versions if there is no dedicated method in Skype4Py. :Parameters: Name : str Dialog name. Params : unicode One or more optional parameters. """ self._Skype._Api.allow_focus(self._Skype.Timeout) params = filter(None, (str(Name),) + Params) self._Skype._DoCommand('OPEN %s' % tounicode(' '.join(params)))
[ "def", "OpenDialog", "(", "self", ",", "Name", ",", "*", "Params", ")", ":", "self", ".", "_Skype", ".", "_Api", ".", "allow_focus", "(", "self", ".", "_Skype", ".", "Timeout", ")", "params", "=", "filter", "(", "None", ",", "(", "str", "(", "Name"...
Open dialog. Use this method to open dialogs added in newer Skype versions if there is no dedicated method in Skype4Py. :Parameters: Name : str Dialog name. Params : unicode One or more optional parameters.
[ "Open", "dialog", ".", "Use", "this", "method", "to", "open", "dialogs", "added", "in", "newer", "Skype", "versions", "if", "there", "is", "no", "dedicated", "method", "in", "Skype4Py", "." ]
python
train
niklasf/python-chess
chess/__init__.py
https://github.com/niklasf/python-chess/blob/d91f986ca3e046b300a0d7d9ee2a13b07610fe1a/chess/__init__.py#L1143-L1188
def unicode(self, *, invert_color: bool = False, borders: bool = False) -> str: """ Returns a string representation of the board with Unicode pieces. Useful for pretty-printing to a terminal. :param invert_color: Invert color of the Unicode pieces. :param borders: Show borders and a coordinate margin. """ builder = [] for rank_index in range(7, -1, -1): if borders: builder.append(" ") builder.append("-" * 17) builder.append("\n") builder.append(RANK_NAMES[rank_index]) builder.append(" ") for file_index in range(8): square_index = square(file_index, rank_index) if borders: builder.append("|") elif file_index > 0: builder.append(" ") piece = self.piece_at(square_index) if piece: builder.append(piece.unicode_symbol(invert_color=invert_color)) else: builder.append(u"·") if borders: builder.append("|") if borders or rank_index > 0: builder.append("\n") if borders: builder.append(" ") builder.append("-" * 17) builder.append("\n") builder.append(" a b c d e f g h") return "".join(builder)
[ "def", "unicode", "(", "self", ",", "*", ",", "invert_color", ":", "bool", "=", "False", ",", "borders", ":", "bool", "=", "False", ")", "->", "str", ":", "builder", "=", "[", "]", "for", "rank_index", "in", "range", "(", "7", ",", "-", "1", ",",...
Returns a string representation of the board with Unicode pieces. Useful for pretty-printing to a terminal. :param invert_color: Invert color of the Unicode pieces. :param borders: Show borders and a coordinate margin.
[ "Returns", "a", "string", "representation", "of", "the", "board", "with", "Unicode", "pieces", ".", "Useful", "for", "pretty", "-", "printing", "to", "a", "terminal", "." ]
python
train
Calysto/calysto
calysto/ai/conx.py
https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L420-L428
def setLog(self, fileName, writeName=False): """ Opens a log file with name fileName. """ self.log = 1 self.logFile = fileName self._logPtr = open(fileName, "w") if writeName: self._namePtr = open(fileName + ".name", "w")
[ "def", "setLog", "(", "self", ",", "fileName", ",", "writeName", "=", "False", ")", ":", "self", ".", "log", "=", "1", "self", ".", "logFile", "=", "fileName", "self", ".", "_logPtr", "=", "open", "(", "fileName", ",", "\"w\"", ")", "if", "writeName"...
Opens a log file with name fileName.
[ "Opens", "a", "log", "file", "with", "name", "fileName", "." ]
python
train
raiden-network/raiden-contracts
raiden_contracts/deploy/contract_deployer.py
https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/deploy/contract_deployer.py#L108-L120
def transact( self, contract_method: ContractFunction, ): """ A wrapper around to_be_called.transact() that waits until the transaction succeeds. """ txhash = contract_method.transact(self.transaction) LOG.debug(f'Sending txHash={encode_hex(txhash)}') (receipt, _) = check_successful_tx( web3=self.web3, txid=txhash, timeout=self.wait, ) return receipt
[ "def", "transact", "(", "self", ",", "contract_method", ":", "ContractFunction", ",", ")", ":", "txhash", "=", "contract_method", ".", "transact", "(", "self", ".", "transaction", ")", "LOG", ".", "debug", "(", "f'Sending txHash={encode_hex(txhash)}'", ")", "(",...
A wrapper around to_be_called.transact() that waits until the transaction succeeds.
[ "A", "wrapper", "around", "to_be_called", ".", "transact", "()", "that", "waits", "until", "the", "transaction", "succeeds", "." ]
python
train
wikimedia/editquality
editquality/utilities/autolabel.py
https://github.com/wikimedia/editquality/blob/73bab7bdd0ef3dba9a000f91f2fd810b1772d1f0/editquality/utilities/autolabel.py#L316-L331
def query_revisions_by_revids(session, revids, **params): """ Gets a set of revisions by their IDs by repeatedly querying in batches. If an ID cannot be found, it is ignored. """ doc = session.get(action='query', prop='revisions', revids=revids, **params) for page_doc in doc['query'].get('pages', {}).values(): revisions = page_doc.get('revisions', []) if 'revisions' in page_doc: del page_doc['revisions'] for revision_doc in revisions: revision_doc['page'] = page_doc yield revision_doc
[ "def", "query_revisions_by_revids", "(", "session", ",", "revids", ",", "*", "*", "params", ")", ":", "doc", "=", "session", ".", "get", "(", "action", "=", "'query'", ",", "prop", "=", "'revisions'", ",", "revids", "=", "revids", ",", "*", "*", "param...
Gets a set of revisions by their IDs by repeatedly querying in batches. If an ID cannot be found, it is ignored.
[ "Gets", "a", "set", "of", "revisions", "by", "their", "IDs", "by", "repeatedly", "querying", "in", "batches", ".", "If", "an", "ID", "cannot", "be", "found", "it", "is", "ignored", "." ]
python
train
bruziev/security_interface
security_interface/api.py
https://github.com/bruziev/security_interface/blob/ec1f30c8ac051291694b0099caa0a7fde97ddfe6/security_interface/api.py#L49-L61
async def check_authorized(self, identity): """ Works like :func:`Security.identity`, but when check is failed :func:`UnauthorizedError` exception is raised. :param identity: Claim :return: Checked claim or return ``None`` :raise: :func:`UnauthorizedError` """ identify = await self.identify(identity) if identify is None: raise UnauthorizedError() return identify
[ "async", "def", "check_authorized", "(", "self", ",", "identity", ")", ":", "identify", "=", "await", "self", ".", "identify", "(", "identity", ")", "if", "identify", "is", "None", ":", "raise", "UnauthorizedError", "(", ")", "return", "identify" ]
Works like :func:`Security.identity`, but when check is failed :func:`UnauthorizedError` exception is raised. :param identity: Claim :return: Checked claim or return ``None`` :raise: :func:`UnauthorizedError`
[ "Works", "like", ":", "func", ":", "Security", ".", "identity", "but", "when", "check", "is", "failed", ":", "func", ":", "UnauthorizedError", "exception", "is", "raised", "." ]
python
train
CivicSpleen/ambry
ambry/bundle/bundle.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/bundle.py#L2422-L2436
def collect_segment_partitions(self): """Return a dict of segments partitions, keyed on the name of the parent partition """ from collections import defaultdict # Group the segments by their parent partition name, which is the # same name, but without the segment. partitions = defaultdict(set) for p in self.dataset.partitions: if p.type == p.TYPE.SEGMENT: name = p.identity.name name.segment = None partitions[name].add(p) return partitions
[ "def", "collect_segment_partitions", "(", "self", ")", ":", "from", "collections", "import", "defaultdict", "# Group the segments by their parent partition name, which is the", "# same name, but without the segment.", "partitions", "=", "defaultdict", "(", "set", ")", "for", "p...
Return a dict of segments partitions, keyed on the name of the parent partition
[ "Return", "a", "dict", "of", "segments", "partitions", "keyed", "on", "the", "name", "of", "the", "parent", "partition" ]
python
train
ggaughan/pipe2py
pipe2py/modules/pipestrregex.py
https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipestrregex.py#L67-L91
def pipe_strregex(context=None, _INPUT=None, conf=None, **kwargs): """A string module that replaces text using regexes. Each has the general format: "In [field] replace [regex pattern] with [text]". Loopable. Parameters ---------- context : pipe2py.Context object _INPUT : iterable of items or strings conf : { 'RULE': [ { 'match': {'value': <regex>}, 'replace': {'value': <'replacement'>} } ] } Returns ------- _OUTPUT : generator of replaced strings """ splits = get_splits(_INPUT, conf['RULE'], **cdicts(opts, kwargs)) parsed = utils.dispatch(splits, *get_dispatch_funcs(first=convert_func)) _OUTPUT = starmap(parse_result, parsed) return _OUTPUT
[ "def", "pipe_strregex", "(", "context", "=", "None", ",", "_INPUT", "=", "None", ",", "conf", "=", "None", ",", "*", "*", "kwargs", ")", ":", "splits", "=", "get_splits", "(", "_INPUT", ",", "conf", "[", "'RULE'", "]", ",", "*", "*", "cdicts", "(",...
A string module that replaces text using regexes. Each has the general format: "In [field] replace [regex pattern] with [text]". Loopable. Parameters ---------- context : pipe2py.Context object _INPUT : iterable of items or strings conf : { 'RULE': [ { 'match': {'value': <regex>}, 'replace': {'value': <'replacement'>} } ] } Returns ------- _OUTPUT : generator of replaced strings
[ "A", "string", "module", "that", "replaces", "text", "using", "regexes", ".", "Each", "has", "the", "general", "format", ":", "In", "[", "field", "]", "replace", "[", "regex", "pattern", "]", "with", "[", "text", "]", ".", "Loopable", "." ]
python
train
edwardslabs/cleverwrap.py
cleverwrap/cleverwrap.py
https://github.com/edwardslabs/cleverwrap.py/blob/0c1e8279fe0f780fcf1ca1270cfb477c28c39e27/cleverwrap/cleverwrap.py#L45-L62
def say(self, text): """ Say something to www.cleverbot.com :type text: string Returns: string """ params = { "input": text, "key": self.key, "cs": self.cs, "conversation_id": self.convo_id, "wrapper": "CleverWrap.py" } reply = self._send(params) self._process_reply(reply) return self.output
[ "def", "say", "(", "self", ",", "text", ")", ":", "params", "=", "{", "\"input\"", ":", "text", ",", "\"key\"", ":", "self", ".", "key", ",", "\"cs\"", ":", "self", ".", "cs", ",", "\"conversation_id\"", ":", "self", ".", "convo_id", ",", "\"wrapper\...
Say something to www.cleverbot.com :type text: string Returns: string
[ "Say", "something", "to", "www", ".", "cleverbot", ".", "com", ":", "type", "text", ":", "string", "Returns", ":", "string" ]
python
train
seleniumbase/SeleniumBase
seleniumbase/plugins/base_plugin.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/plugins/base_plugin.py#L124-L137
def __log_all_options_if_none_specified(self, test): """ When testing_base is specified, but none of the log options to save are specified (basic_test_info, screen_shots, page_source), then save them all by default. Otherwise, save only selected ones from their plugins. """ if ((not self.options.enable_plugin_basic_test_info) and ( not self.options.enable_plugin_screen_shots) and ( not self.options.enable_plugin_page_source)): test_logpath = self.options.log_path + "/" + test.id() log_helper.log_screenshot(test_logpath, test.driver) log_helper.log_test_failure_data( test, test_logpath, test.driver, test.browser) log_helper.log_page_source(test_logpath, test.driver)
[ "def", "__log_all_options_if_none_specified", "(", "self", ",", "test", ")", ":", "if", "(", "(", "not", "self", ".", "options", ".", "enable_plugin_basic_test_info", ")", "and", "(", "not", "self", ".", "options", ".", "enable_plugin_screen_shots", ")", "and", ...
When testing_base is specified, but none of the log options to save are specified (basic_test_info, screen_shots, page_source), then save them all by default. Otherwise, save only selected ones from their plugins.
[ "When", "testing_base", "is", "specified", "but", "none", "of", "the", "log", "options", "to", "save", "are", "specified", "(", "basic_test_info", "screen_shots", "page_source", ")", "then", "save", "them", "all", "by", "default", ".", "Otherwise", "save", "on...
python
train
aiogram/aiogram
aiogram/types/message.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/types/message.py#L1470-L1476
async def delete(self): """ Delete this message :return: bool """ return await self.bot.delete_message(self.chat.id, self.message_id)
[ "async", "def", "delete", "(", "self", ")", ":", "return", "await", "self", ".", "bot", ".", "delete_message", "(", "self", ".", "chat", ".", "id", ",", "self", ".", "message_id", ")" ]
Delete this message :return: bool
[ "Delete", "this", "message" ]
python
train
Kortemme-Lab/klab
klab/pymath/cartesian/rmsd.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/pymath/cartesian/rmsd.py#L3-L8
def compute_rmsd_by_matrix(dataframe_1, dataframe_2, use_assertion = False): '''Computes the RMSD of two pandas dataframes. The dataframes are expected to be of equal dimensions and use_assertion can be set to assert that the row indices match. ''' if use_assertion: assert([i for i in dataframe_1.index] == [i for i in dataframe_2.index]) # Note: this assertion creates garbage memory allocations num_points = dataframe_1.shape[0] return numpy.linalg.norm(dataframe_1 - dataframe_2) / numpy.sqrt(num_points)
[ "def", "compute_rmsd_by_matrix", "(", "dataframe_1", ",", "dataframe_2", ",", "use_assertion", "=", "False", ")", ":", "if", "use_assertion", ":", "assert", "(", "[", "i", "for", "i", "in", "dataframe_1", ".", "index", "]", "==", "[", "i", "for", "i", "i...
Computes the RMSD of two pandas dataframes. The dataframes are expected to be of equal dimensions and use_assertion can be set to assert that the row indices match.
[ "Computes", "the", "RMSD", "of", "two", "pandas", "dataframes", ".", "The", "dataframes", "are", "expected", "to", "be", "of", "equal", "dimensions", "and", "use_assertion", "can", "be", "set", "to", "assert", "that", "the", "row", "indices", "match", "." ]
python
train
genialis/resolwe
resolwe/flow/management/commands/collecttools.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/management/commands/collecttools.py#L120-L132
def handle(self, **options): """Collect tools.""" self.set_options(**options) os.makedirs(self.destination_path, exist_ok=True) if self.interactive and any(os.listdir(self.destination_path)): self.get_confirmation() if self.clear: self.clear_dir() self.collect()
[ "def", "handle", "(", "self", ",", "*", "*", "options", ")", ":", "self", ".", "set_options", "(", "*", "*", "options", ")", "os", ".", "makedirs", "(", "self", ".", "destination_path", ",", "exist_ok", "=", "True", ")", "if", "self", ".", "interacti...
Collect tools.
[ "Collect", "tools", "." ]
python
train
bninja/pilo
pilo/fields.py
https://github.com/bninja/pilo/blob/32b7298a47e33fb7383103017b4f3b59ad76ea6f/pilo/fields.py#L481-L489
def _validate(self, value): """ Predicate used to determine if a computed value is valid, True, or not, False. """ if value is None and not self.nullable: self.ctx.errors.invalid('not nullable') return False return True
[ "def", "_validate", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", "and", "not", "self", ".", "nullable", ":", "self", ".", "ctx", ".", "errors", ".", "invalid", "(", "'not nullable'", ")", "return", "False", "return", "True" ]
Predicate used to determine if a computed value is valid, True, or not, False.
[ "Predicate", "used", "to", "determine", "if", "a", "computed", "value", "is", "valid", "True", "or", "not", "False", "." ]
python
train
mcuadros/pynats
pynats/connection.py
https://github.com/mcuadros/pynats/blob/afbf0766c5546f9b8e7b54ddc89abd2602883b6c/pynats/connection.py#L117-L132
def unsubscribe(self, subscription, max=None): """ Unsubscribe will remove interest in the given subject. If max is provided an automatic Unsubscribe that is processed by the server when max messages have been received Args: subscription (pynats.Subscription): a Subscription object max (int=None): number of messages """ if max is None: self._send('UNSUB %d' % subscription.sid) self._subscriptions.pop(subscription.sid) else: subscription.max = max self._send('UNSUB %d %s' % (subscription.sid, max))
[ "def", "unsubscribe", "(", "self", ",", "subscription", ",", "max", "=", "None", ")", ":", "if", "max", "is", "None", ":", "self", ".", "_send", "(", "'UNSUB %d'", "%", "subscription", ".", "sid", ")", "self", ".", "_subscriptions", ".", "pop", "(", ...
Unsubscribe will remove interest in the given subject. If max is provided an automatic Unsubscribe that is processed by the server when max messages have been received Args: subscription (pynats.Subscription): a Subscription object max (int=None): number of messages
[ "Unsubscribe", "will", "remove", "interest", "in", "the", "given", "subject", ".", "If", "max", "is", "provided", "an", "automatic", "Unsubscribe", "that", "is", "processed", "by", "the", "server", "when", "max", "messages", "have", "been", "received" ]
python
train
titusjan/argos
argos/utils/moduleinfo.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/utils/moduleinfo.py#L136-L152
def tryImportModule(self, name): """ Imports the module and sets version information If the module cannot be imported, the version is set to empty values. """ self._name = name try: import importlib self._module = importlib.import_module(name) except ImportError: self._module = None self._version = '' self._packagePath = '' else: if self._versionAttribute: self._version = getattr(self._module, self._versionAttribute, '???') if self._pathAttribute: self._packagePath = getattr(self._module, self._pathAttribute, '???')
[ "def", "tryImportModule", "(", "self", ",", "name", ")", ":", "self", ".", "_name", "=", "name", "try", ":", "import", "importlib", "self", ".", "_module", "=", "importlib", ".", "import_module", "(", "name", ")", "except", "ImportError", ":", "self", "....
Imports the module and sets version information If the module cannot be imported, the version is set to empty values.
[ "Imports", "the", "module", "and", "sets", "version", "information", "If", "the", "module", "cannot", "be", "imported", "the", "version", "is", "set", "to", "empty", "values", "." ]
python
train
thespacedoctor/transientNamer
transientNamer/search.py
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L359-L430
def yaml( self, dirPath=None): """*Render the results in yaml format* **Key Arguments:** - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None* **Return:** - `yamlSources` -- the top-level transient data - `yamlPhot` -- all photometry associated with the transients - `yamlSpec` -- all spectral data associated with the transients - `yamlFiles` -- all files associated with the matched transients found on the tns **Usage:** To render the results in yaml format: .. code-block:: python yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml() print yamlSources .. code-block:: text - TNSId: 2016asf TNSName: SN2016asf decDeg: 31.1126 decSex: '+31:06:45.36' discDate: '2016-03-06 08:09:36' discMag: '17.1' discMagFilter: V-Johnson discSurvey: ASAS-SN discoveryName: ASASSN-16cs hostName: KUG 0647+311 hostRedshift: null objectUrl: http://wis-tns.weizmann.ac.il/object/2016asf raDeg: 102.65304166666667 raSex: '06:50:36.73' separationArcsec: '0.66' separationEastArcsec: '-0.13' separationNorthArcsec: '0.65' specType: SN Ia transRedshift: '0.021' You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`. .. code-block:: python tns.yaml("~/tns") .. 
image:: https://i.imgur.com/ZpJIC6p.png :width: 800px :alt: yaml output """ if dirPath: p = self._file_prefix() yamlSources = self.sourceResults.yaml( filepath=dirPath + "/" + p + "sources.yaml") yamlPhot = self.photResults.yaml( filepath=dirPath + "/" + p + "phot.yaml") yamlSpec = self.specResults.yaml( filepath=dirPath + "/" + p + "spec.yaml") yamlFiles = self.relatedFilesResults.yaml( filepath=dirPath + "/" + p + "relatedFiles.yaml") else: yamlSources = self.sourceResults.yaml() yamlPhot = self.photResults.yaml() yamlSpec = self.specResults.yaml() yamlFiles = self.relatedFilesResults.yaml() return yamlSources, yamlPhot, yamlSpec, yamlFiles
[ "def", "yaml", "(", "self", ",", "dirPath", "=", "None", ")", ":", "if", "dirPath", ":", "p", "=", "self", ".", "_file_prefix", "(", ")", "yamlSources", "=", "self", ".", "sourceResults", ".", "yaml", "(", "filepath", "=", "dirPath", "+", "\"/\"", "+...
*Render the results in yaml format* **Key Arguments:** - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None* **Return:** - `yamlSources` -- the top-level transient data - `yamlPhot` -- all photometry associated with the transients - `yamlSpec` -- all spectral data associated with the transients - `yamlFiles` -- all files associated with the matched transients found on the tns **Usage:** To render the results in yaml format: .. code-block:: python yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml() print yamlSources .. code-block:: text - TNSId: 2016asf TNSName: SN2016asf decDeg: 31.1126 decSex: '+31:06:45.36' discDate: '2016-03-06 08:09:36' discMag: '17.1' discMagFilter: V-Johnson discSurvey: ASAS-SN discoveryName: ASASSN-16cs hostName: KUG 0647+311 hostRedshift: null objectUrl: http://wis-tns.weizmann.ac.il/object/2016asf raDeg: 102.65304166666667 raSex: '06:50:36.73' separationArcsec: '0.66' separationEastArcsec: '-0.13' separationNorthArcsec: '0.65' specType: SN Ia transRedshift: '0.021' You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`. .. code-block:: python tns.yaml("~/tns") .. image:: https://i.imgur.com/ZpJIC6p.png :width: 800px :alt: yaml output
[ "*", "Render", "the", "results", "in", "yaml", "format", "*" ]
python
train
Crypto-toolbox/btfxwss
btfxwss/connection.py
https://github.com/Crypto-toolbox/btfxwss/blob/16827fa6aacb2c0e289aa852bf61a18df6905835/btfxwss/connection.py#L99-L109
def disconnect(self): """Disconnects from the websocket connection and joins the Thread. :return: """ self.log.debug("disconnect(): Disconnecting from API..") self.reconnect_required.clear() self.disconnect_called.set() if self.socket: self.socket.close() self.join(timeout=1)
[ "def", "disconnect", "(", "self", ")", ":", "self", ".", "log", ".", "debug", "(", "\"disconnect(): Disconnecting from API..\"", ")", "self", ".", "reconnect_required", ".", "clear", "(", ")", "self", ".", "disconnect_called", ".", "set", "(", ")", "if", "se...
Disconnects from the websocket connection and joins the Thread. :return:
[ "Disconnects", "from", "the", "websocket", "connection", "and", "joins", "the", "Thread", "." ]
python
test
vertexproject/synapse
synapse/lib/lmdblayer.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/lmdblayer.py#L259-L295
async def _storBuidSet(self, oper): ''' Migration-only method Notes: Precondition: buid cache must be disabled ''' assert self.buidcache.disabled _, (form, oldb, newb) = oper fenc = form.encode() + b'\x00' pvoldval = s_msgpack.en((oldb,)) pvnewval = s_msgpack.en((newb,)) for lkey, lval in self.layrslab.scanByPref(oldb, db=self.bybuid): proputf8 = lkey[32:] valu, indx = s_msgpack.un(lval) if indx is not None: # <prop><00><indx> propindx = proputf8 + b'\x00' + indx if proputf8[0] in (46, 35): # ".univ" or "#tag" self.layrslab.put(propindx, pvnewval, dupdata=True, db=self.byuniv) self.layrslab.delete(propindx, pvoldval, db=self.byuniv) bypropkey = fenc + propindx self.layrslab.put(bypropkey, pvnewval, db=self.byprop) self.layrslab.delete(bypropkey, pvoldval, db=self.byprop) self.layrslab.put(newb + proputf8, lval, db=self.bybuid) self.layrslab.delete(lkey, db=self.bybuid)
[ "async", "def", "_storBuidSet", "(", "self", ",", "oper", ")", ":", "assert", "self", ".", "buidcache", ".", "disabled", "_", ",", "(", "form", ",", "oldb", ",", "newb", ")", "=", "oper", "fenc", "=", "form", ".", "encode", "(", ")", "+", "b'\\x00'...
Migration-only method Notes: Precondition: buid cache must be disabled
[ "Migration", "-", "only", "method" ]
python
train
lucasmaystre/choix
choix/lsr.py
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L20-L33
def _ilsr(fun, params, max_iter, tol): """Iteratively refine LSR estimates until convergence. Raises ------ RuntimeError If the algorithm does not converge after ``max_iter`` iterations. """ converged = NormOfDifferenceTest(tol, order=1) for _ in range(max_iter): params = fun(initial_params=params) if converged(params): return params raise RuntimeError("Did not converge after {} iterations".format(max_iter))
[ "def", "_ilsr", "(", "fun", ",", "params", ",", "max_iter", ",", "tol", ")", ":", "converged", "=", "NormOfDifferenceTest", "(", "tol", ",", "order", "=", "1", ")", "for", "_", "in", "range", "(", "max_iter", ")", ":", "params", "=", "fun", "(", "i...
Iteratively refine LSR estimates until convergence. Raises ------ RuntimeError If the algorithm does not converge after ``max_iter`` iterations.
[ "Iteratively", "refine", "LSR", "estimates", "until", "convergence", "." ]
python
train
ethereum/web3.py
web3/middleware/fixture.py
https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/middleware/fixture.py#L35-L50
def construct_error_generator_middleware(error_generators): """ Constructs a middleware which intercepts requests for any method found in the provided mapping of endpoints to generator functions, returning whatever error message the generator function returns. Callbacks must be functions with the signature `fn(method, params)`. """ def error_generator_middleware(make_request, web3): def middleware(method, params): if method in error_generators: error_msg = error_generators[method](method, params) return {'error': error_msg} else: return make_request(method, params) return middleware return error_generator_middleware
[ "def", "construct_error_generator_middleware", "(", "error_generators", ")", ":", "def", "error_generator_middleware", "(", "make_request", ",", "web3", ")", ":", "def", "middleware", "(", "method", ",", "params", ")", ":", "if", "method", "in", "error_generators", ...
Constructs a middleware which intercepts requests for any method found in the provided mapping of endpoints to generator functions, returning whatever error message the generator function returns. Callbacks must be functions with the signature `fn(method, params)`.
[ "Constructs", "a", "middleware", "which", "intercepts", "requests", "for", "any", "method", "found", "in", "the", "provided", "mapping", "of", "endpoints", "to", "generator", "functions", "returning", "whatever", "error", "message", "the", "generator", "function", ...
python
train
trailofbits/manticore
manticore/platforms/evm.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/platforms/evm.py#L1564-L1570
def SLOAD(self, offset): """Load word from storage""" storage_address = self.address self._publish('will_evm_read_storage', storage_address, offset) value = self.world.get_storage_data(storage_address, offset) self._publish('did_evm_read_storage', storage_address, offset, value) return value
[ "def", "SLOAD", "(", "self", ",", "offset", ")", ":", "storage_address", "=", "self", ".", "address", "self", ".", "_publish", "(", "'will_evm_read_storage'", ",", "storage_address", ",", "offset", ")", "value", "=", "self", ".", "world", ".", "get_storage_d...
Load word from storage
[ "Load", "word", "from", "storage" ]
python
valid
cjdrake/pyeda
pyeda/boolalg/bfarray.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/bfarray.py#L628-L634
def restrict(self, point): """Apply the ``restrict`` method to all functions. Returns a new farray. """ items = [f.restrict(point) for f in self._items] return self.__class__(items, self.shape, self.ftype)
[ "def", "restrict", "(", "self", ",", "point", ")", ":", "items", "=", "[", "f", ".", "restrict", "(", "point", ")", "for", "f", "in", "self", ".", "_items", "]", "return", "self", ".", "__class__", "(", "items", ",", "self", ".", "shape", ",", "s...
Apply the ``restrict`` method to all functions. Returns a new farray.
[ "Apply", "the", "restrict", "method", "to", "all", "functions", "." ]
python
train
lionheart/django-pyodbc
django_pyodbc/operations.py
https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/operations.py#L470-L483
def adapt_decimalfield_value(self, value, max_digits, decimal_places): """ Transform a decimal.Decimal value to an object compatible with what is expected by the backend driver for decimal (numeric) columns. """ if value is None: return None if isinstance(value, decimal.Decimal): context = decimal.getcontext().copy() context.prec = max_digits #context.rounding = ROUND_FLOOR return "%.*f" % (decimal_places + 1, value.quantize(decimal.Decimal(".1") ** decimal_places, context=context)) else: return "%.*f" % (decimal_places + 1, value)
[ "def", "adapt_decimalfield_value", "(", "self", ",", "value", ",", "max_digits", ",", "decimal_places", ")", ":", "if", "value", "is", "None", ":", "return", "None", "if", "isinstance", "(", "value", ",", "decimal", ".", "Decimal", ")", ":", "context", "="...
Transform a decimal.Decimal value to an object compatible with what is expected by the backend driver for decimal (numeric) columns.
[ "Transform", "a", "decimal", ".", "Decimal", "value", "to", "an", "object", "compatible", "with", "what", "is", "expected", "by", "the", "backend", "driver", "for", "decimal", "(", "numeric", ")", "columns", "." ]
python
train
LonamiWebs/Telethon
telethon/network/authenticator.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/network/authenticator.py#L22-L169
async def do_authentication(sender): """ Executes the authentication process with the Telegram servers. :param sender: a connected `MTProtoPlainSender`. :return: returns a (authorization key, time offset) tuple. """ # Step 1 sending: PQ Request, endianness doesn't matter since it's random nonce = int.from_bytes(os.urandom(16), 'big', signed=True) res_pq = await sender.send(ReqPqMultiRequest(nonce)) assert isinstance(res_pq, ResPQ), 'Step 1 answer was %s' % res_pq if res_pq.nonce != nonce: raise SecurityError('Step 1 invalid nonce from server') pq = get_int(res_pq.pq) # Step 2 sending: DH Exchange p, q = Factorization.factorize(pq) p, q = rsa.get_byte_array(p), rsa.get_byte_array(q) new_nonce = int.from_bytes(os.urandom(32), 'little', signed=True) pq_inner_data = bytes(PQInnerData( pq=rsa.get_byte_array(pq), p=p, q=q, nonce=res_pq.nonce, server_nonce=res_pq.server_nonce, new_nonce=new_nonce )) # sha_digest + data + random_bytes cipher_text, target_fingerprint = None, None for fingerprint in res_pq.server_public_key_fingerprints: cipher_text = rsa.encrypt(fingerprint, pq_inner_data) if cipher_text is not None: target_fingerprint = fingerprint break if cipher_text is None: raise SecurityError( 'Step 2 could not find a valid key for fingerprints: {}' .format(', '.join( [str(f) for f in res_pq.server_public_key_fingerprints]) ) ) server_dh_params = await sender.send(ReqDHParamsRequest( nonce=res_pq.nonce, server_nonce=res_pq.server_nonce, p=p, q=q, public_key_fingerprint=target_fingerprint, encrypted_data=cipher_text )) assert isinstance( server_dh_params, (ServerDHParamsOk, ServerDHParamsFail)),\ 'Step 2.1 answer was %s' % server_dh_params if server_dh_params.nonce != res_pq.nonce: raise SecurityError('Step 2 invalid nonce from server') if server_dh_params.server_nonce != res_pq.server_nonce: raise SecurityError('Step 2 invalid server nonce from server') if isinstance(server_dh_params, ServerDHParamsFail): nnh = int.from_bytes( sha1(new_nonce.to_bytes(32, 'little', 
signed=True)).digest()[4:20], 'little', signed=True ) if server_dh_params.new_nonce_hash != nnh: raise SecurityError('Step 2 invalid DH fail nonce from server') assert isinstance(server_dh_params, ServerDHParamsOk),\ 'Step 2.2 answer was %s' % server_dh_params # Step 3 sending: Complete DH Exchange key, iv = helpers.generate_key_data_from_nonce( res_pq.server_nonce, new_nonce ) if len(server_dh_params.encrypted_answer) % 16 != 0: # See PR#453 raise SecurityError('Step 3 AES block size mismatch') plain_text_answer = AES.decrypt_ige( server_dh_params.encrypted_answer, key, iv ) with BinaryReader(plain_text_answer) as reader: reader.read(20) # hash sum server_dh_inner = reader.tgread_object() assert isinstance(server_dh_inner, ServerDHInnerData),\ 'Step 3 answer was %s' % server_dh_inner if server_dh_inner.nonce != res_pq.nonce: raise SecurityError('Step 3 Invalid nonce in encrypted answer') if server_dh_inner.server_nonce != res_pq.server_nonce: raise SecurityError('Step 3 Invalid server nonce in encrypted answer') dh_prime = get_int(server_dh_inner.dh_prime, signed=False) g_a = get_int(server_dh_inner.g_a, signed=False) time_offset = server_dh_inner.server_time - int(time.time()) b = get_int(os.urandom(256), signed=False) gb = pow(server_dh_inner.g, b, dh_prime) gab = pow(g_a, b, dh_prime) # Prepare client DH Inner Data client_dh_inner = bytes(ClientDHInnerData( nonce=res_pq.nonce, server_nonce=res_pq.server_nonce, retry_id=0, # TODO Actual retry ID g_b=rsa.get_byte_array(gb) )) client_dh_inner_hashed = sha1(client_dh_inner).digest() + client_dh_inner # Encryption client_dh_encrypted = AES.encrypt_ige(client_dh_inner_hashed, key, iv) # Prepare Set client DH params dh_gen = await sender.send(SetClientDHParamsRequest( nonce=res_pq.nonce, server_nonce=res_pq.server_nonce, encrypted_data=client_dh_encrypted, )) nonce_types = (DhGenOk, DhGenRetry, DhGenFail) assert isinstance(dh_gen, nonce_types), 'Step 3.1 answer was %s' % dh_gen name = dh_gen.__class__.__name__ if 
dh_gen.nonce != res_pq.nonce: raise SecurityError('Step 3 invalid {} nonce from server'.format(name)) if dh_gen.server_nonce != res_pq.server_nonce: raise SecurityError( 'Step 3 invalid {} server nonce from server'.format(name)) auth_key = AuthKey(rsa.get_byte_array(gab)) nonce_number = 1 + nonce_types.index(type(dh_gen)) new_nonce_hash = auth_key.calc_new_nonce_hash(new_nonce, nonce_number) dh_hash = getattr(dh_gen, 'new_nonce_hash{}'.format(nonce_number)) if dh_hash != new_nonce_hash: raise SecurityError('Step 3 invalid new nonce hash') if not isinstance(dh_gen, DhGenOk): raise AssertionError('Step 3.2 answer was %s' % dh_gen) return auth_key, time_offset
[ "async", "def", "do_authentication", "(", "sender", ")", ":", "# Step 1 sending: PQ Request, endianness doesn't matter since it's random", "nonce", "=", "int", ".", "from_bytes", "(", "os", ".", "urandom", "(", "16", ")", ",", "'big'", ",", "signed", "=", "True", ...
Executes the authentication process with the Telegram servers. :param sender: a connected `MTProtoPlainSender`. :return: returns a (authorization key, time offset) tuple.
[ "Executes", "the", "authentication", "process", "with", "the", "Telegram", "servers", "." ]
python
train
wbond/oscrypto
oscrypto/_osx/_core_foundation_cffi.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_osx/_core_foundation_cffi.py#L284-L297
def cf_data_to_bytes(value): """ Extracts a bytestring from a CFData object :param value: A CFData object :return: A byte string """ start = CoreFoundation.CFDataGetBytePtr(value) num_bytes = CoreFoundation.CFDataGetLength(value) return ffi.buffer(start, num_bytes)[:]
[ "def", "cf_data_to_bytes", "(", "value", ")", ":", "start", "=", "CoreFoundation", ".", "CFDataGetBytePtr", "(", "value", ")", "num_bytes", "=", "CoreFoundation", ".", "CFDataGetLength", "(", "value", ")", "return", "ffi", ".", "buffer", "(", "start", ",", "...
Extracts a bytestring from a CFData object :param value: A CFData object :return: A byte string
[ "Extracts", "a", "bytestring", "from", "a", "CFData", "object" ]
python
valid
pywbem/pywbem
pywbem_mock/_mockmofwbemconnection.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem_mock/_mockmofwbemconnection.py#L147-L158
def _get_class(self, superclass, namespace=None, local_only=False, include_qualifiers=True, include_classorigin=True): """ This method is just rename of GetClass to support same method with both MOFWBEMConnection and FakedWBEMConnection """ return self.GetClass(superclass, namespace=namespace, local_only=local_only, include_qualifiers=include_qualifiers, include_classorigin=include_classorigin)
[ "def", "_get_class", "(", "self", ",", "superclass", ",", "namespace", "=", "None", ",", "local_only", "=", "False", ",", "include_qualifiers", "=", "True", ",", "include_classorigin", "=", "True", ")", ":", "return", "self", ".", "GetClass", "(", "superclas...
This method is just rename of GetClass to support same method with both MOFWBEMConnection and FakedWBEMConnection
[ "This", "method", "is", "just", "rename", "of", "GetClass", "to", "support", "same", "method", "with", "both", "MOFWBEMConnection", "and", "FakedWBEMConnection" ]
python
train
LCAV/pylocus
pylocus/opt_space.py
https://github.com/LCAV/pylocus/blob/c56a38c251d8a435caf4641a8ae6027ecba2c8c6/pylocus/opt_space.py#L124-L144
def svds_descending(M, k): ''' In contrast to MATLAB, numpy's svds() arranges the singular values in ascending order. In order to have matching codes, we wrap it around by a function which re-sorts the singular values and singular vectors. Args: M: 2D numpy array; the matrix whose SVD is to be computed. k: Number of singular values to be computed. Returns: u, s, vt = svds(M, k=k) ''' u, s, vt = svds(M, k=k) # reverse columns of u u = u[:, ::-1] # reverse s s = s[::-1] # reverse rows of vt vt = vt[::-1, :] return u, np.diag(s), vt.T
[ "def", "svds_descending", "(", "M", ",", "k", ")", ":", "u", ",", "s", ",", "vt", "=", "svds", "(", "M", ",", "k", "=", "k", ")", "# reverse columns of u\r", "u", "=", "u", "[", ":", ",", ":", ":", "-", "1", "]", "# reverse s\r", "s", "=", "s...
In contrast to MATLAB, numpy's svds() arranges the singular values in ascending order. In order to have matching codes, we wrap it around by a function which re-sorts the singular values and singular vectors. Args: M: 2D numpy array; the matrix whose SVD is to be computed. k: Number of singular values to be computed. Returns: u, s, vt = svds(M, k=k)
[ "In", "contrast", "to", "MATLAB", "numpy", "s", "svds", "()", "arranges", "the", "singular", "values", "in", "ascending", "order", ".", "In", "order", "to", "have", "matching", "codes", "we", "wrap", "it", "around", "by", "a", "function", "which", "re", ...
python
train
jonhadfield/creds
lib/creds/utils.py
https://github.com/jonhadfield/creds/blob/b2053b43516cf742c6e4c2b79713bc625592f47c/lib/creds/utils.py#L121-L157
def write_sudoers_entry(username=None, sudoers_entry=None): """Write sudoers entry. args: user (User): Instance of User containing sudoers entry. returns: str: sudoers entry for the specified user. """ sudoers_path = '/etc/sudoers' rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH) tmp_sudoers_path = '/tmp/sudoers_{0}'.format(rnd_chars) execute_command( shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), sudoers_path, tmp_sudoers_path)))) execute_command( shlex.split(str('{0} chmod 777 {1}'.format(sudo_check(), tmp_sudoers_path)))) with open(tmp_sudoers_path, mode=text_type('r')) as tmp_sudoers_file: sudoers_entries = tmp_sudoers_file.readlines() sudoers_output = list() for entry in sudoers_entries: if entry and not entry.startswith(username): sudoers_output.append(entry) if sudoers_entry: sudoers_output.append('{0} {1}'.format(username, sudoers_entry)) sudoers_output.append('\n') with open(tmp_sudoers_path, mode=text_type('w+')) as tmp_sudoers_file: tmp_sudoers_file.writelines(sudoers_output) sudoers_check_result = execute_command( shlex.split(str('{0} {1} -cf {2}'.format(sudo_check(), LINUX_CMD_VISUDO, tmp_sudoers_path)))) if sudoers_check_result[1] > 0: raise ValueError(sudoers_check_result[0][1]) execute_command( shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), tmp_sudoers_path, sudoers_path)))) execute_command(shlex.split(str('{0} chown root:root {1}'.format(sudo_check(), sudoers_path)))) execute_command(shlex.split(str('{0} chmod 440 {1}'.format(sudo_check(), sudoers_path)))) execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_sudoers_path))))
[ "def", "write_sudoers_entry", "(", "username", "=", "None", ",", "sudoers_entry", "=", "None", ")", ":", "sudoers_path", "=", "'/etc/sudoers'", "rnd_chars", "=", "random_string", "(", "length", "=", "RANDOM_FILE_EXT_LENGTH", ")", "tmp_sudoers_path", "=", "'/tmp/sudo...
Write sudoers entry. args: user (User): Instance of User containing sudoers entry. returns: str: sudoers entry for the specified user.
[ "Write", "sudoers", "entry", "." ]
python
train
kylef/refract.py
refract/elements/base.py
https://github.com/kylef/refract.py/blob/f58ddf619038b580ab50c2e7f867d59d153eabbb/refract/elements/base.py#L206-L215
def recursive_children(self): """ Generator returning all recursive children elements. """ for child in self.children: yield child for recursive_child in child.recursive_children: yield recursive_child
[ "def", "recursive_children", "(", "self", ")", ":", "for", "child", "in", "self", ".", "children", ":", "yield", "child", "for", "recursive_child", "in", "child", ".", "recursive_children", ":", "yield", "recursive_child" ]
Generator returning all recursive children elements.
[ "Generator", "returning", "all", "recursive", "children", "elements", "." ]
python
train
SiLab-Bonn/pyBAR_fei4_interpreter
pybar_fei4_interpreter/data_struct.py
https://github.com/SiLab-Bonn/pyBAR_fei4_interpreter/blob/0f8df18557598d6db0c64baa708e587c84bb787b/pybar_fei4_interpreter/data_struct.py#L27-L45
def generate_scan_parameter_description(scan_parameters): '''Generate scan parameter dictionary. This is the only way to dynamically create table with dictionary, cannot be done with tables.IsDescription Parameters ---------- scan_parameters : list, tuple List of scan parameters names (strings). Returns ------- table_description : dict Table description. Usage ----- pytables.createTable(self.raw_data_file_h5.root, name = 'scan_parameters', description = generate_scan_parameter_description(['PlsrDAC']), title = 'scan_parameters', filters = filter_tables) ''' table_description = np.dtype([(key, tb.Int32Col(pos=idx)) for idx, key in enumerate(scan_parameters)]) return table_description
[ "def", "generate_scan_parameter_description", "(", "scan_parameters", ")", ":", "table_description", "=", "np", ".", "dtype", "(", "[", "(", "key", ",", "tb", ".", "Int32Col", "(", "pos", "=", "idx", ")", ")", "for", "idx", ",", "key", "in", "enumerate", ...
Generate scan parameter dictionary. This is the only way to dynamically create table with dictionary, cannot be done with tables.IsDescription Parameters ---------- scan_parameters : list, tuple List of scan parameters names (strings). Returns ------- table_description : dict Table description. Usage ----- pytables.createTable(self.raw_data_file_h5.root, name = 'scan_parameters', description = generate_scan_parameter_description(['PlsrDAC']), title = 'scan_parameters', filters = filter_tables)
[ "Generate", "scan", "parameter", "dictionary", ".", "This", "is", "the", "only", "way", "to", "dynamically", "create", "table", "with", "dictionary", "cannot", "be", "done", "with", "tables", ".", "IsDescription" ]
python
train
cpenv/cpenv
cpenv/cli.py
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/cli.py#L438-L483
def remove(name, local): '''Remove a module named NAME. Will remove the first resolved module named NAME. You can also specify a full path to a module. Use the --local option to ensure removal of modules local to the currently active environment.''' click.echo() if not local: # Use resolver to find module try: r = cpenv.resolve(name) except cpenv.ResolveError as e: click.echo(e) return obj = r.resolved[0] else: # Try to find module in active environment env = cpenv.get_active_env() if not env: click.echo('You must activate an env to remove local modules') return mod = env.get_module(name) if not mod: click.echo('Failed to resolve module: ' + name) return obj = mod if isinstance(obj, cpenv.VirtualEnvironment): click.echo('{} is an environment. Use `cpenv remove` instead.') return click.echo(format_objects([obj])) click.echo() user_confirmed = click.confirm( red('Are you sure you want to remove this module?') ) if user_confirmed: click.echo('Attempting to remove...', nl=False) try: obj.remove() except Exception as e: click.echo(bold_red('FAILED')) click.echo(e) else: click.echo(bold_green('OK!'))
[ "def", "remove", "(", "name", ",", "local", ")", ":", "click", ".", "echo", "(", ")", "if", "not", "local", ":", "# Use resolver to find module", "try", ":", "r", "=", "cpenv", ".", "resolve", "(", "name", ")", "except", "cpenv", ".", "ResolveError", "...
Remove a module named NAME. Will remove the first resolved module named NAME. You can also specify a full path to a module. Use the --local option to ensure removal of modules local to the currently active environment.
[ "Remove", "a", "module", "named", "NAME", ".", "Will", "remove", "the", "first", "resolved", "module", "named", "NAME", ".", "You", "can", "also", "specify", "a", "full", "path", "to", "a", "module", ".", "Use", "the", "--", "local", "option", "to", "e...
python
valid
jtwhite79/pyemu
pyemu/la.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/la.py#L1013-L1030
def adjust_obscov_resfile(self, resfile=None): """reset the elements of obscov by scaling the implied weights based on the phi components in res_file so that the total phi is equal to the number of non-zero weights. Parameters ---------- resfile : str residual file to use. If None, residual file with case name is sought. default is None Note ---- calls pyemu.Pst.adjust_weights_resfile() """ self.pst.adjust_weights_resfile(resfile) self.__obscov.from_observation_data(self.pst)
[ "def", "adjust_obscov_resfile", "(", "self", ",", "resfile", "=", "None", ")", ":", "self", ".", "pst", ".", "adjust_weights_resfile", "(", "resfile", ")", "self", ".", "__obscov", ".", "from_observation_data", "(", "self", ".", "pst", ")" ]
reset the elements of obscov by scaling the implied weights based on the phi components in res_file so that the total phi is equal to the number of non-zero weights. Parameters ---------- resfile : str residual file to use. If None, residual file with case name is sought. default is None Note ---- calls pyemu.Pst.adjust_weights_resfile()
[ "reset", "the", "elements", "of", "obscov", "by", "scaling", "the", "implied", "weights", "based", "on", "the", "phi", "components", "in", "res_file", "so", "that", "the", "total", "phi", "is", "equal", "to", "the", "number", "of", "non", "-", "zero", "w...
python
train
infothrill/python-launchd
launchd/cmd.py
https://github.com/infothrill/python-launchd/blob/2cd50579e808851b116f5a26f9b871a32b65ce0e/launchd/cmd.py#L8-L27
def launchctl(subcommand, *args): ''' A minimal wrapper to call the launchctl binary and capture the output :param subcommand: string ''' if not isinstance(subcommand, six.string_types): raise ValueError("Argument is invalid: %r" % repr(subcommand)) if isinstance(subcommand, six.text_type): subcommand = subcommand.encode('utf-8') cmd = ["launchctl", subcommand] for arg in args: if isinstance(arg, six.string_types): if isinstance(arg, six.text_type): cmd.append(arg.encode('utf-8')) else: cmd.append(arg) else: raise ValueError("Argument is invalid: %r" % repr(arg)) return subprocess.check_output(cmd, stdin=None, stderr=subprocess.STDOUT, shell=False)
[ "def", "launchctl", "(", "subcommand", ",", "*", "args", ")", ":", "if", "not", "isinstance", "(", "subcommand", ",", "six", ".", "string_types", ")", ":", "raise", "ValueError", "(", "\"Argument is invalid: %r\"", "%", "repr", "(", "subcommand", ")", ")", ...
A minimal wrapper to call the launchctl binary and capture the output :param subcommand: string
[ "A", "minimal", "wrapper", "to", "call", "the", "launchctl", "binary", "and", "capture", "the", "output", ":", "param", "subcommand", ":", "string" ]
python
train
penguinmenac3/starttf
starttf/data/autorecords.py
https://github.com/penguinmenac3/starttf/blob/f4086489d169757c0504e822165db2fea534b944/starttf/data/autorecords.py#L170-L208
def _read_data_legacy(prefix, batch_size): """ Loads a tf record as tensors you can use. :param prefix: The path prefix as defined in the write data method. :param batch_size: The batch size you want for the tensors. :return: A feature tensor dict and a label tensor dict. """ prefix = prefix.replace("\\", "/") folder = "/".join(prefix.split("/")[:-1]) phase = prefix.split("/")[-1] config = json.load(open(prefix + '_config.json')) num_threads = config["num_threads"] filenames = [folder + "/" + f for f in listdir(folder) if isfile(join(folder, f)) and phase in f and not "config.json" in f] # Create a tf object for the filename list and the readers. filename_queue = tf.train.string_input_producer(filenames) readers = [_read_tf_record(filename_queue, config) for _ in range(num_threads)] batch_dict = tf.train.shuffle_batch_join( readers, batch_size=batch_size, capacity=10 * batch_size, min_after_dequeue=5 * batch_size ) # Add batch dimension to feature and label shape feature_batch = {} label_batch = {} for k in batch_dict.keys(): shape = tuple([batch_size] + list(config[k]["shape"])) tensor = tf.reshape(batch_dict[k], shape, name="input/"+phase+"/" + k + "_reshape") if "feature_" in k: feature_batch["_".join(k.split("_")[1:])] = tensor if "label_" in k: label_batch["_".join(k.split("_")[1:])] = tensor return feature_batch, label_batch
[ "def", "_read_data_legacy", "(", "prefix", ",", "batch_size", ")", ":", "prefix", "=", "prefix", ".", "replace", "(", "\"\\\\\"", ",", "\"/\"", ")", "folder", "=", "\"/\"", ".", "join", "(", "prefix", ".", "split", "(", "\"/\"", ")", "[", ":", "-", "...
Loads a tf record as tensors you can use. :param prefix: The path prefix as defined in the write data method. :param batch_size: The batch size you want for the tensors. :return: A feature tensor dict and a label tensor dict.
[ "Loads", "a", "tf", "record", "as", "tensors", "you", "can", "use", ".", ":", "param", "prefix", ":", "The", "path", "prefix", "as", "defined", "in", "the", "write", "data", "method", ".", ":", "param", "batch_size", ":", "The", "batch", "size", "you",...
python
train
Unidata/MetPy
metpy/calc/kinematics.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/calc/kinematics.py#L447-L480
def ageostrophic_wind(heights, f, dx, dy, u, v, dim_order='yx'): r"""Calculate the ageostrophic wind given from the heights or geopotential. Parameters ---------- heights : (M, N) ndarray The height field. f : array_like The coriolis parameter. This can be a scalar to be applied everywhere or an array of values. dx : float or ndarray The grid spacing(s) in the x-direction. If an array, there should be one item less than the size of `heights` along the applicable axis. dy : float or ndarray The grid spacing(s) in the y-direction. If an array, there should be one item less than the size of `heights` along the applicable axis. u : (M, N) ndarray The u wind field. v : (M, N) ndarray The u wind field. Returns ------- A 2-item tuple of arrays A tuple of the u-component and v-component of the ageostrophic wind. Notes ----- If inputs have more than two dimensions, they are assumed to have either leading dimensions of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``. """ u_geostrophic, v_geostrophic = geostrophic_wind(heights, f, dx, dy, dim_order=dim_order) return u - u_geostrophic, v - v_geostrophic
[ "def", "ageostrophic_wind", "(", "heights", ",", "f", ",", "dx", ",", "dy", ",", "u", ",", "v", ",", "dim_order", "=", "'yx'", ")", ":", "u_geostrophic", ",", "v_geostrophic", "=", "geostrophic_wind", "(", "heights", ",", "f", ",", "dx", ",", "dy", "...
r"""Calculate the ageostrophic wind given from the heights or geopotential. Parameters ---------- heights : (M, N) ndarray The height field. f : array_like The coriolis parameter. This can be a scalar to be applied everywhere or an array of values. dx : float or ndarray The grid spacing(s) in the x-direction. If an array, there should be one item less than the size of `heights` along the applicable axis. dy : float or ndarray The grid spacing(s) in the y-direction. If an array, there should be one item less than the size of `heights` along the applicable axis. u : (M, N) ndarray The u wind field. v : (M, N) ndarray The u wind field. Returns ------- A 2-item tuple of arrays A tuple of the u-component and v-component of the ageostrophic wind. Notes ----- If inputs have more than two dimensions, they are assumed to have either leading dimensions of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
[ "r", "Calculate", "the", "ageostrophic", "wind", "given", "from", "the", "heights", "or", "geopotential", "." ]
python
train
jason-weirather/py-seq-tools
seqtools/structure/transcript/__init__.py
https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/structure/transcript/__init__.py#L588-L590
def get_range_string(self): """Another string representation of the junction. these may be redundant.""" return self.left.chr+":"+str(self.left.end)+'/'+self.right.chr+":"+str(self.right.start)
[ "def", "get_range_string", "(", "self", ")", ":", "return", "self", ".", "left", ".", "chr", "+", "\":\"", "+", "str", "(", "self", ".", "left", ".", "end", ")", "+", "'/'", "+", "self", ".", "right", ".", "chr", "+", "\":\"", "+", "str", "(", ...
Another string representation of the junction. these may be redundant.
[ "Another", "string", "representation", "of", "the", "junction", ".", "these", "may", "be", "redundant", "." ]
python
train
EpistasisLab/scikit-rebate
skrebate/relieff.py
https://github.com/EpistasisLab/scikit-rebate/blob/67dab51a7525fa5d076b059f1e6f8cff7481c1ef/skrebate/relieff.py#L277-L299
def _get_attribute_info(self): """ Preprocess the training dataset to identify which features/attributes are discrete vs. continuous valued. Ignores missing values in this determination.""" attr = dict() d = 0 limit = self.discrete_threshold w = self._X.transpose() for idx in range(len(w)): h = self._headers[idx] z = w[idx] if self._missing_data_count > 0: z = z[np.logical_not(np.isnan(z))] # Exclude any missing values from consideration zlen = len(np.unique(z)) if zlen <= limit: attr[h] = ('discrete', 0, 0, 0, 0) d += 1 else: mx = np.max(z) mn = np.min(z) sd = np.std(z) attr[h] = ('continuous', mx, mn, mx - mn, sd) # For each feature/attribute we store (type, max value, min value, max min difference, average, standard deviation) - the latter three values are set to zero if feature is discrete. return attr
[ "def", "_get_attribute_info", "(", "self", ")", ":", "attr", "=", "dict", "(", ")", "d", "=", "0", "limit", "=", "self", ".", "discrete_threshold", "w", "=", "self", ".", "_X", ".", "transpose", "(", ")", "for", "idx", "in", "range", "(", "len", "(...
Preprocess the training dataset to identify which features/attributes are discrete vs. continuous valued. Ignores missing values in this determination.
[ "Preprocess", "the", "training", "dataset", "to", "identify", "which", "features", "/", "attributes", "are", "discrete", "vs", ".", "continuous", "valued", ".", "Ignores", "missing", "values", "in", "this", "determination", "." ]
python
train
ActivisionGameScience/assertpy
assertpy/assertpy.py
https://github.com/ActivisionGameScience/assertpy/blob/08d799cdb01f9a25d3e20672efac991c7bc26d79/assertpy/assertpy.py#L927-L932
def is_directory(self): """Asserts that val is an existing path to a directory.""" self.exists() if not os.path.isdir(self.val): self._err('Expected <%s> to be a directory, but was not.' % self.val) return self
[ "def", "is_directory", "(", "self", ")", ":", "self", ".", "exists", "(", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "self", ".", "val", ")", ":", "self", ".", "_err", "(", "'Expected <%s> to be a directory, but was not.'", "%", "self", ".",...
Asserts that val is an existing path to a directory.
[ "Asserts", "that", "val", "is", "an", "existing", "path", "to", "a", "directory", "." ]
python
valid
praekelt/jmbo-competition
competition/admin.py
https://github.com/praekelt/jmbo-competition/blob/7efdc6d2d57077229108e7eb2ae99f87c32210ee/competition/admin.py#L152-L163
def get_urls(self): """ Extend the admin urls for the CompetitionEntryAdmin model to be able to invoke a CSV export view on the admin model """ urls = super(CompetitionEntryAdmin, self).get_urls() csv_urls = patterns('', url( r'^exportcsv/$', self.admin_site.admin_view(self.csv_export), name='competition-csv-export' ) ) return csv_urls + urls
[ "def", "get_urls", "(", "self", ")", ":", "urls", "=", "super", "(", "CompetitionEntryAdmin", ",", "self", ")", ".", "get_urls", "(", ")", "csv_urls", "=", "patterns", "(", "''", ",", "url", "(", "r'^exportcsv/$'", ",", "self", ".", "admin_site", ".", ...
Extend the admin urls for the CompetitionEntryAdmin model to be able to invoke a CSV export view on the admin model
[ "Extend", "the", "admin", "urls", "for", "the", "CompetitionEntryAdmin", "model", "to", "be", "able", "to", "invoke", "a", "CSV", "export", "view", "on", "the", "admin", "model" ]
python
train
digidotcom/python-suitcase
suitcase/fields.py
https://github.com/digidotcom/python-suitcase/blob/b53681a33efd350daf1b63094b1d21587e45a806/suitcase/fields.py#L87-L91
def _ph2f(self, placeholder): """Lookup a field given a field placeholder""" if issubclass(placeholder.cls, FieldAccessor): return placeholder.cls.access(self._parent, placeholder) return self._parent.lookup_field_by_placeholder(placeholder)
[ "def", "_ph2f", "(", "self", ",", "placeholder", ")", ":", "if", "issubclass", "(", "placeholder", ".", "cls", ",", "FieldAccessor", ")", ":", "return", "placeholder", ".", "cls", ".", "access", "(", "self", ".", "_parent", ",", "placeholder", ")", "retu...
Lookup a field given a field placeholder
[ "Lookup", "a", "field", "given", "a", "field", "placeholder" ]
python
train
miguelgrinberg/Flask-Migrate
flask_migrate/templates/flask-multidb/env.py
https://github.com/miguelgrinberg/Flask-Migrate/blob/65fbd978681bdf2eddf8940edd04ed7272a94480/flask_migrate/templates/flask-multidb/env.py#L94-L169
def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ # this callback is used to prevent an auto-migration from being generated # when there are no changes to the schema # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html def process_revision_directives(context, revision, directives): if getattr(config.cmd_opts, 'autogenerate', False): script = directives[0] if len(script.upgrade_ops_list) >= len(bind_names) + 1: empty = True for upgrade_ops in script.upgrade_ops_list: if not upgrade_ops.is_empty(): empty = False if empty: directives[:] = [] logger.info('No changes in schema detected.') # for the direct-to-DB use case, start a transaction on all # engines, then run all migrations, then commit all transactions. engines = { '': { 'engine': engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool, ) } } for name in bind_names: engines[name] = rec = {} rec['engine'] = engine_from_config( context.config.get_section(name), prefix='sqlalchemy.', poolclass=pool.NullPool) for name, rec in engines.items(): engine = rec['engine'] rec['connection'] = conn = engine.connect() if USE_TWOPHASE: rec['transaction'] = conn.begin_twophase() else: rec['transaction'] = conn.begin() try: for name, rec in engines.items(): logger.info("Migrating database %s" % (name or '<default>')) context.configure( connection=rec['connection'], upgrade_token="%s_upgrades" % name, downgrade_token="%s_downgrades" % name, target_metadata=get_metadata(name), process_revision_directives=process_revision_directives, **current_app.extensions['migrate'].configure_args ) context.run_migrations(engine_name=name) if USE_TWOPHASE: for rec in engines.values(): rec['transaction'].prepare() for rec in engines.values(): rec['transaction'].commit() except: for rec in engines.values(): rec['transaction'].rollback() raise finally: for rec in 
engines.values(): rec['connection'].close()
[ "def", "run_migrations_online", "(", ")", ":", "# this callback is used to prevent an auto-migration from being generated", "# when there are no changes to the schema", "# reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html", "def", "process_revision_directives", "(", "context",...
Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.
[ "Run", "migrations", "in", "online", "mode", "." ]
python
train
senseobservationsystems/commonsense-python-lib
senseapi.py
https://github.com/senseobservationsystems/commonsense-python-lib/blob/aac59a1751ef79eb830b3ca1fab6ef2c83931f87/senseapi.py#L1668-L1681
def GroupsSensorsGet(self, group_id, parameters):
    """Retrieve sensors shared within the group.

    @param group_id (int) - Id of the group to retrieve sensors from
    @param parameters (dictionary) - Additional parameters for the call

    @return (bool) - Boolean indicating whether GroupsSensorsGet was successful
    """
    endpoint = "/groups/{0}/sensors.json".format(group_id)
    # Guard clause: record the failure reason and bail out early.
    if not self.__SenseApiCall(endpoint, "GET", parameters = parameters):
        self.__error__ = "api call unsuccessful"
        return False
    return True
[ "def", "GroupsSensorsGet", "(", "self", ",", "group_id", ",", "parameters", ")", ":", "if", "self", ".", "__SenseApiCall", "(", "\"/groups/{0}/sensors.json\"", ".", "format", "(", "group_id", ")", ",", "\"GET\"", ",", "parameters", "=", "parameters", ")", ":",...
Retrieve sensors shared within the group. @param group_id (int) - Id of the group to retrieve sensors from @param parameters (dictionary) - Additional parameters for the call @return (bool) - Boolean indicating whether GroupsSensorsGet was successful
[ "Retrieve", "sensors", "shared", "within", "the", "group", "." ]
python
train
StanfordVL/robosuite
robosuite/models/tasks/pick_place_task.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/models/tasks/pick_place_task.py#L45-L51
def merge_arena(self, mujoco_arena):
    """Adds arena model to the MJCF model.

    Caches the arena's bin geometry on this task for later placement
    computations, then merges the arena's XML into this model.
    """
    arena = mujoco_arena
    self.arena = arena
    # Copy out bin placement/size info for convenient access later.
    self.bin_offset = arena.bin_abs
    self.bin_size = arena.table_full_size
    self.bin2_body = arena.bin2_body
    self.merge(arena)
[ "def", "merge_arena", "(", "self", ",", "mujoco_arena", ")", ":", "self", ".", "arena", "=", "mujoco_arena", "self", ".", "bin_offset", "=", "mujoco_arena", ".", "bin_abs", "self", ".", "bin_size", "=", "mujoco_arena", ".", "table_full_size", "self", ".", "b...
Adds arena model to the MJCF model.
[ "Adds", "arena", "model", "to", "the", "MJCF", "model", "." ]
python
train
mitsei/dlkit
dlkit/handcar/relationship/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/relationship/objects.py#L308-L327
def get_next_family(self):
    """Gets the next ``Family`` in this list.

    return: (osid.relationship.Family) - the next ``Family`` in this
            list. The ``has_next()`` method should be used to test that
            a next ``Family`` is available before calling this method.
    raise: IllegalState - no more elements available in this list
    raise: OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*

    """
    try:
        family = next(self)
    except StopIteration:
        # Iterator exhausted: callers should have used has_next() first.
        raise IllegalState('no more elements available in this list')
    except Exception:  # TODO: narrow to the specific failure exceptions
        raise OperationFailed()
    return family
[ "def", "get_next_family", "(", "self", ")", ":", "try", ":", "next_object", "=", "next", "(", "self", ")", "except", "StopIteration", ":", "raise", "IllegalState", "(", "'no more elements available in this list'", ")", "except", "Exception", ":", "# Need to specify ...
Gets the next ``Family`` in this list. return: (osid.relationship.Family) - the next ``Family`` in this list. The ``has_next()`` method should be used to test that a next ``Family`` is available before calling this method. raise: IllegalState - no more elements available in this list raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "next", "Family", "in", "this", "list", "." ]
python
train
bachya/pypollencom
pypollencom/decorators.py
https://github.com/bachya/pypollencom/blob/d1616a8471b350953d4f99f5a1dddca035977366/pypollencom/decorators.py#L7-L16
def raise_on_invalid_zip(func: Callable) -> Callable:
    """Raise an exception when there's no data (via a bad ZIP code).

    Wraps an async API-fetching coroutine; if the response's
    ``Location.periods`` list is empty, Pollen.com had no data for the
    requested ZIP code, so an ``InvalidZipError`` is raised instead of
    returning the empty payload.
    """
    # Local import keeps the decorator self-contained.
    import functools

    # functools.wraps preserves the wrapped coroutine's __name__/__doc__,
    # so logging and introspection see the real function, not 'decorator'.
    @functools.wraps(func)
    async def decorator(*args: list, **kwargs: dict) -> dict:
        """Decorate."""
        data = await func(*args, **kwargs)
        # An empty period list is how the API signals an unknown ZIP code.
        if not data['Location']['periods']:
            raise InvalidZipError('No data returned for ZIP code')
        return data
    return decorator
[ "def", "raise_on_invalid_zip", "(", "func", ":", "Callable", ")", "->", "Callable", ":", "async", "def", "decorator", "(", "*", "args", ":", "list", ",", "*", "*", "kwargs", ":", "dict", ")", "->", "dict", ":", "\"\"\"Decorate.\"\"\"", "data", "=", "awai...
Raise an exception when there's no data (via a bad ZIP code).
[ "Raise", "an", "exception", "when", "there", "s", "no", "data", "(", "via", "a", "bad", "ZIP", "code", ")", "." ]
python
train