repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
pyQode/pyqode.core
pyqode/core/api/client.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/api/client.py#L334-L340
def _on_ready_read(self): """ Read bytes when ready read """ while self.bytesAvailable(): if not self._header_complete: self._read_header() else: self._read_payload()
[ "def", "_on_ready_read", "(", "self", ")", ":", "while", "self", ".", "bytesAvailable", "(", ")", ":", "if", "not", "self", ".", "_header_complete", ":", "self", ".", "_read_header", "(", ")", "else", ":", "self", ".", "_read_payload", "(", ")" ]
Read bytes when ready read
[ "Read", "bytes", "when", "ready", "read" ]
python
train
common-workflow-language/cwltool
cwltool/singularity.py
https://github.com/common-workflow-language/cwltool/blob/cb81b22abc52838823da9945f04d06739ab32fda/cwltool/singularity.py#L133-L152
def get_from_requirements(self,
                          r,                      # type: Dict[Text, Text]
                          pull_image,             # type: bool
                          force_pull=False,       # type: bool
                          tmp_outdir_prefix=None  # type: Text
                          ):
    # type: (...) -> Optional[Text]
    """Return the filename of the Singularity image
    (e.g. hello-world-latest.img).
    """
    if not bool(spawn.find_executable('singularity')):
        raise WorkflowException('singularity executable is not available')
    image_id = r["dockerImageId"]
    if not self.get_image(r, pull_image, force_pull):
        raise WorkflowException(
            u"Container image {} not found".format(image_id))
    return os.path.abspath(image_id)
[ "def", "get_from_requirements", "(", "self", ",", "r", ",", "# type: Dict[Text, Text]", "pull_image", ",", "# type: bool", "force_pull", "=", "False", ",", "# type: bool", "tmp_outdir_prefix", "=", "None", "# type: Text", ")", ":", "# type: (...) -> Optional[Text]", "if...
Returns the filename of the Singularity image (e.g. hello-world-latest.img).
[ "Returns", "the", "filename", "of", "the", "Singularity", "image", "(", "e", ".", "g", ".", "hello", "-", "world", "-", "latest", ".", "img", ")", "." ]
python
train
5j9/wikitextparser
wikitextparser/_template.py
https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_template.py#L287-L292
def get_arg(self, name: str) -> Optional[Argument]:
    """Return the last argument with the given name.

    Return None if no argument with that name is found.
    """
    # Search newest-to-oldest so the last matching argument wins.
    newest_first = reversed(self.arguments)
    return get_arg(name, newest_first)
[ "def", "get_arg", "(", "self", ",", "name", ":", "str", ")", "->", "Optional", "[", "Argument", "]", ":", "return", "get_arg", "(", "name", ",", "reversed", "(", "self", ".", "arguments", ")", ")" ]
Return the last argument with the given name. Return None if no argument with that name is found.
[ "Return", "the", "last", "argument", "with", "the", "given", "name", "." ]
python
test
BenDoan/perform
perform.py
https://github.com/BenDoan/perform/blob/3434c5c68fb7661d74f03404c71bb5fbebe1900f/perform.py#L79-L126
def _run_program(name, *args, **kwargs): """Runs program name with the arguments of *args :param shell: if true, runs the command in the shell :type shell: bool :param return_object: if true, returns a CommandOutput object :type return_object: bool :param ro: same as return_object :type ro: bool :param no_return: doesn't return results, allowing for non-blocking calls :type no_return: bool :param nr: same as no_return :type nr: bool :param input: specifies a string to send to the process :type input: str :returns: if return_object the output as a CommandOutput object, if no_return nothing, else the stdout of the program :rtype: CommandOutput or str or None """ shell = kwargs.get("shell", False) return_object = kwargs.get("ro", False) return_object = kwargs.get("return_object", return_object) no_return = kwargs.get("nr", False) no_return = kwargs.get("no_return", no_return) inp = kwargs.get("input", None) args = [name] + list(args) p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell) if not no_return: if inp: stdout, stderr = tuple(x.decode(sys.getdefaultencoding()).strip() for x in p.communicate(inp)) else: stdout, stderr = tuple(x.decode(sys.getdefaultencoding()).strip() for x in p.communicate()) if return_object: return CommandOutput(stdout, stderr, p.returncode) else: return stdout
[ "def", "_run_program", "(", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "shell", "=", "kwargs", ".", "get", "(", "\"shell\"", ",", "False", ")", "return_object", "=", "kwargs", ".", "get", "(", "\"ro\"", ",", "False", ")", "return_ob...
Runs program name with the arguments of *args :param shell: if true, runs the command in the shell :type shell: bool :param return_object: if true, returns a CommandOutput object :type return_object: bool :param ro: same as return_object :type ro: bool :param no_return: doesn't return results, allowing for non-blocking calls :type no_return: bool :param nr: same as no_return :type nr: bool :param input: specifies a string to send to the process :type input: str :returns: if return_object the output as a CommandOutput object, if no_return nothing, else the stdout of the program :rtype: CommandOutput or str or None
[ "Runs", "program", "name", "with", "the", "arguments", "of", "*", "args" ]
python
train
inspirehep/refextract
refextract/references/engine.py
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L171-L184
def format_report_number(citation_elements):
    """Format report numbers that are missing a dash

    e.g. CERN-LCHH2003-01 to CERN-LHCC-2003-01
    """
    # COMPATIBILITY FIX: the original used a ur'' literal, which is a
    # syntax error on Python 3; u'' with an escaped backslash is
    # equivalent on both Python 2 and 3.
    re_report = re.compile(u'^(?P<name>[A-Z-]+)(?P<nums>[\\d-]+)$', re.UNICODE)
    for el in citation_elements:
        if el['type'] == 'REPORTNUMBER':
            m = re_report.match(el['report_num'])
            if m:
                name = m.group('name')
                # Only insert the dash when it is actually missing.
                if not name.endswith('-'):
                    el['report_num'] = name + '-' + m.group('nums')
    return citation_elements
[ "def", "format_report_number", "(", "citation_elements", ")", ":", "re_report", "=", "re", ".", "compile", "(", "ur'^(?P<name>[A-Z-]+)(?P<nums>[\\d-]+)$'", ",", "re", ".", "UNICODE", ")", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]...
Format report numbers that are missing a dash e.g. CERN-LCHH2003-01 to CERN-LHCC-2003-01
[ "Format", "report", "numbers", "that", "are", "missing", "a", "dash" ]
python
train
ungarj/mapchete
mapchete/config.py
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/config.py#L235-L245
def init_bounds(self):
    """
    Process bounds this process is currently initialized with.

    This gets triggered by using the ``init_bounds`` kwarg. If not set,
    it will be equal to self.bounds.
    """
    raw_bounds = self._raw["init_bounds"]
    if raw_bounds is None:
        return self.bounds
    return Bounds(*_validate_bounds(raw_bounds))
[ "def", "init_bounds", "(", "self", ")", ":", "if", "self", ".", "_raw", "[", "\"init_bounds\"", "]", "is", "None", ":", "return", "self", ".", "bounds", "else", ":", "return", "Bounds", "(", "*", "_validate_bounds", "(", "self", ".", "_raw", "[", "\"in...
Process bounds this process is currently initialized with. This gets triggered by using the ``init_bounds`` kwarg. If not set, it will be equal to self.bounds.
[ "Process", "bounds", "this", "process", "is", "currently", "initialized", "with", "." ]
python
valid
MonashBI/arcana
arcana/study/base.py
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/base.py#L699-L718
def spec(self, name):
    """
    Returns either the input corresponding to a fileset or field
    field spec or a spec or parameter that has either been passed to the
    study as an input or can be derived.

    Parameters
    ----------
    name : Str | BaseData | Parameter
        A parameter, fileset or field or name of one
    """
    # Data items and parameters are referenced by their name attribute.
    if isinstance(name, (BaseData, Parameter)):
        name = name.name
    # Parameter specs take precedence over bound data specs.
    if name in self._param_specs:
        return self._param_specs[name]
    return self.bound_spec(name)
[ "def", "spec", "(", "self", ",", "name", ")", ":", "# If the provided \"name\" is actually a data item or parameter then", "# replace it with its name.", "if", "isinstance", "(", "name", ",", "(", "BaseData", ",", "Parameter", ")", ")", ":", "name", "=", "name", "."...
Returns either the input corresponding to a fileset or field field spec or a spec or parameter that has either been passed to the study as an input or can be derived. Parameters ---------- name : Str | BaseData | Parameter A parameter, fileset or field or name of one
[ "Returns", "either", "the", "input", "corresponding", "to", "a", "fileset", "or", "field", "field", "spec", "or", "a", "spec", "or", "parameter", "that", "has", "either", "been", "passed", "to", "the", "study", "as", "an", "input", "or", "can", "be", "de...
python
train
mozilla/treeherder
treeherder/model/models.py
https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L1170-L1183
def replace_with(self, other):
    """
    Replace this instance with the given other.

    Deletes stale Match objects and updates related
    TextLogErrorMetadatas' best_classifications to point to the given
    other.
    """
    stale_ids = list(self.update_matches(other))
    TextLogErrorMatch.objects.filter(id__in=stale_ids).delete()

    # Repoint best classifications at the replacement before removing self.
    self.best_for_errors.update(best_classification=other)

    self.delete()
[ "def", "replace_with", "(", "self", ",", "other", ")", ":", "match_ids_to_delete", "=", "list", "(", "self", ".", "update_matches", "(", "other", ")", ")", "TextLogErrorMatch", ".", "objects", ".", "filter", "(", "id__in", "=", "match_ids_to_delete", ")", "....
Replace this instance with the given other. Deletes stale Match objects and updates related TextLogErrorMetadatas' best_classifications to point to the given other.
[ "Replace", "this", "instance", "with", "the", "given", "other", "." ]
python
train
raphaelm/python-fints
fints/hhd/flicker.py
https://github.com/raphaelm/python-fints/blob/fee55ae37d3182d0adb40507d4acb98b06057e4a/fints/hhd/flicker.py#L238-L277
def terminal_flicker_unix(code, field_width=3, space_width=3, height=1, clear=False, wait=0.05): """ Re-encodes a flicker code and prints it on a unix terminal. :param code: Challenge value :param field_width: Width of fields in characters (default: 3). :param space_width: Width of spaces in characters (default: 3). :param height: Height of fields in characters (default: 1). :param clear: Clear terminal after every line (default: ``False``). :param wait: Waiting interval between lines (default: 0.05). """ # Inspired by Andreas Schiermeier # https://git.ccc-ffm.de/?p=smartkram.git;a=blob_plain;f=chiptan/flicker/flicker.sh;h # =7066293b4e790c2c4c1f6cbdab703ed9976ffe1f;hb=refs/heads/master code = parse(code).render() data = swap_bytes(code) high = '\033[48;05;15m' low = '\033[48;05;0m' std = '\033[0m' stream = ['10000', '00000', '11111', '01111', '11111', '01111', '11111'] for c in data: v = int(c, 16) stream.append('1' + str(v & 1) + str((v & 2) >> 1) + str((v & 4) >> 2) + str((v & 8) >> 3)) stream.append('0' + str(v & 1) + str((v & 2) >> 1) + str((v & 4) >> 2) + str((v & 8) >> 3)) while True: for frame in stream: if clear: print('\033c', end='') for i in range(height): for c in frame: print(low + ' ' * space_width, end='') if c == '1': print(high + ' ' * field_width, end='') else: print(low+ ' ' * field_width, end='') print(low + ' ' * space_width + std) time.sleep(wait)
[ "def", "terminal_flicker_unix", "(", "code", ",", "field_width", "=", "3", ",", "space_width", "=", "3", ",", "height", "=", "1", ",", "clear", "=", "False", ",", "wait", "=", "0.05", ")", ":", "# Inspired by Andreas Schiermeier", "# https://git.ccc-ffm.de/?p=sm...
Re-encodes a flicker code and prints it on a unix terminal. :param code: Challenge value :param field_width: Width of fields in characters (default: 3). :param space_width: Width of spaces in characters (default: 3). :param height: Height of fields in characters (default: 1). :param clear: Clear terminal after every line (default: ``False``). :param wait: Waiting interval between lines (default: 0.05).
[ "Re", "-", "encodes", "a", "flicker", "code", "and", "prints", "it", "on", "a", "unix", "terminal", "." ]
python
train
Toilal/rebulk
rebulk/rebulk.py
https://github.com/Toilal/rebulk/blob/7511a4671f2fd9493e3df1e5177b7656789069e8/rebulk/rebulk.py#L321-L333
def effective_patterns(self, context=None):
    """
    Get effective patterns for this rebulk object and its children.

    :param context:
    :type context:
    :return:
    :rtype:
    """
    patterns = list(self._patterns)
    for child in self._rebulks:
        if child.disabled(context):
            continue
        extend_safe(patterns, child._patterns)
    return patterns
[ "def", "effective_patterns", "(", "self", ",", "context", "=", "None", ")", ":", "patterns", "=", "list", "(", "self", ".", "_patterns", ")", "for", "rebulk", "in", "self", ".", "_rebulks", ":", "if", "not", "rebulk", ".", "disabled", "(", "context", "...
Get effective patterns for this rebulk object and its children. :param context: :type context: :return: :rtype:
[ "Get", "effective", "patterns", "for", "this", "rebulk", "object", "and", "its", "children", ".", ":", "param", "context", ":", ":", "type", "context", ":", ":", "return", ":", ":", "rtype", ":" ]
python
train
RetailMeNotSandbox/acky
acky/s3.py
https://github.com/RetailMeNotSandbox/acky/blob/fcd4d092c42892ede7c924cafc41e9cf4be3fb9f/acky/s3.py#L75-L93
def destroy(self, url, recursive=False):
    """Destroy a bucket, directory, or file. Specifying recursive=True
    recursively deletes all subdirectories and files."""
    bucket, obj_key = _parse_url(url)
    if not bucket:
        raise InvalidURL(url,
                         "You must specify a bucket and (optional) path")
    target = "/".join((bucket, obj_key)) if obj_key else bucket
    if recursive:
        # Delete every object beneath this prefix first.
        for obj in self.get(url, delimiter=''):
            self.destroy(obj['url'])
    # NOTE(review): "DeleteBucket" is issued even when target includes an
    # object key — confirm against the underlying API wrapper.
    return self.call("DeleteBucket", bucket=target)
[ "def", "destroy", "(", "self", ",", "url", ",", "recursive", "=", "False", ")", ":", "bucket", ",", "obj_key", "=", "_parse_url", "(", "url", ")", "if", "not", "bucket", ":", "raise", "InvalidURL", "(", "url", ",", "\"You must specify a bucket and (optional)...
Destroy a bucket, directory, or file. Specifying recursive=True recursively deletes all subdirectories and files.
[ "Destroy", "a", "bucket", "directory", "or", "file", ".", "Specifying", "recursive", "=", "True", "recursively", "deletes", "all", "subdirectories", "and", "files", "." ]
python
train
mitsei/dlkit
dlkit/handcar/osid/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/osid/objects.py#L307-L326
def is_of_genus_type(self, genus_type=None):
    """Tests if this object is of the given genus Type.

    The given genus type may be supported by the object through the type
    hierarchy.

    | arg:    ``genus_type`` (``osid.type.Type``): a genus type
    | return: (``boolean``) - true if this object is of the given genus
              Type, false otherwise
    | raise:  ``NullArgument`` - ``genus_type`` is null
    | *compliance: mandatory - This method must be implemented.*
    """
    if genus_type is None:
        raise NullArgument()
    mine = self.get_genus_type()
    # Two Types match when authority, namespace and identifier all agree.
    return (genus_type.get_authority() == mine.get_authority() and
            genus_type.get_identifier_namespace() ==
            mine.get_identifier_namespace() and
            genus_type.get_identifier() == mine.get_identifier())
[ "def", "is_of_genus_type", "(", "self", ",", "genus_type", "=", "None", ")", ":", "if", "genus_type", "is", "None", ":", "raise", "NullArgument", "(", ")", "else", ":", "my_genus_type", "=", "self", ".", "get_genus_type", "(", ")", "return", "(", "genus_ty...
Tests if this object is of the given genus Type. The given genus type may be supported by the object through the type hierarchy. | arg: ``genus_type`` (``osid.type.Type``): a genus type | return: (``boolean``) - true if this object is of the given genus Type, false otherwise | raise: ``NullArgument`` - ``genus_type`` is null | *compliance: mandatory - This method must be implemented.*
[ "Tests", "if", "this", "object", "is", "of", "the", "given", "genus", "Type", "." ]
python
train
DistrictDataLabs/yellowbrick
yellowbrick/datasets/path.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/datasets/path.py#L195-L234
def cleanup_dataset(dataset, data_home=None, ext=".zip"):
    """
    Removes the dataset directory and archive file from the data home
    directory.

    Parameters
    ----------
    dataset : str
        The name of the dataset; should either be a folder in data home
        or specified in the yellowbrick.datasets.DATASETS variable.

    data_home : str, optional
        The path on disk where data is stored. If not passed in, it is
        looked up from YELLOWBRICK_DATA or the default returned by
        ``get_data_home``.

    ext : str, default: ".zip"
        The extension of the archive file.

    Returns
    -------
    removed : int
        The number of objects removed from data_home.
    """
    data_home = get_data_home(data_home)

    datadir = os.path.join(data_home, dataset)
    archive = os.path.join(data_home, dataset + ext)

    # The extracted directory needs rmtree; the archive is a single file.
    removed = 0
    for path, remover in ((datadir, shutil.rmtree), (archive, os.remove)):
        if os.path.exists(path):
            remover(path)
            removed += 1
    return removed
[ "def", "cleanup_dataset", "(", "dataset", ",", "data_home", "=", "None", ",", "ext", "=", "\".zip\"", ")", ":", "removed", "=", "0", "data_home", "=", "get_data_home", "(", "data_home", ")", "# Paths to remove", "datadir", "=", "os", ".", "path", ".", "joi...
Removes the dataset directory and archive file from the data home directory. Parameters ---------- dataset : str The name of the dataset; should either be a folder in data home or specified in the yellowbrick.datasets.DATASETS variable. data_home : str, optional The path on disk where data is stored. If not passed in, it is looked up from YELLOWBRICK_DATA or the default returned by ``get_data_home``. ext : str, default: ".zip" The extension of the archive file. Returns ------- removed : int The number of objects removed from data_home.
[ "Removes", "the", "dataset", "directory", "and", "archive", "file", "from", "the", "data", "home", "directory", "." ]
python
train
saltstack/salt
salt/states/macpackage.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/macpackage.py#L248-L269
def _mod_run_check(cmd_kwargs, onlyif, unless):
    '''
    Execute the onlyif and unless logic.
    Return a result dict if:
    * onlyif failed (onlyif != 0)
    * unless succeeded (unless == 0)
    else return True
    '''
    if onlyif and __salt__['cmd.retcode'](onlyif, **cmd_kwargs) != 0:
        return {'comment': 'onlyif condition is false',
                'skip_watch': True,
                'result': True}

    if unless and __salt__['cmd.retcode'](unless, **cmd_kwargs) == 0:
        return {'comment': 'unless condition is true',
                'skip_watch': True,
                'result': True}

    # No reason to stop, return True
    return True
[ "def", "_mod_run_check", "(", "cmd_kwargs", ",", "onlyif", ",", "unless", ")", ":", "if", "onlyif", ":", "if", "__salt__", "[", "'cmd.retcode'", "]", "(", "onlyif", ",", "*", "*", "cmd_kwargs", ")", "!=", "0", ":", "return", "{", "'comment'", ":", "'on...
Execute the onlyif and unless logic. Return a result dict if: * onlyif failed (onlyif != 0) * unless succeeded (unless == 0) else return True
[ "Execute", "the", "onlyif", "and", "unless", "logic", ".", "Return", "a", "result", "dict", "if", ":", "*", "onlyif", "failed", "(", "onlyif", "!", "=", "0", ")", "*", "unless", "succeeded", "(", "unless", "==", "0", ")", "else", "return", "True" ]
python
train
jaraco/keyring
keyring/util/platform_.py
https://github.com/jaraco/keyring/blob/71c798378e365286b7cc03c06e4d7d24c7de8fc4/keyring/util/platform_.py#L32-L48
def _check_old_config_root():
    """
    Prior versions of keyring would search for the config in
    XDG_DATA_HOME, but should probably have been searching for config in
    XDG_CONFIG_HOME. If the config exists in the former but not in the
    latter, raise a RuntimeError to force the change.
    """
    # Run at most once: replace ourselves with a no-op. This both avoids
    # repeating the check and prevents an infinite loop.
    globals()['_check_old_config_root'] = lambda: None
    # NOTE: these local names are interpolated via locals() below.
    config_file_new = os.path.join(_config_root_Linux(), 'keyringrc.cfg')
    config_file_old = os.path.join(_data_root_Linux(), 'keyringrc.cfg')
    only_old_exists = (os.path.isfile(config_file_old)
                       and not os.path.isfile(config_file_new))
    if only_old_exists:
        msg = ("Keyring config exists only in the old location "
               "{config_file_old} and should be moved to {config_file_new} "
               "to work with this version of keyring.")
        raise RuntimeError(msg.format(**locals()))
[ "def", "_check_old_config_root", "(", ")", ":", "# disable the check - once is enough and avoids infinite loop", "globals", "(", ")", "[", "'_check_old_config_root'", "]", "=", "lambda", ":", "None", "config_file_new", "=", "os", ".", "path", ".", "join", "(", "_confi...
Prior versions of keyring would search for the config in XDG_DATA_HOME, but should probably have been searching for config in XDG_CONFIG_HOME. If the config exists in the former but not in the latter, raise a RuntimeError to force the change.
[ "Prior", "versions", "of", "keyring", "would", "search", "for", "the", "config", "in", "XDG_DATA_HOME", "but", "should", "probably", "have", "been", "searching", "for", "config", "in", "XDG_CONFIG_HOME", ".", "If", "the", "config", "exists", "in", "the", "form...
python
valid
profitbricks/profitbricks-sdk-python
profitbricks/client.py
https://github.com/profitbricks/profitbricks-sdk-python/blob/2c804b141688eccb07d6ae56601d5c60a62abebd/profitbricks/client.py#L1401-L1424
def attach_volume(self, datacenter_id, server_id, volume_id):
    """
    Attaches a volume to a server.

    :param      datacenter_id: The unique ID of the data center.
    :type       datacenter_id: ``str``

    :param      server_id: The unique ID of the server.
    :type       server_id: ``str``

    :param      volume_id: The unique ID of the volume.
    :type       volume_id: ``str``
    """
    import json  # local import keeps this fix self-contained

    # Serialize with json.dumps instead of string concatenation so that
    # quotes or other special characters in volume_id cannot produce a
    # malformed JSON body.
    data = json.dumps({"id": volume_id})

    response = self._perform_request(
        url='/datacenters/%s/servers/%s/volumes' % (
            datacenter_id,
            server_id),
        method='POST',
        data=data)

    return response
[ "def", "attach_volume", "(", "self", ",", "datacenter_id", ",", "server_id", ",", "volume_id", ")", ":", "data", "=", "'{ \"id\": \"'", "+", "volume_id", "+", "'\" }'", "response", "=", "self", ".", "_perform_request", "(", "url", "=", "'/datacenters/%s/servers/...
Attaches a volume to a server. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param server_id: The unique ID of the server. :type server_id: ``str`` :param volume_id: The unique ID of the volume. :type volume_id: ``str``
[ "Attaches", "a", "volume", "to", "a", "server", "." ]
python
valid
RockFeng0/rtsf-web
webuidriver/remote/SeleniumJar.py
https://github.com/RockFeng0/rtsf-web/blob/ceabcf62ddf1c969a97b5c7a4a4c547198b6ea71/webuidriver/remote/SeleniumJar.py#L40-L47
def node(self, port, hub_address=("localhost", 4444)):
    '''
    java -jar selenium-server.jar -role node -port 5555 -hub http://127.0.0.1:4444/grid/register/
    @param port: listen port of selenium node
    @param hub_address: hub address which node will connect to
    '''
    self._ip, self._port = hub_address
    register_url = "http://%s:%s/grid/register/" % (self._ip, self._port)
    self.command = [
        self._conf["java_path"],
        "-jar", self._conf["jar_path"],
        "-port", str(port),
        "-role", "node",
        "-hub", register_url,
    ]
    # fluent interface: allow chaining, e.g. jar.node(5555).start()
    return self
[ "def", "node", "(", "self", ",", "port", ",", "hub_address", "=", "(", "\"localhost\"", ",", "4444", ")", ")", ":", "self", ".", "_ip", ",", "self", ".", "_port", "=", "hub_address", "self", ".", "command", "=", "[", "self", ".", "_conf", "[", "\"j...
java -jar selenium-server.jar -role node -port 5555 -hub http://127.0.0.1:4444/grid/register/ @param port: listen port of selenium node @param hub_address: hub address which node will connect to
[ "java", "-", "jar", "selenium", "-", "server", ".", "jar", "-", "role", "node", "-", "port", "5555", "-", "hub", "http", ":", "//", "127", ".", "0", ".", "0", ".", "1", ":", "4444", "/", "grid", "/", "register", "/" ]
python
train
lsst-sqre/lsst-projectmeta-kit
lsstprojectmeta/cli/ingestdocs.py
https://github.com/lsst-sqre/lsst-projectmeta-kit/blob/ac8d4ff65bb93d8fdeb1b46ae6eb5d7414f1ae14/lsstprojectmeta/cli/ingestdocs.py#L18-L78
def main():
    """Command line entrypoint to reduce technote metadata.
    """
    parser = argparse.ArgumentParser(
        description='Discover and ingest metadata from document sources, '
                    'including lsstdoc-based LaTeX documents and '
                    'reStructuredText-based technotes. Metadata can be '
                    'upserted into the LSST Projectmeta MongoDB.')
    parser.add_argument(
        '--ltd-product',
        dest='ltd_product_url',
        help='URL of an LSST the Docs product '
             '(https://keeper.lsst.codes/products/<slug>). If provided, '
             'only this document will be ingested.')
    parser.add_argument(
        '--github-token',
        help='GitHub personal access token.')
    parser.add_argument(
        '--mongodb-uri',
        help='MongoDB connection URI. If provided, metadata will be loaded '
             'into the Projectmeta database. Omit this argument to just '
             'test the ingest pipeline.')
    parser.add_argument(
        '--mongodb-db',
        default='lsstprojectmeta',
        help='Name of MongoDB database')
    parser.add_argument(
        '--mongodb-collection',
        default='resources',
        help='Name of the MongoDB collection for projectmeta resources')
    args = parser.parse_args()

    # Configure the root logger: warnings and above for third-party code.
    stream_handler = logging.StreamHandler()
    stream_formatter = logging.Formatter(
        '%(asctime)s %(levelname)8s %(name)s | %(message)s')
    stream_handler.setFormatter(stream_formatter)
    root_logger = logging.getLogger()
    root_logger.addHandler(stream_handler)
    root_logger.setLevel(logging.WARNING)
    # Configure app logger: full debug output for this package only.
    app_logger = logging.getLogger('lsstprojectmeta')
    app_logger.setLevel(logging.DEBUG)

    if args.mongodb_uri is not None:
        mongo_client = AsyncIOMotorClient(args.mongodb_uri, ssl=True)
        collection = mongo_client[args.mongodb_db][args.mongodb_collection]
    else:
        # No --mongodb-uri given: run the pipeline without persisting.
        collection = None

    loop = asyncio.get_event_loop()

    if args.ltd_product_url is not None:
        # Run single technote
        loop.run_until_complete(run_single_ltd_doc(args.ltd_product_url,
                                                   args.github_token,
                                                   collection))
    else:
        # Run bulk technote processing
        loop.run_until_complete(run_bulk_etl(args.github_token,
                                             collection))
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Discover and ingest metadata from document sources, '", "'including lsstdoc-based LaTeX documents and '", "'reStructuredText-based technotes. Metadata can be '", "'upserted into...
Command line entrypoint to reduce technote metadata.
[ "Command", "line", "entrypoint", "to", "reduce", "technote", "metadata", "." ]
python
valid
ska-sa/katcp-python
katcp/client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/client.py#L754-L789
def enable_thread_safety(self):
    """Enable thread-safety features.

    Must be called before start().

    Raises
    ------
    RuntimeError
        If the client is already running.
    """
    if self.threadsafe:
        return  # Already done!
    if self._running.isSet():
        raise RuntimeError('Cannot enable thread safety after start')

    def _getattr(obj, name):
        # use 'is True' so mock objects don't return true for everything
        return getattr(obj, name, False) is True

    for name in dir(self):
        try:
            meth = getattr(self, name)
        except AttributeError:
            # Subclasses may have computed attributes that don't work
            # before they are started, so skip those.
            # BUG FIX: the original used `pass`, which fell through with
            # a stale `meth` from the previous iteration (or an unbound
            # name on the first pass) and could re-wrap and rebind the
            # wrong attribute under this name.
            continue
        if not callable(meth):
            continue
        make_threadsafe = _getattr(meth, 'make_threadsafe')
        make_threadsafe_blocking = _getattr(
            meth, 'make_threadsafe_blocking')
        if make_threadsafe:
            assert not make_threadsafe_blocking
            setattr(self, name, self._make_threadsafe(meth))
        elif make_threadsafe_blocking:
            setattr(self, name, self._make_threadsafe_blocking(meth))
    self._threadsafe = True
[ "def", "enable_thread_safety", "(", "self", ")", ":", "if", "self", ".", "threadsafe", ":", "return", "# Already done!", "if", "self", ".", "_running", ".", "isSet", "(", ")", ":", "raise", "RuntimeError", "(", "'Cannot enable thread safety after start'", ")", "...
Enable thread-safety features. Must be called before start().
[ "Enable", "thread", "-", "safety", "features", "." ]
python
train
camptocamp/anthem
anthem/lyrics/loaders.py
https://github.com/camptocamp/anthem/blob/6800730764d31a2edced12049f823fefb367e9ad/anthem/lyrics/loaders.py#L79-L119
def load_csv_stream(ctx, model, data, header=None, header_exclude=None,
                    **fmtparams):
    """Load a CSV from a stream.

    :param ctx: current anthem context
    :param model: model name as string or model klass
    :param data: csv data to load
    :param header: csv fieldnames whitelist
    :param header_exclude: csv fieldnames blacklist

    Usage example::

      from pkg_resources import Requirement, resource_stream

      req = Requirement.parse('my-project')
      load_csv_stream(ctx, ctx.env['res.users'],
                      resource_stream(req, 'data/users.csv'),
                      delimiter=',')
    """
    file_header, file_rows = read_csv(data, **fmtparams)
    if not header:
        header = file_header
    if not file_rows:
        return
    # A partial whitelist implies excluding the remaining file columns.
    if header != file_header and not header_exclude:
        header_exclude = [f for f in file_header if f not in header]
    if header_exclude:
        # Exclude fields from the header as well as their values: odoo's
        # import works with plain row lists (not dicts), so the excluded
        # columns must be dropped positionally from every row too.
        header = [f for f in header if f not in header_exclude]
        drop_idxs = [file_header.index(f) for f in header_exclude]
        rows = [
            [cell for idx, cell in enumerate(row) if idx not in drop_idxs]
            for row in file_rows
        ]
    else:
        rows = list(file_rows)
    if rows:
        load_rows(ctx, model, header, rows)
[ "def", "load_csv_stream", "(", "ctx", ",", "model", ",", "data", ",", "header", "=", "None", ",", "header_exclude", "=", "None", ",", "*", "*", "fmtparams", ")", ":", "_header", ",", "_rows", "=", "read_csv", "(", "data", ",", "*", "*", "fmtparams", ...
Load a CSV from a stream. :param ctx: current anthem context :param model: model name as string or model klass :param data: csv data to load :param header: csv fieldnames whitelist :param header_exclude: csv fieldnames blacklist Usage example:: from pkg_resources import Requirement, resource_stream req = Requirement.parse('my-project') load_csv_stream(ctx, ctx.env['res.users'], resource_stream(req, 'data/users.csv'), delimiter=',')
[ "Load", "a", "CSV", "from", "a", "stream", "." ]
python
train
barnumbirr/coinmarketcap
coinmarketcap/core.py
https://github.com/barnumbirr/coinmarketcap/blob/d1d76a73bc48a64a4c2883dd28c6199bfbd3ebc6/coinmarketcap/core.py#L59-L96
def ticker(self, currency="", **kwargs): """ This endpoint displays cryptocurrency ticker data in order of rank. The maximum number of results per call is 100. Pagination is possible by using the start and limit parameters. GET /ticker/ Optional parameters: (int) start - return results from rank [start] and above (default is 1) (int) limit - return a maximum of [limit] results (default is 100; max is 100) (string) convert - return pricing info in terms of another currency. Valid fiat currency values are: "AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR" Valid cryptocurrency values are: "BTC", "ETH" "XRP", "LTC", and "BCH" GET /ticker/{id} Optional parameters: (string) convert - return pricing info in terms of another currency. Valid fiat currency values are: "AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR" Valid cryptocurrency values are: "BTC", "ETH" "XRP", "LTC", and "BCH" """ params = {} params.update(kwargs) # see https://github.com/barnumbirr/coinmarketcap/pull/28 if currency: currency = str(currency) + '/' response = self.__request('ticker/' + currency, params) return response
[ "def", "ticker", "(", "self", ",", "currency", "=", "\"\"", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "}", "params", ".", "update", "(", "kwargs", ")", "# see https://github.com/barnumbirr/coinmarketcap/pull/28", "if", "currency", ":", "currency",...
This endpoint displays cryptocurrency ticker data in order of rank. The maximum number of results per call is 100. Pagination is possible by using the start and limit parameters. GET /ticker/ Optional parameters: (int) start - return results from rank [start] and above (default is 1) (int) limit - return a maximum of [limit] results (default is 100; max is 100) (string) convert - return pricing info in terms of another currency. Valid fiat currency values are: "AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR" Valid cryptocurrency values are: "BTC", "ETH" "XRP", "LTC", and "BCH" GET /ticker/{id} Optional parameters: (string) convert - return pricing info in terms of another currency. Valid fiat currency values are: "AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR" Valid cryptocurrency values are: "BTC", "ETH" "XRP", "LTC", and "BCH"
[ "This", "endpoint", "displays", "cryptocurrency", "ticker", "data", "in", "order", "of", "rank", ".", "The", "maximum", "number", "of", "results", "per", "call", "is", "100", ".", "Pagination", "is", "possible", "by", "using", "the", "start", "and", "limit",...
python
train
google/openhtf
openhtf/output/servers/station_server.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/servers/station_server.py#L152-L173
def run(self): """Call self._poll_for_update() in a loop and handle errors.""" while True: try: self._poll_for_update() except RuntimeError as error: # Note that because logging triggers a call to notify_update(), by # logging a message, we automatically retry publishing the update # after an error occurs. if error.message == 'dictionary changed size during iteration': # These errors occur occasionally and it is infeasible to get rid of # them entirely unless data.convert_to_base_types() is made # thread-safe. Ignore the error and retry quickly. _LOG.debug('Ignoring (probably harmless) error in station watcher: ' '`dictionary changed size during iteration`.') time.sleep(0.1) else: _LOG.exception('Error in station watcher: %s', error) time.sleep(1) except Exception as error: # pylint: disable=broad-except _LOG.exception('Error in station watcher: %s', error) time.sleep(1)
[ "def", "run", "(", "self", ")", ":", "while", "True", ":", "try", ":", "self", ".", "_poll_for_update", "(", ")", "except", "RuntimeError", "as", "error", ":", "# Note that because logging triggers a call to notify_update(), by", "# logging a message, we automatically ret...
Call self._poll_for_update() in a loop and handle errors.
[ "Call", "self", ".", "_poll_for_update", "()", "in", "a", "loop", "and", "handle", "errors", "." ]
python
train
xtrementl/focus
focus/environment/__init__.py
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/environment/__init__.py#L22-L62
def _import_modules(dir_path): """ Attempts to import modules in the specified directory path. `dir_path` Base directory path to attempt to import modules. """ def _import_module(module): """ Imports the specified module. """ # already loaded, skip if module in mods_loaded: return False __import__(module) mods_loaded.append(module) mods_loaded = [] # check if provided path exists if not os.path.isdir(dir_path): return try: # update import search path sys.path.insert(0, dir_path) # check for modules in the dir path for entry in os.listdir(dir_path): path = os.path.join(dir_path, entry) if os.path.isdir(path): # directory _import_module(entry) elif _RE_PY_EXT.search(entry): # python file if not _RE_INIT_PY.match(entry): # exclude init name = _RE_PY_EXT.sub('', entry) _import_module(name) finally: # remove inserted path sys.path.pop(0)
[ "def", "_import_modules", "(", "dir_path", ")", ":", "def", "_import_module", "(", "module", ")", ":", "\"\"\" Imports the specified module.\n \"\"\"", "# already loaded, skip", "if", "module", "in", "mods_loaded", ":", "return", "False", "__import__", "(", "...
Attempts to import modules in the specified directory path. `dir_path` Base directory path to attempt to import modules.
[ "Attempts", "to", "import", "modules", "in", "the", "specified", "directory", "path", "." ]
python
train
PyCQA/astroid
astroid/as_string.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/as_string.py#L425-L434
def visit_return(self, node): """return an astroid.Return node as string""" if node.is_tuple_return() and len(node.value.elts) > 1: elts = [child.accept(self) for child in node.value.elts] return "return %s" % ", ".join(elts) if node.value: return "return %s" % node.value.accept(self) return "return"
[ "def", "visit_return", "(", "self", ",", "node", ")", ":", "if", "node", ".", "is_tuple_return", "(", ")", "and", "len", "(", "node", ".", "value", ".", "elts", ")", ">", "1", ":", "elts", "=", "[", "child", ".", "accept", "(", "self", ")", "for"...
return an astroid.Return node as string
[ "return", "an", "astroid", ".", "Return", "node", "as", "string" ]
python
train
has2k1/plotnine
plotnine/stats/smoothers.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/smoothers.py#L246-L279
def gpr(data, xseq, **params): """ Fit gaussian process """ try: from sklearn import gaussian_process except ImportError: raise PlotnineError( "To use gaussian process smoothing, " "You need to install scikit-learn.") kwargs = params['method_args'] if not kwargs: warnings.warn( "See sklearn.gaussian_process.GaussianProcessRegressor " "for parameters to pass in as 'method_args'", PlotnineWarning) regressor = gaussian_process.GaussianProcessRegressor(**kwargs) X = np.atleast_2d(data['x']).T n = len(data) Xseq = np.atleast_2d(xseq).T regressor.fit(X, data['y']) data = pd.DataFrame({'x': xseq}) if params['se']: y, stderr = regressor.predict(Xseq, return_std=True) data['y'] = y data['se'] = stderr data['ymin'], data['ymax'] = tdist_ci( y, n-1, stderr, params['level']) else: data['y'] = regressor.predict(Xseq, return_std=True) return data
[ "def", "gpr", "(", "data", ",", "xseq", ",", "*", "*", "params", ")", ":", "try", ":", "from", "sklearn", "import", "gaussian_process", "except", "ImportError", ":", "raise", "PlotnineError", "(", "\"To use gaussian process smoothing, \"", "\"You need to install sci...
Fit gaussian process
[ "Fit", "gaussian", "process" ]
python
train
shidenggui/easyquotation
easyquotation/jsl.py
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/jsl.py#L121-L128
def formatetfindexjson(fundbjson): """格式化集思录返回 指数ETF 的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fund_id"] result[fundb_id] = cell return result
[ "def", "formatetfindexjson", "(", "fundbjson", ")", ":", "result", "=", "{", "}", "for", "row", "in", "fundbjson", "[", "\"rows\"", "]", ":", "cell", "=", "row", "[", "\"cell\"", "]", "fundb_id", "=", "cell", "[", "\"fund_id\"", "]", "result", "[", "fu...
格式化集思录返回 指数ETF 的json数据,以字典形式保存
[ "格式化集思录返回", "指数ETF", "的json数据", "以字典形式保存" ]
python
train
rkhleics/wagtailmenus
wagtailmenus/models/menus.py
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L451-L463
def _replace_with_specific_page(page, menu_item): """ If ``page`` is a vanilla ``Page` object, replace it with a 'specific' version of itself. Also update ``menu_item``, depending on whether it's a ``MenuItem`` object or a ``Page`` object. """ if type(page) is Page: page = page.specific if isinstance(menu_item, MenuItem): menu_item.link_page = page else: menu_item = page return page, menu_item
[ "def", "_replace_with_specific_page", "(", "page", ",", "menu_item", ")", ":", "if", "type", "(", "page", ")", "is", "Page", ":", "page", "=", "page", ".", "specific", "if", "isinstance", "(", "menu_item", ",", "MenuItem", ")", ":", "menu_item", ".", "li...
If ``page`` is a vanilla ``Page` object, replace it with a 'specific' version of itself. Also update ``menu_item``, depending on whether it's a ``MenuItem`` object or a ``Page`` object.
[ "If", "page", "is", "a", "vanilla", "Page", "object", "replace", "it", "with", "a", "specific", "version", "of", "itself", ".", "Also", "update", "menu_item", "depending", "on", "whether", "it", "s", "a", "MenuItem", "object", "or", "a", "Page", "object", ...
python
train
adamzap/landslide
landslide/generator.py
https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L554-L575
def write_pdf(self, html): """ Tries to write a PDF export from the command line using Prince if available. """ try: f = tempfile.NamedTemporaryFile(delete=False, suffix='.html') f.write(html.encode('utf_8', 'xmlcharrefreplace')) f.close() except Exception: raise IOError(u"Unable to create temporary file, aborting") dummy_fh = open(os.path.devnull, 'w') try: command = ["prince", f.name, "-o", self.destination_file] Popen(command, stderr=dummy_fh).communicate() except Exception: raise EnvironmentError(u"Unable to generate PDF file using " "prince. Is it installed and available?") finally: dummy_fh.close()
[ "def", "write_pdf", "(", "self", ",", "html", ")", ":", "try", ":", "f", "=", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ",", "suffix", "=", "'.html'", ")", "f", ".", "write", "(", "html", ".", "encode", "(", "'utf_8'", ",", ...
Tries to write a PDF export from the command line using Prince if available.
[ "Tries", "to", "write", "a", "PDF", "export", "from", "the", "command", "line", "using", "Prince", "if", "available", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/core/core_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/core/core_client.py#L28-L39
def remove_project_avatar(self, project_id): """RemoveProjectAvatar. [Preview API] Removes the avatar for the project. :param str project_id: The ID or name of the project. """ route_values = {} if project_id is not None: route_values['projectId'] = self._serialize.url('project_id', project_id, 'str') self._send(http_method='DELETE', location_id='54b2a2a0-859b-4d05-827c-ec4c862f641a', version='5.1-preview.1', route_values=route_values)
[ "def", "remove_project_avatar", "(", "self", ",", "project_id", ")", ":", "route_values", "=", "{", "}", "if", "project_id", "is", "not", "None", ":", "route_values", "[", "'projectId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project_id'", ...
RemoveProjectAvatar. [Preview API] Removes the avatar for the project. :param str project_id: The ID or name of the project.
[ "RemoveProjectAvatar", ".", "[", "Preview", "API", "]", "Removes", "the", "avatar", "for", "the", "project", ".", ":", "param", "str", "project_id", ":", "The", "ID", "or", "name", "of", "the", "project", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/variation/samtools.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/samtools.py#L19-L43
def shared_variantcall(call_fn, name, align_bams, ref_file, items, assoc_files, region=None, out_file=None): """Provide base functionality for prepping and indexing for variant calling. """ config = items[0]["config"] if out_file is None: if vcfutils.is_paired_analysis(align_bams, items): out_file = "%s-paired-variants.vcf.gz" % config["metdata"]["batch"] else: out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0] if not file_exists(out_file): logger.debug("Genotyping with {name}: {region} {fname}".format( name=name, region=region, fname=os.path.basename(align_bams[0]))) variant_regions = bedutils.population_variant_regions(items, merged=True) target_regions = subset_variant_regions(variant_regions, region, out_file, items=items) if (variant_regions is not None and isinstance(target_regions, six.string_types) and not os.path.isfile(target_regions)): vcfutils.write_empty_vcf(out_file, config) else: with file_transaction(config, out_file) as tx_out_file: call_fn(align_bams, ref_file, items, target_regions, tx_out_file) if out_file.endswith(".gz"): out_file = vcfutils.bgzip_and_index(out_file, config) return out_file
[ "def", "shared_variantcall", "(", "call_fn", ",", "name", ",", "align_bams", ",", "ref_file", ",", "items", ",", "assoc_files", ",", "region", "=", "None", ",", "out_file", "=", "None", ")", ":", "config", "=", "items", "[", "0", "]", "[", "\"config\"", ...
Provide base functionality for prepping and indexing for variant calling.
[ "Provide", "base", "functionality", "for", "prepping", "and", "indexing", "for", "variant", "calling", "." ]
python
train
20tab/twentytab-tree
tree/menu.py
https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/menu.py#L82-L86
def as_string(self, chars, current_linkable=False, class_current="active_link"): """ It returns menu as string """ return self.__do_menu("as_string", current_linkable, class_current, chars)
[ "def", "as_string", "(", "self", ",", "chars", ",", "current_linkable", "=", "False", ",", "class_current", "=", "\"active_link\"", ")", ":", "return", "self", ".", "__do_menu", "(", "\"as_string\"", ",", "current_linkable", ",", "class_current", ",", "chars", ...
It returns menu as string
[ "It", "returns", "menu", "as", "string" ]
python
train
PmagPy/PmagPy
programs/deprecated/extract_methods.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/extract_methods.py#L6-L55
def main(): """ NAME extract_methods.py DESCRIPTION reads in a magic table and creates a file with method codes SYNTAX extract_methods.py [command line options] OPTIONS -h: prints the help message and quits. -f FILE: specify magic format input file, default is magic_measurements.txt -F FILE: specify method code output file, default is magic_methods.txt """ citation='This study' args=sys.argv outfile='magic_methods.txt' infile='magic_measurements.txt' # # get command line arguments # dir_path='.' if '-WD' in args: ind=args.index("-WD") dir_path=args[ind+1] if "-h" in args: print(main.__doc__) sys.exit() if '-F' in args: ind=args.index("-F") outfile=args[ind+1] if '-f' in args: ind=args.index("-f") infile=args[ind+1] infile=dir_path+'/'+infile outfile=dir_path+'/'+outfile data,file_type=pmag.magic_read(infile) MethRecs=[] methods=[] for rec in data: meths=rec['magic_method_codes'].split(":") for meth in meths: if meth not in methods: MethRec={} methods.append(meth) MethRec['magic_method_code']=meth MethRecs.append(MethRec) pmag.magic_write(outfile,MethRecs,'magic_methods')
[ "def", "main", "(", ")", ":", "citation", "=", "'This study'", "args", "=", "sys", ".", "argv", "outfile", "=", "'magic_methods.txt'", "infile", "=", "'magic_measurements.txt'", "#", "# get command line arguments", "#", "dir_path", "=", "'.'", "if", "'-WD'", "in...
NAME extract_methods.py DESCRIPTION reads in a magic table and creates a file with method codes SYNTAX extract_methods.py [command line options] OPTIONS -h: prints the help message and quits. -f FILE: specify magic format input file, default is magic_measurements.txt -F FILE: specify method code output file, default is magic_methods.txt
[ "NAME", "extract_methods", ".", "py", "DESCRIPTION", "reads", "in", "a", "magic", "table", "and", "creates", "a", "file", "with", "method", "codes" ]
python
train
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L368-L383
def get_country_name_from_m49(cls, m49, use_live=True, exception=None): # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str] """Get country name from M49 code Args: m49 (int): M49 numeric code for which to get country name use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: Country name """ iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception) if iso3 is not None: return cls.get_country_name_from_iso3(iso3, exception=exception) return None
[ "def", "get_country_name_from_m49", "(", "cls", ",", "m49", ",", "use_live", "=", "True", ",", "exception", "=", "None", ")", ":", "# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]", "iso3", "=", "cls", ".", "get_iso3_from_m49", "(", "m49", ",", "...
Get country name from M49 code Args: m49 (int): M49 numeric code for which to get country name use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: Country name
[ "Get", "country", "name", "from", "M49", "code" ]
python
train
diffeo/yakonfig
yakonfig/toplevel.py
https://github.com/diffeo/yakonfig/blob/412e195da29b4f4fc7b72967c192714a6f5eaeb5/yakonfig/toplevel.py#L201-L231
def defaulted_config(modules, params=None, yaml=None, filename=None, config=None, validate=True): """Context manager version of :func:`set_default_config()`. Use this with a Python 'with' statement, like >>> config_yaml = ''' ... toplevel: ... param: value ... ''' >>> with yakonfig.defaulted_config([toplevel], yaml=config_yaml) as config: ... assert 'param' in config['toplevel'] ... assert yakonfig.get_global_config('toplevel', 'param') == 'value' On exit the global configuration is restored to its previous state (if any). :param modules: modules or Configurable instances to use :type modules: iterable of :class:`~yakonfig.Configurable` :param dict params: dictionary of command-line argument key to values :param str yaml: global configuration file :param str filename: location of global configuration file :param dict config: global configuration object :param bool validate: check configuration after creating :return: the new global configuration """ with _temporary_config(): set_default_config(modules, params=params, yaml=yaml, filename=filename, config=config, validate=validate) yield get_global_config()
[ "def", "defaulted_config", "(", "modules", ",", "params", "=", "None", ",", "yaml", "=", "None", ",", "filename", "=", "None", ",", "config", "=", "None", ",", "validate", "=", "True", ")", ":", "with", "_temporary_config", "(", ")", ":", "set_default_co...
Context manager version of :func:`set_default_config()`. Use this with a Python 'with' statement, like >>> config_yaml = ''' ... toplevel: ... param: value ... ''' >>> with yakonfig.defaulted_config([toplevel], yaml=config_yaml) as config: ... assert 'param' in config['toplevel'] ... assert yakonfig.get_global_config('toplevel', 'param') == 'value' On exit the global configuration is restored to its previous state (if any). :param modules: modules or Configurable instances to use :type modules: iterable of :class:`~yakonfig.Configurable` :param dict params: dictionary of command-line argument key to values :param str yaml: global configuration file :param str filename: location of global configuration file :param dict config: global configuration object :param bool validate: check configuration after creating :return: the new global configuration
[ "Context", "manager", "version", "of", ":", "func", ":", "set_default_config", "()", "." ]
python
train
nicfit/nicfit.py
nicfit/console/_io.py
https://github.com/nicfit/nicfit.py/blob/8313f8edbc5e7361ddad496d6d818324b5236c7a/nicfit/console/_io.py#L9-L11
def perr(msg, log=None): """Print 'msg' to stderr, and option 'log' at info level.""" _print(msg, sys.stderr, log_func=log.error if log else None)
[ "def", "perr", "(", "msg", ",", "log", "=", "None", ")", ":", "_print", "(", "msg", ",", "sys", ".", "stderr", ",", "log_func", "=", "log", ".", "error", "if", "log", "else", "None", ")" ]
Print 'msg' to stderr, and option 'log' at info level.
[ "Print", "msg", "to", "stderr", "and", "option", "log", "at", "info", "level", "." ]
python
test
sharibarboza/py_zap
py_zap/py_zap.py
https://github.com/sharibarboza/py_zap/blob/ce90853efcad66d3e28b8f1ac910f275349d016c/py_zap/py_zap.py#L165-L176
def get_json(self): """Serialize ratings object as JSON-formatted string""" ratings_dict = { 'category': self.category, 'date': self.date, 'day': self.weekday, 'next week': self.next_week, 'last week': self.last_week, 'entries': self.entries, 'url': self.url } return to_json(ratings_dict)
[ "def", "get_json", "(", "self", ")", ":", "ratings_dict", "=", "{", "'category'", ":", "self", ".", "category", ",", "'date'", ":", "self", ".", "date", ",", "'day'", ":", "self", ".", "weekday", ",", "'next week'", ":", "self", ".", "next_week", ",", ...
Serialize ratings object as JSON-formatted string
[ "Serialize", "ratings", "object", "as", "JSON", "-", "formatted", "string" ]
python
train
pantsbuild/pants
src/python/pants/pantsd/watchman.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/pantsd/watchman.py#L155-L164
def watch_project(self, path): """Issues the watch-project command to watchman to begin watching the buildroot. :param string path: the path to the watchman project root/pants build root. """ # TODO(kwlzn): Add a client.query(timeout=X) param to the upstream pywatchman project. try: return self.client.query('watch-project', os.path.realpath(path)) finally: self._attempt_set_timeout(self._timeout)
[ "def", "watch_project", "(", "self", ",", "path", ")", ":", "# TODO(kwlzn): Add a client.query(timeout=X) param to the upstream pywatchman project.", "try", ":", "return", "self", ".", "client", ".", "query", "(", "'watch-project'", ",", "os", ".", "path", ".", "realp...
Issues the watch-project command to watchman to begin watching the buildroot. :param string path: the path to the watchman project root/pants build root.
[ "Issues", "the", "watch", "-", "project", "command", "to", "watchman", "to", "begin", "watching", "the", "buildroot", "." ]
python
train
quantumlib/Cirq
cirq/circuits/circuit.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/circuit.py#L1421-L1481
def to_text_diagram_drawer( self, *, use_unicode_characters: bool = True, qubit_namer: Optional[Callable[[ops.Qid], str]] = None, transpose: bool = False, precision: Optional[int] = 3, qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT, get_circuit_diagram_info: Optional[Callable[[ops.Operation, protocols.CircuitDiagramInfoArgs], protocols.CircuitDiagramInfo]]=None ) -> TextDiagramDrawer: """Returns a TextDiagramDrawer with the circuit drawn into it. Args: use_unicode_characters: Determines if unicode characters are allowed (as opposed to ascii-only diagrams). qubit_namer: Names qubits in diagram. Defaults to str. transpose: Arranges qubit wires vertically instead of horizontally. precision: Number of digits to use when representing numbers. qubit_order: Determines how qubits are ordered in the diagram. get_circuit_diagram_info: Gets circuit diagram info. Defaults to protocol with fallback. Returns: The TextDiagramDrawer instance. """ qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for( self.all_qubits()) qubit_map = {qubits[i]: i for i in range(len(qubits))} if qubit_namer is None: qubit_namer = lambda q: str(q) + ('' if transpose else ': ') diagram = TextDiagramDrawer() for q, i in qubit_map.items(): diagram.write(0, i, qubit_namer(q)) moment_groups = [] # type: List[Tuple[int, int]] for moment in self._moments: _draw_moment_in_diagram(moment, use_unicode_characters, qubit_map, diagram, precision, moment_groups, get_circuit_diagram_info) w = diagram.width() for i in qubit_map.values(): diagram.horizontal_line(i, 0, w) if moment_groups: _draw_moment_groups_in_diagram(moment_groups, use_unicode_characters, diagram) if transpose: diagram = diagram.transpose() return diagram
[ "def", "to_text_diagram_drawer", "(", "self", ",", "*", ",", "use_unicode_characters", ":", "bool", "=", "True", ",", "qubit_namer", ":", "Optional", "[", "Callable", "[", "[", "ops", ".", "Qid", "]", ",", "str", "]", "]", "=", "None", ",", "transpose", ...
Returns a TextDiagramDrawer with the circuit drawn into it. Args: use_unicode_characters: Determines if unicode characters are allowed (as opposed to ascii-only diagrams). qubit_namer: Names qubits in diagram. Defaults to str. transpose: Arranges qubit wires vertically instead of horizontally. precision: Number of digits to use when representing numbers. qubit_order: Determines how qubits are ordered in the diagram. get_circuit_diagram_info: Gets circuit diagram info. Defaults to protocol with fallback. Returns: The TextDiagramDrawer instance.
[ "Returns", "a", "TextDiagramDrawer", "with", "the", "circuit", "drawn", "into", "it", "." ]
python
train
pypa/pipenv
pipenv/vendor/distlib/scripts.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/scripts.py#L139-L168
def _build_shebang(self, executable, post_interp): """ Build a shebang line. In the simple case (on Windows, or a shebang line which is not too long or contains spaces) use a simple formulation for the shebang. Otherwise, use /bin/sh as the executable, with a contrived shebang which allows the script to run either under Python or sh, using suitable quoting. Thanks to Harald Nordgren for his input. See also: http://www.in-ulm.de/~mascheck/various/shebang/#length https://hg.mozilla.org/mozilla-central/file/tip/mach """ if os.name != 'posix': simple_shebang = True else: # Add 3 for '#!' prefix and newline suffix. shebang_length = len(executable) + len(post_interp) + 3 if sys.platform == 'darwin': max_shebang_length = 512 else: max_shebang_length = 127 simple_shebang = ((b' ' not in executable) and (shebang_length <= max_shebang_length)) if simple_shebang: result = b'#!' + executable + post_interp + b'\n' else: result = b'#!/bin/sh\n' result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n' result += b"' '''" return result
[ "def", "_build_shebang", "(", "self", ",", "executable", ",", "post_interp", ")", ":", "if", "os", ".", "name", "!=", "'posix'", ":", "simple_shebang", "=", "True", "else", ":", "# Add 3 for '#!' prefix and newline suffix.", "shebang_length", "=", "len", "(", "e...
Build a shebang line. In the simple case (on Windows, or a shebang line which is not too long or contains spaces) use a simple formulation for the shebang. Otherwise, use /bin/sh as the executable, with a contrived shebang which allows the script to run either under Python or sh, using suitable quoting. Thanks to Harald Nordgren for his input. See also: http://www.in-ulm.de/~mascheck/various/shebang/#length https://hg.mozilla.org/mozilla-central/file/tip/mach
[ "Build", "a", "shebang", "line", ".", "In", "the", "simple", "case", "(", "on", "Windows", "or", "a", "shebang", "line", "which", "is", "not", "too", "long", "or", "contains", "spaces", ")", "use", "a", "simple", "formulation", "for", "the", "shebang", ...
python
train
coldfix/udiskie
udiskie/mount.py
https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/mount.py#L201-L242
async def unlock(self, device): """ Unlock the device if not already unlocked. :param device: device object, block device path or mount path :returns: whether the device is unlocked """ device = self._find_device(device) if not self.is_handleable(device) or not device.is_crypto: self._log.warn(_('not unlocking {0}: unhandled device', device)) return False if device.is_unlocked: self._log.info(_('not unlocking {0}: already unlocked', device)) return True if not self._prompt: self._log.error(_('not unlocking {0}: no password prompt', device)) return False unlocked = await self._unlock_from_cache(device) if unlocked: return True unlocked = await self._unlock_from_keyfile(device) if unlocked: return True options = dict(allow_keyfile=self.udisks.keyfile_support, allow_cache=self._cache is not None, cache_hint=self._cache_hint) password = await self._prompt(device, options) # password can be: None, str, or udiskie.prompt.PasswordResult cache_hint = getattr(password, 'cache_hint', self._cache_hint) password = getattr(password, 'password', password) if password is None: self._log.debug(_('not unlocking {0}: cancelled by user', device)) return False if isinstance(password, bytes): self._log.debug(_('unlocking {0} using keyfile', device)) await device.unlock_keyfile(password) else: self._log.debug(_('unlocking {0}', device)) await device.unlock(password) self._update_cache(device, password, cache_hint) self._log.info(_('unlocked {0}', device)) return True
[ "async", "def", "unlock", "(", "self", ",", "device", ")", ":", "device", "=", "self", ".", "_find_device", "(", "device", ")", "if", "not", "self", ".", "is_handleable", "(", "device", ")", "or", "not", "device", ".", "is_crypto", ":", "self", ".", ...
Unlock the device if not already unlocked. :param device: device object, block device path or mount path :returns: whether the device is unlocked
[ "Unlock", "the", "device", "if", "not", "already", "unlocked", "." ]
python
train
HewlettPackard/python-hpOneView
hpOneView/image_streamer/resources/plan_scripts.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/image_streamer/resources/plan_scripts.py#L172-L183
def get_usedby_and_readonly(self, id): """ Gets the build plans details os teh selected plan script as per the selected attributes. Args: id: ID of the Plan Script. Returns: array of build plans """ uri = self.URI + "/" + id + "/usedby/readonly" return self._client.get(uri)
[ "def", "get_usedby_and_readonly", "(", "self", ",", "id", ")", ":", "uri", "=", "self", ".", "URI", "+", "\"/\"", "+", "id", "+", "\"/usedby/readonly\"", "return", "self", ".", "_client", ".", "get", "(", "uri", ")" ]
Gets the build plans details os teh selected plan script as per the selected attributes. Args: id: ID of the Plan Script. Returns: array of build plans
[ "Gets", "the", "build", "plans", "details", "os", "teh", "selected", "plan", "script", "as", "per", "the", "selected", "attributes", "." ]
python
train
linode/linode_api4-python
linode_api4/objects/nodebalancer.py
https://github.com/linode/linode_api4-python/blob/1dd7318d2aed014c746d48c7957464c57af883ca/linode_api4/objects/nodebalancer.py#L73-L84
def nodes(self): """ This is a special derived_class relationship because NodeBalancerNode is the only api object that requires two parent_ids """ if not hasattr(self, '_nodes'): base_url = "{}/{}".format(NodeBalancerConfig.api_endpoint, NodeBalancerNode.derived_url_path) result = self._client._get_objects(base_url, NodeBalancerNode, model=self, parent_id=(self.id, self.nodebalancer_id)) self._set('_nodes', result) return self._nodes
[ "def", "nodes", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_nodes'", ")", ":", "base_url", "=", "\"{}/{}\"", ".", "format", "(", "NodeBalancerConfig", ".", "api_endpoint", ",", "NodeBalancerNode", ".", "derived_url_path", ")", "result...
This is a special derived_class relationship because NodeBalancerNode is the only api object that requires two parent_ids
[ "This", "is", "a", "special", "derived_class", "relationship", "because", "NodeBalancerNode", "is", "the", "only", "api", "object", "that", "requires", "two", "parent_ids" ]
python
train
jssimporter/python-jss
jss/distribution_point.py
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/distribution_point.py#L692-L713
def delete_with_casper_admin_save(self, pkg): """Delete a pkg from the distribution server. Args: pkg: Can be a jss.Package object, an int ID of a package, or a filename. """ # The POST needs the package ID. if pkg.__class__.__name__ == "Package": package_to_delete = pkg.id elif isinstance(pkg, int): package_to_delete = pkg elif isinstance(pkg, str): package_to_delete = self.connection["jss"].Package(pkg).id else: raise TypeError data_dict = {"username": self.connection["jss"].user, "password": self.connection["jss"].password, "deletedPackageID": package_to_delete} self.connection["jss"].session.post(url=self.connection["delete_url"], data=data_dict)
[ "def", "delete_with_casper_admin_save", "(", "self", ",", "pkg", ")", ":", "# The POST needs the package ID.", "if", "pkg", ".", "__class__", ".", "__name__", "==", "\"Package\"", ":", "package_to_delete", "=", "pkg", ".", "id", "elif", "isinstance", "(", "pkg", ...
Delete a pkg from the distribution server. Args: pkg: Can be a jss.Package object, an int ID of a package, or a filename.
[ "Delete", "a", "pkg", "from", "the", "distribution", "server", "." ]
python
train
chaoss/grimoirelab-sortinghat
sortinghat/cmd/merge.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/cmd/merge.py#L67-L81
def run(self, *args): """Merge two identities. When <from_uuid> or <to_uuid> are empty the command does not have any effect. The same happens when both <from_uuid> and <to_uuid> are the same unique identity. """ params = self.parser.parse_args(args) from_uuid = params.from_uuid to_uuid = params.to_uuid code = self.merge(from_uuid, to_uuid) return code
[ "def", "run", "(", "self", ",", "*", "args", ")", ":", "params", "=", "self", ".", "parser", ".", "parse_args", "(", "args", ")", "from_uuid", "=", "params", ".", "from_uuid", "to_uuid", "=", "params", ".", "to_uuid", "code", "=", "self", ".", "merge...
Merge two identities. When <from_uuid> or <to_uuid> are empty the command does not have any effect. The same happens when both <from_uuid> and <to_uuid> are the same unique identity.
[ "Merge", "two", "identities", "." ]
python
train
i3visio/osrframework
osrframework/domainfy.py
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/domainfy.py#L424-L542
def main(params=None): """ Main function to launch phonefy. The function is created in this way so as to let other applications make use of the full configuration capabilities of the application. The parameters received are used as parsed by this modules `getParser()`. Args: ----- params: A list with the parameters as grabbed by the terminal. It is None when this is called by an entry_point. If it is called by osrf the data is already parsed. Results: -------- list: Returns a list with i3visio entities. """ if params == None: parser = getParser() args = parser.parse_args(params) else: args = params results = [] if not args.quiet: print(general.title(banner.text)) sayingHello = """ Domainfy | Copyright (C) Yaiza Rubio & Félix Brezo (i3visio) 2014-2018 This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you are welcome to redistribute it under certain conditions. For additional info, visit <{}>. """.format(general.LICENSE_URL) print(general.info(sayingHello)) if args.license: general.showLicense() else: # Processing the options returned to remove the "all" option tlds = [] if "all" in args.tlds: for typeTld in TLD.keys(): for tld in TLD[typeTld]: if tld not in args.exclude: tlds.append({ "tld" : tld, "type" : typeTld }) elif "none" in args.tlds: pass else: for typeTld in TLD.keys(): if typeTld in args.tlds: for tld in TLD[typeTld]: if tld not in args.exclude: tlds.append({ "tld" : tld, "type" : typeTld }) for new in args.user_defined: if new not in args.exclude: tlds.append({"tld": new, "type": "user_defined"}) if args.nicks: domains = createDomains(tlds, nicks=args.nicks) else: # nicks_file domains = createDomains(tlds, nicksFile=args.nicks_file) # Showing the execution time... if not args.quiet: startTime= dt.datetime.now() print("{}\tTrying to resolve {} domain(s)…\n".format(str(startTime), general.emphasis(str(len(domains))))) if len(domains) > 200: print(""" Note that a full '-t all' search may take around 3.5 mins. 
If that's too long for you, try narrowing the search using '-t cc' or similar arguments. Otherwise, just wait and keep calm! """) print(general.emphasis("\tPress <Ctrl + C> to stop...\n")) # Perform searches, using different Threads results = performSearch(domains, args.threads, args.whois) # Trying to store the information recovered if args.output_folder != None: if not os.path.exists(args.output_folder): os.makedirs(args.output_folder) # Grabbing the results fileHeader = os.path.join(args.output_folder, args.file_header) for ext in args.extension: # Generating output files general.exportUsufy(results, ext, fileHeader) # Showing the information gathered if requested if not args.quiet: now = dt.datetime.now() print("\n{}\tResults obtained:\n".format(str(now))) try: print(general.success(general.usufyToTextExport(results))) except: print(general.warning("\nSomething happened when exporting the results. The Json will be shown instead:\n")) print(general.warning(json.dumps(results, indent=2))) now = dt.datetime.now() print("\n" + str(now) + "\tYou can find all the information collected in the following files:") for ext in args.extension: # Showing the output files print("\t" + general.emphasis(fileHeader + "." + ext)) # Showing the execution time... if not args.quiet: # Showing the execution time... endTime= dt.datetime.now() print("\n{}\tFinishing execution...\n".format(endTime)) print("Total time used:\t" + general.emphasis(str(endTime-startTime))) print("Average seconds/query:\t" + general.emphasis(str((endTime-startTime).total_seconds()/len(domains))) +" seconds\n") # Urging users to place an issue on Github... print(banner.footer) if params: return results
[ "def", "main", "(", "params", "=", "None", ")", ":", "if", "params", "==", "None", ":", "parser", "=", "getParser", "(", ")", "args", "=", "parser", ".", "parse_args", "(", "params", ")", "else", ":", "args", "=", "params", "results", "=", "[", "]"...
Main function to launch phonefy. The function is created in this way so as to let other applications make use of the full configuration capabilities of the application. The parameters received are used as parsed by this modules `getParser()`. Args: ----- params: A list with the parameters as grabbed by the terminal. It is None when this is called by an entry_point. If it is called by osrf the data is already parsed. Results: -------- list: Returns a list with i3visio entities.
[ "Main", "function", "to", "launch", "phonefy", "." ]
python
train
trustar/trustar-python
trustar/models/page.py
https://github.com/trustar/trustar-python/blob/707d51adc58d68aed7de12a4ca37949cb75cf122/trustar/models/page.py#L91-L114
def to_dict(self, remove_nones=False): """ Creates a dictionary representation of the page. :param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``. :return: A dictionary representation of the page. """ items = [] # attempt to replace each item with its dictionary representation if possible for item in self.items: if hasattr(item, 'to_dict'): items.append(item.to_dict(remove_nones=remove_nones)) else: items.append(item) return { 'items': items, 'pageNumber': self.page_number, 'pageSize': self.page_size, 'totalElements': self.total_elements, 'hasNext': self.has_next }
[ "def", "to_dict", "(", "self", ",", "remove_nones", "=", "False", ")", ":", "items", "=", "[", "]", "# attempt to replace each item with its dictionary representation if possible", "for", "item", "in", "self", ".", "items", ":", "if", "hasattr", "(", "item", ",", ...
Creates a dictionary representation of the page. :param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``. :return: A dictionary representation of the page.
[ "Creates", "a", "dictionary", "representation", "of", "the", "page", "." ]
python
train
cobrateam/splinter
splinter/driver/webdriver/__init__.py
https://github.com/cobrateam/splinter/blob/32f11ff7fd7841b123f157720dcc95740f156ca8/splinter/driver/webdriver/__init__.py#L760-L767
def drag_and_drop(self, droppable): """ Performs drag a element to another elmenet. Currently works only on Chrome driver. """ self.scroll_to() ActionChains(self.parent.driver).drag_and_drop(self._element, droppable._element).perform()
[ "def", "drag_and_drop", "(", "self", ",", "droppable", ")", ":", "self", ".", "scroll_to", "(", ")", "ActionChains", "(", "self", ".", "parent", ".", "driver", ")", ".", "drag_and_drop", "(", "self", ".", "_element", ",", "droppable", ".", "_element", ")...
Performs drag a element to another elmenet. Currently works only on Chrome driver.
[ "Performs", "drag", "a", "element", "to", "another", "elmenet", "." ]
python
train
mpetazzoni/tslib
tslib/__init__.py
https://github.com/mpetazzoni/tslib/blob/b1e15721312c82450f5a3a074d0952fbe85ff849/tslib/__init__.py#L78-L118
def parse_input(s): """Parse the given input and intelligently transform it into an absolute, non-naive, timezone-aware datetime object for the UTC timezone. The input can be specified as a millisecond-precision UTC timestamp (or delta against Epoch), with or without a terminating 'L'. Alternatively, the input can be specified as a human-readable delta string with unit-separated segments, like '24d6h4m500' (24 days, 6 hours, 4 minutes and 500ms), as long as the segments are in descending unit span order.""" if isinstance(s, six.integer_types): s = str(s) elif not isinstance(s, six.string_types): raise ValueError(s) original = s if s[-1:] == 'L': s = s[:-1] sign = {'-': -1, '=': 0, '+': 1}.get(s[0], None) if sign is not None: s = s[1:] ts = 0 for unit in _SORTED_UNITS: pos = s.find(unit[0]) if pos == 0: raise ValueError(original) elif pos > 0: # If we find a unit letter, we're dealing with an offset. Default # to positive offset if a sign wasn't specified. if sign is None: sign = 1 ts += int(s[:pos]) * __timedelta_millis(unit[1]) s = s[min(len(s), pos + 1):] if s: ts += int(s) return date_from_utc_ts(ts) if not sign else \ utc() + sign * delta(milliseconds=ts)
[ "def", "parse_input", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "six", ".", "integer_types", ")", ":", "s", "=", "str", "(", "s", ")", "elif", "not", "isinstance", "(", "s", ",", "six", ".", "string_types", ")", ":", "raise", "ValueErr...
Parse the given input and intelligently transform it into an absolute, non-naive, timezone-aware datetime object for the UTC timezone. The input can be specified as a millisecond-precision UTC timestamp (or delta against Epoch), with or without a terminating 'L'. Alternatively, the input can be specified as a human-readable delta string with unit-separated segments, like '24d6h4m500' (24 days, 6 hours, 4 minutes and 500ms), as long as the segments are in descending unit span order.
[ "Parse", "the", "given", "input", "and", "intelligently", "transform", "it", "into", "an", "absolute", "non", "-", "naive", "timezone", "-", "aware", "datetime", "object", "for", "the", "UTC", "timezone", "." ]
python
train
tcalmant/ipopo
pelix/framework.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/framework.py#L326-L344
def get_registered_services(self): # type: () -> List[ServiceReference] """ Returns this bundle's ServiceReference list for all services it has registered or an empty list The list is valid at the time of the call to this method, however, as the Framework is a very dynamic environment, services can be modified or unregistered at any time. :return: An array of ServiceReference objects :raise BundleException: If the bundle has been uninstalled """ if self._state == Bundle.UNINSTALLED: raise BundleException( "Can't call 'get_registered_services' on an " "uninstalled bundle" ) return self.__framework._registry.get_bundle_registered_services(self)
[ "def", "get_registered_services", "(", "self", ")", ":", "# type: () -> List[ServiceReference]", "if", "self", ".", "_state", "==", "Bundle", ".", "UNINSTALLED", ":", "raise", "BundleException", "(", "\"Can't call 'get_registered_services' on an \"", "\"uninstalled bundle\"",...
Returns this bundle's ServiceReference list for all services it has registered or an empty list The list is valid at the time of the call to this method, however, as the Framework is a very dynamic environment, services can be modified or unregistered at any time. :return: An array of ServiceReference objects :raise BundleException: If the bundle has been uninstalled
[ "Returns", "this", "bundle", "s", "ServiceReference", "list", "for", "all", "services", "it", "has", "registered", "or", "an", "empty", "list" ]
python
train
galactics/beyond
beyond/utils/matrix.py
https://github.com/galactics/beyond/blob/7a7590ff0fd4c0bac3e8e383ecca03caa98e5742/beyond/utils/matrix.py#L38-L49
def rot3(theta): """ Args: theta (float): Angle in radians Return: Rotation matrix of angle theta around the Z-axis """ return np.array([ [np.cos(theta), np.sin(theta), 0], [-np.sin(theta), np.cos(theta), 0], [0, 0, 1] ])
[ "def", "rot3", "(", "theta", ")", ":", "return", "np", ".", "array", "(", "[", "[", "np", ".", "cos", "(", "theta", ")", ",", "np", ".", "sin", "(", "theta", ")", ",", "0", "]", ",", "[", "-", "np", ".", "sin", "(", "theta", ")", ",", "np...
Args: theta (float): Angle in radians Return: Rotation matrix of angle theta around the Z-axis
[ "Args", ":", "theta", "(", "float", ")", ":", "Angle", "in", "radians", "Return", ":", "Rotation", "matrix", "of", "angle", "theta", "around", "the", "Z", "-", "axis" ]
python
train
jkenlooper/chill
src/chill/database.py
https://github.com/jkenlooper/chill/blob/35360c17c2a3b769ecb5406c6dabcf4cc70bd76f/src/chill/database.py#L89-L95
def select_node(**kw): """ Select node by id. """ with current_app.app_context(): result = db.execute(text(fetch_query_string('select_node_from_id.sql')), **kw).fetchall() return result
[ "def", "select_node", "(", "*", "*", "kw", ")", ":", "with", "current_app", ".", "app_context", "(", ")", ":", "result", "=", "db", ".", "execute", "(", "text", "(", "fetch_query_string", "(", "'select_node_from_id.sql'", ")", ")", ",", "*", "*", "kw", ...
Select node by id.
[ "Select", "node", "by", "id", "." ]
python
train
revelc/pyaccumulo
pyaccumulo/proxy/AccumuloProxy.py
https://github.com/revelc/pyaccumulo/blob/8adcf535bb82ba69c749efce785c9efc487e85de/pyaccumulo/proxy/AccumuloProxy.py#L1503-L1510
def listIterators(self, login, tableName): """ Parameters: - login - tableName """ self.send_listIterators(login, tableName) return self.recv_listIterators()
[ "def", "listIterators", "(", "self", ",", "login", ",", "tableName", ")", ":", "self", ".", "send_listIterators", "(", "login", ",", "tableName", ")", "return", "self", ".", "recv_listIterators", "(", ")" ]
Parameters: - login - tableName
[ "Parameters", ":", "-", "login", "-", "tableName" ]
python
train
UCL-INGI/INGInious
inginious/frontend/pages/lti.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/lti.py#L199-L218
def _find_realname(self, post_input): """ Returns the most appropriate name to identify the user """ # First, try the full name if "lis_person_name_full" in post_input: return post_input["lis_person_name_full"] if "lis_person_name_given" in post_input and "lis_person_name_family" in post_input: return post_input["lis_person_name_given"] + post_input["lis_person_name_family"] # Then the email if "lis_person_contact_email_primary" in post_input: return post_input["lis_person_contact_email_primary"] # Then only part of the full name if "lis_person_name_family" in post_input: return post_input["lis_person_name_family"] if "lis_person_name_given" in post_input: return post_input["lis_person_name_given"] return post_input["user_id"]
[ "def", "_find_realname", "(", "self", ",", "post_input", ")", ":", "# First, try the full name", "if", "\"lis_person_name_full\"", "in", "post_input", ":", "return", "post_input", "[", "\"lis_person_name_full\"", "]", "if", "\"lis_person_name_given\"", "in", "post_input",...
Returns the most appropriate name to identify the user
[ "Returns", "the", "most", "appropriate", "name", "to", "identify", "the", "user" ]
python
train
mtien/PeptideBuilder
PeptideBuilder/PeptideBuilder.py
https://github.com/mtien/PeptideBuilder/blob/7b1ddab5199432c1aabc371a34ec42dd386dfa6f/PeptideBuilder/PeptideBuilder.py#L439-L477
def makeAsp(segID, N, CA, C, O, geo): '''Creates an Aspartic Acid residue''' ##R-Group CA_CB_length=geo.CA_CB_length C_CA_CB_angle=geo.C_CA_CB_angle N_C_CA_CB_diangle=geo.N_C_CA_CB_diangle CB_CG_length=geo.CB_CG_length CA_CB_CG_angle=geo.CA_CB_CG_angle N_CA_CB_CG_diangle=geo.N_CA_CB_CG_diangle CG_OD1_length=geo.CG_OD1_length CB_CG_OD1_angle=geo.CB_CG_OD1_angle CA_CB_CG_OD1_diangle=geo.CA_CB_CG_OD1_diangle CG_OD2_length=geo.CG_OD2_length CB_CG_OD2_angle=geo.CB_CG_OD2_angle CA_CB_CG_OD2_diangle=geo.CA_CB_CG_OD2_diangle carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle) CB= Atom("CB", carbon_b, 0.0 , 1.0, " "," CB", 0,"C") carbon_g= calculateCoordinates(N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle) CG= Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C") oxygen_d1= calculateCoordinates(CA, CB, CG, CG_OD1_length, CB_CG_OD1_angle, CA_CB_CG_OD1_diangle) OD1= Atom("OD1", oxygen_d1, 0.0, 1.0, " ", " OD1", 0, "O") oxygen_d2= calculateCoordinates(CA, CB, CG, CG_OD2_length, CB_CG_OD2_angle, CA_CB_CG_OD2_diangle) OD2= Atom("OD2", oxygen_d2, 0.0, 1.0, " ", " OD2", 0, "O") ##Create Residue Data Structure res= Residue((' ', segID, ' '), "ASP", ' ') res.add(N) res.add(CA) res.add(C) res.add(O) res.add(CB) res.add(CG) res.add(OD1) res.add(OD2) return res
[ "def", "makeAsp", "(", "segID", ",", "N", ",", "CA", ",", "C", ",", "O", ",", "geo", ")", ":", "##R-Group", "CA_CB_length", "=", "geo", ".", "CA_CB_length", "C_CA_CB_angle", "=", "geo", ".", "C_CA_CB_angle", "N_C_CA_CB_diangle", "=", "geo", ".", "N_C_CA_...
Creates an Aspartic Acid residue
[ "Creates", "an", "Aspartic", "Acid", "residue" ]
python
train
hyperledger/sawtooth-core
validator/sawtooth_validator/state/client_handlers.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/state/client_handlers.py#L325-L337
def _validate_namespace(self, namespace): """Validates a namespace, raising a ResponseFailed error if invalid. Args: state_root (str): The state_root to validate Raises: ResponseFailed: The state_root was invalid, and a status of INVALID_ROOT will be sent with the response. """ if self._namespace_regex.fullmatch(namespace) is None: LOGGER.debug('Invalid namespace: %s', namespace) raise _ResponseFailed(self._status.INVALID_ADDRESS)
[ "def", "_validate_namespace", "(", "self", ",", "namespace", ")", ":", "if", "self", ".", "_namespace_regex", ".", "fullmatch", "(", "namespace", ")", "is", "None", ":", "LOGGER", ".", "debug", "(", "'Invalid namespace: %s'", ",", "namespace", ")", "raise", ...
Validates a namespace, raising a ResponseFailed error if invalid. Args: state_root (str): The state_root to validate Raises: ResponseFailed: The state_root was invalid, and a status of INVALID_ROOT will be sent with the response.
[ "Validates", "a", "namespace", "raising", "a", "ResponseFailed", "error", "if", "invalid", "." ]
python
train
SheffieldML/GPy
GPy/plotting/matplot_dep/maps.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/plotting/matplot_dep/maps.py#L128-L140
def plot_string_match(sf,regex,field,**kwargs): """ Plot the geometry of a shapefile whose fields match a regular expression given :param sf: shapefile :type sf: shapefile object :regex: regular expression to match :type regex: string :field: field number to be matched with the regex :type field: integer """ index,shape_records = string_match(sf,regex,field) plot(shape_records,**kwargs)
[ "def", "plot_string_match", "(", "sf", ",", "regex", ",", "field", ",", "*", "*", "kwargs", ")", ":", "index", ",", "shape_records", "=", "string_match", "(", "sf", ",", "regex", ",", "field", ")", "plot", "(", "shape_records", ",", "*", "*", "kwargs",...
Plot the geometry of a shapefile whose fields match a regular expression given :param sf: shapefile :type sf: shapefile object :regex: regular expression to match :type regex: string :field: field number to be matched with the regex :type field: integer
[ "Plot", "the", "geometry", "of", "a", "shapefile", "whose", "fields", "match", "a", "regular", "expression", "given" ]
python
train
digidotcom/python-devicecloud
devicecloud/devicecore.py
https://github.com/digidotcom/python-devicecloud/blob/32529684a348a7830a269c32601604c78036bcb8/devicecloud/devicecore.py#L576-L588
def add_to_group(self, group_path): """Add a device to a group, if the group doesn't exist it is created :param group_path: Path or "name" of the group """ if self.get_group_path() != group_path: post_data = ADD_GROUP_TEMPLATE.format(connectware_id=self.get_connectware_id(), group_path=group_path) self._conn.put('/ws/DeviceCore', post_data) # Invalidate cache self._device_json = None
[ "def", "add_to_group", "(", "self", ",", "group_path", ")", ":", "if", "self", ".", "get_group_path", "(", ")", "!=", "group_path", ":", "post_data", "=", "ADD_GROUP_TEMPLATE", ".", "format", "(", "connectware_id", "=", "self", ".", "get_connectware_id", "(", ...
Add a device to a group, if the group doesn't exist it is created :param group_path: Path or "name" of the group
[ "Add", "a", "device", "to", "a", "group", "if", "the", "group", "doesn", "t", "exist", "it", "is", "created" ]
python
train
markchil/gptools
gptools/utils.py
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1520-L1570
def incomplete_bell_poly(n, k, x): r"""Recursive evaluation of the incomplete Bell polynomial :math:`B_{n, k}(x)`. Evaluates the incomplete Bell polynomial :math:`B_{n, k}(x_1, x_2, \dots, x_{n-k+1})`, also known as the partial Bell polynomial or the Bell polynomial of the second kind. This polynomial is useful in the evaluation of (the univariate) Faa di Bruno's formula which generalizes the chain rule to higher order derivatives. The implementation here is based on the implementation in: :py:func:`sympy.functions.combinatorial.numbers.bell._bell_incomplete_poly` Following that function's documentation, the polynomial is computed according to the recurrence formula: .. math:: B_{n, k}(x_1, x_2, \dots, x_{n-k+1}) = \sum_{m=1}^{n-k+1}x_m\binom{n-1}{m-1}B_{n-m, k-1}(x_1, x_2, \dots, x_{n-m-k}) | The end cases are: | :math:`B_{0, 0} = 1` | :math:`B_{n, 0} = 0` for :math:`n \ge 1` | :math:`B_{0, k} = 0` for :math:`k \ge 1` Parameters ---------- n : scalar int The first subscript of the polynomial. k : scalar int The second subscript of the polynomial. x : :py:class:`Array` of floats, (`p`, `n` - `k` + 1) `p` sets of `n` - `k` + 1 points to use as the arguments to :math:`B_{n,k}`. The second dimension can be longer than required, in which case the extra entries are silently ignored (this facilitates recursion without needing to subset the array `x`). Returns ------- result : :py:class:`Array`, (`p`,) Incomplete Bell polynomial evaluated at the desired values. """ if n == 0 and k == 0: return scipy.ones(x.shape[0], dtype=float) elif k == 0 and n >= 1: return scipy.zeros(x.shape[0], dtype=float) elif n == 0 and k >= 1: return scipy.zeros(x.shape[0], dtype=float) else: result = scipy.zeros(x.shape[0], dtype=float) for m in xrange(0, n - k + 1): result += x[:, m] * scipy.special.binom(n - 1, m) * incomplete_bell_poly(n - (m + 1), k - 1, x) return result
[ "def", "incomplete_bell_poly", "(", "n", ",", "k", ",", "x", ")", ":", "if", "n", "==", "0", "and", "k", "==", "0", ":", "return", "scipy", ".", "ones", "(", "x", ".", "shape", "[", "0", "]", ",", "dtype", "=", "float", ")", "elif", "k", "=="...
r"""Recursive evaluation of the incomplete Bell polynomial :math:`B_{n, k}(x)`. Evaluates the incomplete Bell polynomial :math:`B_{n, k}(x_1, x_2, \dots, x_{n-k+1})`, also known as the partial Bell polynomial or the Bell polynomial of the second kind. This polynomial is useful in the evaluation of (the univariate) Faa di Bruno's formula which generalizes the chain rule to higher order derivatives. The implementation here is based on the implementation in: :py:func:`sympy.functions.combinatorial.numbers.bell._bell_incomplete_poly` Following that function's documentation, the polynomial is computed according to the recurrence formula: .. math:: B_{n, k}(x_1, x_2, \dots, x_{n-k+1}) = \sum_{m=1}^{n-k+1}x_m\binom{n-1}{m-1}B_{n-m, k-1}(x_1, x_2, \dots, x_{n-m-k}) | The end cases are: | :math:`B_{0, 0} = 1` | :math:`B_{n, 0} = 0` for :math:`n \ge 1` | :math:`B_{0, k} = 0` for :math:`k \ge 1` Parameters ---------- n : scalar int The first subscript of the polynomial. k : scalar int The second subscript of the polynomial. x : :py:class:`Array` of floats, (`p`, `n` - `k` + 1) `p` sets of `n` - `k` + 1 points to use as the arguments to :math:`B_{n,k}`. The second dimension can be longer than required, in which case the extra entries are silently ignored (this facilitates recursion without needing to subset the array `x`). Returns ------- result : :py:class:`Array`, (`p`,) Incomplete Bell polynomial evaluated at the desired values.
[ "r", "Recursive", "evaluation", "of", "the", "incomplete", "Bell", "polynomial", ":", "math", ":", "B_", "{", "n", "k", "}", "(", "x", ")", ".", "Evaluates", "the", "incomplete", "Bell", "polynomial", ":", "math", ":", "B_", "{", "n", "k", "}", "(", ...
python
train
nitely/django-djconfig
setup.py
https://github.com/nitely/django-djconfig/blob/5e79a048ef5c9529075cad947b0c309115035d7e/setup.py#L14-L20
def get_version(package): """Get version without importing the lib""" with io.open(os.path.join(BASE_DIR, package, '__init__.py'), encoding='utf-8') as fh: return [ l.split('=', 1)[1].strip().strip("'").strip('"') for l in fh.readlines() if '__version__' in l][0]
[ "def", "get_version", "(", "package", ")", ":", "with", "io", ".", "open", "(", "os", ".", "path", ".", "join", "(", "BASE_DIR", ",", "package", ",", "'__init__.py'", ")", ",", "encoding", "=", "'utf-8'", ")", "as", "fh", ":", "return", "[", "l", "...
Get version without importing the lib
[ "Get", "version", "without", "importing", "the", "lib" ]
python
train
ioos/compliance-checker
compliance_checker/acdd.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/acdd.py#L397-L451
def _check_total_z_extents(self, ds, z_variable): ''' Check the entire array of Z for minimum and maximum and compare that to the vertical extents defined in the global attributes :param netCDF4.Dataset ds: An open netCDF dataset :param str z_variable: Name of the variable representing the Z-Axis ''' msgs = [] total = 2 try: vert_min = float(ds.geospatial_vertical_min) except ValueError: msgs.append('geospatial_vertical_min cannot be cast to float') try: vert_max = float(ds.geospatial_vertical_max) except ValueError: msgs.append('geospatial_vertical_max cannot be cast to float') if len(msgs) > 0: return Result(BaseCheck.MEDIUM, (0, total), 'geospatial_vertical_extents_match', msgs) zvalue = ds.variables[z_variable][:] # If the array has fill values, which is allowed in the case of point # features if hasattr(zvalue, 'mask'): zvalue = zvalue[~zvalue.mask] if zvalue.size == 0: msgs.append("Cannot compare geospatial vertical extents " "against min/max of data, as non-masked data " "length is zero") return Result(BaseCheck.MEDIUM, (0, total), 'geospatial_vertical_extents_match', msgs) else: zmin = zvalue.min() zmax = zvalue.max() if not np.isclose(vert_min, zmin): msgs.append("geospatial_vertical_min != min(%s) values, %s != %s" % ( z_variable, vert_min, zmin )) if not np.isclose(vert_max, zmax): msgs.append("geospatial_vertical_max != max(%s) values, %s != %s" % ( z_variable, vert_min, zmax )) return Result(BaseCheck.MEDIUM, (total - len(msgs), total), 'geospatial_vertical_extents_match', msgs)
[ "def", "_check_total_z_extents", "(", "self", ",", "ds", ",", "z_variable", ")", ":", "msgs", "=", "[", "]", "total", "=", "2", "try", ":", "vert_min", "=", "float", "(", "ds", ".", "geospatial_vertical_min", ")", "except", "ValueError", ":", "msgs", "."...
Check the entire array of Z for minimum and maximum and compare that to the vertical extents defined in the global attributes :param netCDF4.Dataset ds: An open netCDF dataset :param str z_variable: Name of the variable representing the Z-Axis
[ "Check", "the", "entire", "array", "of", "Z", "for", "minimum", "and", "maximum", "and", "compare", "that", "to", "the", "vertical", "extents", "defined", "in", "the", "global", "attributes" ]
python
train
bram85/topydo
topydo/lib/Config.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/lib/Config.py#L209-L243
def colors(self, p_hint_possible=True):
    """
    Returns 0, 16 or 256 representing the number of colors that should
    be used in the output.

    A hint can be passed whether the device that will output the text
    supports colors.
    """
    color_count = {
        'false': 0,
        'no': 0,
        '0': 0,
        '1': 16,
        'true': 16,
        'yes': 16,
        '16': 16,
        '256': 256,
    }

    try:
        forced = self.cp.get('topydo', 'force_colors') == '1'
    except ValueError:
        forced = self.defaults['topydo']['force_colors'] == '1'

    try:
        configured = self.cp.get('topydo', 'colors').lower()  # pylint: disable=no-member
        colors = color_count[configured]
    except ValueError:
        colors = color_count[self.defaults['topydo']['colors'].lower()]  # pylint: disable=no-member
    except KeyError:
        # for invalid values or 'auto'
        colors = 16 if p_hint_possible else 0

    if forced or p_hint_possible:
        return colors
    # disable colors when no colors are enforced on the commandline and
    # color support is determined automatically
    return 0
[ "def", "colors", "(", "self", ",", "p_hint_possible", "=", "True", ")", ":", "lookup", "=", "{", "'false'", ":", "0", ",", "'no'", ":", "0", ",", "'0'", ":", "0", ",", "'1'", ":", "16", ",", "'true'", ":", "16", ",", "'yes'", ":", "16", ",", ...
Returns 0, 16 or 256 representing the number of colors that should be used in the output. A hint can be passed whether the device that will output the text supports colors.
[ "Returns", "0", "16", "or", "256", "representing", "the", "number", "of", "colors", "that", "should", "be", "used", "in", "the", "output", "." ]
python
train
shaunduncan/nosqlite
nosqlite.py
https://github.com/shaunduncan/nosqlite/blob/3033c029b7c8290c66a8b36dc512e560505d4c85/nosqlite.py#L171-L180
def _load(self, id, data): """ Loads a JSON document taking care to apply the document id """ if isinstance(data, bytes): # pragma: no cover Python >= 3.0 data = data.decode('utf-8') document = json.loads(data) document['_id'] = id return document
[ "def", "_load", "(", "self", ",", "id", ",", "data", ")", ":", "if", "isinstance", "(", "data", ",", "bytes", ")", ":", "# pragma: no cover Python >= 3.0", "data", "=", "data", ".", "decode", "(", "'utf-8'", ")", "document", "=", "json", ".", "loads", ...
Loads a JSON document taking care to apply the document id
[ "Loads", "a", "JSON", "document", "taking", "care", "to", "apply", "the", "document", "id" ]
python
train
Celeo/Pycord
pycord/__init__.py
https://github.com/Celeo/Pycord/blob/15c38e39b508c89c35f7f6d7009fe8e9f161a94e/pycord/__init__.py#L424-L440
def disconnect_from_websocket(self):
    """Disconnects from the websocket

    Stops and joins the keep-alive thread first, then closes the
    websocket connection itself, logging each step.

    Args:
        None

    Returns:
        None
    """
    self.logger.warning('Disconnecting from websocket')
    self.logger.info('Stopping keep alive thread')
    self._ws_keep_alive.stop()
    self._ws_keep_alive.join()
    self.logger.info('Stopped keep alive thread')
    try:
        self.logger.warning('Disconnecting from websocket')
        self._ws.close()
        self.logger.info('Closed websocket connection')
    except Exception:
        # BUG FIX: was a bare ``except:``, which would also swallow
        # SystemExit/KeyboardInterrupt; only ordinary errors from closing
        # an already-dead socket should be ignored here.
        self.logger.debug('Couldn\'t terminate previous websocket connection')
[ "def", "disconnect_from_websocket", "(", "self", ")", ":", "self", ".", "logger", ".", "warning", "(", "'Disconnecting from websocket'", ")", "self", ".", "logger", ".", "info", "(", "'Stopping keep alive thread'", ")", "self", ".", "_ws_keep_alive", ".", "stop", ...
Disconnects from the websocket Args: None
[ "Disconnects", "from", "the", "websocket" ]
python
train
ndf-zz/asfv1
asfv1.py
https://github.com/ndf-zz/asfv1/blob/c18f940d7ee86b14e6b201e6d8a4b71e3a57c34a/asfv1.py#L803-L808
def parseerror(self, msg, line=None):
    """Emit parse error and abort assembly."""
    if line is None:
        line = self.sline
    error('parse error: {} on line {}'.format(msg, line))
    sys.exit(-2)
[ "def", "parseerror", "(", "self", ",", "msg", ",", "line", "=", "None", ")", ":", "if", "line", "is", "None", ":", "line", "=", "self", ".", "sline", "error", "(", "'parse error: '", "+", "msg", "+", "' on line {}'", ".", "format", "(", "line", ")", ...
Emit parse error and abort assembly.
[ "Emit", "parse", "error", "and", "abort", "assembly", "." ]
python
train
lpantano/seqcluster
seqcluster/stats.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/stats.py#L12-L22
def stats(args):
    """Create stats from the analysis
    """
    logger.info("Reading sequeces")
    sequences = parse_ma_file(args.ma)
    logger.info("Get sequences from sam")
    aligned = _read_sam(args.sam)
    in_json, in_db = _read_json(args.json)
    summary = _summarise_sam(sequences, aligned, in_json, in_db)
    _write_suma(summary, os.path.join(args.out, "stats_align.dat"))
    logger.info("Done")
[ "def", "stats", "(", "args", ")", ":", "logger", ".", "info", "(", "\"Reading sequeces\"", ")", "data", "=", "parse_ma_file", "(", "args", ".", "ma", ")", "logger", ".", "info", "(", "\"Get sequences from sam\"", ")", "is_align", "=", "_read_sam", "(", "ar...
Create stats from the analysis
[ "Create", "stats", "from", "the", "analysis" ]
python
train
bspaans/python-mingus
mingus/core/intervals.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/core/intervals.py#L292-L302
def invert(interval):
    """Invert an interval, returning a new reversed list.

    The previous implementation reversed the list in place, copied it,
    then reversed it back — briefly corrupting the caller's list, which
    is unsafe if that list is shared.  This version never mutates the
    input.

    Example:
    >>> invert(['C', 'E'])
    ['E', 'C']
    """
    res = list(interval)
    res.reverse()
    return res
[ "def", "invert", "(", "interval", ")", ":", "interval", ".", "reverse", "(", ")", "res", "=", "list", "(", "interval", ")", "interval", ".", "reverse", "(", ")", "return", "res" ]
Invert an interval. Example: >>> invert(['C', 'E']) ['E', 'C']
[ "Invert", "an", "interval", "." ]
python
train
manolomartinez/greg
greg/classes.py
https://github.com/manolomartinez/greg/blob/63bb24197c13087a01963ac439cd8380007d9467/greg/classes.py#L52-L58
def list_feeds(self):
    """
    Output a list of all feed names
    """
    parser = configparser.ConfigParser()
    parser.read(self.data_filename)
    return parser.sections()
[ "def", "list_feeds", "(", "self", ")", ":", "feeds", "=", "configparser", ".", "ConfigParser", "(", ")", "feeds", ".", "read", "(", "self", ".", "data_filename", ")", "return", "feeds", ".", "sections", "(", ")" ]
Output a list of all feed names
[ "Output", "a", "list", "of", "all", "feed", "names" ]
python
train
wdecoster/nanoget
nanoget/extraction_functions.py
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L200-L223
def extract_from_bam(params):
    """Extracts metrics from bam.

    Worker function per chromosome
    loop over a bam file and create list with tuples containing metrics:
    -qualities
    -aligned qualities
    -lengths
    -aligned lengths
    -mapping qualities
    -edit distances to the reference genome scaled by read length
    """
    bam_path, chromosome = params
    handle = pysam.AlignmentFile(bam_path, "rb")
    metrics = []
    for read in handle.fetch(reference=chromosome, multiple_iterators=True):
        # Secondary alignments would double-count the same read.
        if read.is_secondary:
            continue
        metrics.append((read.query_name,
                        nanomath.ave_qual(read.query_qualities),
                        nanomath.ave_qual(read.query_alignment_qualities),
                        read.query_length,
                        read.query_alignment_length,
                        read.mapping_quality,
                        get_pID(read)))
    return metrics
[ "def", "extract_from_bam", "(", "params", ")", ":", "bam", ",", "chromosome", "=", "params", "samfile", "=", "pysam", ".", "AlignmentFile", "(", "bam", ",", "\"rb\"", ")", "return", "[", "(", "read", ".", "query_name", ",", "nanomath", ".", "ave_qual", "...
Extracts metrics from bam. Worker function per chromosome loop over a bam file and create list with tuples containing metrics: -qualities -aligned qualities -lengths -aligned lengths -mapping qualities -edit distances to the reference genome scaled by read length
[ "Extracts", "metrics", "from", "bam", "." ]
python
train
Erotemic/utool
utool/util_class.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_class.py#L705-L785
def reload_class(self, verbose=True, reload_module=True):
    """ special class reloading function
    This function is often injected as rrr of classes

    Reloads the module owning every class in this instance's inheritance
    chain (base classes first), then rebinds the freshly reloaded class's
    methods onto ``self`` via ``reload_class_methods``.  Optional
    ``_on_reload`` / ``_initialize_self`` hooks on the instance are called
    before and after the reload.

    Args:
        verbose (bool): print progress messages (VERBOSE_CLASS forces on)
        reload_module (bool): when False, skip reloading the owning
            modules and only rebind methods

    Raises:
        Exception: anything raised while reloading is printed via
            ``ut.printex`` and then re-raised.
    """
    import utool as ut
    verbose = verbose or VERBOSE_CLASS
    classname = self.__class__.__name__
    try:
        modname = self.__class__.__module__
        if verbose:
            print('[class] reloading ' + classname + ' from ' + modname)
        # --HACK--
        # Give the instance a chance to tear down state before the reload.
        if hasattr(self, '_on_reload'):
            if verbose > 1:
                print('[class] calling _on_reload for ' + classname)
            self._on_reload()
        elif verbose > 1:
            print('[class] ' + classname +
                  ' does not have an _on_reload function')

        # Do for all inheriting classes
        # Collect the full base-class chain, bases before subclasses, so
        # parents are reloaded before the classes that derive from them.
        # (object itself is excluded.)
        def find_base_clases(_class, find_base_clases=None):
            class_list = []
            for _baseclass in _class.__bases__:
                parents = find_base_clases(_baseclass, find_base_clases)
                class_list.extend(parents)
            if _class is not object:
                class_list.append(_class)
            return class_list

        head_class = self.__class__
        # Determine if parents need reloading
        class_list = find_base_clases(head_class, find_base_clases)
        # HACK
        ignore = {HashComparable2}
        class_list = [_class for _class in class_list
                      if _class not in ignore]
        for _class in class_list:
            if verbose:
                print('[class] reloading parent ' + _class.__name__ +
                      ' from ' + _class.__module__)
            if _class.__module__ == '__main__':
                # Attempt to find the module that is the main module
                # This may be very hacky and potentially break
                main_module_ = sys.modules[_class.__module__]
                main_modname = ut.get_modname_from_modpath(main_module_.__file__)
                module_ = sys.modules[main_modname]
            else:
                module_ = sys.modules[_class.__module__]
            # Prefer the module's own rrr() reload hook when it has one;
            # fall back to a raw imp.reload otherwise.
            if hasattr(module_, 'rrr'):
                if reload_module:
                    module_.rrr(verbose=verbose)
            else:
                if reload_module:
                    import imp
                    if verbose:
                        print('[class] reloading ' + _class.__module__ + ' with imp')
                    try:
                        imp.reload(module_)
                    except (ImportError, AttributeError):
                        print('[class] fallback reloading ' + _class.__module__ +
                              ' with imp')
                        # one last thing to try. probably used
                        # ut.import_module_from_fpath when importing this
                        # module
                        imp.load_source(module_.__name__, module_.__file__)
            # Reset class attributes
            # Look the class back up on the (possibly reloaded) module and
            # rebind its methods onto this instance.
            _newclass = getattr(module_, _class.__name__)
            reload_class_methods(self, _newclass, verbose=verbose)

        # --HACK--
        # TODO: handle injected definitions
        if hasattr(self, '_initialize_self'):
            if verbose > 1:
                print('[class] calling _initialize_self for ' + classname)
            self._initialize_self()
        elif verbose > 1:
            print('[class] ' + classname +
                  ' does not have an _initialize_self function')
    except Exception as ex:
        ut.printex(ex, 'Error Reloading Class', keys=[
            'modname',
            'module',
            'class_',
            'class_list',
            'self', ])
        raise
[ "def", "reload_class", "(", "self", ",", "verbose", "=", "True", ",", "reload_module", "=", "True", ")", ":", "import", "utool", "as", "ut", "verbose", "=", "verbose", "or", "VERBOSE_CLASS", "classname", "=", "self", ".", "__class__", ".", "__name__", "try...
special class reloading function This function is often injected as rrr of classes
[ "special", "class", "reloading", "function", "This", "function", "is", "often", "injected", "as", "rrr", "of", "classes" ]
python
train
pytroll/trollimage
trollimage/colormap.py
https://github.com/pytroll/trollimage/blob/d35a7665ad475ff230e457085523e21f2cd3f454/trollimage/colormap.py#L120-L127
def set_range(self, min_val, max_val):
    """Set the range of the colormap to [*min_val*, *max_val*]
    """
    if min_val > max_val:
        min_val, max_val = max_val, min_val
    # Normalize values to [0, 1], then scale onto the requested range.
    span = self.values.max() - self.values.min()
    normalized = (self.values * 1.0 - self.values.min()) / span
    self.values = normalized * (max_val - min_val) + min_val
[ "def", "set_range", "(", "self", ",", "min_val", ",", "max_val", ")", ":", "if", "min_val", ">", "max_val", ":", "max_val", ",", "min_val", "=", "min_val", ",", "max_val", "self", ".", "values", "=", "(", "(", "(", "self", ".", "values", "*", "1.0", ...
Set the range of the colormap to [*min_val*, *max_val*]
[ "Set", "the", "range", "of", "the", "colormap", "to", "[", "*", "min_val", "*", "*", "max_val", "*", "]" ]
python
train
pyamg/pyamg
pyamg/relaxation/chebyshev.py
https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/chebyshev.py#L10-L53
def chebyshev_polynomial_coefficients(a, b, degree):
    """Chebyshev polynomial coefficients for the interval [a,b].

    Parameters
    ----------
    a,b : float
        The left and right endpoints of the interval.
    degree : int
        Degree of desired Chebyshev polynomial

    Returns
    -------
    Coefficients of the Chebyshev polynomial C(t) with minimum
    magnitude on the interval [a,b] such that C(0) = 1.0.
    The coefficients are returned in descending order.

    Notes
    -----
    a,b typically represent the interval of the spectrum for some
    matrix that you wish to damp with a Chebyshev smoother.

    Examples
    --------
    >>> from pyamg.relaxation.chebyshev import chebyshev_polynomial_coefficients
    >>> print chebyshev_polynomial_coefficients(1.0,2.0, 3)
    [-0.32323232  1.45454545 -2.12121212  1.        ]
    """
    if a >= b or a <= 0:
        raise ValueError('invalid interval [%s,%s]' % (a, b))

    # Chebyshev roots on the canonical interval [-1, 1] ...
    roots_std = np.cos(np.pi * (np.arange(degree) + 0.5) / degree)

    # ... mapped affinely onto [a, b]
    roots_ab = 0.5 * (b - a) * (1 + roots_std) + a

    # Monic polynomial with those roots, normalized so that C(0) == 1.0
    coefficients = np.poly(roots_ab)
    coefficients /= np.polyval(coefficients, 0)

    return coefficients
[ "def", "chebyshev_polynomial_coefficients", "(", "a", ",", "b", ",", "degree", ")", ":", "if", "a", ">=", "b", "or", "a", "<=", "0", ":", "raise", "ValueError", "(", "'invalid interval [%s,%s]'", "%", "(", "a", ",", "b", ")", ")", "# Chebyshev roots for th...
Chebyshev polynomial coefficients for the interval [a,b]. Parameters ---------- a,b : float The left and right endpoints of the interval. degree : int Degree of desired Chebyshev polynomial Returns ------- Coefficients of the Chebyshev polynomial C(t) with minimum magnitude on the interval [a,b] such that C(0) = 1.0. The coefficients are returned in descending order. Notes ----- a,b typically represent the interval of the spectrum for some matrix that you wish to damp with a Chebyshev smoother. Examples -------- >>> from pyamg.relaxation.chebyshev import chebyshev_polynomial_coefficients >>> print chebyshev_polynomial_coefficients(1.0,2.0, 3) [-0.32323232 1.45454545 -2.12121212 1. ]
[ "Chebyshev", "polynomial", "coefficients", "for", "the", "interval", "[", "a", "b", "]", "." ]
python
train
projectshift/shift-boiler
boiler/cli/db.py
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/cli/db.py#L78-L90
def revision(revision, path, branch_label, splice, head, sql, autogenerate, message):
    """ Create new revision file """
    options = dict(
        config=get_config(),
        rev_id=revision,
        version_path=path,
        branch_label=branch_label,
        splice=splice,
        head=head,
        sql=sql,
        autogenerate=autogenerate,
        message=message,
    )
    alembic_command.revision(**options)
[ "def", "revision", "(", "revision", ",", "path", ",", "branch_label", ",", "splice", ",", "head", ",", "sql", ",", "autogenerate", ",", "message", ")", ":", "alembic_command", ".", "revision", "(", "config", "=", "get_config", "(", ")", ",", "rev_id", "=...
Create new revision file
[ "Create", "new", "revision", "file" ]
python
train
django-leonardo/django-leonardo
leonardo/conf/base.py
https://github.com/django-leonardo/django-leonardo/blob/4b933e1792221a13b4028753d5f1d3499b0816d4/leonardo/conf/base.py#L21-L24
def get_property(self, key):
    """Expect Django Conf property"""
    conf_key = DJANGO_CONF[key]
    default = CONF_SPEC[conf_key]
    return getattr(self, conf_key, default)
[ "def", "get_property", "(", "self", ",", "key", ")", ":", "_key", "=", "DJANGO_CONF", "[", "key", "]", "return", "getattr", "(", "self", ",", "_key", ",", "CONF_SPEC", "[", "_key", "]", ")" ]
Expect Django Conf property
[ "Expect", "Django", "Conf", "property" ]
python
train
juju/charm-helpers
charmhelpers/contrib/charmsupport/nrpe.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/charmsupport/nrpe.py#L341-L352
def get_nagios_unit_name(relation_name='nrpe-external-master'):
    """
    Return the nagios unit name prepended with host_context if needed

    :param str relation_name: Name of relation nrpe sub joined to
    """
    host_context = get_nagios_hostcontext(relation_name)
    if not host_context:
        return local_unit()
    return "%s:%s" % (host_context, local_unit())
[ "def", "get_nagios_unit_name", "(", "relation_name", "=", "'nrpe-external-master'", ")", ":", "host_context", "=", "get_nagios_hostcontext", "(", "relation_name", ")", "if", "host_context", ":", "unit", "=", "\"%s:%s\"", "%", "(", "host_context", ",", "local_unit", ...
Return the nagios unit name prepended with host_context if needed :param str relation_name: Name of relation nrpe sub joined to
[ "Return", "the", "nagios", "unit", "name", "prepended", "with", "host_context", "if", "needed" ]
python
train
lsst-epo/vela
astropixie/astropixie/data.py
https://github.com/lsst-epo/vela/blob/8e17ebec509be5c3cc2063f4645dfe9e26b49c18/astropixie/astropixie/data.py#L296-L323
def pprint(arr, columns=('temperature', 'luminosity'), names=('Temperature (Kelvin)', 'Luminosity (solar units)'), max_rows=32, precision=2): """ Create a pandas DataFrame from a numpy ndarray. By default use temp and lum with max rows of 32 and precision of 2. arr - An numpy.ndarray. columns - The columns to include in the pandas DataFrame. Defaults to temperature and luminosity. names - The column names for the pandas DataFrame. Defaults to Temperature and Luminosity. max_rows - If max_rows is an integer then set the pandas display.max_rows option to that value. If max_rows is True then set display.max_rows option to 1000. precision - An integer to set the pandas precision option. """ if max_rows is True: pd.set_option('display.max_rows', 1000) elif type(max_rows) is int: pd.set_option('display.max_rows', max_rows) pd.set_option('precision', precision) df = pd.DataFrame(arr.flatten(), index=arr['id'].flatten(), columns=columns) df.columns = names return df.style.format({names[0]: '{:.0f}', names[1]: '{:.2f}'})
[ "def", "pprint", "(", "arr", ",", "columns", "=", "(", "'temperature'", ",", "'luminosity'", ")", ",", "names", "=", "(", "'Temperature (Kelvin)'", ",", "'Luminosity (solar units)'", ")", ",", "max_rows", "=", "32", ",", "precision", "=", "2", ")", ":", "i...
Create a pandas DataFrame from a numpy ndarray. By default use temp and lum with max rows of 32 and precision of 2. arr - An numpy.ndarray. columns - The columns to include in the pandas DataFrame. Defaults to temperature and luminosity. names - The column names for the pandas DataFrame. Defaults to Temperature and Luminosity. max_rows - If max_rows is an integer then set the pandas display.max_rows option to that value. If max_rows is True then set display.max_rows option to 1000. precision - An integer to set the pandas precision option.
[ "Create", "a", "pandas", "DataFrame", "from", "a", "numpy", "ndarray", "." ]
python
valid
wonambi-python/wonambi
wonambi/attr/annotations.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/attr/annotations.py#L248-L255
def save(self):
    """Save xml to file.

    Refreshes the rater's 'modified' timestamp (when a rater is set)
    before serializing the tree.
    """
    if self.rater is not None:
        self.rater.set('modified', datetime.now().isoformat())

    pretty = parseString(tostring(self.root))
    with open(self.xml_file, 'w') as f:
        f.write(pretty.toxml())
[ "def", "save", "(", "self", ")", ":", "if", "self", ".", "rater", "is", "not", "None", ":", "self", ".", "rater", ".", "set", "(", "'modified'", ",", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", ")", "xml", "=", "parseString", "(...
Save xml to file.
[ "Save", "xml", "to", "file", "." ]
python
train
openstack/pyghmi
pyghmi/ipmi/command.py
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L489-L502
def get_power(self):
    """Get current power state of the managed system

    The response, if successful, should contain 'powerstate' key and
    either 'on' or 'off' to indicate current state.

    :returns: dict -- {'powerstate': value}
    """
    response = self.raw_command(netfn=0, command=1)
    if 'error' in response:
        raise exc.IpmiException(response['error'])
    assert (response['command'] == 1 and response['netfn'] == 1)
    state = 'off'
    if response['data'][0] & 1:
        state = 'on'
    return {'powerstate': state}
[ "def", "get_power", "(", "self", ")", ":", "response", "=", "self", ".", "raw_command", "(", "netfn", "=", "0", ",", "command", "=", "1", ")", "if", "'error'", "in", "response", ":", "raise", "exc", ".", "IpmiException", "(", "response", "[", "'error'"...
Get current power state of the managed system The response, if successful, should contain 'powerstate' key and either 'on' or 'off' to indicate current state. :returns: dict -- {'powerstate': value}
[ "Get", "current", "power", "state", "of", "the", "managed", "system" ]
python
train
reingart/gui2py
gui/controls/listview.py
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/controls/listview.py#L58-L62
def GetPyData(self, item):
    "Returns the python item data associated with the item"
    key = self.GetItemData(item)
    return self._py_data_map.get(key)
[ "def", "GetPyData", "(", "self", ",", "item", ")", ":", "wx_data", "=", "self", ".", "GetItemData", "(", "item", ")", "py_data", "=", "self", ".", "_py_data_map", ".", "get", "(", "wx_data", ")", "return", "py_data" ]
Returns the pyth item data associated with the item
[ "Returns", "the", "pyth", "item", "data", "associated", "with", "the", "item" ]
python
test
nwhitehead/pyfluidsynth
fluidsynth.py
https://github.com/nwhitehead/pyfluidsynth/blob/9a8ecee996e83a279e8d29d75e8a859aee4aba67/fluidsynth.py#L486-L503
def router_begin(self, type):
    """Start a new MIDI router rule.

    types are [note|cc|prog|pbend|cpress|kpress]; an unrecognized type
    leaves the previously set rule type unchanged (as before).
    """
    if self.router is None:
        return

    rule_types = {
        'note': 0,
        'cc': 1,
        'prog': 2,
        'pbend': 3,
        'cpress': 4,
        'kpress': 5,
    }
    if type in rule_types:
        self.router.cmd_rule_type = rule_types[type]

    # BUG FIX: the original guard was ``if 'self.router.cmd_rule' in
    # globals()``, which asks whether that literal *string* is a global
    # name and is therefore always False -- the previously allocated rule
    # was never freed.  Check the attribute on the router object instead.
    # NOTE(review): this assumes a rule still held in cmd_rule has not
    # been handed off to the underlying router -- confirm against
    # router_end's ownership semantics.
    if hasattr(self.router, 'cmd_rule'):
        delete_fluid_midi_router_rule(self.router.cmd_rule)

    self.router.cmd_rule = new_fluid_midi_router_rule()
[ "def", "router_begin", "(", "self", ",", "type", ")", ":", "if", "self", ".", "router", "is", "not", "None", ":", "if", "type", "==", "'note'", ":", "self", ".", "router", ".", "cmd_rule_type", "=", "0", "elif", "type", "==", "'cc'", ":", "self", "...
types are [note|cc|prog|pbend|cpress|kpress]
[ "types", "are", "[", "note|cc|prog|pbend|cpress|kpress", "]" ]
python
train
briancappello/flask-unchained
flask_unchained/bundles/security/views/security_controller.py
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/security/views/security_controller.py#L93-L105
def register(self):
    """
    View function to register user. Supports html and json requests.
    """
    form = self._get_form('SECURITY_REGISTER_FORM')
    if not form.validate_on_submit():
        # Validation failed (or plain GET): re-render the registration page.
        return self.render('register',
                           register_user_form=form,
                           **self.security.run_ctx_processor('register'))

    new_user = self.security_service.user_manager.create(**form.to_dict())
    self.security_service.register_user(new_user)
    return self.redirect('SECURITY_POST_REGISTER_REDIRECT_ENDPOINT')
[ "def", "register", "(", "self", ")", ":", "form", "=", "self", ".", "_get_form", "(", "'SECURITY_REGISTER_FORM'", ")", "if", "form", ".", "validate_on_submit", "(", ")", ":", "user", "=", "self", ".", "security_service", ".", "user_manager", ".", "create", ...
View function to register user. Supports html and json requests.
[ "View", "function", "to", "register", "user", ".", "Supports", "html", "and", "json", "requests", "." ]
python
train
todddeluca/python-vagrant
vagrant/__init__.py
https://github.com/todddeluca/python-vagrant/blob/83b26f9337b1f2cb6314210923bbd189e7c9199e/vagrant/__init__.py#L735-L744
def snapshot_list(self):
    '''
    This command will list all the snapshots taken.
    '''
    no_snapshots_marker = 'No snapshots have been taken yet!'
    output = self._run_vagrant_command(['snapshot', 'list'])
    if no_snapshots_marker in output:
        return []
    return output.splitlines()
[ "def", "snapshot_list", "(", "self", ")", ":", "NO_SNAPSHOTS_TAKEN", "=", "'No snapshots have been taken yet!'", "output", "=", "self", ".", "_run_vagrant_command", "(", "[", "'snapshot'", ",", "'list'", "]", ")", "if", "NO_SNAPSHOTS_TAKEN", "in", "output", ":", "...
This command will list all the snapshots taken.
[ "This", "command", "will", "list", "all", "the", "snapshots", "taken", "." ]
python
train
darothen/xbpch
xbpch/util/gridspec.py
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/util/gridspec.py#L477-L519
def prof_altitude(pressure, p_coef=(-0.028389, -0.0493698, 0.485718,
                                    0.278656, -17.5703, 48.0926)):
    """
    Return altitude for given pressure.

    This function evaluates a polynomial at log10(pressure) values.

    Parameters
    ----------
    pressure : array-like
        pressure values [hPa].
    p_coef : array-like
        coefficients of the polynomial (default values are for the US
        Standard Atmosphere).

    Returns
    -------
    altitude : array-like
        altitude values [km] (same shape than the pressure input array).

    See Also
    --------
    prof_pressure : Returns pressure for given altitude.
    prof_temperature : Returns air temperature for given altitude.

    Notes
    -----
    Default coefficient values represent a 5th degree polynomial which
    had been fitted to USSA data from 0-100 km.  Accuracy is on the
    order of 1% for 0-100 km and 0.5% below 30 km.  This function, with
    default values, may thus produce bad results with pressure less
    than about 3e-4 hPa.

    Examples
    --------
    >>> prof_altitude([1000, 800, 600])
    array([ 0.1065092 ,  1.95627858,  4.2060627 ])
    """
    press = np.asarray(pressure)
    flat_altitude = np.polyval(p_coef, np.log10(press.flatten()))
    return flat_altitude.reshape(press.shape)
[ "def", "prof_altitude", "(", "pressure", ",", "p_coef", "=", "(", "-", "0.028389", ",", "-", "0.0493698", ",", "0.485718", ",", "0.278656", ",", "-", "17.5703", ",", "48.0926", ")", ")", ":", "pressure", "=", "np", ".", "asarray", "(", "pressure", ")",...
Return altitude for given pressure. This function evaluates a polynomial at log10(pressure) values. Parameters ---------- pressure : array-like pressure values [hPa]. p_coef : array-like coefficients of the polynomial (default values are for the US Standard Atmosphere). Returns ------- altitude : array-like altitude values [km] (same shape than the pressure input array). See Also -------- prof_pressure : Returns pressure for given altitude. prof_temperature : Returns air temperature for given altitude. Notes ----- Default coefficient values represent a 5th degree polynomial which had been fitted to USSA data from 0-100 km. Accuracy is on the order of 1% for 0-100 km and 0.5% below 30 km. This function, with default values, may thus produce bad results with pressure less than about 3e-4 hPa. Examples -------- >>> prof_altitude([1000, 800, 600]) array([ 0.1065092 , 1.95627858, 4.2060627 ])
[ "Return", "altitude", "for", "given", "pressure", "." ]
python
train
QUANTAXIS/QUANTAXIS
QUANTAXIS/QASU/save_tdx.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QASU/save_tdx.py#L3566-L3710
def QA_SU_save_option_day(client=DATABASE, ui_log=None, ui_progress=None):
    '''
    Download daily bars for every 50ETF option contract and persist them
    into the ``option_day`` Mongo collection, incrementally (only dates
    newer than what is already stored are fetched).

    :param client: pymongo database holding the ``option_day`` collection
    :param ui_log: optional UI logging hook passed to QA_util_log_info
    :param ui_progress: optional UI progress hook passed to QA_util_log_info
    :return: None; failed contract codes are only logged at the end
    '''
    option_contract_list = QA_fetch_get_option_50etf_contract_time_to_market()
    coll_option_day = client.option_day
    coll_option_day.create_index(
        [("code", pymongo.ASCENDING), ("date_stamp", pymongo.ASCENDING)]
    )
    err = []

    # Index by contract code

    def __saving_work(code, coll_option_day):
        try:
            QA_util_log_info(
                '##JOB12 Now Saving OPTION_DAY==== {}'.format(str(code)),
                ui_log=ui_log
            )

            # First check whether the database already has data for this code.
            # Option codes are numbered starting from 10000001 (e.g. 10001228).
            ref = coll_option_day.find({'code': str(code)[0:8]})
            end_date = str(now_time())[0:10]

            # The database already contains data for this code: continue with
            # an incremental update.
            # This guard exists because a newly listed contract has no rows
            # yet, and indexing into an empty cursor would fail.
            if ref.count() > 0:

                # Resume updating from the last stored date.
                start_date = ref[ref.count() - 1]['date']
                QA_util_log_info(
                    ' 上次获取期权日线数据的最后日期是 {}'.format(start_date),
                    ui_log=ui_log
                )

                QA_util_log_info(
                    'UPDATE_OPTION_DAY \n 从上一次下载数据开始继续 Trying update {} from {} to {}'
                    .format(code,
                            start_date,
                            end_date),
                    ui_log=ui_log
                )
                if start_date != end_date:
                    start_date0 = QA_util_get_next_day(start_date)
                    df0 = QA_fetch_get_option_day(
                        code=code,
                        start_date=start_date0,
                        end_date=end_date,
                        frequence='day',
                        ip=None,
                        port=None
                    )
                    retCount = df0.iloc[:, 0].size
                    QA_util_log_info(
                        "日期从开始{}-结束{} , 合约代码{} , 返回了{}条记录 , 准备写入数据库".format(
                            start_date0,
                            end_date,
                            code,
                            retCount
                        ),
                        ui_log=ui_log
                    )
                    coll_option_day.insert_many(
                        QA_util_to_json_from_pandas(df0)
                    )
                else:
                    QA_util_log_info(
                        "^已经获取过这天的数据了^ {}".format(start_date),
                        ui_log=ui_log
                    )
            else:
                # No rows for this code yet: download the full history.
                start_date = '1990-01-01'
                QA_util_log_info(
                    'UPDATE_OPTION_DAY \n 从新开始下载数据 Trying update {} from {} to {}'
                    .format(code,
                            start_date,
                            end_date),
                    ui_log=ui_log
                )
                if start_date != end_date:
                    df0 = QA_fetch_get_option_day(
                        code=code,
                        start_date=start_date,
                        end_date=end_date,
                        frequence='day',
                        ip=None,
                        port=None
                    )
                    retCount = df0.iloc[:, 0].size
                    QA_util_log_info(
                        "日期从开始{}-结束{} , 合约代码{} , 获取了{}条记录 , 准备写入数据库^_^ ".format(
                            start_date,
                            end_date,
                            code,
                            retCount
                        ),
                        ui_log=ui_log
                    )
                    coll_option_day.insert_many(
                        QA_util_to_json_from_pandas(df0)
                    )
                else:
                    QA_util_log_info(
                        "*已经获取过这天的数据了* {}".format(start_date),
                        ui_log=ui_log
                    )
        except Exception as error0:
            # Best-effort per contract: record the failing code and move on.
            print(error0)
            err.append(str(code))

    for item in range(len(option_contract_list)):
        QA_util_log_info(
            'The {} of Total {}'.format(item, len(option_contract_list)),
            ui_log=ui_log
        )

        strLogProgress = 'DOWNLOAD PROGRESS {} '.format(
            str(float(item / len(option_contract_list) * 100))[0:4] + '%'
        )
        intLogProgress = int(float(item / len(option_contract_list) * 10000.0))
        QA_util_log_info(
            strLogProgress,
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=intLogProgress
        )

        __saving_work(option_contract_list[item].code, coll_option_day)
    if len(err) < 1:
        QA_util_log_info('SUCCESS save option day ^_^ ', ui_log=ui_log)
    else:
        QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log)
        QA_util_log_info(err, ui_log=ui_log)
[ "def", "QA_SU_save_option_day", "(", "client", "=", "DATABASE", ",", "ui_log", "=", "None", ",", "ui_progress", "=", "None", ")", ":", "option_contract_list", "=", "QA_fetch_get_option_50etf_contract_time_to_market", "(", ")", "coll_option_day", "=", "client", ".", ...
:param client: :return:
[ ":", "param", "client", ":", ":", "return", ":" ]
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/connector_ca/apis/developer_certificate_api.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/connector_ca/apis/developer_certificate_api.py#L139-L160
def get_developer_certificate(self, developer_certificate_id, authorization, **kwargs): # noqa: E501 """Fetch an existing developer certificate to connect to the bootstrap server. # noqa: E501 This REST API is intended to be used by customers to fetch an existing developer certificate (a certificate that can be flashed into multiple devices to connect to bootstrap server). **Example usage:** curl -X GET \"http://api.us-east-1.mbedcloud.com/v3/developer-certificates/THE_CERTIFICATE_ID\" -H \"accept: application/json\" -H \"Authorization: Bearer THE_ACCESS_TOKEN\" # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_developer_certificate(developer_certificate_id, authorization, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str developer_certificate_id: A unique identifier for the developer certificate. (required) :param str authorization: Bearer {Access Token}. (required) :return: DeveloperCertificateResponseData If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.get_developer_certificate_with_http_info(developer_certificate_id, authorization, **kwargs) # noqa: E501 else: (data) = self.get_developer_certificate_with_http_info(developer_certificate_id, authorization, **kwargs) # noqa: E501 return data
[ "def", "get_developer_certificate", "(", "self", ",", "developer_certificate_id", ",", "authorization", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'...
Fetch an existing developer certificate to connect to the bootstrap server. # noqa: E501 This REST API is intended to be used by customers to fetch an existing developer certificate (a certificate that can be flashed into multiple devices to connect to bootstrap server). **Example usage:** curl -X GET \"http://api.us-east-1.mbedcloud.com/v3/developer-certificates/THE_CERTIFICATE_ID\" -H \"accept: application/json\" -H \"Authorization: Bearer THE_ACCESS_TOKEN\" # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_developer_certificate(developer_certificate_id, authorization, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str developer_certificate_id: A unique identifier for the developer certificate. (required) :param str authorization: Bearer {Access Token}. (required) :return: DeveloperCertificateResponseData If the method is called asynchronously, returns the request thread.
[ "Fetch", "an", "existing", "developer", "certificate", "to", "connect", "to", "the", "bootstrap", "server", ".", "#", "noqa", ":", "E501" ]
python
train
shexSpec/grammar
parsers/python/pyshexc/parser_impl/shex_node_expression_parser.py
https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/shex_node_expression_parser.py#L59-L62
def visitNodeConstraintDatatype(self, ctx: ShExDocParser.NodeConstraintDatatypeContext): """ nodeConstraint: datatype xsFacet* # nodeConstraintDatatype """ self.nodeconstraint.datatype = self.context.iri_to_iriref(ctx.datatype().iri()) self.visitChildren(ctx)
[ "def", "visitNodeConstraintDatatype", "(", "self", ",", "ctx", ":", "ShExDocParser", ".", "NodeConstraintDatatypeContext", ")", ":", "self", ".", "nodeconstraint", ".", "datatype", "=", "self", ".", "context", ".", "iri_to_iriref", "(", "ctx", ".", "datatype", "...
nodeConstraint: datatype xsFacet* # nodeConstraintDatatype
[ "nodeConstraint", ":", "datatype", "xsFacet", "*", "#", "nodeConstraintDatatype" ]
python
train
chaoss/grimoirelab-sortinghat
sortinghat/cmd/load.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/cmd/load.py#L567-L576
def _merge(self, from_uid, to_uid, verbose): """Merge unique identity uid on match""" if verbose: self.display('match.tmpl', uid=from_uid, match=to_uid) api.merge_unique_identities(self.db, from_uid.uuid, to_uid.uuid) if verbose: self.display('merge.tmpl', from_uuid=from_uid.uuid, to_uuid=to_uid.uuid)
[ "def", "_merge", "(", "self", ",", "from_uid", ",", "to_uid", ",", "verbose", ")", ":", "if", "verbose", ":", "self", ".", "display", "(", "'match.tmpl'", ",", "uid", "=", "from_uid", ",", "match", "=", "to_uid", ")", "api", ".", "merge_unique_identities...
Merge unique identity uid on match
[ "Merge", "unique", "identity", "uid", "on", "match" ]
python
train
madeindjs/Super-Markdown
SuperMarkdown/SuperMarkdown.py
https://github.com/madeindjs/Super-Markdown/blob/fe2da746afa6a27aaaad27a2db1dca234f802eb0/SuperMarkdown/SuperMarkdown.py#L58-L64
def export(self): """return the object in a file""" with open(self.export_url, 'w', encoding='utf-8') as file: file.write(self.build()) if self.open_browser: webbrowser.open_new_tab(self.export_url)
[ "def", "export", "(", "self", ")", ":", "with", "open", "(", "self", ".", "export_url", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "file", ":", "file", ".", "write", "(", "self", ".", "build", "(", ")", ")", "if", "self", ".", "open_...
return the object in a file
[ "return", "the", "object", "in", "a", "file" ]
python
train
saltstack/salt
salt/states/junos.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/junos.py#L214-L243
def cli(name, format='text', **kwargs): ''' Executes the CLI commands and reuturns the text output. .. code-block:: yaml show version: junos: - cli - format: xml Parameters: Required * command: The command that need to be executed on Junos CLI. (default = None) Optional * format: Format in which to get the CLI output. (text or xml, \ default = 'text') * kwargs: Keyworded arguments which can be provided like- * timeout: Set NETCONF RPC timeout. Can be used for commands which take a while to execute. (default = 30 seconds) * dest: The destination file where the CLI output can be stored.\ (default = None) ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} ret['changes'] = __salt__['junos.cli'](name, format, **kwargs) return ret
[ "def", "cli", "(", "name", ",", "format", "=", "'text'", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "ret", "[", "'c...
Executes the CLI commands and reuturns the text output. .. code-block:: yaml show version: junos: - cli - format: xml Parameters: Required * command: The command that need to be executed on Junos CLI. (default = None) Optional * format: Format in which to get the CLI output. (text or xml, \ default = 'text') * kwargs: Keyworded arguments which can be provided like- * timeout: Set NETCONF RPC timeout. Can be used for commands which take a while to execute. (default = 30 seconds) * dest: The destination file where the CLI output can be stored.\ (default = None)
[ "Executes", "the", "CLI", "commands", "and", "reuturns", "the", "text", "output", "." ]
python
train
materialsproject/pymatgen
pymatgen/io/abinit/utils.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/utils.py#L495-L528
def abi_splitext(filename): """ Split the ABINIT extension from a filename. "Extension" are found by searching in an internal database. Returns "(root, ext)" where ext is the registered ABINIT extension The final ".nc" is included (if any) >>> assert abi_splitext("foo_WFK") == ('foo_', 'WFK') >>> assert abi_splitext("/home/guido/foo_bar_WFK.nc") == ('foo_bar_', 'WFK.nc') """ filename = os.path.basename(filename) is_ncfile = False if filename.endswith(".nc"): is_ncfile = True filename = filename[:-3] known_extensions = abi_extensions() # This algorith fails if we have two files # e.g. HAYDR_SAVE, ANOTHER_HAYDR_SAVE for i in range(len(filename)-1, -1, -1): ext = filename[i:] if ext in known_extensions: break else: raise ValueError("Cannot find a registered extension in %s" % filename) root = filename[:i] if is_ncfile: ext += ".nc" return root, ext
[ "def", "abi_splitext", "(", "filename", ")", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "is_ncfile", "=", "False", "if", "filename", ".", "endswith", "(", "\".nc\"", ")", ":", "is_ncfile", "=", "True", "filename", "="...
Split the ABINIT extension from a filename. "Extension" are found by searching in an internal database. Returns "(root, ext)" where ext is the registered ABINIT extension The final ".nc" is included (if any) >>> assert abi_splitext("foo_WFK") == ('foo_', 'WFK') >>> assert abi_splitext("/home/guido/foo_bar_WFK.nc") == ('foo_bar_', 'WFK.nc')
[ "Split", "the", "ABINIT", "extension", "from", "a", "filename", ".", "Extension", "are", "found", "by", "searching", "in", "an", "internal", "database", "." ]
python
train
evansherlock/nytimesarticle
nytimesarticle.py
https://github.com/evansherlock/nytimesarticle/blob/89f551699ffb11f71b47271246d350a1043e9326/nytimesarticle.py#L46-L55
def _bool_encode(self, d): """ Converts bool values to lowercase strings """ for k, v in d.items(): if isinstance(v, bool): d[k] = str(v).lower() return d
[ "def", "_bool_encode", "(", "self", ",", "d", ")", ":", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "bool", ")", ":", "d", "[", "k", "]", "=", "str", "(", "v", ")", ".", "lower", "(", "...
Converts bool values to lowercase strings
[ "Converts", "bool", "values", "to", "lowercase", "strings" ]
python
train
sixty-north/asq
asq/queryables.py
https://github.com/sixty-north/asq/blob/db0c4cbcf2118435136d4b63c62a12711441088e/asq/queryables.py#L739-L777
def skip(self, count=1): '''Skip the first count contiguous elements of the source sequence. If the source sequence contains fewer than count elements returns an empty sequence and does not raise an exception. Note: This method uses deferred execution. Args: count: The number of elements to skip from the beginning of the sequence. If omitted defaults to one. If count is less than one the result sequence will be empty. Returns: A Queryable over the elements of source excluding the first count elements. Raises: ValueError: If the Queryable is closed(). ''' if self.closed(): raise ValueError("Attempt to call skip() on a closed Queryable.") count = max(0, count) if count == 0: return self # Try an optimised version if hasattr(self._iterable, "__getitem__"): try: stop = len(self._iterable) return self._create(self._generate_optimized_skip_result(count, stop)) except TypeError: pass # Fall back to the unoptimized version return self._create(self._generate_skip_result(count))
[ "def", "skip", "(", "self", ",", "count", "=", "1", ")", ":", "if", "self", ".", "closed", "(", ")", ":", "raise", "ValueError", "(", "\"Attempt to call skip() on a closed Queryable.\"", ")", "count", "=", "max", "(", "0", ",", "count", ")", "if", "count...
Skip the first count contiguous elements of the source sequence. If the source sequence contains fewer than count elements returns an empty sequence and does not raise an exception. Note: This method uses deferred execution. Args: count: The number of elements to skip from the beginning of the sequence. If omitted defaults to one. If count is less than one the result sequence will be empty. Returns: A Queryable over the elements of source excluding the first count elements. Raises: ValueError: If the Queryable is closed().
[ "Skip", "the", "first", "count", "contiguous", "elements", "of", "the", "source", "sequence", "." ]
python
train
Pylons/plaster
src/plaster/loaders.py
https://github.com/Pylons/plaster/blob/e70e55c182a8300d7ccf67e54d47740c72e72cd8/src/plaster/loaders.py#L84-L117
def get_loader(config_uri, protocols=None): """ Find a :class:`plaster.ILoader` object capable of handling ``config_uri``. :param config_uri: Anything that can be parsed by :func:`plaster.parse_uri`. :param protocols: Zero or more :term:`loader protocol` identifiers that the loader must implement to match the desired ``config_uri``. :returns: A :class:`plaster.ILoader` object. :raises plaster.LoaderNotFound: If no loader could be found. :raises plaster.MultipleLoadersFound: If multiple loaders match the requested criteria. If this happens, you can disambiguate the lookup by appending the package name to the scheme for the loader you wish to use. For example if ``ini`` is ambiguous then specify ``ini+myapp`` to use the ini loader from the ``myapp`` package. """ config_uri = parse_uri(config_uri) requested_scheme = config_uri.scheme matched_loaders = find_loaders(requested_scheme, protocols=protocols) if len(matched_loaders) < 1: raise LoaderNotFound(requested_scheme, protocols=protocols) if len(matched_loaders) > 1: raise MultipleLoadersFound( requested_scheme, matched_loaders, protocols=protocols) loader_info = matched_loaders[0] loader = loader_info.load(config_uri) return loader
[ "def", "get_loader", "(", "config_uri", ",", "protocols", "=", "None", ")", ":", "config_uri", "=", "parse_uri", "(", "config_uri", ")", "requested_scheme", "=", "config_uri", ".", "scheme", "matched_loaders", "=", "find_loaders", "(", "requested_scheme", ",", "...
Find a :class:`plaster.ILoader` object capable of handling ``config_uri``. :param config_uri: Anything that can be parsed by :func:`plaster.parse_uri`. :param protocols: Zero or more :term:`loader protocol` identifiers that the loader must implement to match the desired ``config_uri``. :returns: A :class:`plaster.ILoader` object. :raises plaster.LoaderNotFound: If no loader could be found. :raises plaster.MultipleLoadersFound: If multiple loaders match the requested criteria. If this happens, you can disambiguate the lookup by appending the package name to the scheme for the loader you wish to use. For example if ``ini`` is ambiguous then specify ``ini+myapp`` to use the ini loader from the ``myapp`` package.
[ "Find", "a", ":", "class", ":", "plaster", ".", "ILoader", "object", "capable", "of", "handling", "config_uri", "." ]
python
train
hvac/hvac
hvac/api/secrets_engines/identity.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/secrets_engines/identity.py#L466-L485
def delete_entity_alias(self, alias_id, mount_point=DEFAULT_MOUNT_POINT): """Delete a entity alias. Supported methods: DELETE: /{mount_point}/entity-alias/id/{alias_id}. Produces: 204 (empty body) :param alias_id: Identifier of the entity. :type alias_id: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response """ api_path = '/v1/{mount_point}/entity-alias/id/{id}'.format( mount_point=mount_point, id=alias_id, ) return self._adapter.delete( url=api_path, )
[ "def", "delete_entity_alias", "(", "self", ",", "alias_id", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "api_path", "=", "'/v1/{mount_point}/entity-alias/id/{id}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "id", "=", "alias_id", ",",...
Delete a entity alias. Supported methods: DELETE: /{mount_point}/entity-alias/id/{alias_id}. Produces: 204 (empty body) :param alias_id: Identifier of the entity. :type alias_id: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response
[ "Delete", "a", "entity", "alias", "." ]
python
train
cytoscape/py2cytoscape
py2cytoscape/cyrest/table.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/table.py#L67-L80
def delete_column(self,column=None,table=None,verbose=None): """ Remove a column from a table, specified by its name. Returns the name of the column removed. :param column (string, optional): Specifies the name of a column in the tab le :param table (string, optional): Specifies a table by table name. If the pr efix SUID: is used, the table corresponding the SUID will be returne d. """ PARAMS=set_param(['column','table'],[column,table]) response=api(url=self.__url+"/delete column", PARAMS=PARAMS, method="POST", verbose=verbose) return response
[ "def", "delete_column", "(", "self", ",", "column", "=", "None", ",", "table", "=", "None", ",", "verbose", "=", "None", ")", ":", "PARAMS", "=", "set_param", "(", "[", "'column'", ",", "'table'", "]", ",", "[", "column", ",", "table", "]", ")", "r...
Remove a column from a table, specified by its name. Returns the name of the column removed. :param column (string, optional): Specifies the name of a column in the tab le :param table (string, optional): Specifies a table by table name. If the pr efix SUID: is used, the table corresponding the SUID will be returne d.
[ "Remove", "a", "column", "from", "a", "table", "specified", "by", "its", "name", ".", "Returns", "the", "name", "of", "the", "column", "removed", "." ]
python
train
saltstack/salt
salt/modules/git.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/git.py#L4544-L4623
def stash(cwd, action='save', opts='', git_opts='', user=None, password=None, ignore_retcode=False, output_encoding=None): ''' Interface to `git-stash(1)`_, returns the stdout from the git command cwd The path to the git checkout opts Any additional options to add to the command line, in a single string. Use this to complete the ``git stash`` command by adding the remaining arguments (i.e. ``'save <stash comment>'``, ``'apply stash@{2}'``, ``'show'``, etc.). Omitting this argument will simply run ``git stash``. git_opts Any additional options to add to git command itself (not the ``stash`` subcommand), in a single string. This is useful for passing ``-c`` to run git with temporary changes to the git configuration. .. versionadded:: 2017.7.0 .. note:: This is only supported in git 1.7.2 and newer. user User under which to run the git command. By default, the command is run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 ignore_retcode : False If ``True``, do not log an error to the minion log if the git command returns a nonzero exit status. .. versionadded:: 2015.8.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-stash(1)`: http://git-scm.com/docs/git-stash CLI Examples: .. 
code-block:: bash salt myminion git.stash /path/to/repo save opts='work in progress' salt myminion git.stash /path/to/repo apply opts='stash@{1}' salt myminion git.stash /path/to/repo drop opts='stash@{1}' salt myminion git.stash /path/to/repo list ''' cwd = _expand_path(cwd, user) command = ['git'] + _format_git_opts(git_opts) command.extend(['stash', action]) command.extend(_format_opts(opts)) return _git_run(command, cwd=cwd, user=user, password=password, ignore_retcode=ignore_retcode, output_encoding=output_encoding)['stdout']
[ "def", "stash", "(", "cwd", ",", "action", "=", "'save'", ",", "opts", "=", "''", ",", "git_opts", "=", "''", ",", "user", "=", "None", ",", "password", "=", "None", ",", "ignore_retcode", "=", "False", ",", "output_encoding", "=", "None", ")", ":", ...
Interface to `git-stash(1)`_, returns the stdout from the git command cwd The path to the git checkout opts Any additional options to add to the command line, in a single string. Use this to complete the ``git stash`` command by adding the remaining arguments (i.e. ``'save <stash comment>'``, ``'apply stash@{2}'``, ``'show'``, etc.). Omitting this argument will simply run ``git stash``. git_opts Any additional options to add to git command itself (not the ``stash`` subcommand), in a single string. This is useful for passing ``-c`` to run git with temporary changes to the git configuration. .. versionadded:: 2017.7.0 .. note:: This is only supported in git 1.7.2 and newer. user User under which to run the git command. By default, the command is run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 ignore_retcode : False If ``True``, do not log an error to the minion log if the git command returns a nonzero exit status. .. versionadded:: 2015.8.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-stash(1)`: http://git-scm.com/docs/git-stash CLI Examples: .. code-block:: bash salt myminion git.stash /path/to/repo save opts='work in progress' salt myminion git.stash /path/to/repo apply opts='stash@{1}' salt myminion git.stash /path/to/repo drop opts='stash@{1}' salt myminion git.stash /path/to/repo list
[ "Interface", "to", "git", "-", "stash", "(", "1", ")", "_", "returns", "the", "stdout", "from", "the", "git", "command" ]
python
train
Ex-Mente/auxi.0
auxi/modelling/process/materials/slurry.py
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/modelling/process/materials/slurry.py#L230-L256
def add_assay(self, name, solid_density, H2O_fraction, assay): """Add an assay to the material. :param name: The name of the new assay. :param assay: A numpy array containing the size class mass fractions for the assay. The sequence of the assay's elements must correspond to the sequence of the material's size classes. """ if not type(solid_density) is float: raise Exception("Invalid solid density. It must be a float.") self.solid_densities[name] = solid_density if not type(H2O_fraction) is float: raise Exception("Invalid H2O fraction. It must be a float.") self.H2O_fractions[name] = H2O_fraction if not type(assay) is numpy.ndarray: raise Exception("Invalid assay. It must be a numpy array.") elif not assay.shape == (self.size_class_count,): raise Exception( "Invalid assay: It must have the same number of elements as " "the material has size classes.") elif name in self.assays.keys(): raise Exception( "Invalid assay: An assay with that name already exists.") self.assays[name] = assay
[ "def", "add_assay", "(", "self", ",", "name", ",", "solid_density", ",", "H2O_fraction", ",", "assay", ")", ":", "if", "not", "type", "(", "solid_density", ")", "is", "float", ":", "raise", "Exception", "(", "\"Invalid solid density. It must be a float.\"", ")",...
Add an assay to the material. :param name: The name of the new assay. :param assay: A numpy array containing the size class mass fractions for the assay. The sequence of the assay's elements must correspond to the sequence of the material's size classes.
[ "Add", "an", "assay", "to", "the", "material", "." ]
python
valid
ManiacalLabs/BiblioPixel
bibliopixel/builder/saved_description.py
https://github.com/ManiacalLabs/BiblioPixel/blob/fd97e6c651a4bbcade64733847f4eec8f7704b7c/bibliopixel/builder/saved_description.py#L25-L28
def save(self, project_file=''): """Save the description as a YML file. Prompt if no file given.""" self._request_project_file(project_file) data_file.dump(self.desc.as_dict(), self.project_file)
[ "def", "save", "(", "self", ",", "project_file", "=", "''", ")", ":", "self", ".", "_request_project_file", "(", "project_file", ")", "data_file", ".", "dump", "(", "self", ".", "desc", ".", "as_dict", "(", ")", ",", "self", ".", "project_file", ")" ]
Save the description as a YML file. Prompt if no file given.
[ "Save", "the", "description", "as", "a", "YML", "file", ".", "Prompt", "if", "no", "file", "given", "." ]
python
valid
inasafe/inasafe
safe/gui/tools/options_dialog.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/options_dialog.py#L925-L940
def show_welcome_dialog(self): """Setup for showing welcome message dialog. This method will setup several things: - Only show welcome, organisation profile, and population parameter tab. Currently, they are the first 3 tabs. - Set the title - Move the check box for always showing welcome message. """ self.welcome_layout.addWidget(self.welcome_message_check_box) while self.tabWidget.count() > 3: self.tabWidget.removeTab(self.tabWidget.count() - 1) self.setWindowTitle(self.tr('Welcome to InaSAFE %s' % get_version())) # Hide the export import button self.export_button.hide() self.import_button.hide()
[ "def", "show_welcome_dialog", "(", "self", ")", ":", "self", ".", "welcome_layout", ".", "addWidget", "(", "self", ".", "welcome_message_check_box", ")", "while", "self", ".", "tabWidget", ".", "count", "(", ")", ">", "3", ":", "self", ".", "tabWidget", "....
Setup for showing welcome message dialog. This method will setup several things: - Only show welcome, organisation profile, and population parameter tab. Currently, they are the first 3 tabs. - Set the title - Move the check box for always showing welcome message.
[ "Setup", "for", "showing", "welcome", "message", "dialog", "." ]
python
train
citronneur/rdpy
rdpy/ui/view.py
https://github.com/citronneur/rdpy/blob/4109b7a6fe2abf3ddbaed54e29d2f31e63ed97f6/rdpy/ui/view.py#L137-L159
def update(self, render, force = False): """ Draw GUI that list active session """ if not force and not self._needUpdate: return self._needUpdate = False i = 0 drawArea = QtGui.QImage(self._width, self._height, render.getImageFormat()) #fill with background Color drawArea.fill(self._backgroudColor) with QtGui.QPainter(drawArea) as qp: for label in self._labels: rect = QtCore.QRect(0, i * self._cellHeight, self._width - 2, self._cellHeight) if i == self._current: qp.setPen(QtCore.Qt.darkGreen) qp.drawRoundedRect(rect, 5.0, 5.0) qp.setPen(QtCore.Qt.white) qp.setFont(QtGui.QFont('arial', self._fontSize, QtGui.QFont.Bold)) qp.drawText(rect, QtCore.Qt.AlignCenter, label) i += 1 render.drawImage(drawArea)
[ "def", "update", "(", "self", ",", "render", ",", "force", "=", "False", ")", ":", "if", "not", "force", "and", "not", "self", ".", "_needUpdate", ":", "return", "self", ".", "_needUpdate", "=", "False", "i", "=", "0", "drawArea", "=", "QtGui", ".", ...
Draw GUI that list active session
[ "Draw", "GUI", "that", "list", "active", "session" ]
python
train
LogicalDash/LiSE
allegedb/allegedb/graph.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/allegedb/allegedb/graph.py#L1350-L1373
def add_edge(self, u, v, attr_dict=None, **attr): """Version of add_edge that only writes to the database once""" if attr_dict is None: attr_dict = attr else: try: attr_dict.update(attr) except AttributeError: raise NetworkXError( "The attr_dict argument must be a dictionary." ) if u not in self.node: self.node[u] = {} if v not in self.node: self.node[v] = {} if u in self.adj: datadict = self.adj[u].get(v, {}) else: self.adj[u] = {v: {}} datadict = self.adj[u][v] datadict.update(attr_dict) self.succ[u][v] = datadict assert u in self.succ, "Failed to add edge {u}->{v} ({u} not in successors)".format(u=u, v=v) assert v in self.succ[u], "Failed to add edge {u}->{v} ({v} not in succ[{u}])".format(u=u, v=v)
[ "def", "add_edge", "(", "self", ",", "u", ",", "v", ",", "attr_dict", "=", "None", ",", "*", "*", "attr", ")", ":", "if", "attr_dict", "is", "None", ":", "attr_dict", "=", "attr", "else", ":", "try", ":", "attr_dict", ".", "update", "(", "attr", ...
Version of add_edge that only writes to the database once
[ "Version", "of", "add_edge", "that", "only", "writes", "to", "the", "database", "once" ]
python
train