code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def copy(self, object_version=None, key=None):
"""Copy a tag to a given object version.
:param object_version: The object version instance to copy the tag to.
Default: current object version.
:param key: Key of destination tag.
Default: current tag key.
:return: The copied object version tag.
"""
return ObjectVersionTag.create(
self.object_version if object_version is None else object_version,
key or self.key,
self.value
) | Copy a tag to a given object version.
:param object_version: The object version instance to copy the tag to.
Default: current object version.
:param key: Key of destination tag.
Default: current tag key.
:return: The copied object version tag. | Below is the instruction that describes the task:
### Input:
Copy a tag to a given object version.
:param object_version: The object version instance to copy the tag to.
Default: current object version.
:param key: Key of destination tag.
Default: current tag key.
:return: The copied object version tag.
### Response:
def copy(self, object_version=None, key=None):
"""Copy a tag to a given object version.
:param object_version: The object version instance to copy the tag to.
Default: current object version.
:param key: Key of destination tag.
Default: current tag key.
:return: The copied object version tag.
"""
return ObjectVersionTag.create(
self.object_version if object_version is None else object_version,
key or self.key,
self.value
) |
def snapshot(gta, plotter, key, do_weighted=True, make_plots=True):
"""Take a snapshot of the ROI
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
plotter : `fermipy.plotting.AnalysisPlotter`
The object that makes the plots
key : str
Key for this snapshot, used to create filenames
do_weighted : bool
If True, include weighted version of outputs
make_plots : bool
If True, make plots
"""
gta.write_roi(key, save_model_map=True, make_plots=make_plots, save_weight_map=do_weighted)
if make_plots:
o = gta.residmap(key)
plotter.make_residmap_plots(o, gta.roi)
if do_weighted:
gta.make_plots("%s_wt"%key, weighted=True)
o = gta.residmap("%s_wt"%key, use_weights=True)
plotter.make_residmap_plots(o, gta.roi) | Take a snapshot of the ROI
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
plotter : `fermipy.plotting.AnalysisPlotter`
The object that makes the plots
key : str
Key for this snapshot, used to create filenames
do_weighted : bool
If True, include weighted version of outputs
make_plots : bool
If True, make plots | Below is the instruction that describes the task:
### Input:
Take a snapshot of the ROI
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
plotter : `fermipy.plotting.AnalysisPlotter`
The object that makes the plots
key : str
Key for this snapshot, used to create filenames
do_weighted : bool
If True, include weighted version of outputs
make_plots : bool
If True, make plots
### Response:
def snapshot(gta, plotter, key, do_weighted=True, make_plots=True):
"""Take a snapshot of the ROI
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
plotter : `fermipy.plotting.AnalysisPlotter`
The object that makes the plots
key : str
Key for this snapshot, used to create filenames
do_weighted : bool
If True, include weighted version of outputs
make_plots : bool
If True, make plots
"""
gta.write_roi(key, save_model_map=True, make_plots=make_plots, save_weight_map=do_weighted)
if make_plots:
o = gta.residmap(key)
plotter.make_residmap_plots(o, gta.roi)
if do_weighted:
gta.make_plots("%s_wt"%key, weighted=True)
o = gta.residmap("%s_wt"%key, use_weights=True)
plotter.make_residmap_plots(o, gta.roi) |
def run(self):
"""Run command."""
onnx_script = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "tools/mypy-onnx.py"))
returncode = subprocess.call([sys.executable, onnx_script])
sys.exit(returncode) | Run command. | Below is the instruction that describes the task:
### Input:
Run command.
### Response:
def run(self):
"""Run command."""
onnx_script = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "tools/mypy-onnx.py"))
returncode = subprocess.call([sys.executable, onnx_script])
sys.exit(returncode) |
def walk_interface(self, name, data, in_location):
interface = Interface()
location = "{}.{}".format(in_location, name)
if isinstance(data, dict):
interface.name = de_identifier(self.require_field(location, "name", data, "", str))
location = "{}.{}".format(in_location, clean_identifier(interface.name))
interface.returns = de_identifier(self.require_field(location, "returns", data, "", str))
interface.description = self.recommend_field(location, "description", data, "", str)
# Implementations
if "production" in data:
interface.production = self.walk_implementation("production", data["production"], location)
else:
self.not_found_error("{}.{}".format(location, "production"))
if "test" in data:
interface.test = self.walk_implementation("test", data["test"], location)
else:
interface.test = None
# Arguments
interface.args = list(self.walk_list("{}.args".format(location), "args", data, self.walk_interface_arg))
interface.cache = self.typecheck_field(location, "cache", data, [], list)
else:
self.type_error(location, dict, type(data))
return interface
'''
self.require_field("url", "{}.url".format(location), data, str)
self.require_field("verb", "{}.verb".format(location), data, set(("get", "post", "delete", "put")))
self.recommend_field("format", "{}.format".format(location), data, set(("json", "xml", "html", "csv", "text")), not_found="Assuming json.")
self.require_field("output", "{}.output".format(location), data, str)
self.recommend_field(
"description",
"{}.description".format(name),
data, str, not_found="There will be no documentation for {}!".format(name))
self.typecheck_field("comment", "{}.comment".format(location), data, str)
if "inputs" in data:
self.walk_list("{}.inputs".format(location), "inputs", data, self.walk_input)
# Ensure that every url has the requsite paths!
if "url" in data:
url_input_names = set(map(str, re.findall("<(.*?)>", data["url"])))
given_input_names = set([input['path'] for input in data["inputs"].values() if 'path' in input])
if not url_input_names.issubset(given_input_names):
self.error("Expected full list of url parameters {} for {}, given only {}.".format(list(url_input_names), location, list(given_input_names)))
else:
self.type_error(location, dict, type(data))
return interface''' | self.require_field("url", "{}.url".format(location), data, str)
self.require_field("verb", "{}.verb".format(location), data, set(("get", "post", "delete", "put")))
self.recommend_field("format", "{}.format".format(location), data, set(("json", "xml", "html", "csv", "text")), not_found="Assuming json.")
self.require_field("output", "{}.output".format(location), data, str)
self.recommend_field(
"description",
"{}.description".format(name),
data, str, not_found="There will be no documentation for {}!".format(name))
self.typecheck_field("comment", "{}.comment".format(location), data, str)
if "inputs" in data:
self.walk_list("{}.inputs".format(location), "inputs", data, self.walk_input)
# Ensure that every url has the requsite paths!
if "url" in data:
url_input_names = set(map(str, re.findall("<(.*?)>", data["url"])))
given_input_names = set([input['path'] for input in data["inputs"].values() if 'path' in input])
if not url_input_names.issubset(given_input_names):
self.error("Expected full list of url parameters {} for {}, given only {}.".format(list(url_input_names), location, list(given_input_names)))
else:
self.type_error(location, dict, type(data))
return interface | Below is the instruction that describes the task:
### Input:
self.require_field("url", "{}.url".format(location), data, str)
self.require_field("verb", "{}.verb".format(location), data, set(("get", "post", "delete", "put")))
self.recommend_field("format", "{}.format".format(location), data, set(("json", "xml", "html", "csv", "text")), not_found="Assuming json.")
self.require_field("output", "{}.output".format(location), data, str)
self.recommend_field(
"description",
"{}.description".format(name),
data, str, not_found="There will be no documentation for {}!".format(name))
self.typecheck_field("comment", "{}.comment".format(location), data, str)
if "inputs" in data:
self.walk_list("{}.inputs".format(location), "inputs", data, self.walk_input)
# Ensure that every url has the requsite paths!
if "url" in data:
url_input_names = set(map(str, re.findall("<(.*?)>", data["url"])))
given_input_names = set([input['path'] for input in data["inputs"].values() if 'path' in input])
if not url_input_names.issubset(given_input_names):
self.error("Expected full list of url parameters {} for {}, given only {}.".format(list(url_input_names), location, list(given_input_names)))
else:
self.type_error(location, dict, type(data))
return interface
### Response:
def walk_interface(self, name, data, in_location):
interface = Interface()
location = "{}.{}".format(in_location, name)
if isinstance(data, dict):
interface.name = de_identifier(self.require_field(location, "name", data, "", str))
location = "{}.{}".format(in_location, clean_identifier(interface.name))
interface.returns = de_identifier(self.require_field(location, "returns", data, "", str))
interface.description = self.recommend_field(location, "description", data, "", str)
# Implementations
if "production" in data:
interface.production = self.walk_implementation("production", data["production"], location)
else:
self.not_found_error("{}.{}".format(location, "production"))
if "test" in data:
interface.test = self.walk_implementation("test", data["test"], location)
else:
interface.test = None
# Arguments
interface.args = list(self.walk_list("{}.args".format(location), "args", data, self.walk_interface_arg))
interface.cache = self.typecheck_field(location, "cache", data, [], list)
else:
self.type_error(location, dict, type(data))
return interface
'''
self.require_field("url", "{}.url".format(location), data, str)
self.require_field("verb", "{}.verb".format(location), data, set(("get", "post", "delete", "put")))
self.recommend_field("format", "{}.format".format(location), data, set(("json", "xml", "html", "csv", "text")), not_found="Assuming json.")
self.require_field("output", "{}.output".format(location), data, str)
self.recommend_field(
"description",
"{}.description".format(name),
data, str, not_found="There will be no documentation for {}!".format(name))
self.typecheck_field("comment", "{}.comment".format(location), data, str)
if "inputs" in data:
self.walk_list("{}.inputs".format(location), "inputs", data, self.walk_input)
# Ensure that every url has the requsite paths!
if "url" in data:
url_input_names = set(map(str, re.findall("<(.*?)>", data["url"])))
given_input_names = set([input['path'] for input in data["inputs"].values() if 'path' in input])
if not url_input_names.issubset(given_input_names):
self.error("Expected full list of url parameters {} for {}, given only {}.".format(list(url_input_names), location, list(given_input_names)))
else:
self.type_error(location, dict, type(data))
return interface''' |
def is_token_from_emulator(auth_header: str) -> bool:
""" Determines if a given Auth header is from the Bot Framework Emulator
:param auth_header: Bearer Token, in the 'Bearer [Long String]' Format.
:type auth_header: str
:return: True, if the token was issued by the Emulator. Otherwise, false.
"""
# The Auth Header generally looks like this:
# "Bearer eyJ0e[...Big Long String...]XAiO"
if not auth_header:
# No token. Can't be an emulator token.
return False
parts = auth_header.split(' ')
if len(parts) != 2:
# Emulator tokens MUST have exactly 2 parts.
# If we don't have 2 parts, it's not an emulator token
return False
auth_scheme = parts[0]
bearer_token = parts[1]
# We now have an array that should be:
# [0] = "Bearer"
# [1] = "[Big Long String]"
if auth_scheme != 'Bearer':
# The scheme from the emulator MUST be "Bearer"
return False
# Parse the Big Long String into an actual token.
token = jwt.decode(bearer_token, verify=False)
if not token:
return False
# Is there an Issuer?
issuer = token['iss']
if not issuer:
# No Issuer, means it's not from the Emulator.
return False
# Is the token issues by a source we consider to be the emulator?
issuer_list = EmulatorValidation.TO_BOT_FROM_EMULATOR_TOKEN_VALIDATION_PARAMETERS.issuer
if issuer_list and not issuer in issuer_list:
# Not a Valid Issuer. This is NOT a Bot Framework Emulator Token.
return False
# The Token is from the Bot Framework Emulator. Success!
return True | Determines if a given Auth header is from the Bot Framework Emulator
:param auth_header: Bearer Token, in the 'Bearer [Long String]' Format.
:type auth_header: str
:return: True, if the token was issued by the Emulator. Otherwise, false. | Below is the instruction that describes the task:
### Input:
Determines if a given Auth header is from the Bot Framework Emulator
:param auth_header: Bearer Token, in the 'Bearer [Long String]' Format.
:type auth_header: str
:return: True, if the token was issued by the Emulator. Otherwise, false.
### Response:
def is_token_from_emulator(auth_header: str) -> bool:
""" Determines if a given Auth header is from the Bot Framework Emulator
:param auth_header: Bearer Token, in the 'Bearer [Long String]' Format.
:type auth_header: str
:return: True, if the token was issued by the Emulator. Otherwise, false.
"""
# The Auth Header generally looks like this:
# "Bearer eyJ0e[...Big Long String...]XAiO"
if not auth_header:
# No token. Can't be an emulator token.
return False
parts = auth_header.split(' ')
if len(parts) != 2:
# Emulator tokens MUST have exactly 2 parts.
# If we don't have 2 parts, it's not an emulator token
return False
auth_scheme = parts[0]
bearer_token = parts[1]
# We now have an array that should be:
# [0] = "Bearer"
# [1] = "[Big Long String]"
if auth_scheme != 'Bearer':
# The scheme from the emulator MUST be "Bearer"
return False
# Parse the Big Long String into an actual token.
token = jwt.decode(bearer_token, verify=False)
if not token:
return False
# Is there an Issuer?
issuer = token['iss']
if not issuer:
# No Issuer, means it's not from the Emulator.
return False
# Is the token issues by a source we consider to be the emulator?
issuer_list = EmulatorValidation.TO_BOT_FROM_EMULATOR_TOKEN_VALIDATION_PARAMETERS.issuer
if issuer_list and not issuer in issuer_list:
# Not a Valid Issuer. This is NOT a Bot Framework Emulator Token.
return False
# The Token is from the Bot Framework Emulator. Success!
return True |
def _rewrite_ser_data(self, ser, series_data, date_1904):
"""
Rewrite the ``<c:tx>``, ``<c:xVal>`` and ``<c:yVal>`` child elements
of *ser* based on the values in *series_data*.
"""
ser._remove_tx()
ser._remove_xVal()
ser._remove_yVal()
xml_writer = _XySeriesXmlWriter(series_data)
ser._insert_tx(xml_writer.tx)
ser._insert_xVal(xml_writer.xVal)
ser._insert_yVal(xml_writer.yVal) | Rewrite the ``<c:tx>``, ``<c:xVal>`` and ``<c:yVal>`` child elements
of *ser* based on the values in *series_data*. | Below is the instruction that describes the task:
### Input:
Rewrite the ``<c:tx>``, ``<c:xVal>`` and ``<c:yVal>`` child elements
of *ser* based on the values in *series_data*.
### Response:
def _rewrite_ser_data(self, ser, series_data, date_1904):
"""
Rewrite the ``<c:tx>``, ``<c:xVal>`` and ``<c:yVal>`` child elements
of *ser* based on the values in *series_data*.
"""
ser._remove_tx()
ser._remove_xVal()
ser._remove_yVal()
xml_writer = _XySeriesXmlWriter(series_data)
ser._insert_tx(xml_writer.tx)
ser._insert_xVal(xml_writer.xVal)
ser._insert_yVal(xml_writer.yVal) |
def from_hdf(cls, filename):
"""Load camera model params from a HDF5 file
The HDF5 file should contain the following datasets:
wc : (2,) float with distortion center
lgamma : float distortion parameter
readout : float readout value
size : (2,) int image size
fps : float frame rate
K : (3, 3) float camera matrix
Parameters
--------------------
filename : str
Path to file with parameters
Returns
---------------------
AtanCameraModel
Camera model instance
"""
import h5py
with h5py.File(filename, 'r') as f:
wc = f["wc"].value
lgamma = f["lgamma"].value
K = f["K"].value
readout = f["readout"].value
image_size = f["size"].value
fps = f["fps"].value
instance = cls(image_size, fps, readout, K, wc, lgamma)
return instance | Load camera model params from a HDF5 file
The HDF5 file should contain the following datasets:
wc : (2,) float with distortion center
lgamma : float distortion parameter
readout : float readout value
size : (2,) int image size
fps : float frame rate
K : (3, 3) float camera matrix
Parameters
--------------------
filename : str
Path to file with parameters
Returns
---------------------
AtanCameraModel
Camera model instance | Below is the instruction that describes the task:
### Input:
Load camera model params from a HDF5 file
The HDF5 file should contain the following datasets:
wc : (2,) float with distortion center
lgamma : float distortion parameter
readout : float readout value
size : (2,) int image size
fps : float frame rate
K : (3, 3) float camera matrix
Parameters
--------------------
filename : str
Path to file with parameters
Returns
---------------------
AtanCameraModel
Camera model instance
### Response:
def from_hdf(cls, filename):
"""Load camera model params from a HDF5 file
The HDF5 file should contain the following datasets:
wc : (2,) float with distortion center
lgamma : float distortion parameter
readout : float readout value
size : (2,) int image size
fps : float frame rate
K : (3, 3) float camera matrix
Parameters
--------------------
filename : str
Path to file with parameters
Returns
---------------------
AtanCameraModel
Camera model instance
"""
import h5py
with h5py.File(filename, 'r') as f:
wc = f["wc"].value
lgamma = f["lgamma"].value
K = f["K"].value
readout = f["readout"].value
image_size = f["size"].value
fps = f["fps"].value
instance = cls(image_size, fps, readout, K, wc, lgamma)
return instance |
def connect(self, agent='Python'):
"""
Context manager for HTTP Connection state and ensures proper handling
of network sockets, sends a GET request.
Exception is raised at the yield statement.
:yield request: FileIO<Socket>
"""
headers = {'User-Agent': agent}
request = urlopen(Request(self.url, headers=headers))
try:
yield request
finally:
request.close() | Context manager for HTTP Connection state and ensures proper handling
of network sockets, sends a GET request.
Exception is raised at the yield statement.
:yield request: FileIO<Socket> | Below is the instruction that describes the task:
### Input:
Context manager for HTTP Connection state and ensures proper handling
of network sockets, sends a GET request.
Exception is raised at the yield statement.
:yield request: FileIO<Socket>
### Response:
def connect(self, agent='Python'):
"""
Context manager for HTTP Connection state and ensures proper handling
of network sockets, sends a GET request.
Exception is raised at the yield statement.
:yield request: FileIO<Socket>
"""
headers = {'User-Agent': agent}
request = urlopen(Request(self.url, headers=headers))
try:
yield request
finally:
request.close() |
def _create_pipeline_parser():
""" Create the parser for the %pipeline magics.
Note that because we use the func default handler dispatch mechanism of
argparse, our handlers can take only one argument which is the parsed args. So
we must create closures for the handlers that bind the cell contents and thus
must recreate this parser for each cell upon execution.
"""
parser = google.datalab.utils.commands.CommandParser(
prog='%pipeline', description="""
Execute various pipeline-related operations. Use "%pipeline <command> -h"
for help on a specific command.
""")
# %%pipeline create
_add_command(parser, _create_create_subparser, _create_cell)
return parser | Create the parser for the %pipeline magics.
Note that because we use the func default handler dispatch mechanism of
argparse, our handlers can take only one argument which is the parsed args. So
we must create closures for the handlers that bind the cell contents and thus
must recreate this parser for each cell upon execution. | Below is the instruction that describes the task:
### Input:
Create the parser for the %pipeline magics.
Note that because we use the func default handler dispatch mechanism of
argparse, our handlers can take only one argument which is the parsed args. So
we must create closures for the handlers that bind the cell contents and thus
must recreate this parser for each cell upon execution.
### Response:
def _create_pipeline_parser():
""" Create the parser for the %pipeline magics.
Note that because we use the func default handler dispatch mechanism of
argparse, our handlers can take only one argument which is the parsed args. So
we must create closures for the handlers that bind the cell contents and thus
must recreate this parser for each cell upon execution.
"""
parser = google.datalab.utils.commands.CommandParser(
prog='%pipeline', description="""
Execute various pipeline-related operations. Use "%pipeline <command> -h"
for help on a specific command.
""")
# %%pipeline create
_add_command(parser, _create_create_subparser, _create_cell)
return parser |
def done(message):
"""Create a Deleted response builder with specified message."""
def done(value, _context, **_params):
return Done(value, message)
return done | Create a Deleted response builder with specified message. | Below is the instruction that describes the task:
### Input:
Create a Deleted response builder with specified message.
### Response:
def done(message):
"""Create a Deleted response builder with specified message."""
def done(value, _context, **_params):
return Done(value, message)
return done |
def complete(self, text, state):
'''
Alternate entry point for using the argcomplete completer in a readline-based REPL. See also
`rlcompleter <https://docs.python.org/2/library/rlcompleter.html#completer-objects>`_.
Usage:
.. code-block:: python
import argcomplete, argparse, readline
parser = argparse.ArgumentParser()
...
completer = argcomplete.CompletionFinder(parser)
readline.set_completer(completer.complete)
readline.parse_and_bind("tab: complete")
result = input("prompt> ")
(Use ``raw_input`` instead of ``input`` on Python 2, or use `eight <https://github.com/kislyuk/eight>`_).
'''
if state == 0:
print("Retrieving matches for", text)
cword_prequote, cword_prefix, cword_suffix, comp_words, first_colon_pos = split_line(text)
print("Split line into prequote={}, prefix={}, suffix={}, words={}, fcp={}".format(cword_prequote, cword_prefix, cword_suffix, comp_words, first_colon_pos))
comp_words.insert(0, "prog")
self.matches = self._get_completions(comp_words, cword_prefix, cword_prequote, first_colon_pos)
print("Set matches to", self.matches)
if state < len(self.matches):
print("Returning", self.matches[state])
return self.matches[state]
else:
return None | Alternate entry point for using the argcomplete completer in a readline-based REPL. See also
`rlcompleter <https://docs.python.org/2/library/rlcompleter.html#completer-objects>`_.
Usage:
.. code-block:: python
import argcomplete, argparse, readline
parser = argparse.ArgumentParser()
...
completer = argcomplete.CompletionFinder(parser)
readline.set_completer(completer.complete)
readline.parse_and_bind("tab: complete")
result = input("prompt> ")
(Use ``raw_input`` instead of ``input`` on Python 2, or use `eight <https://github.com/kislyuk/eight>`_). | Below is the instruction that describes the task:
### Input:
Alternate entry point for using the argcomplete completer in a readline-based REPL. See also
`rlcompleter <https://docs.python.org/2/library/rlcompleter.html#completer-objects>`_.
Usage:
.. code-block:: python
import argcomplete, argparse, readline
parser = argparse.ArgumentParser()
...
completer = argcomplete.CompletionFinder(parser)
readline.set_completer(completer.complete)
readline.parse_and_bind("tab: complete")
result = input("prompt> ")
(Use ``raw_input`` instead of ``input`` on Python 2, or use `eight <https://github.com/kislyuk/eight>`_).
### Response:
def complete(self, text, state):
'''
Alternate entry point for using the argcomplete completer in a readline-based REPL. See also
`rlcompleter <https://docs.python.org/2/library/rlcompleter.html#completer-objects>`_.
Usage:
.. code-block:: python
import argcomplete, argparse, readline
parser = argparse.ArgumentParser()
...
completer = argcomplete.CompletionFinder(parser)
readline.set_completer(completer.complete)
readline.parse_and_bind("tab: complete")
result = input("prompt> ")
(Use ``raw_input`` instead of ``input`` on Python 2, or use `eight <https://github.com/kislyuk/eight>`_).
'''
if state == 0:
print("Retrieving matches for", text)
cword_prequote, cword_prefix, cword_suffix, comp_words, first_colon_pos = split_line(text)
print("Split line into prequote={}, prefix={}, suffix={}, words={}, fcp={}".format(cword_prequote, cword_prefix, cword_suffix, comp_words, first_colon_pos))
comp_words.insert(0, "prog")
self.matches = self._get_completions(comp_words, cword_prefix, cword_prequote, first_colon_pos)
print("Set matches to", self.matches)
if state < len(self.matches):
print("Returning", self.matches[state])
return self.matches[state]
else:
return None |
def siteblock(parser, token):
"""Two notation types are acceptable:
1. Two arguments:
{% siteblock "myblock" %}
Used to render "myblock" site block.
2. Four arguments:
{% siteblock "myblock" as myvar %}
Used to put "myblock" site block into "myvar" template variable.
"""
tokens = token.split_contents()
tokens_num = len(tokens)
if tokens_num not in (2, 4):
raise template.TemplateSyntaxError(
'%r tag requires two or four arguments. '
'E.g.: {%% siteblock "myblock" %%} or {%% siteblock "myblock" as myvar %%}.' % tokens[0])
block_alias = parser.compile_filter(tokens[1])
as_var = None
tokens = tokens[2:]
if len(tokens) >= 2 and tokens[-2] == 'as':
as_var = tokens[-1]
return siteblockNode(block_alias, as_var) | Two notation types are acceptable:
1. Two arguments:
{% siteblock "myblock" %}
Used to render "myblock" site block.
2. Four arguments:
{% siteblock "myblock" as myvar %}
Used to put "myblock" site block into "myvar" template variable. | Below is the instruction that describes the task:
### Input:
Two notation types are acceptable:
1. Two arguments:
{% siteblock "myblock" %}
Used to render "myblock" site block.
2. Four arguments:
{% siteblock "myblock" as myvar %}
Used to put "myblock" site block into "myvar" template variable.
### Response:
def siteblock(parser, token):
"""Two notation types are acceptable:
1. Two arguments:
{% siteblock "myblock" %}
Used to render "myblock" site block.
2. Four arguments:
{% siteblock "myblock" as myvar %}
Used to put "myblock" site block into "myvar" template variable.
"""
tokens = token.split_contents()
tokens_num = len(tokens)
if tokens_num not in (2, 4):
raise template.TemplateSyntaxError(
'%r tag requires two or four arguments. '
'E.g.: {%% siteblock "myblock" %%} or {%% siteblock "myblock" as myvar %%}.' % tokens[0])
block_alias = parser.compile_filter(tokens[1])
as_var = None
tokens = tokens[2:]
if len(tokens) >= 2 and tokens[-2] == 'as':
as_var = tokens[-1]
return siteblockNode(block_alias, as_var) |
def get(self, timeout=None):
"""Retrieve results from all the output tubes."""
valid = False
result = None
for tube in self._output_tubes:
if timeout:
valid, result = tube.get(timeout)
if valid:
result = result[0]
else:
result = tube.get()[0]
if timeout:
return valid, result
return result | Retrieve results from all the output tubes. | Below is the instruction that describes the task:
### Input:
Retrieve results from all the output tubes.
### Response:
def get(self, timeout=None):
"""Retrieve results from all the output tubes."""
valid = False
result = None
for tube in self._output_tubes:
if timeout:
valid, result = tube.get(timeout)
if valid:
result = result[0]
else:
result = tube.get()[0]
if timeout:
return valid, result
return result |
def stop(self):
"""Close serial port."""
self.logger.warning("Stop executed")
try:
self._reader.close()
except serial.serialutil.SerialException:
self.logger.error("Error while closing device")
raise VelbusException("Error while closing device")
time.sleep(1) | Close serial port. | Below is the instruction that describes the task:
### Input:
Close serial port.
### Response:
def stop(self):
"""Close serial port."""
self.logger.warning("Stop executed")
try:
self._reader.close()
except serial.serialutil.SerialException:
self.logger.error("Error while closing device")
raise VelbusException("Error while closing device")
time.sleep(1) |
def save_beat(
self,
output_file_name,
frequencys,
play_time,
sample_rate=44100,
volume=0.01
):
'''
引数で指定した条件でビートを鳴らす
Args:
frequencys: (左の周波数(Hz), 右の周波数(Hz))のtuple
play_time: 再生時間(秒)
sample_rate: サンプルレート
volume: 音量
Returns:
void
'''
left_frequency, right_frequency = frequencys
left_chunk = self.__create_chunk(left_frequency, play_time, sample_rate)
right_chunk = self.__create_chunk(right_frequency, play_time, sample_rate)
frame_list = self.read_stream(left_chunk, right_chunk, volume)
wf = wave.open(output_file_name, 'wb')
wf.setparams((2, 2, sample_rate, 0, 'NONE', 'not compressed'))
wf.writeframes(b''.join(frame_list))
wf.close() | 引数で指定した条件でビートを鳴らす
Args:
frequencys: (左の周波数(Hz), 右の周波数(Hz))のtuple
play_time: 再生時間(秒)
sample_rate: サンプルレート
volume: 音量
Returns:
void | Below is the instruction that describes the task:
### Input:
引数で指定した条件でビートを鳴らす
Args:
frequencys: (左の周波数(Hz), 右の周波数(Hz))のtuple
play_time: 再生時間(秒)
sample_rate: サンプルレート
volume: 音量
Returns:
void
### Response:
def save_beat(
self,
output_file_name,
frequencys,
play_time,
sample_rate=44100,
volume=0.01
):
'''
引数で指定した条件でビートを鳴らす
Args:
frequencys: (左の周波数(Hz), 右の周波数(Hz))のtuple
play_time: 再生時間(秒)
sample_rate: サンプルレート
volume: 音量
Returns:
void
'''
left_frequency, right_frequency = frequencys
left_chunk = self.__create_chunk(left_frequency, play_time, sample_rate)
right_chunk = self.__create_chunk(right_frequency, play_time, sample_rate)
frame_list = self.read_stream(left_chunk, right_chunk, volume)
wf = wave.open(output_file_name, 'wb')
wf.setparams((2, 2, sample_rate, 0, 'NONE', 'not compressed'))
wf.writeframes(b''.join(frame_list))
wf.close() |
def writeExpression(self, rnaQuantificationId, quantfilename):
"""
Reads the quantification results file and adds entries to the
specified database.
"""
isNormalized = self._isNormalized
units = self._units
with open(quantfilename, "r") as quantFile:
quantificationReader = csv.reader(quantFile, delimiter=b"\t")
header = next(quantificationReader)
expressionLevelColNum = self.setColNum(
header, self._expressionLevelCol)
nameColNum = self.setColNum(header, self._nameCol)
countColNum = self.setColNum(header, self._countCol, -1)
confColLowNum = self.setColNum(header, self._confColLow, -1)
confColHiNum = self.setColNum(header, self._confColHi, -1)
expressionId = 0
for expression in quantificationReader:
expressionLevel = expression[expressionLevelColNum]
name = expression[nameColNum]
rawCount = 0.0
if countColNum != -1:
rawCount = expression[countColNum]
confidenceLow = 0.0
confidenceHi = 0.0
score = 0.0
if confColLowNum != -1 and confColHiNum != -1:
confidenceLow = float(expression[confColLowNum])
confidenceHi = float(expression[confColHiNum])
score = (confidenceLow + confidenceHi)/2
datafields = (expressionId, rnaQuantificationId, name,
expressionLevel, isNormalized, rawCount, score,
units, confidenceLow, confidenceHi)
self._db.addExpression(datafields)
expressionId += 1
self._db.batchAddExpression() | Reads the quantification results file and adds entries to the
specified database. | Below is the instruction that describes the task:
### Input:
Reads the quantification results file and adds entries to the
specified database.
### Response:
def writeExpression(self, rnaQuantificationId, quantfilename):
"""
Reads the quantification results file and adds entries to the
specified database.
"""
isNormalized = self._isNormalized
units = self._units
with open(quantfilename, "r") as quantFile:
quantificationReader = csv.reader(quantFile, delimiter=b"\t")
header = next(quantificationReader)
expressionLevelColNum = self.setColNum(
header, self._expressionLevelCol)
nameColNum = self.setColNum(header, self._nameCol)
countColNum = self.setColNum(header, self._countCol, -1)
confColLowNum = self.setColNum(header, self._confColLow, -1)
confColHiNum = self.setColNum(header, self._confColHi, -1)
expressionId = 0
for expression in quantificationReader:
expressionLevel = expression[expressionLevelColNum]
name = expression[nameColNum]
rawCount = 0.0
if countColNum != -1:
rawCount = expression[countColNum]
confidenceLow = 0.0
confidenceHi = 0.0
score = 0.0
if confColLowNum != -1 and confColHiNum != -1:
confidenceLow = float(expression[confColLowNum])
confidenceHi = float(expression[confColHiNum])
score = (confidenceLow + confidenceHi)/2
datafields = (expressionId, rnaQuantificationId, name,
expressionLevel, isNormalized, rawCount, score,
units, confidenceLow, confidenceHi)
self._db.addExpression(datafields)
expressionId += 1
self._db.batchAddExpression() |
def choices(self):
"""Gets the experiment choices"""
if self._choices == None:
self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]
return self._choices | Gets the experiment choices | Below is the the instruction that describes the task:
### Input:
Gets the experiment choices
### Response:
def choices(self):
"""Gets the experiment choices"""
if self._choices == None:
self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]
return self._choices |
def discrete_binary_search(tab, lo, hi):
"""Binary search in a table
:param tab: boolean monotone table with tab[hi] = True
:param int lo:
:param int hi: with hi >= lo
:returns: first index i in [lo,hi] such that tab[i]
:complexity: `O(log(hi-lo))`
"""
while lo < hi:
mid = lo + (hi - lo) // 2
if tab[mid]:
hi = mid
else:
lo = mid + 1
return lo | Binary search in a table
:param tab: boolean monotone table with tab[hi] = True
:param int lo:
:param int hi: with hi >= lo
:returns: first index i in [lo,hi] such that tab[i]
:complexity: `O(log(hi-lo))` | Below is the the instruction that describes the task:
### Input:
Binary search in a table
:param tab: boolean monotone table with tab[hi] = True
:param int lo:
:param int hi: with hi >= lo
:returns: first index i in [lo,hi] such that tab[i]
:complexity: `O(log(hi-lo))`
### Response:
def discrete_binary_search(tab, lo, hi):
"""Binary search in a table
:param tab: boolean monotone table with tab[hi] = True
:param int lo:
:param int hi: with hi >= lo
:returns: first index i in [lo,hi] such that tab[i]
:complexity: `O(log(hi-lo))`
"""
while lo < hi:
mid = lo + (hi - lo) // 2
if tab[mid]:
hi = mid
else:
lo = mid + 1
return lo |
def build_rectangle_dict(self,
north,
west,
south,
east,
stroke_color='#FF0000',
stroke_opacity=.8,
stroke_weight=2,
fill_color='#FF0000',
fill_opacity=.3,
):
""" Set a dictionary with the javascript class Rectangle parameters
This function sets a default drawing configuration if the user just
pass the rectangle bounds, but also allows to set each parameter
individually if the user wish so.
Args:
north (float): The north latitude bound
west (float): The west longitude bound
south (float): The south latitude bound
east (float): The east longitude bound
stroke_color (str): Sets the color of the rectangle border using
hexadecimal color notation
stroke_opacity (float): Sets the opacity of the rectangle border
in percentage. If stroke_opacity = 0, the border is transparent
stroke_weight (int): Sets the stroke girth in pixels.
fill_color (str): Sets the color of the rectangle fill using
hexadecimal color notation
fill_opacity (float): Sets the opacity of the rectangle fill
"""
rectangle = {
'stroke_color': stroke_color,
'stroke_opacity': stroke_opacity,
'stroke_weight': stroke_weight,
'fill_color': fill_color,
'fill_opacity': fill_opacity,
'bounds': {'north': north,
'west': west,
'south': south,
'east': east,
}
}
return rectangle | Set a dictionary with the javascript class Rectangle parameters
This function sets a default drawing configuration if the user just
pass the rectangle bounds, but also allows to set each parameter
individually if the user wish so.
Args:
north (float): The north latitude bound
west (float): The west longitude bound
south (float): The south latitude bound
east (float): The east longitude bound
stroke_color (str): Sets the color of the rectangle border using
hexadecimal color notation
stroke_opacity (float): Sets the opacity of the rectangle border
in percentage. If stroke_opacity = 0, the border is transparent
stroke_weight (int): Sets the stroke girth in pixels.
fill_color (str): Sets the color of the rectangle fill using
hexadecimal color notation
fill_opacity (float): Sets the opacity of the rectangle fill | Below is the the instruction that describes the task:
### Input:
Set a dictionary with the javascript class Rectangle parameters
This function sets a default drawing configuration if the user just
pass the rectangle bounds, but also allows to set each parameter
individually if the user wish so.
Args:
north (float): The north latitude bound
west (float): The west longitude bound
south (float): The south latitude bound
east (float): The east longitude bound
stroke_color (str): Sets the color of the rectangle border using
hexadecimal color notation
stroke_opacity (float): Sets the opacity of the rectangle border
in percentage. If stroke_opacity = 0, the border is transparent
stroke_weight (int): Sets the stroke girth in pixels.
fill_color (str): Sets the color of the rectangle fill using
hexadecimal color notation
fill_opacity (float): Sets the opacity of the rectangle fill
### Response:
def build_rectangle_dict(self,
north,
west,
south,
east,
stroke_color='#FF0000',
stroke_opacity=.8,
stroke_weight=2,
fill_color='#FF0000',
fill_opacity=.3,
):
""" Set a dictionary with the javascript class Rectangle parameters
This function sets a default drawing configuration if the user just
pass the rectangle bounds, but also allows to set each parameter
individually if the user wish so.
Args:
north (float): The north latitude bound
west (float): The west longitude bound
south (float): The south latitude bound
east (float): The east longitude bound
stroke_color (str): Sets the color of the rectangle border using
hexadecimal color notation
stroke_opacity (float): Sets the opacity of the rectangle border
in percentage. If stroke_opacity = 0, the border is transparent
stroke_weight (int): Sets the stroke girth in pixels.
fill_color (str): Sets the color of the rectangle fill using
hexadecimal color notation
fill_opacity (float): Sets the opacity of the rectangle fill
"""
rectangle = {
'stroke_color': stroke_color,
'stroke_opacity': stroke_opacity,
'stroke_weight': stroke_weight,
'fill_color': fill_color,
'fill_opacity': fill_opacity,
'bounds': {'north': north,
'west': west,
'south': south,
'east': east,
}
}
return rectangle |
def notify(self, subsystem, recipient, subject, body_html, body_text):
"""You can send messages either to channels and private groups by using the following formats
#channel-name
@username-direct-message
Args:
subsystem (`str`): Name of the subsystem originating the notification
recipient (`str`): Recipient
subject (`str`): Subject / title of the notification, not used for this notifier
body_html (`str)`: HTML formatted version of the message, not used for this notifier
body_text (`str`): Text formatted version of the message
Returns:
`None`
"""
if not re.match(self.validation, recipient, re.I):
raise ValueError('Invalid recipient provided')
if recipient.startswith('#'):
target_type = 'channel'
elif recipient.find('@') != -1:
target_type = 'user'
else:
self.log.error('Unknown contact type for Slack: {}'.format(recipient))
return
try:
self._send_message(
target_type=target_type,
target=recipient,
message=body_text,
title=subject
)
except SlackError as ex:
self.log.error('Failed sending message to {}: {}'.format(recipient, ex)) | You can send messages either to channels and private groups by using the following formats
#channel-name
@username-direct-message
Args:
subsystem (`str`): Name of the subsystem originating the notification
recipient (`str`): Recipient
subject (`str`): Subject / title of the notification, not used for this notifier
body_html (`str)`: HTML formatted version of the message, not used for this notifier
body_text (`str`): Text formatted version of the message
Returns:
`None` | Below is the the instruction that describes the task:
### Input:
You can send messages either to channels and private groups by using the following formats
#channel-name
@username-direct-message
Args:
subsystem (`str`): Name of the subsystem originating the notification
recipient (`str`): Recipient
subject (`str`): Subject / title of the notification, not used for this notifier
body_html (`str)`: HTML formatted version of the message, not used for this notifier
body_text (`str`): Text formatted version of the message
Returns:
`None`
### Response:
def notify(self, subsystem, recipient, subject, body_html, body_text):
"""You can send messages either to channels and private groups by using the following formats
#channel-name
@username-direct-message
Args:
subsystem (`str`): Name of the subsystem originating the notification
recipient (`str`): Recipient
subject (`str`): Subject / title of the notification, not used for this notifier
body_html (`str)`: HTML formatted version of the message, not used for this notifier
body_text (`str`): Text formatted version of the message
Returns:
`None`
"""
if not re.match(self.validation, recipient, re.I):
raise ValueError('Invalid recipient provided')
if recipient.startswith('#'):
target_type = 'channel'
elif recipient.find('@') != -1:
target_type = 'user'
else:
self.log.error('Unknown contact type for Slack: {}'.format(recipient))
return
try:
self._send_message(
target_type=target_type,
target=recipient,
message=body_text,
title=subject
)
except SlackError as ex:
self.log.error('Failed sending message to {}: {}'.format(recipient, ex)) |
def stop(self):
"""
Terminate any outstanding requests.
:returns: :class:``Deferred` which fires when fully stopped.
"""
self.stopping = True
# Cancel any outstanding request to our client
if self._batch_send_d:
self._batch_send_d.cancel()
# Do we have to worry about our looping call?
if self.batch_every_t is not None:
# Stop our looping call, and wait for the deferred to be called
if self._sendLooper is not None:
self._sendLooper.stop()
# Make sure requests that wasn't cancelled above are now
self._cancel_outstanding()
return self._sendLooperD or succeed(None) | Terminate any outstanding requests.
:returns: :class:``Deferred` which fires when fully stopped. | Below is the the instruction that describes the task:
### Input:
Terminate any outstanding requests.
:returns: :class:``Deferred` which fires when fully stopped.
### Response:
def stop(self):
"""
Terminate any outstanding requests.
:returns: :class:``Deferred` which fires when fully stopped.
"""
self.stopping = True
# Cancel any outstanding request to our client
if self._batch_send_d:
self._batch_send_d.cancel()
# Do we have to worry about our looping call?
if self.batch_every_t is not None:
# Stop our looping call, and wait for the deferred to be called
if self._sendLooper is not None:
self._sendLooper.stop()
# Make sure requests that wasn't cancelled above are now
self._cancel_outstanding()
return self._sendLooperD or succeed(None) |
def _set_nport_menu(self, v, load=False):
"""
Setter method for nport_menu, mapped from YANG variable /rbridge_id/ag/nport_menu (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_nport_menu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_nport_menu() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=nport_menu.nport_menu, is_container='container', presence=False, yang_name="nport-menu", rest_name="nport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set N_Port properties.', u'alt-name': u'nport'}}, namespace='urn:brocade.com:mgmt:brocade-ag', defining_module='brocade-ag', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """nport_menu must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=nport_menu.nport_menu, is_container='container', presence=False, yang_name="nport-menu", rest_name="nport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set N_Port properties.', u'alt-name': u'nport'}}, namespace='urn:brocade.com:mgmt:brocade-ag', defining_module='brocade-ag', yang_type='container', is_config=True)""",
})
self.__nport_menu = t
if hasattr(self, '_set'):
self._set() | Setter method for nport_menu, mapped from YANG variable /rbridge_id/ag/nport_menu (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_nport_menu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_nport_menu() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for nport_menu, mapped from YANG variable /rbridge_id/ag/nport_menu (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_nport_menu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_nport_menu() directly.
### Response:
def _set_nport_menu(self, v, load=False):
"""
Setter method for nport_menu, mapped from YANG variable /rbridge_id/ag/nport_menu (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_nport_menu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_nport_menu() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=nport_menu.nport_menu, is_container='container', presence=False, yang_name="nport-menu", rest_name="nport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set N_Port properties.', u'alt-name': u'nport'}}, namespace='urn:brocade.com:mgmt:brocade-ag', defining_module='brocade-ag', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """nport_menu must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=nport_menu.nport_menu, is_container='container', presence=False, yang_name="nport-menu", rest_name="nport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set N_Port properties.', u'alt-name': u'nport'}}, namespace='urn:brocade.com:mgmt:brocade-ag', defining_module='brocade-ag', yang_type='container', is_config=True)""",
})
self.__nport_menu = t
if hasattr(self, '_set'):
self._set() |
def set_topological_dag_upstreams(dag, ops, op_runs, runs_by_ops):
"""Set the upstream runs for the operation runs in the dag following the topological sort."""
sorted_ops = dags.sort_topologically(dag=dag)
for op_id in sorted_ops:
op_run_id = runs_by_ops[op_id]
op_run = op_runs[op_run_id]
set_op_upstreams(op_run=op_run, op=ops[op_id]) | Set the upstream runs for the operation runs in the dag following the topological sort. | Below is the the instruction that describes the task:
### Input:
Set the upstream runs for the operation runs in the dag following the topological sort.
### Response:
def set_topological_dag_upstreams(dag, ops, op_runs, runs_by_ops):
"""Set the upstream runs for the operation runs in the dag following the topological sort."""
sorted_ops = dags.sort_topologically(dag=dag)
for op_id in sorted_ops:
op_run_id = runs_by_ops[op_id]
op_run = op_runs[op_run_id]
set_op_upstreams(op_run=op_run, op=ops[op_id]) |
def epiweeks_in_year(year: int) -> int:
"""
Return number of epiweeks in a year
"""
if date_to_epiweek(epiweek_to_date(Epiweek(year, 53))).year == year:
return 53
else:
return 52 | Return number of epiweeks in a year | Below is the the instruction that describes the task:
### Input:
Return number of epiweeks in a year
### Response:
def epiweeks_in_year(year: int) -> int:
"""
Return number of epiweeks in a year
"""
if date_to_epiweek(epiweek_to_date(Epiweek(year, 53))).year == year:
return 53
else:
return 52 |
def provider(self, value):
"""
Validate and set a WMI provider. Default to `ProviderArchitecture.DEFAULT`
"""
result = None
# `None` defaults to `ProviderArchitecture.DEFAULT`
defaulted_value = value or ProviderArchitecture.DEFAULT
try:
parsed_value = int(defaulted_value)
except ValueError:
pass
else:
if parsed_value in ProviderArchitecture:
result = parsed_value
if result is None:
self.logger.error(u"Invalid '%s' WMI Provider Architecture. The parameter is ignored.", value)
self._provider = result or ProviderArchitecture.DEFAULT | Validate and set a WMI provider. Default to `ProviderArchitecture.DEFAULT` | Below is the the instruction that describes the task:
### Input:
Validate and set a WMI provider. Default to `ProviderArchitecture.DEFAULT`
### Response:
def provider(self, value):
"""
Validate and set a WMI provider. Default to `ProviderArchitecture.DEFAULT`
"""
result = None
# `None` defaults to `ProviderArchitecture.DEFAULT`
defaulted_value = value or ProviderArchitecture.DEFAULT
try:
parsed_value = int(defaulted_value)
except ValueError:
pass
else:
if parsed_value in ProviderArchitecture:
result = parsed_value
if result is None:
self.logger.error(u"Invalid '%s' WMI Provider Architecture. The parameter is ignored.", value)
self._provider = result or ProviderArchitecture.DEFAULT |
def format_exception(cls, instance, trcback, context=1):
"""
| Formats given exception.
| The code produce a similar output to :func:`traceback.format_exception` except that it allows frames to be excluded
from the stack if the given stack trace frame tag is found in the frame locals and set **True**.
:param cls: Exception class.
:type cls: object
:param instance: Exception instance.
:type instance: object
:param trcback: Traceback.
:type trcback: Traceback
:param context: Context being included.
:type context: int
:return: Formated exception.
:rtype: list
"""
stack = extract_stack(get_inner_most_frame(trcback), context=context)
output = []
output.append("Traceback (most recent call last):")
for frame, file_name, line_number, name, context, index in stack:
output.append(" File \"{0}\", line {1}, in {2}".format(file_name, line_number, name))
for line in context:
output.append(" {0}".format(line.strip()))
for line in traceback.format_exception_only(cls, instance):
output.append("{0}".format(line))
return output | | Formats given exception.
| The code produce a similar output to :func:`traceback.format_exception` except that it allows frames to be excluded
from the stack if the given stack trace frame tag is found in the frame locals and set **True**.
:param cls: Exception class.
:type cls: object
:param instance: Exception instance.
:type instance: object
:param trcback: Traceback.
:type trcback: Traceback
:param context: Context being included.
:type context: int
:return: Formated exception.
:rtype: list | Below is the the instruction that describes the task:
### Input:
| Formats given exception.
| The code produce a similar output to :func:`traceback.format_exception` except that it allows frames to be excluded
from the stack if the given stack trace frame tag is found in the frame locals and set **True**.
:param cls: Exception class.
:type cls: object
:param instance: Exception instance.
:type instance: object
:param trcback: Traceback.
:type trcback: Traceback
:param context: Context being included.
:type context: int
:return: Formated exception.
:rtype: list
### Response:
def format_exception(cls, instance, trcback, context=1):
"""
| Formats given exception.
| The code produce a similar output to :func:`traceback.format_exception` except that it allows frames to be excluded
from the stack if the given stack trace frame tag is found in the frame locals and set **True**.
:param cls: Exception class.
:type cls: object
:param instance: Exception instance.
:type instance: object
:param trcback: Traceback.
:type trcback: Traceback
:param context: Context being included.
:type context: int
:return: Formated exception.
:rtype: list
"""
stack = extract_stack(get_inner_most_frame(trcback), context=context)
output = []
output.append("Traceback (most recent call last):")
for frame, file_name, line_number, name, context, index in stack:
output.append(" File \"{0}\", line {1}, in {2}".format(file_name, line_number, name))
for line in context:
output.append(" {0}".format(line.strip()))
for line in traceback.format_exception_only(cls, instance):
output.append("{0}".format(line))
return output |
def get_sheet_list(xl_path: str) -> List:
"""Return a list with the name of the sheets in
the Excel file in `xl_path`.
"""
wb = read_xl(xl_path)
if hasattr(wb, 'sheetnames'):
return wb.sheetnames
else:
return wb.sheet_names() | Return a list with the name of the sheets in
the Excel file in `xl_path`. | Below is the the instruction that describes the task:
### Input:
Return a list with the name of the sheets in
the Excel file in `xl_path`.
### Response:
def get_sheet_list(xl_path: str) -> List:
"""Return a list with the name of the sheets in
the Excel file in `xl_path`.
"""
wb = read_xl(xl_path)
if hasattr(wb, 'sheetnames'):
return wb.sheetnames
else:
return wb.sheet_names() |
def extract(self, dest_fldr, password=''):
"""
unzip the file contents to the dest_folder
(create if it doesn't exist)
and then return the list of files extracted
"""
#print('extracting to ' + dest_fldr)
if self.type == 'ZIP':
self._extract_zip(dest_fldr, password)
elif self.type == 'GZ':
self._extract_gz(dest_fldr, password)
elif self.type == 'TAR':
self._extract_tar(dest_fldr, self.fname)
else:
raise('Unknown archive file type') | unzip the file contents to the dest_folder
(create if it doesn't exist)
and then return the list of files extracted | Below is the the instruction that describes the task:
### Input:
unzip the file contents to the dest_folder
(create if it doesn't exist)
and then return the list of files extracted
### Response:
def extract(self, dest_fldr, password=''):
"""
unzip the file contents to the dest_folder
(create if it doesn't exist)
and then return the list of files extracted
"""
#print('extracting to ' + dest_fldr)
if self.type == 'ZIP':
self._extract_zip(dest_fldr, password)
elif self.type == 'GZ':
self._extract_gz(dest_fldr, password)
elif self.type == 'TAR':
self._extract_tar(dest_fldr, self.fname)
else:
raise('Unknown archive file type') |
def insert(self, matcher, obj):
'''
Insert a new matcher
:param matcher: an EventMatcher
:param obj: object to return
'''
current = self.subtree(matcher, True)
#current.matchers[(obj, matcher)] = None
if current._use_dict:
current.matchers_dict[(obj, matcher)] = None
else:
current.matchers_list.append((obj, matcher))
return current | Insert a new matcher
:param matcher: an EventMatcher
:param obj: object to return | Below is the the instruction that describes the task:
### Input:
Insert a new matcher
:param matcher: an EventMatcher
:param obj: object to return
### Response:
def insert(self, matcher, obj):
'''
Insert a new matcher
:param matcher: an EventMatcher
:param obj: object to return
'''
current = self.subtree(matcher, True)
#current.matchers[(obj, matcher)] = None
if current._use_dict:
current.matchers_dict[(obj, matcher)] = None
else:
current.matchers_list.append((obj, matcher))
return current |
def config(name='DATABASE_URL', default='sqlite://:memory:'):
"""Returns configured DATABASE dictionary from DATABASE_URL."""
config = {}
s = env(name, default)
if s:
config = parse_database_url(s)
return config | Returns configured DATABASE dictionary from DATABASE_URL. | Below is the the instruction that describes the task:
### Input:
Returns configured DATABASE dictionary from DATABASE_URL.
### Response:
def config(name='DATABASE_URL', default='sqlite://:memory:'):
"""Returns configured DATABASE dictionary from DATABASE_URL."""
config = {}
s = env(name, default)
if s:
config = parse_database_url(s)
return config |
def process_ssh(self, data, name):
"""
Processes SSH keys
:param data:
:param name:
:return:
"""
if data is None or len(data) == 0:
return
ret = []
try:
lines = [x.strip() for x in data.split(b'\n')]
for idx, line in enumerate(lines):
ret.append(self.process_ssh_line(line, name, idx))
except Exception as e:
logger.debug('Exception in processing SSH public key %s : %s' % (name, e))
self.trace_logger.log(e)
return ret | Processes SSH keys
:param data:
:param name:
:return: | Below is the the instruction that describes the task:
### Input:
Processes SSH keys
:param data:
:param name:
:return:
### Response:
def process_ssh(self, data, name):
"""
Processes SSH keys
:param data:
:param name:
:return:
"""
if data is None or len(data) == 0:
return
ret = []
try:
lines = [x.strip() for x in data.split(b'\n')]
for idx, line in enumerate(lines):
ret.append(self.process_ssh_line(line, name, idx))
except Exception as e:
logger.debug('Exception in processing SSH public key %s : %s' % (name, e))
self.trace_logger.log(e)
return ret |
def is_lower(self):
"""Asserts that val is non-empty string and all characters are lowercase."""
if not isinstance(self.val, str_types):
raise TypeError('val is not a string')
if len(self.val) == 0:
raise ValueError('val is empty')
if self.val != self.val.lower():
self._err('Expected <%s> to contain only lowercase chars, but did not.' % self.val)
return self | Asserts that val is non-empty string and all characters are lowercase. | Below is the the instruction that describes the task:
### Input:
Asserts that val is non-empty string and all characters are lowercase.
### Response:
def is_lower(self):
"""Asserts that val is non-empty string and all characters are lowercase."""
if not isinstance(self.val, str_types):
raise TypeError('val is not a string')
if len(self.val) == 0:
raise ValueError('val is empty')
if self.val != self.val.lower():
self._err('Expected <%s> to contain only lowercase chars, but did not.' % self.val)
return self |
def idle_send_acks_and_nacks(self):
'''Send packets to UAV in idle loop'''
max_blocks_to_send = 10
blocks_sent = 0
i=0
now = time.time()
while i < len(self.blocks_to_ack_and_nack) and blocks_sent < max_blocks_to_send:
# print("ACKLIST: %s" % ([x[1] for x in self.blocks_to_ack_and_nack],))
stuff = self.blocks_to_ack_and_nack[i]
[master, block, status, first_sent, last_sent] = stuff
if status == 1:
# print("DFLogger: ACKing block (%d)" % (block,))
self.master.mav.remote_log_block_status_send(block,status)
blocks_sent += 1
del self.acking_blocks[block]
del self.blocks_to_ack_and_nack[i]
continue
if block not in self.missing_blocks:
# we've received this block now
del self.blocks_to_ack_and_nack[i]
continue
# give up on packet if we have seen one with a much higher
# number:
if self.block_cnt - block > 200 or \
now - first_sent > 60:
print("DFLogger: Abandoning block (%d)" % (block,))
del self.blocks_to_ack_and_nack[i]
del self.missing_blocks[block]
self.abandoned += 1
continue
i += 1
# only send each nack every-so-often:
if last_sent is not None:
if now - last_sent < 0.1:
continue
print("DFLogger: NACKing block (%d)" % (block,))
self.master.mav.remote_log_block_status_send(block,status)
blocks_sent += 1
stuff[4] = now | Send packets to UAV in idle loop | Below is the the instruction that describes the task:
### Input:
Send packets to UAV in idle loop
### Response:
def idle_send_acks_and_nacks(self):
'''Send packets to UAV in idle loop'''
max_blocks_to_send = 10
blocks_sent = 0
i=0
now = time.time()
while i < len(self.blocks_to_ack_and_nack) and blocks_sent < max_blocks_to_send:
# print("ACKLIST: %s" % ([x[1] for x in self.blocks_to_ack_and_nack],))
stuff = self.blocks_to_ack_and_nack[i]
[master, block, status, first_sent, last_sent] = stuff
if status == 1:
# print("DFLogger: ACKing block (%d)" % (block,))
self.master.mav.remote_log_block_status_send(block,status)
blocks_sent += 1
del self.acking_blocks[block]
del self.blocks_to_ack_and_nack[i]
continue
if block not in self.missing_blocks:
# we've received this block now
del self.blocks_to_ack_and_nack[i]
continue
# give up on packet if we have seen one with a much higher
# number:
if self.block_cnt - block > 200 or \
now - first_sent > 60:
print("DFLogger: Abandoning block (%d)" % (block,))
del self.blocks_to_ack_and_nack[i]
del self.missing_blocks[block]
self.abandoned += 1
continue
i += 1
# only send each nack every-so-often:
if last_sent is not None:
if now - last_sent < 0.1:
continue
print("DFLogger: NACKing block (%d)" % (block,))
self.master.mav.remote_log_block_status_send(block,status)
blocks_sent += 1
stuff[4] = now |
def list_all_option_values(cls, **kwargs):
"""List OptionValues
Return a list of OptionValues
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_option_values(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[OptionValue]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_option_values_with_http_info(**kwargs)
else:
(data) = cls._list_all_option_values_with_http_info(**kwargs)
return data | List OptionValues
Return a list of OptionValues
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_option_values(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[OptionValue]
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
List OptionValues
Return a list of OptionValues
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_option_values(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[OptionValue]
If the method is called asynchronously,
returns the request thread.
### Response:
def list_all_option_values(cls, **kwargs):
"""List OptionValues
Return a list of OptionValues
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_option_values(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[OptionValue]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_option_values_with_http_info(**kwargs)
else:
(data) = cls._list_all_option_values_with_http_info(**kwargs)
return data |
def _delta_t_supconj_perpass(period, ecc, per0):
"""
time shift between superior conjuction and periastron passage
"""
ups_sc = np.pi/2-per0
E_sc = 2*np.arctan( np.sqrt((1-ecc)/(1+ecc)) * np.tan(ups_sc/2) )
M_sc = E_sc - ecc*np.sin(E_sc)
return period*(M_sc/2./np.pi) | time shift between superior conjuction and periastron passage | Below is the the instruction that describes the task:
### Input:
time shift between superior conjuction and periastron passage
### Response:
def _delta_t_supconj_perpass(period, ecc, per0):
"""
time shift between superior conjuction and periastron passage
"""
ups_sc = np.pi/2-per0
E_sc = 2*np.arctan( np.sqrt((1-ecc)/(1+ecc)) * np.tan(ups_sc/2) )
M_sc = E_sc - ecc*np.sin(E_sc)
return period*(M_sc/2./np.pi) |
def ToolMatches(tools=None, version='HEAD'):
""" Get the tools paths and versions that were specified """
matches = []
if tools:
for tool in tools:
match_version = version
if tool[1] != '':
match_version = tool[1]
match = ''
if tool[0].endswith('/'):
match = tool[0][:-1]
elif tool[0] != '.':
match = tool[0]
if not match.startswith('/') and match != '':
match = '/'+match
matches.append((match, match_version))
return matches | Get the tools paths and versions that were specified | Below is the the instruction that describes the task:
### Input:
Get the tools paths and versions that were specified
### Response:
def ToolMatches(tools=None, version='HEAD'):
""" Get the tools paths and versions that were specified """
matches = []
if tools:
for tool in tools:
match_version = version
if tool[1] != '':
match_version = tool[1]
match = ''
if tool[0].endswith('/'):
match = tool[0][:-1]
elif tool[0] != '.':
match = tool[0]
if not match.startswith('/') and match != '':
match = '/'+match
matches.append((match, match_version))
return matches |
def switch_tab(self):
"""
takes care of the action that happen when switching between tabs
e.g. activates and deactives probes
"""
current_tab = str(self.tabWidget.tabText(self.tabWidget.currentIndex()))
if self.current_script is None:
if current_tab == 'Probes':
self.read_probes.start()
self.read_probes.updateProgress.connect(self.update_probes)
else:
try:
self.read_probes.updateProgress.disconnect()
self.read_probes.quit()
except TypeError:
pass
if current_tab == 'Instruments':
self.refresh_instruments()
else:
self.log('updating probes / instruments disabled while script is running!') | takes care of the action that happen when switching between tabs
e.g. activates and deactives probes | Below is the the instruction that describes the task:
### Input:
takes care of the action that happen when switching between tabs
e.g. activates and deactives probes
### Response:
def switch_tab(self):
"""
takes care of the action that happen when switching between tabs
e.g. activates and deactives probes
"""
current_tab = str(self.tabWidget.tabText(self.tabWidget.currentIndex()))
if self.current_script is None:
if current_tab == 'Probes':
self.read_probes.start()
self.read_probes.updateProgress.connect(self.update_probes)
else:
try:
self.read_probes.updateProgress.disconnect()
self.read_probes.quit()
except TypeError:
pass
if current_tab == 'Instruments':
self.refresh_instruments()
else:
self.log('updating probes / instruments disabled while script is running!') |
def distort(value):
"""
Distorts a string by randomly replacing characters in it.
:param value: a string to distort.
:return: a distored string.
"""
value = value.lower()
if (RandomBoolean.chance(1, 5)):
value = value[0:1].upper() + value[1:]
if (RandomBoolean.chance(1, 3)):
value = value + random.choice(_symbols)
return value | Distorts a string by randomly replacing characters in it.
:param value: a string to distort.
:return: a distored string. | Below is the the instruction that describes the task:
### Input:
Distorts a string by randomly replacing characters in it.
:param value: a string to distort.
:return: a distored string.
### Response:
def distort(value):
"""
Distorts a string by randomly replacing characters in it.
:param value: a string to distort.
:return: a distored string.
"""
value = value.lower()
if (RandomBoolean.chance(1, 5)):
value = value[0:1].upper() + value[1:]
if (RandomBoolean.chance(1, 3)):
value = value + random.choice(_symbols)
return value |
def _normalize_cwl_inputs(items):
"""Extract variation and validation data from CWL input list of batched samples.
"""
with_validate = {}
vrn_files = []
ready_items = []
batch_samples = []
for data in (cwlutils.normalize_missing(utils.to_single_data(d)) for d in items):
batch_samples.append(dd.get_sample_name(data))
if tz.get_in(["config", "algorithm", "validate"], data):
with_validate[_checksum(tz.get_in(["config", "algorithm", "validate"], data))] = data
if data.get("vrn_file"):
vrn_files.append(data["vrn_file"])
ready_items.append(data)
if len(with_validate) == 0:
data = _pick_lead_item(ready_items)
data["batch_samples"] = batch_samples
return data
else:
assert len(with_validate) == 1, len(with_validate)
assert len(set(vrn_files)) == 1, set(vrn_files)
data = _pick_lead_item(with_validate.values())
data["batch_samples"] = batch_samples
data["vrn_file"] = vrn_files[0]
return data | Extract variation and validation data from CWL input list of batched samples. | Below is the the instruction that describes the task:
### Input:
Extract variation and validation data from CWL input list of batched samples.
### Response:
def _normalize_cwl_inputs(items):
"""Extract variation and validation data from CWL input list of batched samples.
"""
with_validate = {}
vrn_files = []
ready_items = []
batch_samples = []
for data in (cwlutils.normalize_missing(utils.to_single_data(d)) for d in items):
batch_samples.append(dd.get_sample_name(data))
if tz.get_in(["config", "algorithm", "validate"], data):
with_validate[_checksum(tz.get_in(["config", "algorithm", "validate"], data))] = data
if data.get("vrn_file"):
vrn_files.append(data["vrn_file"])
ready_items.append(data)
if len(with_validate) == 0:
data = _pick_lead_item(ready_items)
data["batch_samples"] = batch_samples
return data
else:
assert len(with_validate) == 1, len(with_validate)
assert len(set(vrn_files)) == 1, set(vrn_files)
data = _pick_lead_item(with_validate.values())
data["batch_samples"] = batch_samples
data["vrn_file"] = vrn_files[0]
return data |
def find_name(tagtype: str, name: str, language: {str, 'Language', None}=None):
"""
Find the subtag of a particular `tagtype` that has the given `name`.
The default language, "und", will allow matching names in any language,
so you can get the code 'fr' by looking up "French", "Français", or
"francés".
Occasionally, names are ambiguous in a way that can be resolved by
specifying what name the language is supposed to be in. For example,
there is a language named 'Malayo' in English, but it's different from
the language named 'Malayo' in Spanish (which is Malay). Specifying the
language will look up the name in a trie that is only in that language.
In a previous version, we thought we were going to deprecate the
`language` parameter, as there weren't significant cases of conflicts
in names of things between languages. Well, we got more data, and
conflicts in names are everywhere.
Specifying the language that the name should be in is still not
required, but it will help to make sure that names can be
round-tripped.
>>> Language.find_name('language', 'francés')
Language.make(language='fr')
>>> Language.find_name('region', 'United Kingdom')
Language.make(region='GB')
>>> Language.find_name('script', 'Arabic')
Language.make(script='Arab')
>>> Language.find_name('language', 'norsk bokmål')
Language.make(language='nb')
>>> Language.find_name('language', 'norsk')
Language.make(language='no')
>>> Language.find_name('language', 'norsk', 'en')
Traceback (most recent call last):
...
LookupError: Can't find any language named 'norsk'
>>> Language.find_name('language', 'norsk', 'no')
Language.make(language='no')
>>> Language.find_name('language', 'malayo', 'en')
Language.make(language='mbp')
>>> Language.find_name('language', 'malayo', 'es')
Language.make(language='ms')
Some langauge names resolve to more than a language. For example,
the name 'Brazilian Portuguese' resolves to a language and a region,
and 'Simplified Chinese' resolves to a language and a script. In these
cases, a Language object with multiple subtags will be returned.
>>> Language.find_name('language', 'Brazilian Portuguese', 'en')
Language.make(language='pt', region='BR')
>>> Language.find_name('language', 'Simplified Chinese', 'en')
Language.make(language='zh', script='Hans')
A small amount of fuzzy matching is supported: if the name can be
shortened to match a single language name, you get that language.
This allows, for example, "Hakka dialect" to match "Hakka".
>>> Language.find_name('language', 'Hakka dialect')
Language.make(language='hak')
"""
# No matter what form of language we got, normalize it to a single
# language subtag
if isinstance(language, Language):
language = language.language
elif isinstance(language, str):
language = get(language).language
if language is None:
language = 'und'
code = name_to_code(tagtype, name, language)
if code is None:
raise LookupError("Can't find any %s named %r" % (tagtype, name))
if '-' in code:
return Language.get(code)
else:
data = {tagtype: code}
return Language.make(**data) | Find the subtag of a particular `tagtype` that has the given `name`.
The default language, "und", will allow matching names in any language,
so you can get the code 'fr' by looking up "French", "Français", or
"francés".
Occasionally, names are ambiguous in a way that can be resolved by
specifying what name the language is supposed to be in. For example,
there is a language named 'Malayo' in English, but it's different from
the language named 'Malayo' in Spanish (which is Malay). Specifying the
language will look up the name in a trie that is only in that language.
In a previous version, we thought we were going to deprecate the
`language` parameter, as there weren't significant cases of conflicts
in names of things between languages. Well, we got more data, and
conflicts in names are everywhere.
Specifying the language that the name should be in is still not
required, but it will help to make sure that names can be
round-tripped.
>>> Language.find_name('language', 'francés')
Language.make(language='fr')
>>> Language.find_name('region', 'United Kingdom')
Language.make(region='GB')
>>> Language.find_name('script', 'Arabic')
Language.make(script='Arab')
>>> Language.find_name('language', 'norsk bokmål')
Language.make(language='nb')
>>> Language.find_name('language', 'norsk')
Language.make(language='no')
>>> Language.find_name('language', 'norsk', 'en')
Traceback (most recent call last):
...
LookupError: Can't find any language named 'norsk'
>>> Language.find_name('language', 'norsk', 'no')
Language.make(language='no')
>>> Language.find_name('language', 'malayo', 'en')
Language.make(language='mbp')
>>> Language.find_name('language', 'malayo', 'es')
Language.make(language='ms')
Some langauge names resolve to more than a language. For example,
the name 'Brazilian Portuguese' resolves to a language and a region,
and 'Simplified Chinese' resolves to a language and a script. In these
cases, a Language object with multiple subtags will be returned.
>>> Language.find_name('language', 'Brazilian Portuguese', 'en')
Language.make(language='pt', region='BR')
>>> Language.find_name('language', 'Simplified Chinese', 'en')
Language.make(language='zh', script='Hans')
A small amount of fuzzy matching is supported: if the name can be
shortened to match a single language name, you get that language.
This allows, for example, "Hakka dialect" to match "Hakka".
>>> Language.find_name('language', 'Hakka dialect')
Language.make(language='hak') | Below is the the instruction that describes the task:
### Input:
Find the subtag of a particular `tagtype` that has the given `name`.
The default language, "und", will allow matching names in any language,
so you can get the code 'fr' by looking up "French", "Français", or
"francés".
Occasionally, names are ambiguous in a way that can be resolved by
specifying what name the language is supposed to be in. For example,
there is a language named 'Malayo' in English, but it's different from
the language named 'Malayo' in Spanish (which is Malay). Specifying the
language will look up the name in a trie that is only in that language.
In a previous version, we thought we were going to deprecate the
`language` parameter, as there weren't significant cases of conflicts
in names of things between languages. Well, we got more data, and
conflicts in names are everywhere.
Specifying the language that the name should be in is still not
required, but it will help to make sure that names can be
round-tripped.
>>> Language.find_name('language', 'francés')
Language.make(language='fr')
>>> Language.find_name('region', 'United Kingdom')
Language.make(region='GB')
>>> Language.find_name('script', 'Arabic')
Language.make(script='Arab')
>>> Language.find_name('language', 'norsk bokmål')
Language.make(language='nb')
>>> Language.find_name('language', 'norsk')
Language.make(language='no')
>>> Language.find_name('language', 'norsk', 'en')
Traceback (most recent call last):
...
LookupError: Can't find any language named 'norsk'
>>> Language.find_name('language', 'norsk', 'no')
Language.make(language='no')
>>> Language.find_name('language', 'malayo', 'en')
Language.make(language='mbp')
>>> Language.find_name('language', 'malayo', 'es')
Language.make(language='ms')
Some langauge names resolve to more than a language. For example,
the name 'Brazilian Portuguese' resolves to a language and a region,
and 'Simplified Chinese' resolves to a language and a script. In these
cases, a Language object with multiple subtags will be returned.
>>> Language.find_name('language', 'Brazilian Portuguese', 'en')
Language.make(language='pt', region='BR')
>>> Language.find_name('language', 'Simplified Chinese', 'en')
Language.make(language='zh', script='Hans')
A small amount of fuzzy matching is supported: if the name can be
shortened to match a single language name, you get that language.
This allows, for example, "Hakka dialect" to match "Hakka".
>>> Language.find_name('language', 'Hakka dialect')
Language.make(language='hak')
### Response:
def find_name(tagtype: str, name: str, language: {str, 'Language', None}=None):
"""
Find the subtag of a particular `tagtype` that has the given `name`.
The default language, "und", will allow matching names in any language,
so you can get the code 'fr' by looking up "French", "Français", or
"francés".
Occasionally, names are ambiguous in a way that can be resolved by
specifying what name the language is supposed to be in. For example,
there is a language named 'Malayo' in English, but it's different from
the language named 'Malayo' in Spanish (which is Malay). Specifying the
language will look up the name in a trie that is only in that language.
In a previous version, we thought we were going to deprecate the
`language` parameter, as there weren't significant cases of conflicts
in names of things between languages. Well, we got more data, and
conflicts in names are everywhere.
Specifying the language that the name should be in is still not
required, but it will help to make sure that names can be
round-tripped.
>>> Language.find_name('language', 'francés')
Language.make(language='fr')
>>> Language.find_name('region', 'United Kingdom')
Language.make(region='GB')
>>> Language.find_name('script', 'Arabic')
Language.make(script='Arab')
>>> Language.find_name('language', 'norsk bokmål')
Language.make(language='nb')
>>> Language.find_name('language', 'norsk')
Language.make(language='no')
>>> Language.find_name('language', 'norsk', 'en')
Traceback (most recent call last):
...
LookupError: Can't find any language named 'norsk'
>>> Language.find_name('language', 'norsk', 'no')
Language.make(language='no')
>>> Language.find_name('language', 'malayo', 'en')
Language.make(language='mbp')
>>> Language.find_name('language', 'malayo', 'es')
Language.make(language='ms')
Some langauge names resolve to more than a language. For example,
the name 'Brazilian Portuguese' resolves to a language and a region,
and 'Simplified Chinese' resolves to a language and a script. In these
cases, a Language object with multiple subtags will be returned.
>>> Language.find_name('language', 'Brazilian Portuguese', 'en')
Language.make(language='pt', region='BR')
>>> Language.find_name('language', 'Simplified Chinese', 'en')
Language.make(language='zh', script='Hans')
A small amount of fuzzy matching is supported: if the name can be
shortened to match a single language name, you get that language.
This allows, for example, "Hakka dialect" to match "Hakka".
>>> Language.find_name('language', 'Hakka dialect')
Language.make(language='hak')
"""
# No matter what form of language we got, normalize it to a single
# language subtag
if isinstance(language, Language):
language = language.language
elif isinstance(language, str):
language = get(language).language
if language is None:
language = 'und'
code = name_to_code(tagtype, name, language)
if code is None:
raise LookupError("Can't find any %s named %r" % (tagtype, name))
if '-' in code:
return Language.get(code)
else:
data = {tagtype: code}
return Language.make(**data) |
def query(cls, database, map_fun, reduce_fun,
language='javascript', **options):
"""Execute a CouchDB temporary view and map the result values back to
objects of this mapping.
Note that by default, any properties of the document that are not
included in the values of the view will be treated as if they were
missing from the document. If you want to load the full document for
every row, set the ``include_docs`` option to ``True``.
"""
return database.query(map_fun, reduce_fun=reduce_fun, language=language,
wrapper=cls._wrap_row, **options) | Execute a CouchDB temporary view and map the result values back to
objects of this mapping.
Note that by default, any properties of the document that are not
included in the values of the view will be treated as if they were
missing from the document. If you want to load the full document for
every row, set the ``include_docs`` option to ``True``. | Below is the the instruction that describes the task:
### Input:
Execute a CouchDB temporary view and map the result values back to
objects of this mapping.
Note that by default, any properties of the document that are not
included in the values of the view will be treated as if they were
missing from the document. If you want to load the full document for
every row, set the ``include_docs`` option to ``True``.
### Response:
def query(cls, database, map_fun, reduce_fun,
language='javascript', **options):
"""Execute a CouchDB temporary view and map the result values back to
objects of this mapping.
Note that by default, any properties of the document that are not
included in the values of the view will be treated as if they were
missing from the document. If you want to load the full document for
every row, set the ``include_docs`` option to ``True``.
"""
return database.query(map_fun, reduce_fun=reduce_fun, language=language,
wrapper=cls._wrap_row, **options) |
def get_key_signature_accidentals(key='C'):
"""Return the list of accidentals present into the key signature."""
accidentals = get_key_signature(key)
res = []
if accidentals < 0:
for i in range(-accidentals):
res.append('{0}{1}'.format(list(reversed(notes.fifths))[i], 'b'))
elif accidentals > 0:
for i in range(accidentals):
res.append('{0}{1}'.format(notes.fifths[i], '#'))
return res | Return the list of accidentals present into the key signature. | Below is the the instruction that describes the task:
### Input:
Return the list of accidentals present into the key signature.
### Response:
def get_key_signature_accidentals(key='C'):
"""Return the list of accidentals present into the key signature."""
accidentals = get_key_signature(key)
res = []
if accidentals < 0:
for i in range(-accidentals):
res.append('{0}{1}'.format(list(reversed(notes.fifths))[i], 'b'))
elif accidentals > 0:
for i in range(accidentals):
res.append('{0}{1}'.format(notes.fifths[i], '#'))
return res |
def Sphere(pos=(0, 0, 0), r=1, c="r", alpha=1, res=24):
"""Build a sphere at position `pos` of radius `r`.
|Sphere|
"""
ss = vtk.vtkSphereSource()
ss.SetRadius(r)
ss.SetThetaResolution(2 * res)
ss.SetPhiResolution(res)
ss.Update()
pd = ss.GetOutput()
actor = Actor(pd, c, alpha)
actor.GetProperty().SetInterpolationToPhong()
actor.SetPosition(pos)
settings.collectable_actors.append(actor)
return actor | Build a sphere at position `pos` of radius `r`.
|Sphere| | Below is the the instruction that describes the task:
### Input:
Build a sphere at position `pos` of radius `r`.
|Sphere|
### Response:
def Sphere(pos=(0, 0, 0), r=1, c="r", alpha=1, res=24):
"""Build a sphere at position `pos` of radius `r`.
|Sphere|
"""
ss = vtk.vtkSphereSource()
ss.SetRadius(r)
ss.SetThetaResolution(2 * res)
ss.SetPhiResolution(res)
ss.Update()
pd = ss.GetOutput()
actor = Actor(pd, c, alpha)
actor.GetProperty().SetInterpolationToPhong()
actor.SetPosition(pos)
settings.collectable_actors.append(actor)
return actor |
def on_message(self, headers, message):
"""
Event method that gets called when this listener has received a JMS
message (representing an HMC notification).
Parameters:
headers (dict): JMS message headers, as described for `headers` tuple
item returned by the
:meth:`~zhmcclient.NotificationReceiver.notifications` method.
message (string): JMS message body as a string, which contains a
serialized JSON object. The JSON object is described in the
`message` tuple item returned by the
:meth:`~zhmcclient.NotificationReceiver.notifications` method).
"""
with self._handover_cond:
# Wait until receiver has processed the previous notification
while len(self._handover_dict) > 0:
self._handover_cond.wait(self._wait_timeout)
# Indicate to receiver that there is a new notification
self._handover_dict['headers'] = headers
try:
msg_obj = json.loads(message)
except Exception:
raise # TODO: Find better exception for this case
self._handover_dict['message'] = msg_obj
self._handover_cond.notifyAll() | Event method that gets called when this listener has received a JMS
message (representing an HMC notification).
Parameters:
headers (dict): JMS message headers, as described for `headers` tuple
item returned by the
:meth:`~zhmcclient.NotificationReceiver.notifications` method.
message (string): JMS message body as a string, which contains a
serialized JSON object. The JSON object is described in the
`message` tuple item returned by the
:meth:`~zhmcclient.NotificationReceiver.notifications` method). | Below is the the instruction that describes the task:
### Input:
Event method that gets called when this listener has received a JMS
message (representing an HMC notification).
Parameters:
headers (dict): JMS message headers, as described for `headers` tuple
item returned by the
:meth:`~zhmcclient.NotificationReceiver.notifications` method.
message (string): JMS message body as a string, which contains a
serialized JSON object. The JSON object is described in the
`message` tuple item returned by the
:meth:`~zhmcclient.NotificationReceiver.notifications` method).
### Response:
def on_message(self, headers, message):
"""
Event method that gets called when this listener has received a JMS
message (representing an HMC notification).
Parameters:
headers (dict): JMS message headers, as described for `headers` tuple
item returned by the
:meth:`~zhmcclient.NotificationReceiver.notifications` method.
message (string): JMS message body as a string, which contains a
serialized JSON object. The JSON object is described in the
`message` tuple item returned by the
:meth:`~zhmcclient.NotificationReceiver.notifications` method).
"""
with self._handover_cond:
# Wait until receiver has processed the previous notification
while len(self._handover_dict) > 0:
self._handover_cond.wait(self._wait_timeout)
# Indicate to receiver that there is a new notification
self._handover_dict['headers'] = headers
try:
msg_obj = json.loads(message)
except Exception:
raise # TODO: Find better exception for this case
self._handover_dict['message'] = msg_obj
self._handover_cond.notifyAll() |
def constant_tuples_ordered_by_id(self):
"""
Returns
-------
constants: [(str, Constant)]
A list of tuples mapping strings to constants constants ordered by id
"""
return sorted(list(self.constant_tuple_dict), key=lambda constant_tuple: constant_tuple.constant.id) | Returns
-------
constants: [(str, Constant)]
A list of tuples mapping strings to constants constants ordered by id | Below is the the instruction that describes the task:
### Input:
Returns
-------
constants: [(str, Constant)]
A list of tuples mapping strings to constants constants ordered by id
### Response:
def constant_tuples_ordered_by_id(self):
"""
Returns
-------
constants: [(str, Constant)]
A list of tuples mapping strings to constants constants ordered by id
"""
return sorted(list(self.constant_tuple_dict), key=lambda constant_tuple: constant_tuple.constant.id) |
def rgb2term(r: int, g: int, b: int) -> str:
""" Convert an rgb value to a terminal code. """
return hex2term_map[rgb2termhex(r, g, b)] | Convert an rgb value to a terminal code. | Below is the the instruction that describes the task:
### Input:
Convert an rgb value to a terminal code.
### Response:
def rgb2term(r: int, g: int, b: int) -> str:
""" Convert an rgb value to a terminal code. """
return hex2term_map[rgb2termhex(r, g, b)] |
async def get_prefix(self, message):
"""|coro|
Retrieves the prefix the bot is listening to
with the message as a context.
Parameters
-----------
message: :class:`discord.Message`
The message context to get the prefix of.
Returns
--------
Union[List[:class:`str`], :class:`str`]
A list of prefixes or a single prefix that the bot is
listening for.
"""
prefix = ret = self.command_prefix
if callable(prefix):
ret = await discord.utils.maybe_coroutine(prefix, self, message)
if not isinstance(ret, str):
try:
ret = list(ret)
except TypeError:
# It's possible that a generator raised this exception. Don't
# replace it with our own error if that's the case.
if isinstance(ret, collections.Iterable):
raise
raise TypeError("command_prefix must be plain string, iterable of strings, or callable "
"returning either of these, not {}".format(ret.__class__.__name__))
if not ret:
raise ValueError("Iterable command_prefix must contain at least one prefix")
return ret | |coro|
Retrieves the prefix the bot is listening to
with the message as a context.
Parameters
-----------
message: :class:`discord.Message`
The message context to get the prefix of.
Returns
--------
Union[List[:class:`str`], :class:`str`]
A list of prefixes or a single prefix that the bot is
listening for. | Below is the the instruction that describes the task:
### Input:
|coro|
Retrieves the prefix the bot is listening to
with the message as a context.
Parameters
-----------
message: :class:`discord.Message`
The message context to get the prefix of.
Returns
--------
Union[List[:class:`str`], :class:`str`]
A list of prefixes or a single prefix that the bot is
listening for.
### Response:
async def get_prefix(self, message):
"""|coro|
Retrieves the prefix the bot is listening to
with the message as a context.
Parameters
-----------
message: :class:`discord.Message`
The message context to get the prefix of.
Returns
--------
Union[List[:class:`str`], :class:`str`]
A list of prefixes or a single prefix that the bot is
listening for.
"""
prefix = ret = self.command_prefix
if callable(prefix):
ret = await discord.utils.maybe_coroutine(prefix, self, message)
if not isinstance(ret, str):
try:
ret = list(ret)
except TypeError:
# It's possible that a generator raised this exception. Don't
# replace it with our own error if that's the case.
if isinstance(ret, collections.Iterable):
raise
raise TypeError("command_prefix must be plain string, iterable of strings, or callable "
"returning either of these, not {}".format(ret.__class__.__name__))
if not ret:
raise ValueError("Iterable command_prefix must contain at least one prefix")
return ret |
def get_token_stream(source: str) -> CommonTokenStream:
""" Get the antlr token stream.
"""
lexer = LuaLexer(InputStream(source))
stream = CommonTokenStream(lexer)
return stream | Get the antlr token stream. | Below is the the instruction that describes the task:
### Input:
Get the antlr token stream.
### Response:
def get_token_stream(source: str) -> CommonTokenStream:
""" Get the antlr token stream.
"""
lexer = LuaLexer(InputStream(source))
stream = CommonTokenStream(lexer)
return stream |
def _parse_alt_title(html_chunk):
"""
Parse title from alternative location if not found where it should be.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
str: Book's title.
"""
title = html_chunk.find("img", fn=has_param("alt"))
if not title:
raise UserWarning("Can't find alternative title source!")
return title[0].params["alt"].strip() | Parse title from alternative location if not found where it should be.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
str: Book's title. | Below is the the instruction that describes the task:
### Input:
Parse title from alternative location if not found where it should be.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
str: Book's title.
### Response:
def _parse_alt_title(html_chunk):
"""
Parse title from alternative location if not found where it should be.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
str: Book's title.
"""
title = html_chunk.find("img", fn=has_param("alt"))
if not title:
raise UserWarning("Can't find alternative title source!")
return title[0].params["alt"].strip() |
def _create_more_application():
"""
Create an `Application` instance that displays the "--MORE--".
"""
from prompt_toolkit.shortcuts import create_prompt_application
registry = Registry()
@registry.add_binding(' ')
@registry.add_binding('y')
@registry.add_binding('Y')
@registry.add_binding(Keys.ControlJ)
@registry.add_binding(Keys.ControlI) # Tab.
def _(event):
event.cli.set_return_value(True)
@registry.add_binding('n')
@registry.add_binding('N')
@registry.add_binding('q')
@registry.add_binding('Q')
@registry.add_binding(Keys.ControlC)
def _(event):
event.cli.set_return_value(False)
return create_prompt_application(
'--MORE--', key_bindings_registry=registry, erase_when_done=True) | Create an `Application` instance that displays the "--MORE--". | Below is the the instruction that describes the task:
### Input:
Create an `Application` instance that displays the "--MORE--".
### Response:
def _create_more_application():
"""
Create an `Application` instance that displays the "--MORE--".
"""
from prompt_toolkit.shortcuts import create_prompt_application
registry = Registry()
@registry.add_binding(' ')
@registry.add_binding('y')
@registry.add_binding('Y')
@registry.add_binding(Keys.ControlJ)
@registry.add_binding(Keys.ControlI) # Tab.
def _(event):
event.cli.set_return_value(True)
@registry.add_binding('n')
@registry.add_binding('N')
@registry.add_binding('q')
@registry.add_binding('Q')
@registry.add_binding(Keys.ControlC)
def _(event):
event.cli.set_return_value(False)
return create_prompt_application(
'--MORE--', key_bindings_registry=registry, erase_when_done=True) |
def join_states(*states: State) -> State:
"""Join two state vectors into a larger qubit state"""
vectors = [ket.vec for ket in states]
vec = reduce(outer_product, vectors)
return State(vec.tensor, vec.qubits) | Join two state vectors into a larger qubit state | Below is the the instruction that describes the task:
### Input:
Join two state vectors into a larger qubit state
### Response:
def join_states(*states: State) -> State:
"""Join two state vectors into a larger qubit state"""
vectors = [ket.vec for ket in states]
vec = reduce(outer_product, vectors)
return State(vec.tensor, vec.qubits) |
def expand_date_param(param, lower_upper):
"""
Expands a (possibly) incomplete date string to either the lowest
or highest possible contained date and returns
datetime.datetime for that string.
0753 (lower) => 0753-01-01
2012 (upper) => 2012-12-31
2012 (lower) => 2012-01-01
201208 (upper) => 2012-08-31
etc.
"""
year = datetime.MINYEAR
month = 1
day = 1
hour = 0
minute = 0
second = 0
if lower_upper == 'upper':
year = datetime.MAXYEAR
month = 12
day = 31
hour = 23
minute = 59
second = 59
if len(param) == 0:
# leave defaults
pass
elif len(param) == 4:
year = int(param)
if lower_upper == 'lower':
month = 1
day = 1
hour = 0
minute = 0
second = 0
else:
month = 12
day = 31
hour = 23
minute = 59
second = 59
elif len(param) == 6:
year = int(param[0:4])
month = int(param[4:6])
if lower_upper == 'lower':
day = 1
else:
(firstday, dayspermonth) = monthrange(year, month)
day = dayspermonth
elif len(param) == 8:
year = int(param[0:4])
month = int(param[4:6])
day = int(param[6:8])
elif len(param) == 10:
year = int(param[0:4])
month = int(param[4:6])
day = int(param[6:8])
hour = int(param[8:10])
elif len(param) == 12:
year = int(param[0:4])
month = int(param[4:6])
day = int(param[6:8])
hour = int(param[8:10])
minute = int(param[10:12])
elif len(param) == 14:
year = int(param[0:4])
month = int(param[4:6])
day = int(param[6:8])
hour = int(param[8:10])
minute = int(param[10:12])
second = int(param[12:14])
else:
# wrong input length
raise ValueError('Bad date string provided. Use YYYY, YYYYMM or YYYYMMDD.')
# force numbers into valid ranges
#print (param, lower_upper), [year, month, day, hour, minute, second]
year = min(datetime.MAXYEAR, max(datetime.MINYEAR, year))
return datetime.datetime(year=year, month=month, day=day,
hour=hour, minute=minute, second=second) | Expands a (possibly) incomplete date string to either the lowest
or highest possible contained date and returns
datetime.datetime for that string.
0753 (lower) => 0753-01-01
2012 (upper) => 2012-12-31
2012 (lower) => 2012-01-01
201208 (upper) => 2012-08-31
etc. | Below is the the instruction that describes the task:
### Input:
Expands a (possibly) incomplete date string to either the lowest
or highest possible contained date and returns
datetime.datetime for that string.
0753 (lower) => 0753-01-01
2012 (upper) => 2012-12-31
2012 (lower) => 2012-01-01
201208 (upper) => 2012-08-31
etc.
### Response:
def expand_date_param(param, lower_upper):
    """
    Expands a (possibly) incomplete date string to either the lowest
    or highest possible contained date and returns
    datetime.datetime for that string.
    0753 (lower) => 0753-01-01
    2012 (upper) => 2012-12-31
    2012 (lower) => 2012-01-01
    201208 (upper) => 2012-08-31
    etc.

    :param param: digit string of length 0, 4, 6, 8, 10, 12 or 14
        (YYYY[MM[DD[HH[MM[SS]]]]]); an empty string yields the extreme date
        of the requested bound.
    :param lower_upper: 'lower' for the earliest contained moment,
        'upper' for the latest.
    :raises ValueError: if ``param`` has an unsupported length.
    :return: datetime.datetime
    """
    n = len(param)
    # Validate the length up front instead of after partial parsing.
    if n not in (0, 4, 6, 8, 10, 12, 14):
        raise ValueError('Bad date string provided. Use YYYY, YYYYMM or YYYYMMDD.')
    # Start from the extreme of the requested bound; any value other than
    # 'upper' falls back to the lower bound (mirrors the original behaviour).
    if lower_upper == 'upper':
        year, month, day = datetime.MAXYEAR, 12, 31
        hour, minute, second = 23, 59, 59
    else:
        year, month, day = datetime.MINYEAR, 1, 1
        hour, minute, second = 0, 0, 0
    # Overwrite defaults with whatever fields the string actually provides.
    if n >= 4:
        year = int(param[0:4])
        if n == 4 and lower_upper != 'lower':
            # A bare year snaps every smaller field to its upper bound.
            month, day, hour, minute, second = 12, 31, 23, 59, 59
    if n >= 6:
        month = int(param[4:6])
        if n == 6 and lower_upper != 'lower':
            # The upper bound of a year-month is the last day of that month.
            day = monthrange(year, month)[1]
    if n >= 8:
        day = int(param[6:8])
    if n >= 10:
        hour = int(param[8:10])
    if n >= 12:
        minute = int(param[10:12])
    if n >= 14:
        second = int(param[12:14])
    # Clamp the year into datetime's representable range.
    year = min(datetime.MAXYEAR, max(datetime.MINYEAR, year))
    return datetime.datetime(year=year, month=month, day=day,
                             hour=hour, minute=minute, second=second)
def get_nodes(self):
"""
Get the list of all nodes.
"""
        return self.fold_up(lambda n, fl, fg: [n] + fl + fg, lambda leaf: [leaf]) | Get the list of all nodes. | Below is the instruction that describes the task:
### Input:
Get the list of all nodes.
### Response:
def get_nodes(self):
        """
        Get the list of all nodes.
        """
        def merge(node, first_branch_nodes, second_branch_nodes):
            # Prepend the current node to everything gathered below it.
            return [node] + first_branch_nodes + second_branch_nodes

        def leaf_to_list(leaf):
            return [leaf]

        return self.fold_up(merge, leaf_to_list)
def create(**kwargs):
"""
Create and a return a specialized contract based on the given secType,
or a general Contract if secType is not given.
"""
secType = kwargs.get('secType', '')
cls = {
'': Contract,
'STK': Stock,
'OPT': Option,
'FUT': Future,
'CONTFUT': ContFuture,
'CASH': Forex,
'IND': Index,
'CFD': CFD,
'BOND': Bond,
'CMDTY': Commodity,
'FOP': FuturesOption,
'FUND': MutualFund,
'WAR': Warrant,
'IOPT': Warrant,
'BAG': Bag,
'NEWS': Contract
}.get(secType, Contract)
if cls is not Contract:
kwargs.pop('secType', '')
    return cls(**kwargs) | Create and return a specialized contract based on the given secType,
or a general Contract if secType is not given. | Below is the instruction that describes the task:
### Input:
Create and return a specialized contract based on the given secType,
or a general Contract if secType is not given.
### Response:
def create(**kwargs):
    """
    Build and return the contract subclass matching the ``secType``
    keyword argument, falling back to a plain ``Contract`` when
    ``secType`` is absent or unrecognized.
    """
    sec_type_to_class = {
        '': Contract,
        'STK': Stock,
        'OPT': Option,
        'FUT': Future,
        'CONTFUT': ContFuture,
        'CASH': Forex,
        'IND': Index,
        'CFD': CFD,
        'BOND': Bond,
        'CMDTY': Commodity,
        'FOP': FuturesOption,
        'FUND': MutualFund,
        'WAR': Warrant,
        'IOPT': Warrant,
        'BAG': Bag,
        'NEWS': Contract,
    }
    contract_cls = sec_type_to_class.get(kwargs.get('secType', ''), Contract)
    if contract_cls is not Contract:
        # secType is implied by the specialized class; drop it from the
        # constructor arguments (mirrors the original behaviour).
        kwargs.pop('secType', '')
    return contract_cls(**kwargs)
def _cleanup(self, lr_decay_opt_states_reset: str, process_manager: Optional['DecoderProcessManager'] = None,
keep_training_state = False):
"""
Cleans parameter files, training state directory and waits for remaining decoding processes.
"""
utils.cleanup_params_files(self.model.output_dir, self.max_params_files_to_keep,
self.state.checkpoint, self.state.best_checkpoint, self.keep_initializations)
if process_manager is not None:
result = process_manager.collect_results()
if result is not None:
decoded_checkpoint, decoder_metrics = result
self.state.metrics[decoded_checkpoint - 1].update(decoder_metrics)
self.tflogger.log_metrics(decoder_metrics, decoded_checkpoint)
utils.write_metrics_file(self.state.metrics, self.metrics_fname)
self.state.save(os.path.join(self.training_state_dirname, C.TRAINING_STATE_NAME))
if not keep_training_state:
final_training_state_dirname = os.path.join(self.model.output_dir, C.TRAINING_STATE_DIRNAME)
if os.path.exists(final_training_state_dirname):
shutil.rmtree(final_training_state_dirname)
if lr_decay_opt_states_reset == C.LR_DECAY_OPT_STATES_RESET_BEST:
best_opt_states_fname = os.path.join(self.model.output_dir, C.OPT_STATES_BEST)
if os.path.exists(best_opt_states_fname):
os.remove(best_opt_states_fname)
if lr_decay_opt_states_reset == C.LR_DECAY_OPT_STATES_RESET_INITIAL:
initial_opt_states_fname = os.path.join(self.model.output_dir, C.OPT_STATES_INITIAL)
if os.path.exists(initial_opt_states_fname):
os.remove(initial_opt_states_fname) | Cleans parameter files, training state directory and waits for remaining decoding processes. | Below is the the instruction that describes the task:
### Input:
Cleans parameter files, training state directory and waits for remaining decoding processes.
### Response:
def _cleanup(self, lr_decay_opt_states_reset: str, process_manager: Optional['DecoderProcessManager'] = None,
             keep_training_state: bool = False):
        """
        Cleans parameter files, training state directory and waits for remaining decoding processes.

        :param lr_decay_opt_states_reset: Optimizer-state reset strategy; selects which
            stored optimizer states ("best"/"initial") are removed here.
        :param process_manager: Optional manager for asynchronous checkpoint decoding;
            if given, any outstanding decoder results are collected before cleanup.
        :param keep_training_state: If True, the training state directory and stored
            optimizer states are left on disk.
        """
        # Prune old parameter checkpoints, keeping the current and best ones.
        utils.cleanup_params_files(self.model.output_dir, self.max_params_files_to_keep,
                                   self.state.checkpoint, self.state.best_checkpoint, self.keep_initializations)
        if process_manager is not None:
            # Wait for (and record) any decoder results that are still pending.
            result = process_manager.collect_results()
            if result is not None:
                decoded_checkpoint, decoder_metrics = result
                # Metrics list is 0-based while checkpoints are 1-based.
                self.state.metrics[decoded_checkpoint - 1].update(decoder_metrics)
                self.tflogger.log_metrics(decoder_metrics, decoded_checkpoint)
                utils.write_metrics_file(self.state.metrics, self.metrics_fname)
                # Persist the updated training state so the new metrics survive.
                self.state.save(os.path.join(self.training_state_dirname, C.TRAINING_STATE_NAME))
        if not keep_training_state:
            final_training_state_dirname = os.path.join(self.model.output_dir, C.TRAINING_STATE_DIRNAME)
            if os.path.exists(final_training_state_dirname):
                shutil.rmtree(final_training_state_dirname)
            # Remove optimizer states that were only kept for LR-decay resets.
            if lr_decay_opt_states_reset == C.LR_DECAY_OPT_STATES_RESET_BEST:
                best_opt_states_fname = os.path.join(self.model.output_dir, C.OPT_STATES_BEST)
                if os.path.exists(best_opt_states_fname):
                    os.remove(best_opt_states_fname)
            if lr_decay_opt_states_reset == C.LR_DECAY_OPT_STATES_RESET_INITIAL:
                initial_opt_states_fname = os.path.join(self.model.output_dir, C.OPT_STATES_INITIAL)
                if os.path.exists(initial_opt_states_fname):
                    os.remove(initial_opt_states_fname)
def do_help(self, arg):
"""Sets up the header for the help command that explains the background on how to use
the script generally. Help for each command then stands alone in the context of this
documentation. Although we could have documented this on the wiki, it is better served
when shipped with the shell.
"""
if arg == "":
lines = [("The fortpy unit testing analysis shell makes it easy to analyze the results "
"of multiple test cases, make plots of trends and tabulate values for use in "
"other applications. This documentation will provide an overview of the basics. "
"Use 'help <command>' to get specific command help."),
("Each fortpy shell session can hold the results of multiple unit tests. You can "
"load a unit test's results into the session using one of the 'parse' commands. "
"Once the test is loaded you can tabulate and plot results by setting test case "
"filters ('filter'), and independent and dependent variables ('indep', 'dep')."
"Switch between different unit tests loaded into the session using 'set'."),
("To make multiple plots/tables for the same unit test, create new analysis "
"groups ('group'). "
"Each group has its own set of properties that can be set (e.g. variables, plot "
"labels for axes, filters for test cases, etc.) The possible properties that affect "
"each command are listed in the specific help for that command."),
("You can save the state of a shell session using 'save' and then recover it at "
"a later time using 'load'. When a session is re-loaded, all the variables and "
"properties/settings for plots/tables are maintained and the latest state of the "
"unit test's results are used. A console history is also maintained with bash-like "
"commands (e.g. Ctrl-R for reverse history search, etc.) across sessions. You can "
"manipulate its behavior with 'history'.")]
self._fixed_width_info(lines)
cmd.Cmd.do_help(self, arg) | Sets up the header for the help command that explains the background on how to use
the script generally. Help for each command then stands alone in the context of this
documentation. Although we could have documented this on the wiki, it is better served
when shipped with the shell. | Below is the the instruction that describes the task:
### Input:
Sets up the header for the help command that explains the background on how to use
the script generally. Help for each command then stands alone in the context of this
documentation. Although we could have documented this on the wiki, it is better served
when shipped with the shell.
### Response:
def do_help(self, arg):
        """Sets up the header for the help command that explains the background on how to use
        the script generally. Help for each command then stands alone in the context of this
        documentation. Although we could have documented this on the wiki, it is better served
        when shipped with the shell.
        """
        # Only print the general overview when no specific command was asked
        # for; cmd.Cmd.do_help() below then handles the per-command help.
        if arg == "":
            lines = [("The fortpy unit testing analysis shell makes it easy to analyze the results "
                      "of multiple test cases, make plots of trends and tabulate values for use in "
                      "other applications. This documentation will provide an overview of the basics. "
                      "Use 'help <command>' to get specific command help."),
                     ("Each fortpy shell session can hold the results of multiple unit tests. You can "
                      "load a unit test's results into the session using one of the 'parse' commands. "
                      "Once the test is loaded you can tabulate and plot results by setting test case "
                      "filters ('filter'), and independent and dependent variables ('indep', 'dep')."
                      "Switch between different unit tests loaded into the session using 'set'."),
                     ("To make multiple plots/tables for the same unit test, create new analysis "
                      "groups ('group'). "
                      "Each group has its own set of properties that can be set (e.g. variables, plot "
                      "labels for axes, filters for test cases, etc.) The possible properties that affect "
                      "each command are listed in the specific help for that command."),
                     ("You can save the state of a shell session using 'save' and then recover it at "
                      "a later time using 'load'. When a session is re-loaded, all the variables and "
                      "properties/settings for plots/tables are maintained and the latest state of the "
                      "unit test's results are used. A console history is also maintained with bash-like "
                      "commands (e.g. Ctrl-R for reverse history search, etc.) across sessions. You can "
                      "manipulate its behavior with 'history'.")]
            # Print the overview paragraphs via the fixed-width helper.
            self._fixed_width_info(lines)
        cmd.Cmd.do_help(self, arg)
def execute(self,
command,
data=None,
returning=True,
mapper=dict,
writeAccess=False,
dryRun=False,
locale=None):
"""
Executes the inputted command into the current \
connection cursor.
:param command | <str>
data | <dict> || None
autoCommit | <bool> | commit database changes immediately
autoClose | <bool> | closes connections immediately
returning | <bool>
mapper | <variant>
retries | <int>
:return [{<str> key: <variant>, ..}, ..], <int> rowcount
"""
command = command.strip()
if not command:
raise orb.errors.EmptyCommand()
elif dryRun:
print command % data
raise orb.errors.DryRun()
# define properties for execution
data = data or {}
command = command.strip()
data.setdefault('locale', locale or orb.Context().locale)
start = datetime.datetime.now()
try:
with self.native(writeAccess=writeAccess) as conn:
results, rowcount = self._execute(conn,
command,
data,
returning,
mapper)
# always raise interruption errors as these need to be handled
# from a thread properly
except orb.errors.Interruption:
delta = datetime.datetime.now() - start
log.critical('Query took: %s' % delta)
raise
# handle any known a database errors with feedback information
except orb.errors.DatabaseError as err:
delta = datetime.datetime.now() - start
log.error(u'{0}: \n {1}'.format(err, command))
log.error('Query took: %s' % delta)
raise
# always raise any unknown issues for the developer
except StandardError as err:
delta = datetime.datetime.now() - start
log.error(u'{0}: \n {1}'.format(err, command))
log.error('Query took: %s' % delta)
raise
delta = (datetime.datetime.now() - start).total_seconds()
if delta * 1000 < 3000:
lvl = logging.DEBUG
elif delta * 1000 < 6000:
lvl = logging.WARNING
else:
lvl = logging.CRITICAL
log.log(lvl, 'Query took: %s' % delta)
return results, rowcount | Executes the inputted command into the current \
connection cursor.
:param command | <str>
data | <dict> || None
autoCommit | <bool> | commit database changes immediately
autoClose | <bool> | closes connections immediately
returning | <bool>
mapper | <variant>
retries | <int>
:return [{<str> key: <variant>, ..}, ..], <int> rowcount | Below is the the instruction that describes the task:
### Input:
Executes the inputted command into the current \
connection cursor.
:param command | <str>
data | <dict> || None
autoCommit | <bool> | commit database changes immediately
autoClose | <bool> | closes connections immediately
returning | <bool>
mapper | <variant>
retries | <int>
:return [{<str> key: <variant>, ..}, ..], <int> rowcount
### Response:
def execute(self,
                command,
                data=None,
                returning=True,
                mapper=dict,
                writeAccess=False,
                dryRun=False,
                locale=None):
        """
        Executes the inputted command into the current \
        connection cursor.

        :param command     | <str> command to run (must be non-empty)
               data        | <dict> || None  bind parameters for the command
               returning   | <bool> whether result rows should be fetched
               mapper      | <variant> callable used to map each result row
               writeAccess | <bool> request a write-capable connection
               dryRun      | <bool> print the command and abort via DryRun
               locale      | <str> || None  locale stored into data['locale']

        :return [{<str> key: <variant>, ..}, ..], <int> rowcount
        """
        command = command.strip()
        if not command:
            raise orb.errors.EmptyCommand()
        elif dryRun:
            # Dry-run mode: show the interpolated command and bail out
            # without touching the database.  (Python 2 print statement.)
            print command % data
            raise orb.errors.DryRun()
        # define properties for execution
        data = data or {}
        command = command.strip()  # NOTE(review): redundant; stripped above already.
        data.setdefault('locale', locale or orb.Context().locale)
        start = datetime.datetime.now()
        try:
            with self.native(writeAccess=writeAccess) as conn:
                results, rowcount = self._execute(conn,
                                                  command,
                                                  data,
                                                  returning,
                                                  mapper)
        # always raise interruption errors as these need to be handled
        # from a thread properly
        except orb.errors.Interruption:
            delta = datetime.datetime.now() - start
            log.critical('Query took: %s' % delta)
            raise
        # handle any known database errors with feedback information
        except orb.errors.DatabaseError as err:
            delta = datetime.datetime.now() - start
            log.error(u'{0}: \n {1}'.format(err, command))
            log.error('Query took: %s' % delta)
            raise
        # always raise any unknown issues for the developer
        except StandardError as err:
            delta = datetime.datetime.now() - start
            log.error(u'{0}: \n {1}'.format(err, command))
            log.error('Query took: %s' % delta)
            raise
        # Escalate the log level with query duration; delta is in seconds,
        # so delta * 1000 compares milliseconds: <3s DEBUG, <6s WARNING,
        # otherwise CRITICAL.
        delta = (datetime.datetime.now() - start).total_seconds()
        if delta * 1000 < 3000:
            lvl = logging.DEBUG
        elif delta * 1000 < 6000:
            lvl = logging.WARNING
        else:
            lvl = logging.CRITICAL
        log.log(lvl, 'Query took: %s' % delta)
        return results, rowcount
def get_line_data(self):
"""Return the line data collected.
Data is { filename: { lineno: None, ...}, ...}
"""
if self.branch:
# If we were measuring branches, then we have to re-build the dict
# to show line data.
line_data = {}
for f, arcs in self.data.items():
line_data[f] = ldf = {}
for l1, _ in list(arcs.keys()):
if l1:
ldf[l1] = None
return line_data
else:
return self.data | Return the line data collected.
Data is { filename: { lineno: None, ...}, ...} | Below is the the instruction that describes the task:
### Input:
Return the line data collected.
Data is { filename: { lineno: None, ...}, ...}
### Response:
def get_line_data(self):
        """Return the line data collected.
        Data is { filename: { lineno: None, ...}, ...}
        """
        if not self.branch:
            # Line measurement already stores the mapping in the wanted shape.
            return self.data
        # Branch measurement keys each file's dict by arc pairs; project every
        # arc onto its first element (the start line), skipping falsy lines.
        line_data = {}
        for filename, arcs in self.data.items():
            line_data[filename] = {
                start: None for start, _ in arcs.keys() if start
            }
        return line_data
def create(self, **attributes):
"""
Creates a new record suject to the restructions in the query and with
the passed +attributes+. Operates using `build`.
"""
record = self.build(**attributes)
record.save()
        return record | Creates a new record subject to the restrictions in the query and with
the passed +attributes+. Operates using `build`. | Below is the instruction that describes the task:
### Input:
Creates a new record subject to the restrictions in the query and with
the passed +attributes+. Operates using `build`.
### Response:
def create(self, **attributes):
        """
        Build, save and return a new record subject to the restrictions in
        this query, carrying the passed ``attributes``. Construction is
        delegated to `build`.
        """
        new_record = self.build(**attributes)
        new_record.save()
        return new_record
def __init_keystone_session(self):
"""Create and return a Keystone session object."""
api = self._identity_api_version # for readability
tried = []
if api in ['3', None]:
sess = self.__init_keystone_session_v3(check=(api is None))
tried.append('v3')
if sess:
return sess
if api in ['2', None]:
sess = self.__init_keystone_session_v2(check=(api is None))
tried.append('v2')
if sess:
return sess
raise RuntimeError(
"Cannot establish Keystone session (tried: {0})."
.format(', '.join(tried))) | Create and return a Keystone session object. | Below is the the instruction that describes the task:
### Input:
Create and return a Keystone session object.
### Response:
def __init_keystone_session(self):
        """Create and return a Keystone session object."""
        api = self._identity_api_version  # for readability
        attempted = []
        # Prefer the v3 identity API, then fall back to v2.  When no version
        # is pinned (api is None), each attempt is asked to self-check.
        if api in ['3', None]:
            session = self.__init_keystone_session_v3(check=(api is None))
            attempted.append('v3')
            if session:
                return session
        if api in ['2', None]:
            session = self.__init_keystone_session_v2(check=(api is None))
            attempted.append('v2')
            if session:
                return session
        raise RuntimeError(
            "Cannot establish Keystone session (tried: {0})."
            .format(', '.join(attempted)))
def mrz():
"""
Command-line script for extracting MRZ from a given image
"""
parser = argparse.ArgumentParser(description='Run the MRZ OCR recognition algorithm on the given image.')
parser.add_argument('filename')
parser.add_argument('--json', action='store_true', help='Produce JSON (rather than tabular) output')
parser.add_argument('--legacy', action='store_true',
help='Use the "legacy" Tesseract OCR engine (--oem 0). Despite the name, it most often results in better '
'results. It is not the default option, because it will only work if '
'your Tesseract installation includes the legacy *.traineddata files. You can download them at '
'https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016')
parser.add_argument('-r', '--save-roi', default=None,
help='Output the region of the image that is detected to contain the MRZ to the given png file')
parser.add_argument('--version', action='version', version='PassportEye MRZ v%s' % passporteye.__version__)
args = parser.parse_args()
try:
extra_params = '--oem 0' if args.legacy else ''
filename, mrz_, walltime = process_file((args.filename, args.save_roi is not None, extra_params))
except TesseractNotFoundError:
sys.stderr.write("ERROR: The tesseract executable was not found.\n"
"Please, make sure Tesseract is installed and the appropriate directory is included "
"in your PATH environment variable.\n")
sys.exit(1)
except TesseractError as ex:
sys.stderr.write("ERROR: %s" % ex.message)
sys.exit(ex.status)
d = mrz_.to_dict() if mrz_ is not None else {'mrz_type': None, 'valid': False, 'valid_score': 0}
d['walltime'] = walltime
d['filename'] = filename
if args.save_roi is not None and mrz_ is not None and 'roi' in mrz_.aux:
io.imsave(args.save_roi, mrz_.aux['roi'])
if not args.json:
for k in d:
print("%s\t%s" % (k, str(d[k])))
else:
print(json.dumps(d, indent=2)) | Command-line script for extracting MRZ from a given image | Below is the the instruction that describes the task:
### Input:
Command-line script for extracting MRZ from a given image
### Response:
def mrz():
    """
    Command-line script for extracting MRZ from a given image.

    Parses command-line arguments, runs the OCR pipeline on the given file
    and prints the result as tab-separated key/value lines or as JSON.
    """
    parser = argparse.ArgumentParser(description='Run the MRZ OCR recognition algorithm on the given image.')
    parser.add_argument('filename')
    parser.add_argument('--json', action='store_true', help='Produce JSON (rather than tabular) output')
    parser.add_argument('--legacy', action='store_true',
                        help='Use the "legacy" Tesseract OCR engine (--oem 0). Despite the name, it most often results in better '
                             'results. It is not the default option, because it will only work if '
                             'your Tesseract installation includes the legacy *.traineddata files. You can download them at '
                             'https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016')
    parser.add_argument('-r', '--save-roi', default=None,
                        help='Output the region of the image that is detected to contain the MRZ to the given png file')
    parser.add_argument('--version', action='version', version='PassportEye MRZ v%s' % passporteye.__version__)
    args = parser.parse_args()
    try:
        # '--oem 0' switches Tesseract to its legacy OCR engine (see --legacy help).
        extra_params = '--oem 0' if args.legacy else ''
        filename, mrz_, walltime = process_file((args.filename, args.save_roi is not None, extra_params))
    except TesseractNotFoundError:
        sys.stderr.write("ERROR: The tesseract executable was not found.\n"
                         "Please, make sure Tesseract is installed and the appropriate directory is included "
                         "in your PATH environment variable.\n")
        sys.exit(1)
    except TesseractError as ex:
        sys.stderr.write("ERROR: %s" % ex.message)
        sys.exit(ex.status)
    # Fall back to a stub result dict when no MRZ could be recognized.
    d = mrz_.to_dict() if mrz_ is not None else {'mrz_type': None, 'valid': False, 'valid_score': 0}
    d['walltime'] = walltime
    d['filename'] = filename
    # Optionally dump the detected region of interest as a png image.
    if args.save_roi is not None and mrz_ is not None and 'roi' in mrz_.aux:
        io.imsave(args.save_roi, mrz_.aux['roi'])
    if not args.json:
        # Tab-separated key/value lines for easy shell consumption.
        for k in d:
            print("%s\t%s" % (k, str(d[k])))
    else:
        print(json.dumps(d, indent=2))
def _cutadapt_trim(fastq_files, quality_format, adapters, out_files, log_file, data):
"""Trimming with cutadapt.
"""
if all([utils.file_exists(x) for x in out_files]):
return out_files
cmd = _cutadapt_trim_cmd(fastq_files, quality_format, adapters, out_files, data)
if len(fastq_files) == 1:
of = [out_files[0], log_file]
message = "Trimming %s in single end mode with cutadapt." % (fastq_files[0])
with file_transaction(data, of) as of_tx:
of1_tx, log_tx = of_tx
do.run(cmd.format(**locals()), message)
else:
of = out_files + [log_file]
with file_transaction(data, of) as tx_out_files:
of1_tx, of2_tx, log_tx = tx_out_files
tmp_fq1 = utils.append_stem(of1_tx, ".tmp")
tmp_fq2 = utils.append_stem(of2_tx, ".tmp")
singles_file = of1_tx + ".single"
message = "Trimming %s and %s in paired end mode with cutadapt." % (fastq_files[0],
fastq_files[1])
do.run(cmd.format(**locals()), message)
return out_files | Trimming with cutadapt. | Below is the the instruction that describes the task:
### Input:
Trimming with cutadapt.
### Response:
def _cutadapt_trim(fastq_files, quality_format, adapters, out_files, log_file, data):
    """Trimming with cutadapt.

    :param fastq_files: list of one (single-end) or two (paired-end) FASTQ paths
    :param quality_format: quality encoding forwarded to the cutadapt command
    :param adapters: adapter sequences to trim
    :param out_files: trimmed output FASTQ paths (parallel to fastq_files)
    :param log_file: path for the cutadapt log
    :param data: sample data dictionary used for transactional file handling
    :return: the (possibly already existing) trimmed output files
    """
    # Trimming already done: reuse the existing outputs.
    if all([utils.file_exists(x) for x in out_files]):
        return out_files
    cmd = _cutadapt_trim_cmd(fastq_files, quality_format, adapters, out_files, data)
    if len(fastq_files) == 1:
        of = [out_files[0], log_file]
        message = "Trimming %s in single end mode with cutadapt." % (fastq_files[0])
        with file_transaction(data, of) as of_tx:
            # of1_tx/log_tx look unused but are presumably placeholders in the
            # command template, filled in by cmd.format(**locals()) below.
            of1_tx, log_tx = of_tx
            do.run(cmd.format(**locals()), message)
    else:
        of = out_files + [log_file]
        with file_transaction(data, of) as tx_out_files:
            # These locals (of1_tx, of2_tx, log_tx, tmp_fq1, tmp_fq2,
            # singles_file) are presumably referenced by the command template
            # via cmd.format(**locals()) — do not remove them as "unused".
            of1_tx, of2_tx, log_tx = tx_out_files
            tmp_fq1 = utils.append_stem(of1_tx, ".tmp")
            tmp_fq2 = utils.append_stem(of2_tx, ".tmp")
            singles_file = of1_tx + ".single"
            message = "Trimming %s and %s in paired end mode with cutadapt." % (fastq_files[0],
                                                                                fastq_files[1])
            do.run(cmd.format(**locals()), message)
    return out_files
def load_archive(archive_file: str,
cuda_device: int = -1,
overrides: str = "",
weights_file: str = None) -> Archive:
"""
Instantiates an Archive from an archived `tar.gz` file.
Parameters
----------
archive_file: ``str``
The archive file to load the model from.
weights_file: ``str``, optional (default = None)
The weights file to use. If unspecified, weights.th in the archive_file will be used.
cuda_device: ``int``, optional (default = -1)
If `cuda_device` is >= 0, the model will be loaded onto the
corresponding GPU. Otherwise it will be loaded onto the CPU.
overrides: ``str``, optional (default = "")
JSON overrides to apply to the unarchived ``Params`` object.
"""
# redirect to the cache, if necessary
resolved_archive_file = cached_path(archive_file)
if resolved_archive_file == archive_file:
logger.info(f"loading archive file {archive_file}")
else:
logger.info(f"loading archive file {archive_file} from cache at {resolved_archive_file}")
if os.path.isdir(resolved_archive_file):
serialization_dir = resolved_archive_file
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
logger.info(f"extracting archive file {resolved_archive_file} to temp dir {tempdir}")
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
# Postpone cleanup until exit in case the unarchived contents are needed outside
# this function.
atexit.register(_cleanup_archive_dir, tempdir)
serialization_dir = tempdir
# Check for supplemental files in archive
fta_filename = os.path.join(serialization_dir, _FTA_NAME)
if os.path.exists(fta_filename):
with open(fta_filename, 'r') as fta_file:
files_to_archive = json.loads(fta_file.read())
# Add these replacements to overrides
replacements_dict: Dict[str, Any] = {}
for key, original_filename in files_to_archive.items():
replacement_filename = os.path.join(serialization_dir, f"fta/{key}")
if os.path.exists(replacement_filename):
replacements_dict[key] = replacement_filename
else:
logger.warning(f"Archived file {replacement_filename} not found! At train time "
f"this file was located at {original_filename}. This may be "
"because you are loading a serialization directory. Attempting to "
"load the file from its train-time location.")
overrides_dict = parse_overrides(overrides)
combined_dict = with_fallback(preferred=overrides_dict, fallback=unflatten(replacements_dict))
overrides = json.dumps(combined_dict)
# Load config
config = Params.from_file(os.path.join(serialization_dir, CONFIG_NAME), overrides)
config.loading_from_archive = True
if weights_file:
weights_path = weights_file
else:
weights_path = os.path.join(serialization_dir, _WEIGHTS_NAME)
# Fallback for serialization directories.
if not os.path.exists(weights_path):
weights_path = os.path.join(serialization_dir, _DEFAULT_WEIGHTS)
# Instantiate model. Use a duplicate of the config, as it will get consumed.
model = Model.load(config.duplicate(),
weights_file=weights_path,
serialization_dir=serialization_dir,
cuda_device=cuda_device)
return Archive(model=model, config=config) | Instantiates an Archive from an archived `tar.gz` file.
Parameters
----------
archive_file: ``str``
The archive file to load the model from.
weights_file: ``str``, optional (default = None)
The weights file to use. If unspecified, weights.th in the archive_file will be used.
cuda_device: ``int``, optional (default = -1)
If `cuda_device` is >= 0, the model will be loaded onto the
corresponding GPU. Otherwise it will be loaded onto the CPU.
overrides: ``str``, optional (default = "")
JSON overrides to apply to the unarchived ``Params`` object. | Below is the the instruction that describes the task:
### Input:
Instantiates an Archive from an archived `tar.gz` file.
Parameters
----------
archive_file: ``str``
The archive file to load the model from.
weights_file: ``str``, optional (default = None)
The weights file to use. If unspecified, weights.th in the archive_file will be used.
cuda_device: ``int``, optional (default = -1)
If `cuda_device` is >= 0, the model will be loaded onto the
corresponding GPU. Otherwise it will be loaded onto the CPU.
overrides: ``str``, optional (default = "")
JSON overrides to apply to the unarchived ``Params`` object.
### Response:
def load_archive(archive_file: str,
cuda_device: int = -1,
overrides: str = "",
weights_file: str = None) -> Archive:
"""
Instantiates an Archive from an archived `tar.gz` file.
Parameters
----------
archive_file: ``str``
The archive file to load the model from.
weights_file: ``str``, optional (default = None)
The weights file to use. If unspecified, weights.th in the archive_file will be used.
cuda_device: ``int``, optional (default = -1)
If `cuda_device` is >= 0, the model will be loaded onto the
corresponding GPU. Otherwise it will be loaded onto the CPU.
overrides: ``str``, optional (default = "")
JSON overrides to apply to the unarchived ``Params`` object.
"""
# redirect to the cache, if necessary
resolved_archive_file = cached_path(archive_file)
if resolved_archive_file == archive_file:
logger.info(f"loading archive file {archive_file}")
else:
logger.info(f"loading archive file {archive_file} from cache at {resolved_archive_file}")
if os.path.isdir(resolved_archive_file):
serialization_dir = resolved_archive_file
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
logger.info(f"extracting archive file {resolved_archive_file} to temp dir {tempdir}")
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
# Postpone cleanup until exit in case the unarchived contents are needed outside
# this function.
atexit.register(_cleanup_archive_dir, tempdir)
serialization_dir = tempdir
# Check for supplemental files in archive
fta_filename = os.path.join(serialization_dir, _FTA_NAME)
if os.path.exists(fta_filename):
with open(fta_filename, 'r') as fta_file:
files_to_archive = json.loads(fta_file.read())
# Add these replacements to overrides
replacements_dict: Dict[str, Any] = {}
for key, original_filename in files_to_archive.items():
replacement_filename = os.path.join(serialization_dir, f"fta/{key}")
if os.path.exists(replacement_filename):
replacements_dict[key] = replacement_filename
else:
logger.warning(f"Archived file {replacement_filename} not found! At train time "
f"this file was located at {original_filename}. This may be "
"because you are loading a serialization directory. Attempting to "
"load the file from its train-time location.")
overrides_dict = parse_overrides(overrides)
combined_dict = with_fallback(preferred=overrides_dict, fallback=unflatten(replacements_dict))
overrides = json.dumps(combined_dict)
# Load config
config = Params.from_file(os.path.join(serialization_dir, CONFIG_NAME), overrides)
config.loading_from_archive = True
if weights_file:
weights_path = weights_file
else:
weights_path = os.path.join(serialization_dir, _WEIGHTS_NAME)
# Fallback for serialization directories.
if not os.path.exists(weights_path):
weights_path = os.path.join(serialization_dir, _DEFAULT_WEIGHTS)
# Instantiate model. Use a duplicate of the config, as it will get consumed.
model = Model.load(config.duplicate(),
weights_file=weights_path,
serialization_dir=serialization_dir,
cuda_device=cuda_device)
return Archive(model=model, config=config) |
def VectorLen(self, off):
"""VectorLen retrieves the length of the vector whose offset is stored
at "off" in this object."""
N.enforce_number(off, N.UOffsetTFlags)
off += self.Pos
off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
ret = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
return ret | VectorLen retrieves the length of the vector whose offset is stored
    at "off" in this object. | Below is the instruction that describes the task:
### Input:
VectorLen retrieves the length of the vector whose offset is stored
at "off" in this object.
### Response:
def VectorLen(self, off):
"""VectorLen retrieves the length of the vector whose offset is stored
at "off" in this object."""
N.enforce_number(off, N.UOffsetTFlags)
off += self.Pos
off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
ret = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
return ret |
def in_miso_and_inner(self):
"""
Test if a node is miso: multiple input and single output
"""
return len(self.successor) == 1 and self.successor[0] is not None and not self.successor[0].in_or_out and \
           len(self.precedence) > 1 and self.precedence[0] is not None and not self.successor[0].in_or_out | Test if a node is miso: multiple input and single output | Below is the instruction that describes the task:
### Input:
Test if a node is miso: multiple input and single output
### Response:
def in_miso_and_inner(self):
"""
Test if a node is miso: multiple input and single output
"""
return len(self.successor) == 1 and self.successor[0] is not None and not self.successor[0].in_or_out and \
len(self.precedence) > 1 and self.precedence[0] is not None and not self.successor[0].in_or_out |
def deprecated_name(name):
"""Allow old method names for backwards compatability. """
def decorator(func):
"""Decorator function."""
def func_wrapper(self):
"""Wrapper for original function."""
if hasattr(self, name):
# Return the old property
return getattr(self, name)
else:
return func(self)
return func_wrapper
    return decorator | Allow old method names for backwards compatability. | Below is the instruction that describes the task:
### Input:
Allow old method names for backwards compatability.
### Response:
def deprecated_name(name):
"""Allow old method names for backwards compatability. """
def decorator(func):
"""Decorator function."""
def func_wrapper(self):
"""Wrapper for original function."""
if hasattr(self, name):
# Return the old property
return getattr(self, name)
else:
return func(self)
return func_wrapper
return decorator |
def argv_parse(schema, argv, init=None,
arg_names=None, arg_abbrevs=None, value_parser=True, defaults=None, filters=None):
'''
argv_parse(schema, argv) yields the tuple (unparsed_argv, params) where unparsed_argv is a list
subset of argv that contains only those command line arguments that were not understood by
the given argument schema and params is a dictionary of parameters as parsed by the given
schema. It is equivalent to argv_parser(schema)(argv). See also help(CommandLineParser) for
information about the instructions format.
argv_parse(plan, argv) yields a pimms IMap object whose parameters have been initialized from
the arguments in argv using the given pimms calculation plan as a template for the argument
schema; see help(to_argv_schema) for information about the way plans are interpreted as argv
schemas. The plan is initialized with the additional parameters 'argv' and 'argv_parsed'. The
'argv' parameter contains the command-line arguments in argv that were not interpreted by the
command-line parser; the 'argv_parsed' parameter contains the parsed command-line parameters.
To avoid the plan-specific behavior and instead only parse the arguments from a plan, use
argv_parse(to_argv_schema(plan), argv).
The following options may be given:
* init (default: None) specifies that the given dictionary should be merged into either the
resulting options dictionary (if schema is a schema and not a plan) or into the parameters
initially provided to the plan (if schema is a plan).
* arg_names (default: None) may be a dictionary that specifies explicity command-line
argument names for the plan parameters; plan parameters should be keys and the argument
names should be values. Any parameter not listed in this option will be interpreted
according to the above rules. If a parameter is mapped to None then it will not be
filled from the command-line arguments.
* arg_abbrevs (default:None) may be a dictionary that is handled identically to that of
arg_names except that its values must be single letters, which are used for the
abbreviated flag names.
* defaults (default: None) may specify the default values for the plan parameters; this
dictionary overrides the default values of the plan itself.
* value_parse (default: True) specifies whether the values are interpreted via the
ast.literal_eval() function. This may be set to False to leave the values as strings or it
may be set to a function that takes one argument and performs the parsing itself; such a
function f must obey the syntax `parsed_val = f(string_val)`. The value_parse function is
only called on arguments that have string values included. Note that by default the
value_parse function interprets the string '...' as Ellipsis in addition to the typical
ast.literal_eval() behavior.
* filters (default: None) optionally specifies a dictionary of filter functions, each of which
is passed the parsed value of the associated argument. Each filter function f must obey the
syntax `final_value = f(parsed_value)`. The keys of this dictionary must be the entry names
of the arguments. Note that filter functions are called on provided default values but the
value_parse function is not called on these.
'''
parser = argv_parser(schema,
arg_names=arg_names, arg_abbrevs=arg_abbrevs, defaults=defaults,
value_parser=value_parser, filters=filters)
res = parser(argv)
if is_plan(schema):
init = {} if init is None else init
init = pyr.pmap(init) if not is_pmap(init) else init
return schema({'argv': tuple(res[0]), 'argv_parsed': pyr.pmap(res[1])}, res[1], init)
else:
return res if init is None else (res[0], dict(merge(res[1], init))) | argv_parse(schema, argv) yields the tuple (unparsed_argv, params) where unparsed_argv is a list
subset of argv that contains only those command line arguments that were not understood by
the given argument schema and params is a dictionary of parameters as parsed by the given
schema. It is equivalent to argv_parser(schema)(argv). See also help(CommandLineParser) for
information about the instructions format.
argv_parse(plan, argv) yields a pimms IMap object whose parameters have been initialized from
the arguments in argv using the given pimms calculation plan as a template for the argument
schema; see help(to_argv_schema) for information about the way plans are interpreted as argv
schemas. The plan is initialized with the additional parameters 'argv' and 'argv_parsed'. The
'argv' parameter contains the command-line arguments in argv that were not interpreted by the
command-line parser; the 'argv_parsed' parameter contains the parsed command-line parameters.
To avoid the plan-specific behavior and instead only parse the arguments from a plan, use
argv_parse(to_argv_schema(plan), argv).
The following options may be given:
* init (default: None) specifies that the given dictionary should be merged into either the
resulting options dictionary (if schema is a schema and not a plan) or into the parameters
initially provided to the plan (if schema is a plan).
* arg_names (default: None) may be a dictionary that specifies explicity command-line
argument names for the plan parameters; plan parameters should be keys and the argument
names should be values. Any parameter not listed in this option will be interpreted
according to the above rules. If a parameter is mapped to None then it will not be
filled from the command-line arguments.
* arg_abbrevs (default:None) may be a dictionary that is handled identically to that of
arg_names except that its values must be single letters, which are used for the
abbreviated flag names.
* defaults (default: None) may specify the default values for the plan parameters; this
dictionary overrides the default values of the plan itself.
* value_parse (default: True) specifies whether the values are interpreted via the
ast.literal_eval() function. This may be set to False to leave the values as strings or it
may be set to a function that takes one argument and performs the parsing itself; such a
function f must obey the syntax `parsed_val = f(string_val)`. The value_parse function is
only called on arguments that have string values included. Note that by default the
value_parse function interprets the string '...' as Ellipsis in addition to the typical
ast.literal_eval() behavior.
* filters (default: None) optionally specifies a dictionary of filter functions, each of which
is passed the parsed value of the associated argument. Each filter function f must obey the
syntax `final_value = f(parsed_value)`. The keys of this dictionary must be the entry names
of the arguments. Note that filter functions are called on provided default values but the
      value_parse function is not called on these. | Below is the instruction that describes the task:
### Input:
argv_parse(schema, argv) yields the tuple (unparsed_argv, params) where unparsed_argv is a list
subset of argv that contains only those command line arguments that were not understood by
the given argument schema and params is a dictionary of parameters as parsed by the given
schema. It is equivalent to argv_parser(schema)(argv). See also help(CommandLineParser) for
information about the instructions format.
argv_parse(plan, argv) yields a pimms IMap object whose parameters have been initialized from
the arguments in argv using the given pimms calculation plan as a template for the argument
schema; see help(to_argv_schema) for information about the way plans are interpreted as argv
schemas. The plan is initialized with the additional parameters 'argv' and 'argv_parsed'. The
'argv' parameter contains the command-line arguments in argv that were not interpreted by the
command-line parser; the 'argv_parsed' parameter contains the parsed command-line parameters.
To avoid the plan-specific behavior and instead only parse the arguments from a plan, use
argv_parse(to_argv_schema(plan), argv).
The following options may be given:
* init (default: None) specifies that the given dictionary should be merged into either the
resulting options dictionary (if schema is a schema and not a plan) or into the parameters
initially provided to the plan (if schema is a plan).
* arg_names (default: None) may be a dictionary that specifies explicity command-line
argument names for the plan parameters; plan parameters should be keys and the argument
names should be values. Any parameter not listed in this option will be interpreted
according to the above rules. If a parameter is mapped to None then it will not be
filled from the command-line arguments.
* arg_abbrevs (default:None) may be a dictionary that is handled identically to that of
arg_names except that its values must be single letters, which are used for the
abbreviated flag names.
* defaults (default: None) may specify the default values for the plan parameters; this
dictionary overrides the default values of the plan itself.
* value_parse (default: True) specifies whether the values are interpreted via the
ast.literal_eval() function. This may be set to False to leave the values as strings or it
may be set to a function that takes one argument and performs the parsing itself; such a
function f must obey the syntax `parsed_val = f(string_val)`. The value_parse function is
only called on arguments that have string values included. Note that by default the
value_parse function interprets the string '...' as Ellipsis in addition to the typical
ast.literal_eval() behavior.
* filters (default: None) optionally specifies a dictionary of filter functions, each of which
is passed the parsed value of the associated argument. Each filter function f must obey the
syntax `final_value = f(parsed_value)`. The keys of this dictionary must be the entry names
of the arguments. Note that filter functions are called on provided default values but the
value_parse function is not called on these.
### Response:
def argv_parse(schema, argv, init=None,
arg_names=None, arg_abbrevs=None, value_parser=True, defaults=None, filters=None):
'''
argv_parse(schema, argv) yields the tuple (unparsed_argv, params) where unparsed_argv is a list
subset of argv that contains only those command line arguments that were not understood by
the given argument schema and params is a dictionary of parameters as parsed by the given
schema. It is equivalent to argv_parser(schema)(argv). See also help(CommandLineParser) for
information about the instructions format.
argv_parse(plan, argv) yields a pimms IMap object whose parameters have been initialized from
the arguments in argv using the given pimms calculation plan as a template for the argument
schema; see help(to_argv_schema) for information about the way plans are interpreted as argv
schemas. The plan is initialized with the additional parameters 'argv' and 'argv_parsed'. The
'argv' parameter contains the command-line arguments in argv that were not interpreted by the
command-line parser; the 'argv_parsed' parameter contains the parsed command-line parameters.
To avoid the plan-specific behavior and instead only parse the arguments from a plan, use
argv_parse(to_argv_schema(plan), argv).
The following options may be given:
* init (default: None) specifies that the given dictionary should be merged into either the
resulting options dictionary (if schema is a schema and not a plan) or into the parameters
initially provided to the plan (if schema is a plan).
* arg_names (default: None) may be a dictionary that specifies explicity command-line
argument names for the plan parameters; plan parameters should be keys and the argument
names should be values. Any parameter not listed in this option will be interpreted
according to the above rules. If a parameter is mapped to None then it will not be
filled from the command-line arguments.
* arg_abbrevs (default:None) may be a dictionary that is handled identically to that of
arg_names except that its values must be single letters, which are used for the
abbreviated flag names.
* defaults (default: None) may specify the default values for the plan parameters; this
dictionary overrides the default values of the plan itself.
* value_parse (default: True) specifies whether the values are interpreted via the
ast.literal_eval() function. This may be set to False to leave the values as strings or it
may be set to a function that takes one argument and performs the parsing itself; such a
function f must obey the syntax `parsed_val = f(string_val)`. The value_parse function is
only called on arguments that have string values included. Note that by default the
value_parse function interprets the string '...' as Ellipsis in addition to the typical
ast.literal_eval() behavior.
* filters (default: None) optionally specifies a dictionary of filter functions, each of which
is passed the parsed value of the associated argument. Each filter function f must obey the
syntax `final_value = f(parsed_value)`. The keys of this dictionary must be the entry names
of the arguments. Note that filter functions are called on provided default values but the
value_parse function is not called on these.
'''
parser = argv_parser(schema,
arg_names=arg_names, arg_abbrevs=arg_abbrevs, defaults=defaults,
value_parser=value_parser, filters=filters)
res = parser(argv)
if is_plan(schema):
init = {} if init is None else init
init = pyr.pmap(init) if not is_pmap(init) else init
return schema({'argv': tuple(res[0]), 'argv_parsed': pyr.pmap(res[1])}, res[1], init)
else:
return res if init is None else (res[0], dict(merge(res[1], init))) |
def send_message(adu, serial_port):
""" Send ADU over serial to to server and return parsed response.
:param adu: Request ADU.
:param sock: Serial port instance.
:return: Parsed response from server.
"""
serial_port.write(adu)
serial_port.flush()
# Check exception ADU (which is shorter than all other responses) first.
exception_adu_size = 5
response_error_adu = recv_exactly(serial_port.read, exception_adu_size)
raise_for_exception_adu(response_error_adu)
expected_response_size = \
expected_response_pdu_size_from_request_pdu(adu[1:-2]) + 3
response_remainder = recv_exactly(
serial_port.read, expected_response_size - exception_adu_size)
return parse_response_adu(response_error_adu + response_remainder, adu) | Send ADU over serial to to server and return parsed response.
:param adu: Request ADU.
:param sock: Serial port instance.
    :return: Parsed response from server. | Below is the instruction that describes the task:
### Input:
Send ADU over serial to to server and return parsed response.
:param adu: Request ADU.
:param sock: Serial port instance.
:return: Parsed response from server.
### Response:
def send_message(adu, serial_port):
""" Send ADU over serial to to server and return parsed response.
:param adu: Request ADU.
:param sock: Serial port instance.
:return: Parsed response from server.
"""
serial_port.write(adu)
serial_port.flush()
# Check exception ADU (which is shorter than all other responses) first.
exception_adu_size = 5
response_error_adu = recv_exactly(serial_port.read, exception_adu_size)
raise_for_exception_adu(response_error_adu)
expected_response_size = \
expected_response_pdu_size_from_request_pdu(adu[1:-2]) + 3
response_remainder = recv_exactly(
serial_port.read, expected_response_size - exception_adu_size)
return parse_response_adu(response_error_adu + response_remainder, adu) |
def cluster_template_events_single_ifo(
self, tcolumn, column, window_size, ifo):
""" Cluster the internal events over the named column
"""
# Just call through to the standard function
self.template_events = self.template_event_dict[ifo]
self.cluster_template_events(tcolumn, column, window_size)
self.template_event_dict[ifo] = self.template_events
    self.template_events = None | Cluster the internal events over the named column | Below is the instruction that describes the task:
### Input:
Cluster the internal events over the named column
### Response:
def cluster_template_events_single_ifo(
self, tcolumn, column, window_size, ifo):
""" Cluster the internal events over the named column
"""
# Just call through to the standard function
self.template_events = self.template_event_dict[ifo]
self.cluster_template_events(tcolumn, column, window_size)
self.template_event_dict[ifo] = self.template_events
self.template_events = None |
def get_task_subtasks(client, task_id, completed=False):
''' Gets subtasks for task with given ID '''
params = {
'task_id' : int(task_id),
'completed' : completed,
}
response = client.authenticated_request(client.api.Endpoints.SUBTASKS, params=params)
    return response.json() | Gets subtasks for task with given ID | Below is the instruction that describes the task:
### Input:
Gets subtasks for task with given ID
### Response:
def get_task_subtasks(client, task_id, completed=False):
''' Gets subtasks for task with given ID '''
params = {
'task_id' : int(task_id),
'completed' : completed,
}
response = client.authenticated_request(client.api.Endpoints.SUBTASKS, params=params)
return response.json() |
def _log_future_exception(future, logger):
"""Log any exception raised by future."""
if not future.done():
return
try:
future.result()
except: #pylint:disable=bare-except;This is a background logging helper
        logger.warning("Exception in ignored future: %s", future, exc_info=True) | Log any exception raised by future. | Below is the instruction that describes the task:
### Input:
Log any exception raised by future.
### Response:
def _log_future_exception(future, logger):
"""Log any exception raised by future."""
if not future.done():
return
try:
future.result()
except: #pylint:disable=bare-except;This is a background logging helper
logger.warning("Exception in ignored future: %s", future, exc_info=True) |
def _clear(self):
"""
Clear all references (called by its bundle activator)
"""
self.__names.clear()
self.__queue.clear()
    self.__context = None | Clear all references (called by its bundle activator) | Below is the instruction that describes the task:
### Input:
Clear all references (called by its bundle activator)
### Response:
def _clear(self):
"""
Clear all references (called by its bundle activator)
"""
self.__names.clear()
self.__queue.clear()
self.__context = None |
def createEditor(self, delegate, parent, _option):
""" Creates a hidden widget so that only the reset button is visible during editing.
:type option: QStyleOptionViewItem
"""
return GroupCtiEditor(self, delegate, parent=parent) | Creates a hidden widget so that only the reset button is visible during editing.
    :type option: QStyleOptionViewItem | Below is the instruction that describes the task:
### Input:
Creates a hidden widget so that only the reset button is visible during editing.
:type option: QStyleOptionViewItem
### Response:
def createEditor(self, delegate, parent, _option):
""" Creates a hidden widget so that only the reset button is visible during editing.
:type option: QStyleOptionViewItem
"""
return GroupCtiEditor(self, delegate, parent=parent) |
def set_hash_value(self, key, field, value, pipeline=False):
"""Set the value of field in a hash stored at key.
Args:
key (str): key (name) of the hash
field (str): Field within the hash to set
value: Value to set
pipeline (bool): True, start a transaction block. Default false.
"""
# FIXME(BMo): new name for this function -> save_dict_value ?
if pipeline:
self._pipeline.hset(key, field, str(value))
else:
self._db.hset(key, field, str(value)) | Set the value of field in a hash stored at key.
Args:
key (str): key (name) of the hash
field (str): Field within the hash to set
value: Value to set
        pipeline (bool): True, start a transaction block. Default false. | Below is the instruction that describes the task:
### Input:
Set the value of field in a hash stored at key.
Args:
key (str): key (name) of the hash
field (str): Field within the hash to set
value: Value to set
pipeline (bool): True, start a transaction block. Default false.
### Response:
def set_hash_value(self, key, field, value, pipeline=False):
"""Set the value of field in a hash stored at key.
Args:
key (str): key (name) of the hash
field (str): Field within the hash to set
value: Value to set
pipeline (bool): True, start a transaction block. Default false.
"""
# FIXME(BMo): new name for this function -> save_dict_value ?
if pipeline:
self._pipeline.hset(key, field, str(value))
else:
self._db.hset(key, field, str(value)) |
def _load_cell(args, schema):
"""Implements the BigQuery load magic used to load data from GCS to a table.
The supported syntax is:
%bigquery load -S|--source <source> -D|--destination <table> <other_args>
Args:
args: the arguments following '%bigquery load'.
schema: a JSON schema for the destination table.
Returns:
A message about whether the load succeeded or failed.
"""
name = args['destination']
table = _get_table(name)
if not table:
table = datalab.bigquery.Table(name)
if table.exists():
if args['mode'] == 'create':
raise Exception('%s already exists; use --append or --overwrite' % name)
elif schema:
table.create(json.loads(schema))
elif not args['infer']:
raise Exception(
'Table does not exist, no schema specified in cell and no --infer flag; cannot load')
# TODO(gram): we should probably try do the schema infer ourselves as BQ doesn't really seem
# to be able to do it. Alternatively we can drop the --infer argument and force the user
# to use a pre-existing table or supply a JSON schema.
csv_options = datalab.bigquery.CSVOptions(delimiter=args['delimiter'],
skip_leading_rows=args['skip'],
allow_jagged_rows=not args['strict'],
quote=args['quote'])
job = table.load(args['source'],
mode=args['mode'],
source_format=('CSV' if args['format'] == 'csv' else 'NEWLINE_DELIMITED_JSON'),
csv_options=csv_options,
ignore_unknown_values=not args['strict'])
if job.failed:
raise Exception('Load failed: %s' % str(job.fatal_error))
elif job.errors:
raise Exception('Load completed with errors: %s' % str(job.errors)) | Implements the BigQuery load magic used to load data from GCS to a table.
The supported syntax is:
%bigquery load -S|--source <source> -D|--destination <table> <other_args>
Args:
args: the arguments following '%bigquery load'.
schema: a JSON schema for the destination table.
Returns:
    A message about whether the load succeeded or failed. | Below is the instruction that describes the task:
### Input:
Implements the BigQuery load magic used to load data from GCS to a table.
The supported syntax is:
%bigquery load -S|--source <source> -D|--destination <table> <other_args>
Args:
args: the arguments following '%bigquery load'.
schema: a JSON schema for the destination table.
Returns:
A message about whether the load succeeded or failed.
### Response:
def _load_cell(args, schema):
"""Implements the BigQuery load magic used to load data from GCS to a table.
The supported syntax is:
%bigquery load -S|--source <source> -D|--destination <table> <other_args>
Args:
args: the arguments following '%bigquery load'.
schema: a JSON schema for the destination table.
Returns:
A message about whether the load succeeded or failed.
"""
name = args['destination']
table = _get_table(name)
if not table:
table = datalab.bigquery.Table(name)
if table.exists():
if args['mode'] == 'create':
raise Exception('%s already exists; use --append or --overwrite' % name)
elif schema:
table.create(json.loads(schema))
elif not args['infer']:
raise Exception(
'Table does not exist, no schema specified in cell and no --infer flag; cannot load')
# TODO(gram): we should probably try do the schema infer ourselves as BQ doesn't really seem
# to be able to do it. Alternatively we can drop the --infer argument and force the user
# to use a pre-existing table or supply a JSON schema.
csv_options = datalab.bigquery.CSVOptions(delimiter=args['delimiter'],
skip_leading_rows=args['skip'],
allow_jagged_rows=not args['strict'],
quote=args['quote'])
job = table.load(args['source'],
mode=args['mode'],
source_format=('CSV' if args['format'] == 'csv' else 'NEWLINE_DELIMITED_JSON'),
csv_options=csv_options,
ignore_unknown_values=not args['strict'])
if job.failed:
raise Exception('Load failed: %s' % str(job.fatal_error))
elif job.errors:
raise Exception('Load completed with errors: %s' % str(job.errors)) |
def p_poke2(p):
""" statement : POKE numbertype expr COMMA expr
| POKE LP numbertype expr COMMA expr RP
"""
i = 2 if isinstance(p[2], Symbol) or p[2] is None else 3
if p[i + 1] is None or p[i + 3] is None:
p[0] = None
return
p[0] = make_sentence('POKE',
make_typecast(TYPE.uinteger, p[i + 1], p.lineno(i + 2)),
make_typecast(p[i], p[i + 3], p.lineno(i + 3))
) | statement : POKE numbertype expr COMMA expr
                  | POKE LP numbertype expr COMMA expr RP | Below is the instruction that describes the task:
### Input:
statement : POKE numbertype expr COMMA expr
| POKE LP numbertype expr COMMA expr RP
### Response:
def p_poke2(p):
""" statement : POKE numbertype expr COMMA expr
| POKE LP numbertype expr COMMA expr RP
"""
i = 2 if isinstance(p[2], Symbol) or p[2] is None else 3
if p[i + 1] is None or p[i + 3] is None:
p[0] = None
return
p[0] = make_sentence('POKE',
make_typecast(TYPE.uinteger, p[i + 1], p.lineno(i + 2)),
make_typecast(p[i], p[i + 3], p.lineno(i + 3))
) |
def run(self):
"""Starts the uploading thread."""
root_path, folder_name = os.path.split(self.root_folder)
self.root_folder = os.path.join(root_path, folder_name)
for dirname, _, fnames in os.walk(self.root_folder):
        self.upload_files_in_folder(dirname, fnames) | Starts the uploading thread. | Below is the instruction that describes the task:
### Input:
Starts the uploading thread.
### Response:
def run(self):
"""Starts the uploading thread."""
root_path, folder_name = os.path.split(self.root_folder)
self.root_folder = os.path.join(root_path, folder_name)
for dirname, _, fnames in os.walk(self.root_folder):
self.upload_files_in_folder(dirname, fnames) |
def iri_to_uri(iri, charset='utf-8'):
r"""Converts any unicode based IRI to an acceptable ASCII URI. Werkzeug
always uses utf-8 URLs internally because this is what browsers and HTTP
do as well. In some places where it accepts an URL it also accepts a
unicode IRI and converts it into a URI.
Examples for IRI versus URI:
>>> iri_to_uri(u'http://☃.net/')
'http://xn--n3h.net/'
>>> iri_to_uri(u'http://üser:pässword@☃.net/påth')
'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'
.. versionadded:: 0.6
:param iri: the iri to convert
:param charset: the charset for the URI
"""
iri = unicode(iri)
scheme, auth, hostname, port, path, query, fragment = _uri_split(iri)
scheme = scheme.encode('ascii')
hostname = hostname.encode('idna')
if ':' in hostname:
hostname = '[' + hostname + ']'
if auth:
if ':' in auth:
auth, password = auth.split(':', 1)
else:
password = None
auth = _quote(auth.encode(charset))
if password:
auth += ':' + _quote(password.encode(charset))
hostname = auth + '@' + hostname
if port:
hostname += ':' + port
path = _quote(path.encode(charset), safe="/:~+%")
query = _quote(query.encode(charset), safe="=%&[]:;$()+,!?*/")
# this absolutely always must return a string. Otherwise some parts of
# the system might perform double quoting (#61)
return str(urlparse.urlunsplit([scheme, hostname, path, query, fragment])) | r"""Converts any unicode based IRI to an acceptable ASCII URI. Werkzeug
always uses utf-8 URLs internally because this is what browsers and HTTP
do as well. In some places where it accepts an URL it also accepts a
unicode IRI and converts it into a URI.
Examples for IRI versus URI:
>>> iri_to_uri(u'http://☃.net/')
'http://xn--n3h.net/'
>>> iri_to_uri(u'http://üser:pässword@☃.net/påth')
'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'
.. versionadded:: 0.6
:param iri: the iri to convert
:param charset: the charset for the URI | Below is the the instruction that describes the task:
### Input:
r"""Converts any unicode based IRI to an acceptable ASCII URI. Werkzeug
always uses utf-8 URLs internally because this is what browsers and HTTP
do as well. In some places where it accepts an URL it also accepts a
unicode IRI and converts it into a URI.
Examples for IRI versus URI:
>>> iri_to_uri(u'http://☃.net/')
'http://xn--n3h.net/'
>>> iri_to_uri(u'http://üser:pässword@☃.net/påth')
'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'
.. versionadded:: 0.6
:param iri: the iri to convert
:param charset: the charset for the URI
### Response:
def iri_to_uri(iri, charset='utf-8'):
    r"""Converts any unicode based IRI to an acceptable ASCII URI. Werkzeug
    always uses utf-8 URLs internally because this is what browsers and HTTP
    do as well. In some places where it accepts an URL it also accepts a
    unicode IRI and converts it into a URI.
    Examples for IRI versus URI:
    >>> iri_to_uri(u'http://☃.net/')
    'http://xn--n3h.net/'
    >>> iri_to_uri(u'http://üser:pässword@☃.net/påth')
    'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'
    .. versionadded:: 0.6
    :param iri: the iri to convert
    :param charset: the charset for the URI
    """
    iri = unicode(iri)
    scheme, userinfo, host, port, path, query, fragment = _uri_split(iri)
    # Scheme must be plain ASCII; the host goes through IDNA encoding.
    scheme = scheme.encode('ascii')
    host = host.encode('idna')
    if ':' in host:
        # IPv6 literal — wrap in brackets.
        host = '[' + host + ']'
    if userinfo:
        if ':' in userinfo:
            user, password = userinfo.split(':', 1)
        else:
            user, password = userinfo, None
        userinfo = _quote(user.encode(charset))
        if password:
            userinfo += ':' + _quote(password.encode(charset))
        host = userinfo + '@' + host
    if port:
        host += ':' + port
    path = _quote(path.encode(charset), safe="/:~+%")
    query = _quote(query.encode(charset), safe="=%&[]:;$()+,!?*/")
    # this absolutely always must return a string. Otherwise some parts of
    # the system might perform double quoting (#61)
    return str(urlparse.urlunsplit([scheme, host, path, query, fragment]))
def swapon(name, priority=None):
'''
Activate a swap disk
.. versionchanged:: 2016.3.2
CLI Example:
.. code-block:: bash
salt '*' mount.swapon /root/swapfile
'''
ret = {}
on_ = swaps()
if name in on_:
ret['stats'] = on_[name]
ret['new'] = False
return ret
if __grains__['kernel'] == 'SunOS':
if __grains__['virtual'] != 'zone':
__salt__['cmd.run']('swap -a {0}'.format(name), python_shell=False)
else:
return False
else:
cmd = 'swapon {0}'.format(name)
if priority and 'AIX' not in __grains__['kernel']:
cmd += ' -p {0}'.format(priority)
__salt__['cmd.run'](cmd, python_shell=False)
on_ = swaps()
if name in on_:
ret['stats'] = on_[name]
ret['new'] = True
return ret
return ret | Activate a swap disk
.. versionchanged:: 2016.3.2
CLI Example:
.. code-block:: bash
salt '*' mount.swapon /root/swapfile | Below is the the instruction that describes the task:
### Input:
Activate a swap disk
.. versionchanged:: 2016.3.2
CLI Example:
.. code-block:: bash
salt '*' mount.swapon /root/swapfile
### Response:
def swapon(name, priority=None):
    '''
    Activate a swap disk
    .. versionchanged:: 2016.3.2
    CLI Example:
    .. code-block:: bash
        salt '*' mount.swapon /root/swapfile
    '''
    ret = {}
    existing = swaps()
    if name in existing:
        # Already active — report its stats without re-activating.
        ret['stats'] = existing[name]
        ret['new'] = False
        return ret
    kernel = __grains__['kernel']
    if kernel == 'SunOS':
        if __grains__['virtual'] == 'zone':
            # Zones cannot manage swap devices.
            return False
        __salt__['cmd.run']('swap -a {0}'.format(name), python_shell=False)
    else:
        cmd = 'swapon {0}'.format(name)
        # AIX's swapon has no priority flag.
        if priority and 'AIX' not in kernel:
            cmd += ' -p {0}'.format(priority)
        __salt__['cmd.run'](cmd, python_shell=False)
    # Re-read the swap table to confirm activation.
    existing = swaps()
    if name in existing:
        ret['stats'] = existing[name]
        ret['new'] = True
    return ret
def requiredOneInGroup(col_name, group, dm, df, *args):
"""
If col_name is present in df, the group validation is satisfied.
If not, it still may be satisfied, but not by THIS col_name.
If col_name is missing, return col_name, else return None.
Later, we will validate to see if there is at least one None (non-missing)
value for this group.
"""
if col_name in df.columns:
# if the column name is present, return nothing
return None
else:
# if the column name is missing, return column name
return col_name | If col_name is present in df, the group validation is satisfied.
If not, it still may be satisfied, but not by THIS col_name.
If col_name is missing, return col_name, else return None.
Later, we will validate to see if there is at least one None (non-missing)
value for this group. | Below is the the instruction that describes the task:
### Input:
If col_name is present in df, the group validation is satisfied.
If not, it still may be satisfied, but not by THIS col_name.
If col_name is missing, return col_name, else return None.
Later, we will validate to see if there is at least one None (non-missing)
value for this group.
### Response:
def requiredOneInGroup(col_name, group, dm, df, *args):
    """
    If col_name is present in df, the group validation is satisfied.
    If not, it still may be satisfied, but not by THIS col_name.
    If col_name is missing, return col_name, else return None.
    Later, we will validate to see if there is at least one None (non-missing)
    value for this group.
    """
    # None signals "present"; the missing column's name signals "absent".
    return None if col_name in df.columns else col_name
return col_name |
def unlink_version(self, source, target):
"""Unlink the current version of the target from the source
"""
if not hasattr(source, REFERENCE_VERSIONS):
return
target_uid = api.get_uid(target)
if target_uid in source.reference_versions[target_uid]:
# delete the version
del source.reference_versions[target_uid]
        # persist changes that occurred to referenced versions
source._p_changed = 1
else:
logger.warn("No version link found on '{}' -> '{}'"
.format(repr(source), repr(target))) | Unlink the current version of the target from the source | Below is the the instruction that describes the task:
### Input:
Unlink the current version of the target from the source
### Response:
def unlink_version(self, source, target):
    """Unlink the current version of the target from the source.

    Removes the pinned version entry for ``target`` from the source's
    reference-version mapping, if one exists; otherwise logs a warning.
    """
    if not hasattr(source, REFERENCE_VERSIONS):
        # Source never had any pinned versions — nothing to unlink.
        return
    target_uid = api.get_uid(target)
    # BUGFIX: test membership on the mapping itself, not on the value
    # stored under the key. The original `target_uid in
    # source.reference_versions[target_uid]` raised KeyError whenever the
    # target had never been linked, making the warning branch unreachable.
    if target_uid in source.reference_versions:
        # delete the version
        del source.reference_versions[target_uid]
        # persist changes that occurred to referenced versions
        source._p_changed = 1
    else:
        logger.warn("No version link found on '{}' -> '{}'"
                    .format(repr(source), repr(target)))
def update(self, iterable):
"""Update the list by adding all elements from *iterable*."""
_maxes, _lists, _keys = self._maxes, self._lists, self._keys
values = sorted(iterable, key=self._key)
if _maxes:
if len(values) * 4 >= self._len:
values.extend(chain.from_iterable(_lists))
values.sort(key=self._key)
self._clear()
else:
_add = self.add
for val in values:
_add(val)
return
_load, _index = self._load, self._index
_lists.extend(values[pos:(pos + _load)]
for pos in range(0, len(values), _load))
_keys.extend(list(map(self._key, _list)) for _list in _lists)
_maxes.extend(sublist[-1] for sublist in _keys)
self._len = len(values)
del _index[:] | Update the list by adding all elements from *iterable*. | Below is the the instruction that describes the task:
### Input:
Update the list by adding all elements from *iterable*.
### Response:
def update(self, iterable):
    """Update the list by adding all elements from *iterable*."""
    _maxes, _lists, _keys = self._maxes, self._lists, self._keys
    new_values = sorted(iterable, key=self._key)
    if _maxes:
        if len(new_values) * 4 >= self._len:
            # Large batch: merging everything and rebuilding from scratch
            # beats repeated single insertions.
            new_values.extend(chain.from_iterable(_lists))
            new_values.sort(key=self._key)
            self._clear()
        else:
            # Small batch: plain one-by-one insertion is cheaper.
            add = self.add
            for value in new_values:
                add(value)
            return
    # Bulk-build the two-level structure from the sorted values.
    load, index = self._load, self._index
    _lists.extend(new_values[start:(start + load)]
                  for start in range(0, len(new_values), load))
    _keys.extend(list(map(self._key, sublist)) for sublist in _lists)
    _maxes.extend(sublist[-1] for sublist in _keys)
    self._len = len(new_values)
    # The positional index is now stale; drop it so it is rebuilt lazily.
    del index[:]
def build(self, klass, name="default", amount=None):
"""
Makes a factory builder with a specified amount.
:param klass: The class
:type klass: class
:param name: The type
:type name: str
:param amount: The number of models to create
:type amount: int
:return: mixed
"""
if amount is None:
if isinstance(name, int):
amount = name
name = "default"
else:
amount = 1
return self.of(klass, name).times(amount) | Makes a factory builder with a specified amount.
:param klass: The class
:type klass: class
:param name: The type
:type name: str
:param amount: The number of models to create
:type amount: int
:return: mixed | Below is the the instruction that describes the task:
### Input:
Makes a factory builder with a specified amount.
:param klass: The class
:type klass: class
:param name: The type
:type name: str
:param amount: The number of models to create
:type amount: int
:return: mixed
### Response:
def build(self, klass, name="default", amount=None):
    """
    Makes a factory builder with a specified amount.
    :param klass: The class
    :type klass: class
    :param name: The type
    :type name: str
    :param amount: The number of models to create
    :type amount: int
    :return: mixed
    """
    if amount is None:
        amount = 1
        # Allow build(SomeClass, 5) shorthand: an int in the name slot
        # is really the amount.
        if isinstance(name, int):
            name, amount = "default", name
    return self.of(klass, name).times(amount)
def functions_to_table(mod, colwidth=[27, 48]):
r"""
Given a module of functions, returns a ReST formatted text string that
outputs a table when printed.
Parameters
----------
mod : module
The module containing the functions to be included in the table, such
as 'porespy.filters'.
colwidths : list of ints
The width of the first and second columns. Note that because of the
vertical lines separating columns and define the edges of the table,
the total table width will be 3 characters wider than the total sum
of the specified column widths.
"""
temp = mod.__dir__()
funcs = [i for i in temp if not i[0].startswith('_')]
funcs.sort()
row = '+' + '-'*colwidth[0] + '+' + '-'*colwidth[1] + '+'
fmt = '{0:1s} {1:' + str(colwidth[0]-2) + 's} {2:1s} {3:' \
+ str(colwidth[1]-2) + 's} {4:1s}'
lines = []
lines.append(row)
lines.append(fmt.format('|', 'Method', '|', 'Description', '|'))
lines.append(row.replace('-', '='))
for i, item in enumerate(funcs):
try:
s = getattr(mod, item).__doc__.strip()
end = s.find('\n')
if end > colwidth[1] - 2:
s = s[:colwidth[1] - 5] + '...'
lines.append(fmt.format('|', item, '|', s[:end], '|'))
lines.append(row)
except AttributeError:
pass
s = '\n'.join(lines)
return s | r"""
Given a module of functions, returns a ReST formatted text string that
outputs a table when printed.
Parameters
----------
mod : module
The module containing the functions to be included in the table, such
as 'porespy.filters'.
colwidths : list of ints
The width of the first and second columns. Note that because of the
vertical lines separating columns and define the edges of the table,
the total table width will be 3 characters wider than the total sum
of the specified column widths. | Below is the the instruction that describes the task:
### Input:
r"""
Given a module of functions, returns a ReST formatted text string that
outputs a table when printed.
Parameters
----------
mod : module
The module containing the functions to be included in the table, such
as 'porespy.filters'.
colwidths : list of ints
The width of the first and second columns. Note that because of the
vertical lines separating columns and define the edges of the table,
the total table width will be 3 characters wider than the total sum
of the specified column widths.
### Response:
def functions_to_table(mod, colwidth=(27, 48)):
    r"""
    Given a module of functions, returns a ReST formatted text string that
    outputs a table when printed.
    Parameters
    ----------
    mod : module
        The module containing the functions to be included in the table, such
        as 'porespy.filters'.
    colwidth : sequence of two ints
        The width of the first and second columns. Note that because of the
        vertical lines separating columns and define the edges of the table,
        the total table width will be 3 characters wider than the total sum
        of the specified column widths.
    """
    # Public names only; sorted for a stable table.
    funcs = sorted(i for i in mod.__dir__() if not i.startswith('_'))
    row = '+' + '-' * colwidth[0] + '+' + '-' * colwidth[1] + '+'
    fmt = ('{0:1s} {1:' + str(colwidth[0] - 2) + 's} {2:1s} {3:'
           + str(colwidth[1] - 2) + 's} {4:1s}')
    lines = [row,
             fmt.format('|', 'Method', '|', 'Description', '|'),
             row.replace('-', '=')]
    for item in funcs:
        try:
            s = getattr(mod, item).__doc__.strip()
        except AttributeError:
            # Attribute has no docstring (or no __doc__ at all) — skip it.
            continue
        end = s.find('\n')
        if end == -1:
            # BUGFIX: find() returns -1 for single-line docstrings, and
            # s[:-1] silently dropped the last character. Use the full line.
            end = len(s)
        if end > colwidth[1] - 2:
            # First line too wide for the column — truncate with ellipsis.
            s = s[:colwidth[1] - 5] + '...'
        lines.append(fmt.format('|', item, '|', s[:end], '|'))
        lines.append(row)
    return '\n'.join(lines)
def create_nv_link(self):
"""After a save(), write a link to the saved file using a non-versioned name"""
from os.path import abspath, islink
from os import unlink, symlink
nv_name = self.doc.as_version(None)
from_path = abspath(self._last_write_path or self.package_path.path)
to_path = join(dirname(from_path), nv_name + self.type_suffix)
if islink(to_path):
unlink(to_path)
symlink(from_path, to_path) | After a save(), write a link to the saved file using a non-versioned name | Below is the the instruction that describes the task:
### Input:
After a save(), write a link to the saved file using a non-versioned name
### Response:
def create_nv_link(self):
    """After a save(), write a link to the saved file using a non-versioned name"""
    from os.path import abspath, islink
    from os import unlink, symlink
    nv_name = self.doc.as_version(None)
    # Link target: the file we just wrote (or the default package path).
    source_path = abspath(self._last_write_path or self.package_path.path)
    link_path = join(dirname(source_path), nv_name + self.type_suffix)
    # Replace any stale link from a previous save.
    if islink(link_path):
        unlink(link_path)
    symlink(source_path, link_path)
def _bytes_from_json(value, field):
"""Base64-decode value"""
if _not_null(value, field):
return base64.standard_b64decode(_to_bytes(value)) | Base64-decode value | Below is the the instruction that describes the task:
### Input:
Base64-decode value
### Response:
def _bytes_from_json(value, field):
    """Base64-decode value"""
    # Null values decode to None.
    if not _not_null(value, field):
        return None
    return base64.standard_b64decode(_to_bytes(value))
def copy(self, **attributes):
"""Create a copy of this message.
"""
updated_options = attributes.pop("options", {})
options = self.options.copy()
options.update(updated_options)
return self._replace(**attributes, options=options) | Create a copy of this message. | Below is the the instruction that describes the task:
### Input:
Create a copy of this message.
### Response:
def copy(self, **attributes):
    """Create a copy of this message.
    """
    # Merge option overrides on top of the existing options so the
    # caller's "options" kwarg extends rather than replaces them.
    merged_options = self.options.copy()
    merged_options.update(attributes.pop("options", {}))
    return self._replace(**attributes, options=merged_options)
def listPrimaryDatasets(self, primary_ds_name="", primary_ds_type=""):
"""
API to list primary datasets
:param primary_ds_type: List primary datasets with primary dataset type (Optional)
:type primary_ds_type: str
:param primary_ds_name: List that primary dataset (Optional)
:type primary_ds_name: str
:returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type)
:rtype: list of dicts
:returns: List of dictionaries containing the following keys (create_by, primary_ds_type, primary_ds_id, primary_ds_name, creation_date)
:rtype: list of dicts
"""
primary_ds_name = primary_ds_name.replace("*", "%")
primary_ds_type = primary_ds_type.replace("*", "%")
try:
return self.dbsPrimaryDataset.listPrimaryDatasets(primary_ds_name, primary_ds_type)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except Exception as ex:
sError = "DBSReaderModel/listPrimaryDatasets. %s\n Exception trace: \n %s." \
% (ex, traceback.format_exc() )
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) | API to list primary datasets
:param primary_ds_type: List primary datasets with primary dataset type (Optional)
:type primary_ds_type: str
:param primary_ds_name: List that primary dataset (Optional)
:type primary_ds_name: str
:returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type)
:rtype: list of dicts
:returns: List of dictionaries containing the following keys (create_by, primary_ds_type, primary_ds_id, primary_ds_name, creation_date)
:rtype: list of dicts | Below is the the instruction that describes the task:
### Input:
API to list primary datasets
:param primary_ds_type: List primary datasets with primary dataset type (Optional)
:type primary_ds_type: str
:param primary_ds_name: List that primary dataset (Optional)
:type primary_ds_name: str
:returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type)
:rtype: list of dicts
:returns: List of dictionaries containing the following keys (create_by, primary_ds_type, primary_ds_id, primary_ds_name, creation_date)
:rtype: list of dicts
### Response:
def listPrimaryDatasets(self, primary_ds_name="", primary_ds_type=""):
    """
    API to list primary datasets
    :param primary_ds_type: List primary datasets with primary dataset type (Optional)
    :type primary_ds_type: str
    :param primary_ds_name: List that primary dataset (Optional)
    :type primary_ds_name: str
    :returns: List of dictionaries containing the following keys (create_by, primary_ds_type, primary_ds_id, primary_ds_name, creation_date)
    :rtype: list of dicts
    """
    # Translate the public wildcard syntax to SQL LIKE patterns.
    name_pattern = primary_ds_name.replace("*", "%")
    type_pattern = primary_ds_type.replace("*", "%")
    try:
        return self.dbsPrimaryDataset.listPrimaryDatasets(name_pattern, type_pattern)
    except dbsException as de:
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
    except Exception as ex:
        sError = "DBSReaderModel/listPrimaryDatasets. %s\n Exception trace: \n %s." \
                 % (ex, traceback.format_exc())
        dbsExceptionHandler('dbsException-server-error',
                            dbsExceptionCode['dbsException-server-error'],
                            self.logger.exception, sError)
def join_path_prefix(path, pre_path=None):
"""
If path set and not absolute, append it to pre path (if used)
:param path: path to append
:type path: str | None
:param pre_path: Base path to append to (default: None)
:type pre_path: None | str
:return: Path or appended path
:rtype: str | None
"""
if not path:
return path
if pre_path and not os.path.isabs(path):
return os.path.join(pre_path, path)
return path | If path set and not absolute, append it to pre path (if used)
:param path: path to append
:type path: str | None
:param pre_path: Base path to append to (default: None)
:type pre_path: None | str
:return: Path or appended path
:rtype: str | None | Below is the the instruction that describes the task:
### Input:
If path set and not absolute, append it to pre path (if used)
:param path: path to append
:type path: str | None
:param pre_path: Base path to append to (default: None)
:type pre_path: None | str
:return: Path or appended path
:rtype: str | None
### Response:
def join_path_prefix(path, pre_path=None):
    """
    If path set and not absolute, append it to pre path (if used)
    :param path: path to append
    :type path: str | None
    :param pre_path: Base path to append to (default: None)
    :type pre_path: None | str
    :return: Path or appended path
    :rtype: str | None
    """
    # Only a non-empty, relative path with a base prefix gets joined;
    # everything else passes through unchanged.
    if path and pre_path and not os.path.isabs(path):
        return os.path.join(pre_path, path)
    return path
def cas(self, key, val, time=0, min_compress_len=0):
'''Sets a key to a given value in the memcache if it hasn't been
altered since last fetched. (See L{gets}).
The C{key} can optionally be an tuple, with the first element
being the server hash value and the second being the key.
If you want to avoid making this module calculate a hash value.
You may prefer, for example, to keep all of a given user's objects
on the same memcache server, so you could use the user's unique
id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time which this value should expire,
either as a delta number of seconds, or an absolute unix
time-since-the-epoch value. See the memcached protocol docs section
"Storage Commands" for more info on <exptime>. We default to
0 == cache forever.
@param min_compress_len: The threshold length to kick in
auto-compression of the value using the zlib.compress() routine. If
the value being cached is a string, then the length of the string is
measured, else if the value is an object, then the length of the
pickle result is measured. If the resulting attempt at compression
        yields a larger string than the input, then it is discarded. For
        backwards compatibility, this parameter defaults to 0, indicating
don't ever try to compress.
'''
return self._set("cas", key, val, time, min_compress_len) | Sets a key to a given value in the memcache if it hasn't been
altered since last fetched. (See L{gets}).
The C{key} can optionally be an tuple, with the first element
being the server hash value and the second being the key.
If you want to avoid making this module calculate a hash value.
You may prefer, for example, to keep all of a given user's objects
on the same memcache server, so you could use the user's unique
id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time which this value should expire,
either as a delta number of seconds, or an absolute unix
time-since-the-epoch value. See the memcached protocol docs section
"Storage Commands" for more info on <exptime>. We default to
0 == cache forever.
@param min_compress_len: The threshold length to kick in
auto-compression of the value using the zlib.compress() routine. If
the value being cached is a string, then the length of the string is
measured, else if the value is an object, then the length of the
pickle result is measured. If the resulting attempt at compression
yields a larger string than the input, then it is discarded. For
backwards compatibility, this parameter defaults to 0, indicating
don't ever try to compress. | Below is the the instruction that describes the task:
### Input:
Sets a key to a given value in the memcache if it hasn't been
altered since last fetched. (See L{gets}).
The C{key} can optionally be an tuple, with the first element
being the server hash value and the second being the key.
If you want to avoid making this module calculate a hash value.
You may prefer, for example, to keep all of a given user's objects
on the same memcache server, so you could use the user's unique
id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time which this value should expire,
either as a delta number of seconds, or an absolute unix
time-since-the-epoch value. See the memcached protocol docs section
"Storage Commands" for more info on <exptime>. We default to
0 == cache forever.
@param min_compress_len: The threshold length to kick in
auto-compression of the value using the zlib.compress() routine. If
the value being cached is a string, then the length of the string is
measured, else if the value is an object, then the length of the
pickle result is measured. If the resulting attempt at compression
yeilds a larger string than the input, then it is discarded. For
backwards compatability, this parameter defaults to 0, indicating
don't ever try to compress.
### Response:
def cas(self, key, val, time=0, min_compress_len=0):
    '''Store *val* under *key* only if it has not been altered since it was
    last fetched with L{gets} (check-and-set).

    The C{key} may also be a tuple of C{(server_hash, key)} if you want to
    avoid making this module calculate a hash value — for example, to keep
    all of a given user's objects on the same memcache server by using the
    user's unique id as the hash value.

    @return: Nonzero on success.
    @rtype: int
    @param time: Tells memcached the time at which this value should expire,
        either as a delta number of seconds or an absolute unix
        time-since-the-epoch value. See the memcached protocol docs section
        "Storage Commands" for more info on <exptime>. We default to
        0 == cache forever.
    @param min_compress_len: The threshold length to kick in
        auto-compression of the value using the zlib.compress() routine. If
        the value being cached is a string, the length of the string is
        measured; if the value is an object, the length of the pickle result
        is measured. If the attempted compression yields a larger string
        than the input, it is discarded. For backwards compatibility, this
        parameter defaults to 0, indicating never try to compress.
    '''
    # Delegate to the shared storage helper with the "cas" verb.
    return self._set("cas", key, val, time, min_compress_len)
def probe(self, params):
"""
Evaulates a single point x, to obtain the value y and then records them
as observations.
Notes
-----
If x has been previously seen returns a cached value of y.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
Returns
-------
y : float
target function value.
"""
x = self._as_array(params)
try:
target = self._cache[_hashable(x)]
except KeyError:
params = dict(zip(self._keys, x))
target = self.target_func(**params)
self.register(x, target)
return target | Evaulates a single point x, to obtain the value y and then records them
as observations.
Notes
-----
If x has been previously seen returns a cached value of y.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
Returns
-------
y : float
target function value. | Below is the the instruction that describes the task:
### Input:
Evaluates a single point x, to obtain the value y and then records them
as observations.
Notes
-----
If x has been previously seen returns a cached value of y.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
Returns
-------
y : float
target function value.
### Response:
def probe(self, params):
    """
    Evaluates a single point, obtains the target value and records both
    as an observation.

    Notes
    -----
    If the point has been seen before, a cached value is returned instead
    of re-evaluating the target function.

    Parameters
    ----------
    params : a single point in parameter space (converted internally to
        an ndarray of length self.dim)

    Returns
    -------
    float
        Target function value.
    """
    point = self._as_array(params)
    try:
        # Cache hit: reuse the previously observed value.
        return self._cache[_hashable(point)]
    except KeyError:
        pass
    # Cache miss: evaluate the target function and record the observation.
    kwargs = dict(zip(self._keys, point))
    target = self.target_func(**kwargs)
    self.register(point, target)
    return target
def squash_xml_to_text(elm, remove_namespaces=False):
"""Squash the given XML element (as `elm`) to a text containing XML.
The outer most element/tag will be removed, but inner elements will
remain. If `remove_namespaces` is specified, XML namespace declarations
will be removed from the text.
:param elm: XML element
:type elm: :class:`xml.etree.ElementTree`
:param remove_namespaces: flag to indicate the removal of XML namespaces
:type remove_namespaces: bool
:return: the inner text and elements of the given XML element
:rtype: str
"""
leading_text = elm.text and elm.text or ''
result = [leading_text]
for child in elm.getchildren():
# Encoding is set to utf-8 because otherwise `ó` would
# become `ó`
child_value = etree.tostring(child, encoding='utf-8')
# Decode to a string for later regexp and whitespace stripping
child_value = child_value.decode('utf-8')
result.append(child_value)
if remove_namespaces:
# Best way to remove the namespaces without having the parser complain
# about producing invalid XML.
result = [re.sub(' xmlns:?[^=]*="[^"]*"', '', v) for v in result]
# Join the results and strip any surrounding whitespace
result = u''.join(result).strip()
return result | Squash the given XML element (as `elm`) to a text containing XML.
The outer most element/tag will be removed, but inner elements will
remain. If `remove_namespaces` is specified, XML namespace declarations
will be removed from the text.
:param elm: XML element
:type elm: :class:`xml.etree.ElementTree`
:param remove_namespaces: flag to indicate the removal of XML namespaces
:type remove_namespaces: bool
:return: the inner text and elements of the given XML element
:rtype: str | Below is the the instruction that describes the task:
### Input:
Squash the given XML element (as `elm`) to a text containing XML.
The outer most element/tag will be removed, but inner elements will
remain. If `remove_namespaces` is specified, XML namespace declarations
will be removed from the text.
:param elm: XML element
:type elm: :class:`xml.etree.ElementTree`
:param remove_namespaces: flag to indicate the removal of XML namespaces
:type remove_namespaces: bool
:return: the inner text and elements of the given XML element
:rtype: str
### Response:
def squash_xml_to_text(elm, remove_namespaces=False):
    """Squash the given XML element (as `elm`) to a text containing XML.
    The outer most element/tag will be removed, but inner elements will
    remain. If `remove_namespaces` is specified, XML namespace declarations
    will be removed from the text.
    :param elm: XML element
    :type elm: :class:`xml.etree.ElementTree`
    :param remove_namespaces: flag to indicate the removal of XML namespaces
    :type remove_namespaces: bool
    :return: the inner text and elements of the given XML element
    :rtype: str
    """
    result = [elm.text or '']
    # FIX: iterate the element directly instead of calling getchildren(),
    # which was deprecated and removed from xml.etree.ElementTree in
    # Python 3.9 (and is deprecated in lxml); iteration works in both.
    for child in elm:
        # Encoding is set to utf-8 because otherwise `ó` would
        # become `ó`
        child_value = etree.tostring(child, encoding='utf-8')
        # Decode to a string for later regexp and whitespace stripping
        child_value = child_value.decode('utf-8')
        result.append(child_value)
    if remove_namespaces:
        # Best way to remove the namespaces without having the parser complain
        # about producing invalid XML.
        result = [re.sub(' xmlns:?[^=]*="[^"]*"', '', v) for v in result]
    # Join the results and strip any surrounding whitespace
    return u''.join(result).strip()
def set_duration(self, duration):
"""Sets the assessment duration.
arg: duration (osid.calendaring.Duration): assessment
duration
raise: InvalidArgument - ``duration`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentOfferedForm.set_duration_template
if self.get_duration_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_duration(
duration,
self.get_duration_metadata()):
raise errors.InvalidArgument()
map = dict()
map['days'] = duration.days
map['seconds'] = duration.seconds
map['microseconds'] = duration.microseconds
self._my_map['duration'] = map | Sets the assessment duration.
arg: duration (osid.calendaring.Duration): assessment
duration
raise: InvalidArgument - ``duration`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Sets the assessment duration.
arg: duration (osid.calendaring.Duration): assessment
duration
raise: InvalidArgument - ``duration`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
### Response:
def set_duration(self, duration):
"""Sets the assessment duration.
arg: duration (osid.calendaring.Duration): assessment
duration
raise: InvalidArgument - ``duration`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentOfferedForm.set_duration_template
if self.get_duration_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_duration(
duration,
self.get_duration_metadata()):
raise errors.InvalidArgument()
map = dict()
map['days'] = duration.days
map['seconds'] = duration.seconds
map['microseconds'] = duration.microseconds
self._my_map['duration'] = map |
def show_vcs_output_vcs_nodes_vcs_node_info_node_switch_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_vcs = ET.Element("show_vcs")
config = show_vcs
output = ET.SubElement(show_vcs, "output")
vcs_nodes = ET.SubElement(output, "vcs-nodes")
vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
node_switch_mac = ET.SubElement(vcs_node_info, "node-switch-mac")
node_switch_mac.text = kwargs.pop('node_switch_mac')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def show_vcs_output_vcs_nodes_vcs_node_info_node_switch_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_vcs = ET.Element("show_vcs")
config = show_vcs
output = ET.SubElement(show_vcs, "output")
vcs_nodes = ET.SubElement(output, "vcs-nodes")
vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
node_switch_mac = ET.SubElement(vcs_node_info, "node-switch-mac")
node_switch_mac.text = kwargs.pop('node_switch_mac')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_coin_snapshot(fsym, tsym):
"""
Get blockchain information, aggregated data as well as data for the
individual exchanges available for the specified currency pair.
Args:
fsym: FROM symbol.
tsym: TO symbol.
Returns:
The function returns a dictionairy containing blockain as well as
trading information from the different exchanges were the specified
currency pair is available.
{'AggregatedData': dict,
'Algorithm': ...,
'BlockNumber': ...,
'BlockReward': ...,
'Exchanges': [dict1, dict2, ...],
'NetHashesPerSecond': ...,
'ProofType': ...,
'TotalCoinsMined': ...}
dict = {'FLAGS': ...,
'FROMSYMBOL': ...,
'HIGH24HOUR': ...,
'LASTMARKET': ...,
'LASTTRADEID': ...,
'LASTUPDATE': ...,
'LASTVOLUME': ...,
'LASTVOLUMETO': ...,
'LOW24HOUR': ...,
'MARKET': ...,
'OPEN24HOUR': ...,
'PRICE': ...,
'TOSYMBOL': ...,
'TYPE': ...,
'VOLUME24HOUR': ...,
'VOLUME24HOURTO': ...}
"""
# load data
url = build_url('coinsnapshot', fsym=fsym, tsym=tsym)
data = load_data(url)['Data']
return data | Get blockchain information, aggregated data as well as data for the
individual exchanges available for the specified currency pair.
Args:
fsym: FROM symbol.
tsym: TO symbol.
Returns:
The function returns a dictionairy containing blockain as well as
trading information from the different exchanges were the specified
currency pair is available.
{'AggregatedData': dict,
'Algorithm': ...,
'BlockNumber': ...,
'BlockReward': ...,
'Exchanges': [dict1, dict2, ...],
'NetHashesPerSecond': ...,
'ProofType': ...,
'TotalCoinsMined': ...}
dict = {'FLAGS': ...,
'FROMSYMBOL': ...,
'HIGH24HOUR': ...,
'LASTMARKET': ...,
'LASTTRADEID': ...,
'LASTUPDATE': ...,
'LASTVOLUME': ...,
'LASTVOLUMETO': ...,
'LOW24HOUR': ...,
'MARKET': ...,
'OPEN24HOUR': ...,
'PRICE': ...,
'TOSYMBOL': ...,
'TYPE': ...,
'VOLUME24HOUR': ...,
'VOLUME24HOURTO': ...} | Below is the the instruction that describes the task:
### Input:
Get blockchain information, aggregated data as well as data for the
individual exchanges available for the specified currency pair.
Args:
fsym: FROM symbol.
tsym: TO symbol.
Returns:
The function returns a dictionairy containing blockain as well as
trading information from the different exchanges were the specified
currency pair is available.
{'AggregatedData': dict,
'Algorithm': ...,
'BlockNumber': ...,
'BlockReward': ...,
'Exchanges': [dict1, dict2, ...],
'NetHashesPerSecond': ...,
'ProofType': ...,
'TotalCoinsMined': ...}
dict = {'FLAGS': ...,
'FROMSYMBOL': ...,
'HIGH24HOUR': ...,
'LASTMARKET': ...,
'LASTTRADEID': ...,
'LASTUPDATE': ...,
'LASTVOLUME': ...,
'LASTVOLUMETO': ...,
'LOW24HOUR': ...,
'MARKET': ...,
'OPEN24HOUR': ...,
'PRICE': ...,
'TOSYMBOL': ...,
'TYPE': ...,
'VOLUME24HOUR': ...,
'VOLUME24HOURTO': ...}
### Response:
def get_coin_snapshot(fsym, tsym):
"""
Get blockchain information, aggregated data as well as data for the
individual exchanges available for the specified currency pair.
Args:
fsym: FROM symbol.
tsym: TO symbol.
Returns:
The function returns a dictionairy containing blockain as well as
trading information from the different exchanges were the specified
currency pair is available.
{'AggregatedData': dict,
'Algorithm': ...,
'BlockNumber': ...,
'BlockReward': ...,
'Exchanges': [dict1, dict2, ...],
'NetHashesPerSecond': ...,
'ProofType': ...,
'TotalCoinsMined': ...}
dict = {'FLAGS': ...,
'FROMSYMBOL': ...,
'HIGH24HOUR': ...,
'LASTMARKET': ...,
'LASTTRADEID': ...,
'LASTUPDATE': ...,
'LASTVOLUME': ...,
'LASTVOLUMETO': ...,
'LOW24HOUR': ...,
'MARKET': ...,
'OPEN24HOUR': ...,
'PRICE': ...,
'TOSYMBOL': ...,
'TYPE': ...,
'VOLUME24HOUR': ...,
'VOLUME24HOURTO': ...}
"""
# load data
url = build_url('coinsnapshot', fsym=fsym, tsym=tsym)
data = load_data(url)['Data']
return data |
def work_dir(path):
'''
Context menager for executing commands in some working directory.
Returns to the previous wd when finished.
Usage:
>>> with work_dir(path):
... subprocess.call('git status')
'''
starting_directory = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(starting_directory) | Context menager for executing commands in some working directory.
Returns to the previous wd when finished.
Usage:
>>> with work_dir(path):
... subprocess.call('git status') | Below is the the instruction that describes the task:
### Input:
Context menager for executing commands in some working directory.
Returns to the previous wd when finished.
Usage:
>>> with work_dir(path):
... subprocess.call('git status')
### Response:
def work_dir(path):
'''
Context menager for executing commands in some working directory.
Returns to the previous wd when finished.
Usage:
>>> with work_dir(path):
... subprocess.call('git status')
'''
starting_directory = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(starting_directory) |
def _run_startup_files(self):
"""Run files from profile startup directory"""
startup_dir = self.profile_dir.startup_dir
startup_files = glob.glob(os.path.join(startup_dir, '*.py'))
startup_files += glob.glob(os.path.join(startup_dir, '*.ipy'))
if not startup_files:
return
self.log.debug("Running startup files from %s...", startup_dir)
try:
for fname in sorted(startup_files):
self._exec_file(fname)
except:
self.log.warn("Unknown error in handling startup files:")
self.shell.showtraceback() | Run files from profile startup directory | Below is the the instruction that describes the task:
### Input:
Run files from profile startup directory
### Response:
def _run_startup_files(self):
"""Run files from profile startup directory"""
startup_dir = self.profile_dir.startup_dir
startup_files = glob.glob(os.path.join(startup_dir, '*.py'))
startup_files += glob.glob(os.path.join(startup_dir, '*.ipy'))
if not startup_files:
return
self.log.debug("Running startup files from %s...", startup_dir)
try:
for fname in sorted(startup_files):
self._exec_file(fname)
except:
self.log.warn("Unknown error in handling startup files:")
self.shell.showtraceback() |
def libvlc_audio_output_set(p_mi, psz_name):
'''Selects an audio output module.
@note: Any change will take be effect only after playback is stopped and
restarted. Audio output cannot be changed while playing.
@param p_mi: media player.
@param psz_name: name of audio output, use psz_name of See L{AudioOutput}.
@return: 0 if function succeded, -1 on error.
'''
f = _Cfunctions.get('libvlc_audio_output_set', None) or \
_Cfunction('libvlc_audio_output_set', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_char_p)
return f(p_mi, psz_name) | Selects an audio output module.
@note: Any change will take be effect only after playback is stopped and
restarted. Audio output cannot be changed while playing.
@param p_mi: media player.
@param psz_name: name of audio output, use psz_name of See L{AudioOutput}.
@return: 0 if function succeded, -1 on error. | Below is the the instruction that describes the task:
### Input:
Selects an audio output module.
@note: Any change will take be effect only after playback is stopped and
restarted. Audio output cannot be changed while playing.
@param p_mi: media player.
@param psz_name: name of audio output, use psz_name of See L{AudioOutput}.
@return: 0 if function succeded, -1 on error.
### Response:
def libvlc_audio_output_set(p_mi, psz_name):
'''Selects an audio output module.
@note: Any change will take be effect only after playback is stopped and
restarted. Audio output cannot be changed while playing.
@param p_mi: media player.
@param psz_name: name of audio output, use psz_name of See L{AudioOutput}.
@return: 0 if function succeded, -1 on error.
'''
f = _Cfunctions.get('libvlc_audio_output_set', None) or \
_Cfunction('libvlc_audio_output_set', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_char_p)
return f(p_mi, psz_name) |
def notify(self, message):
"""Monitor output from heroku process.
This overrides the base class's `notify`
to make sure that we stop if the status-monitoring thread
has determined that the experiment is complete.
"""
if self.complete:
return HerokuLocalWrapper.MONITOR_STOP
return super(DebugDeployment, self).notify(message) | Monitor output from heroku process.
This overrides the base class's `notify`
to make sure that we stop if the status-monitoring thread
has determined that the experiment is complete. | Below is the the instruction that describes the task:
### Input:
Monitor output from heroku process.
This overrides the base class's `notify`
to make sure that we stop if the status-monitoring thread
has determined that the experiment is complete.
### Response:
def notify(self, message):
"""Monitor output from heroku process.
This overrides the base class's `notify`
to make sure that we stop if the status-monitoring thread
has determined that the experiment is complete.
"""
if self.complete:
return HerokuLocalWrapper.MONITOR_STOP
return super(DebugDeployment, self).notify(message) |
def Random(self):
"""Chooses a random element from this PMF.
Returns:
float value from the Pmf
"""
if len(self.d) == 0:
raise ValueError('Pmf contains no values.')
target = random.random()
total = 0.0
for x, p in self.d.iteritems():
total += p
if total >= target:
return x
# we shouldn't get here
assert False | Chooses a random element from this PMF.
Returns:
float value from the Pmf | Below is the the instruction that describes the task:
### Input:
Chooses a random element from this PMF.
Returns:
float value from the Pmf
### Response:
def Random(self):
"""Chooses a random element from this PMF.
Returns:
float value from the Pmf
"""
if len(self.d) == 0:
raise ValueError('Pmf contains no values.')
target = random.random()
total = 0.0
for x, p in self.d.iteritems():
total += p
if total >= target:
return x
# we shouldn't get here
assert False |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.