code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def mbcs_work_around():
'''
work around for mbcs codec to make "bdist_wininst" work
https://mail.python.org/pipermail/python-list/2012-February/620326.html
'''
import codecs
try:
codecs.lookup('mbcs')
except LookupError:
ascii = codecs.lookup('ascii')
codecs.register(lambda name: {True: ascii}.get(name == 'mbcs')) | work around for mbcs codec to make "bdist_wininst" work
https://mail.python.org/pipermail/python-list/2012-February/620326.html | Below is the the instruction that describes the task:
### Input:
work around for mbcs codec to make "bdist_wininst" work
https://mail.python.org/pipermail/python-list/2012-February/620326.html
### Response:
def mbcs_work_around():
'''
work around for mbcs codec to make "bdist_wininst" work
https://mail.python.org/pipermail/python-list/2012-February/620326.html
'''
import codecs
try:
codecs.lookup('mbcs')
except LookupError:
ascii = codecs.lookup('ascii')
codecs.register(lambda name: {True: ascii}.get(name == 'mbcs')) |
def emit(self, action, payload):
"""Emit action with payload via `requests.post`."""
url = self.get_emit_api(action)
headers = {
'User-Agent': 'rio/%s' % VERSION,
'X-Rio-Protocol': '1',
}
args = dict(
url=url,
json=payload,
headers=headers,
timeout=self.timeout,
)
resp = requests.post(**args)
data = resp.json()
is_success = resp.status_code == 200
result = dict(
is_success=is_success,
message=data['message'],
)
if result['is_success']:
result.update(
event_uuid=data['event']['uuid'],
task_id=data['task']['id'],
)
return result | Emit action with payload via `requests.post`. | Below is the the instruction that describes the task:
### Input:
Emit action with payload via `requests.post`.
### Response:
def emit(self, action, payload):
"""Emit action with payload via `requests.post`."""
url = self.get_emit_api(action)
headers = {
'User-Agent': 'rio/%s' % VERSION,
'X-Rio-Protocol': '1',
}
args = dict(
url=url,
json=payload,
headers=headers,
timeout=self.timeout,
)
resp = requests.post(**args)
data = resp.json()
is_success = resp.status_code == 200
result = dict(
is_success=is_success,
message=data['message'],
)
if result['is_success']:
result.update(
event_uuid=data['event']['uuid'],
task_id=data['task']['id'],
)
return result |
def _fill_capture_regions(data):
"""Fill short-hand specification of BED capture regions.
"""
special_targets = {"sv_regions": ("exons", "transcripts")}
ref_file = dd.get_ref_file(data)
for target in ["variant_regions", "sv_regions", "coverage"]:
val = tz.get_in(["config", "algorithm", target], data)
if val and not os.path.exists(val) and not objectstore.is_remote(val):
installed_vals = []
# Check prioritize directory
for ext in [".bed", ".bed.gz"]:
installed_vals += glob.glob(os.path.normpath(os.path.join(os.path.dirname(ref_file), os.pardir,
"coverage", val + ext)))
if len(installed_vals) == 0:
if target not in special_targets or not val.startswith(special_targets[target]):
raise ValueError("Configuration problem. BED file not found for %s: %s" %
(target, val))
else:
assert len(installed_vals) == 1, installed_vals
data = tz.update_in(data, ["config", "algorithm", target], lambda x: installed_vals[0])
return data | Fill short-hand specification of BED capture regions. | Below is the the instruction that describes the task:
### Input:
Fill short-hand specification of BED capture regions.
### Response:
def _fill_capture_regions(data):
"""Fill short-hand specification of BED capture regions.
"""
special_targets = {"sv_regions": ("exons", "transcripts")}
ref_file = dd.get_ref_file(data)
for target in ["variant_regions", "sv_regions", "coverage"]:
val = tz.get_in(["config", "algorithm", target], data)
if val and not os.path.exists(val) and not objectstore.is_remote(val):
installed_vals = []
# Check prioritize directory
for ext in [".bed", ".bed.gz"]:
installed_vals += glob.glob(os.path.normpath(os.path.join(os.path.dirname(ref_file), os.pardir,
"coverage", val + ext)))
if len(installed_vals) == 0:
if target not in special_targets or not val.startswith(special_targets[target]):
raise ValueError("Configuration problem. BED file not found for %s: %s" %
(target, val))
else:
assert len(installed_vals) == 1, installed_vals
data = tz.update_in(data, ["config", "algorithm", target], lambda x: installed_vals[0])
return data |
def goBack(self):
"""
Goes up one level if possible and returns the url at the current level.
If it cannot go up, then a blank string will be returned.
:return <str>
"""
if not self.canGoBack():
return ''
self._blockStack = True
self._index -= 1
self.emitCurrentChanged()
self._blockStack = False
return self.currentUrl() | Goes up one level if possible and returns the url at the current level.
If it cannot go up, then a blank string will be returned.
:return <str> | Below is the the instruction that describes the task:
### Input:
Goes up one level if possible and returns the url at the current level.
If it cannot go up, then a blank string will be returned.
:return <str>
### Response:
def goBack(self):
"""
Goes up one level if possible and returns the url at the current level.
If it cannot go up, then a blank string will be returned.
:return <str>
"""
if not self.canGoBack():
return ''
self._blockStack = True
self._index -= 1
self.emitCurrentChanged()
self._blockStack = False
return self.currentUrl() |
def find_modules_with_decorators(path,decorator_module,decorator_name):
'''
Finds all the modules decorated with the specified decorator in the path, file or module specified.
Args :
path : All modules in the directory and its sub-directories will be scanned.
decorator_module : Then full name of the module defining the decorator.
decorator_name : The name of the decorator.
'''
modules_paths = []
#If a path to a module file
if path[-3:] == '.py':
modules_paths.append(path)
#If a directory, Get all the .py files
else :
modules_paths += find_file_regex(path,'.*\.py$')
#Return only modules using the decorator
return [module for module in modules_paths if is_module_has_decorated(module,decorator_module,decorator_name)] | Finds all the modules decorated with the specified decorator in the path, file or module specified.
Args :
path : All modules in the directory and its sub-directories will be scanned.
decorator_module : Then full name of the module defining the decorator.
decorator_name : The name of the decorator. | Below is the the instruction that describes the task:
### Input:
Finds all the modules decorated with the specified decorator in the path, file or module specified.
Args :
path : All modules in the directory and its sub-directories will be scanned.
decorator_module : Then full name of the module defining the decorator.
decorator_name : The name of the decorator.
### Response:
def find_modules_with_decorators(path,decorator_module,decorator_name):
'''
Finds all the modules decorated with the specified decorator in the path, file or module specified.
Args :
path : All modules in the directory and its sub-directories will be scanned.
decorator_module : Then full name of the module defining the decorator.
decorator_name : The name of the decorator.
'''
modules_paths = []
#If a path to a module file
if path[-3:] == '.py':
modules_paths.append(path)
#If a directory, Get all the .py files
else :
modules_paths += find_file_regex(path,'.*\.py$')
#Return only modules using the decorator
return [module for module in modules_paths if is_module_has_decorated(module,decorator_module,decorator_name)] |
def from_args(cls, args):
"""
Initialize a WorkflowConfigParser instance using the command line values
parsed in args. args must contain the values provided by the
workflow_command_line_group() function. If you are not using the standard
workflow command line interface, you should probably initialize directly
using __init__()
Parameters
-----------
args : argparse.ArgumentParser
The command line arguments parsed by argparse
"""
# Identify the config files
confFiles = []
# files and URLs to resolve
if args.config_files:
confFiles += args.config_files
# Identify the deletes
confDeletes = args.config_delete or []
# and parse them
parsedDeletes = []
for delete in confDeletes:
splitDelete = delete.split(":")
if len(splitDelete) > 2:
raise ValueError(
"Deletes must be of format section:option "
"or section. Cannot parse %s." % str(delete))
else:
parsedDeletes.append(tuple(splitDelete))
# Identify the overrides
confOverrides = args.config_overrides or []
# and parse them
parsedOverrides = []
for override in confOverrides:
splitOverride = override.split(":")
if len(splitOverride) == 3:
parsedOverrides.append(tuple(splitOverride))
elif len(splitOverride) == 2:
parsedOverrides.append(tuple(splitOverride + [""]))
elif len(splitOverride) > 3:
# Cannot have colons in either section name or variable name
# but the value may contain colons
rec_value = ':'.join(splitOverride[2:])
parsedOverrides.append(tuple(splitOverride[:2] + [rec_value]))
else:
raise ValueError(
"Overrides must be of format section:option:value "
"or section:option. Cannot parse %s." % str(override))
return cls(confFiles, parsedOverrides, None, parsedDeletes) | Initialize a WorkflowConfigParser instance using the command line values
parsed in args. args must contain the values provided by the
workflow_command_line_group() function. If you are not using the standard
workflow command line interface, you should probably initialize directly
using __init__()
Parameters
-----------
args : argparse.ArgumentParser
The command line arguments parsed by argparse | Below is the the instruction that describes the task:
### Input:
Initialize a WorkflowConfigParser instance using the command line values
parsed in args. args must contain the values provided by the
workflow_command_line_group() function. If you are not using the standard
workflow command line interface, you should probably initialize directly
using __init__()
Parameters
-----------
args : argparse.ArgumentParser
The command line arguments parsed by argparse
### Response:
def from_args(cls, args):
"""
Initialize a WorkflowConfigParser instance using the command line values
parsed in args. args must contain the values provided by the
workflow_command_line_group() function. If you are not using the standard
workflow command line interface, you should probably initialize directly
using __init__()
Parameters
-----------
args : argparse.ArgumentParser
The command line arguments parsed by argparse
"""
# Identify the config files
confFiles = []
# files and URLs to resolve
if args.config_files:
confFiles += args.config_files
# Identify the deletes
confDeletes = args.config_delete or []
# and parse them
parsedDeletes = []
for delete in confDeletes:
splitDelete = delete.split(":")
if len(splitDelete) > 2:
raise ValueError(
"Deletes must be of format section:option "
"or section. Cannot parse %s." % str(delete))
else:
parsedDeletes.append(tuple(splitDelete))
# Identify the overrides
confOverrides = args.config_overrides or []
# and parse them
parsedOverrides = []
for override in confOverrides:
splitOverride = override.split(":")
if len(splitOverride) == 3:
parsedOverrides.append(tuple(splitOverride))
elif len(splitOverride) == 2:
parsedOverrides.append(tuple(splitOverride + [""]))
elif len(splitOverride) > 3:
# Cannot have colons in either section name or variable name
# but the value may contain colons
rec_value = ':'.join(splitOverride[2:])
parsedOverrides.append(tuple(splitOverride[:2] + [rec_value]))
else:
raise ValueError(
"Overrides must be of format section:option:value "
"or section:option. Cannot parse %s." % str(override))
return cls(confFiles, parsedOverrides, None, parsedDeletes) |
def find_case_control(items):
"""Find case/control items in a population of multiple samples.
"""
cases = []
controls = []
for data in items:
if population.get_affected_status(data) == 1:
controls.append(data)
else:
cases.append(data)
return cases, controls | Find case/control items in a population of multiple samples. | Below is the the instruction that describes the task:
### Input:
Find case/control items in a population of multiple samples.
### Response:
def find_case_control(items):
"""Find case/control items in a population of multiple samples.
"""
cases = []
controls = []
for data in items:
if population.get_affected_status(data) == 1:
controls.append(data)
else:
cases.append(data)
return cases, controls |
def get_unpermitted_fields(self):
"""
Gives unpermitted fields for current context/user.
Returns:
List of unpermitted field names.
"""
return (self._unpermitted_fields if self._is_unpermitted_fields_set else
self._apply_cell_filters(self._context)) | Gives unpermitted fields for current context/user.
Returns:
List of unpermitted field names. | Below is the the instruction that describes the task:
### Input:
Gives unpermitted fields for current context/user.
Returns:
List of unpermitted field names.
### Response:
def get_unpermitted_fields(self):
"""
Gives unpermitted fields for current context/user.
Returns:
List of unpermitted field names.
"""
return (self._unpermitted_fields if self._is_unpermitted_fields_set else
self._apply_cell_filters(self._context)) |
def sql_like_fragments(self) -> List[str]:
"""
Returns all the string literals to which a database column should be
compared using the SQL ``LIKE`` operator, to match this drug.
This isn't as accurate as the regex, but ``LIKE`` can do less.
``LIKE`` uses the wildcards ``?`` and ``%``.
"""
if self._sql_like_fragments is None:
self._sql_like_fragments = []
for p in list(set(self.all_generics + self.alternatives)):
self._sql_like_fragments.extend(self.regex_to_sql_like(p))
return self._sql_like_fragments | Returns all the string literals to which a database column should be
compared using the SQL ``LIKE`` operator, to match this drug.
This isn't as accurate as the regex, but ``LIKE`` can do less.
``LIKE`` uses the wildcards ``?`` and ``%``. | Below is the the instruction that describes the task:
### Input:
Returns all the string literals to which a database column should be
compared using the SQL ``LIKE`` operator, to match this drug.
This isn't as accurate as the regex, but ``LIKE`` can do less.
``LIKE`` uses the wildcards ``?`` and ``%``.
### Response:
def sql_like_fragments(self) -> List[str]:
"""
Returns all the string literals to which a database column should be
compared using the SQL ``LIKE`` operator, to match this drug.
This isn't as accurate as the regex, but ``LIKE`` can do less.
``LIKE`` uses the wildcards ``?`` and ``%``.
"""
if self._sql_like_fragments is None:
self._sql_like_fragments = []
for p in list(set(self.all_generics + self.alternatives)):
self._sql_like_fragments.extend(self.regex_to_sql_like(p))
return self._sql_like_fragments |
def getWindowByTitle(self, wildcard, order=0):
""" Returns a handle for the first window that matches the provided "wildcard" regex """
for w in self._get_window_list():
if "kCGWindowName" in w and re.search(wildcard, w["kCGWindowName"], flags=re.I):
# Matches - make sure we get it in the correct order
if order == 0:
return w["kCGWindowNumber"]
else:
order -= 1 | Returns a handle for the first window that matches the provided "wildcard" regex | Below is the the instruction that describes the task:
### Input:
Returns a handle for the first window that matches the provided "wildcard" regex
### Response:
def getWindowByTitle(self, wildcard, order=0):
""" Returns a handle for the first window that matches the provided "wildcard" regex """
for w in self._get_window_list():
if "kCGWindowName" in w and re.search(wildcard, w["kCGWindowName"], flags=re.I):
# Matches - make sure we get it in the correct order
if order == 0:
return w["kCGWindowNumber"]
else:
order -= 1 |
def register_event(self, direction, verb, child_fn, priority=10):
"""Register an event with all servers.
Args:
direction (str): `in`, `out`, `both`, or `girc`.
verb (str): Event name, `all`, or `raw`.
child_fn (function): Handler function.
priority (int): Handler priority (lower priority executes first).
Note: `all` will not match `raw` events. If you wish to receive both
`raw` and all other events, you need to register these separately.
"""
event_managers = []
if direction in ('in', 'both'):
event_managers.append(self._events_in)
if direction in ('out', 'both'):
event_managers.append(self._events_out)
if direction == 'girc':
event_managers.append(self._girc_events)
for event_manager in event_managers:
event_manager.register(verb, child_fn, priority=priority) | Register an event with all servers.
Args:
direction (str): `in`, `out`, `both`, or `girc`.
verb (str): Event name, `all`, or `raw`.
child_fn (function): Handler function.
priority (int): Handler priority (lower priority executes first).
Note: `all` will not match `raw` events. If you wish to receive both
`raw` and all other events, you need to register these separately. | Below is the the instruction that describes the task:
### Input:
Register an event with all servers.
Args:
direction (str): `in`, `out`, `both`, or `girc`.
verb (str): Event name, `all`, or `raw`.
child_fn (function): Handler function.
priority (int): Handler priority (lower priority executes first).
Note: `all` will not match `raw` events. If you wish to receive both
`raw` and all other events, you need to register these separately.
### Response:
def register_event(self, direction, verb, child_fn, priority=10):
"""Register an event with all servers.
Args:
direction (str): `in`, `out`, `both`, or `girc`.
verb (str): Event name, `all`, or `raw`.
child_fn (function): Handler function.
priority (int): Handler priority (lower priority executes first).
Note: `all` will not match `raw` events. If you wish to receive both
`raw` and all other events, you need to register these separately.
"""
event_managers = []
if direction in ('in', 'both'):
event_managers.append(self._events_in)
if direction in ('out', 'both'):
event_managers.append(self._events_out)
if direction == 'girc':
event_managers.append(self._girc_events)
for event_manager in event_managers:
event_manager.register(verb, child_fn, priority=priority) |
def SummaryMetadata(self, run, tag):
"""Return the summary metadata for the given tag on the given run.
Args:
run: A string name of the run for which summary metadata is to be
retrieved.
tag: A string name of the tag whose summary metadata is to be
retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
A `SummaryMetadata` protobuf.
"""
accumulator = self.GetAccumulator(run)
return accumulator.SummaryMetadata(tag) | Return the summary metadata for the given tag on the given run.
Args:
run: A string name of the run for which summary metadata is to be
retrieved.
tag: A string name of the tag whose summary metadata is to be
retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
A `SummaryMetadata` protobuf. | Below is the the instruction that describes the task:
### Input:
Return the summary metadata for the given tag on the given run.
Args:
run: A string name of the run for which summary metadata is to be
retrieved.
tag: A string name of the tag whose summary metadata is to be
retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
A `SummaryMetadata` protobuf.
### Response:
def SummaryMetadata(self, run, tag):
"""Return the summary metadata for the given tag on the given run.
Args:
run: A string name of the run for which summary metadata is to be
retrieved.
tag: A string name of the tag whose summary metadata is to be
retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
A `SummaryMetadata` protobuf.
"""
accumulator = self.GetAccumulator(run)
return accumulator.SummaryMetadata(tag) |
def run_compare_gold_standard(in_prefix, in_type, out_prefix, base_dir,
options):
"""Compares with a gold standard data set (compare_gold_standard.
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``bfile``).
This function calls the :py:mod:`pyGenClean.Misc.compare_gold_standard`
module. The required file type for this module is ``bfile``, hence the need
to use the :py:func:`check_input_files` to check if the file input file
type is the good one, or to create it if needed.
.. note::
The :py:mod:`pyGenClean.Misc.compare_gold_standard` module doesn't
return usable output files. Hence, this function returns the input file
prefix and its type.
"""
# Creating the output directory
os.mkdir(out_prefix)
# We know we need bfile
required_type = "bfile"
check_input_files(in_prefix, in_type, required_type)
# We need to inject the name of the input file and the name of the output
# prefix
script_prefix = os.path.join(out_prefix, "compare_with_gold")
options += ["--{}".format(required_type), in_prefix,
"--out", script_prefix]
# We run the script
try:
compare_gold_standard.main(options)
except compare_gold_standard.ProgramError as e:
msg = "compare_gold_standard: {}".format(e)
raise ProgramError(msg)
# We create the LaTeX summary
latex_file = os.path.join(script_prefix + ".summary.tex")
try:
with open(latex_file, "w") as o_file:
print >>o_file, latex_template.subsection(
compare_gold_standard.pretty_name
)
except IOError:
msg = "{}: cannot write LaTeX summary".format(latex_file)
raise ProgramError(msg)
# We know this step doesn't produce an new data set, so we return the old
# prefix and the old in_type
return _StepResult(
next_file=in_prefix,
next_file_type=required_type,
latex_summary=latex_file,
description=compare_gold_standard.desc,
long_description=compare_gold_standard.long_desc,
graph_path=None,
) | Compares with a gold standard data set (compare_gold_standard.
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``bfile``).
This function calls the :py:mod:`pyGenClean.Misc.compare_gold_standard`
module. The required file type for this module is ``bfile``, hence the need
to use the :py:func:`check_input_files` to check if the file input file
type is the good one, or to create it if needed.
.. note::
The :py:mod:`pyGenClean.Misc.compare_gold_standard` module doesn't
return usable output files. Hence, this function returns the input file
prefix and its type. | Below is the the instruction that describes the task:
### Input:
Compares with a gold standard data set (compare_gold_standard.
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``bfile``).
This function calls the :py:mod:`pyGenClean.Misc.compare_gold_standard`
module. The required file type for this module is ``bfile``, hence the need
to use the :py:func:`check_input_files` to check if the file input file
type is the good one, or to create it if needed.
.. note::
The :py:mod:`pyGenClean.Misc.compare_gold_standard` module doesn't
return usable output files. Hence, this function returns the input file
prefix and its type.
### Response:
def run_compare_gold_standard(in_prefix, in_type, out_prefix, base_dir,
options):
"""Compares with a gold standard data set (compare_gold_standard.
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``bfile``).
This function calls the :py:mod:`pyGenClean.Misc.compare_gold_standard`
module. The required file type for this module is ``bfile``, hence the need
to use the :py:func:`check_input_files` to check if the file input file
type is the good one, or to create it if needed.
.. note::
The :py:mod:`pyGenClean.Misc.compare_gold_standard` module doesn't
return usable output files. Hence, this function returns the input file
prefix and its type.
"""
# Creating the output directory
os.mkdir(out_prefix)
# We know we need bfile
required_type = "bfile"
check_input_files(in_prefix, in_type, required_type)
# We need to inject the name of the input file and the name of the output
# prefix
script_prefix = os.path.join(out_prefix, "compare_with_gold")
options += ["--{}".format(required_type), in_prefix,
"--out", script_prefix]
# We run the script
try:
compare_gold_standard.main(options)
except compare_gold_standard.ProgramError as e:
msg = "compare_gold_standard: {}".format(e)
raise ProgramError(msg)
# We create the LaTeX summary
latex_file = os.path.join(script_prefix + ".summary.tex")
try:
with open(latex_file, "w") as o_file:
print >>o_file, latex_template.subsection(
compare_gold_standard.pretty_name
)
except IOError:
msg = "{}: cannot write LaTeX summary".format(latex_file)
raise ProgramError(msg)
# We know this step doesn't produce an new data set, so we return the old
# prefix and the old in_type
return _StepResult(
next_file=in_prefix,
next_file_type=required_type,
latex_summary=latex_file,
description=compare_gold_standard.desc,
long_description=compare_gold_standard.long_desc,
graph_path=None,
) |
def remove_member_from_group(self, group_id, user_id):
"""
Add a user to a group as a member
:param group_id:
:param user_id:
"""
url = self.TEAM_MEMBERS_URL % (group_id, user_id)
connection = Connection(self.token)
connection.set_url(self.production, url)
return connection.delete_request() | Add a user to a group as a member
:param group_id:
:param user_id: | Below is the the instruction that describes the task:
### Input:
Add a user to a group as a member
:param group_id:
:param user_id:
### Response:
def remove_member_from_group(self, group_id, user_id):
"""
Add a user to a group as a member
:param group_id:
:param user_id:
"""
url = self.TEAM_MEMBERS_URL % (group_id, user_id)
connection = Connection(self.token)
connection.set_url(self.production, url)
return connection.delete_request() |
def to_content_range_header(self, length):
"""Converts the object into `Content-Range` HTTP header,
based on given length
"""
range_for_length = self.range_for_length(length)
if range_for_length is not None:
return "%s %d-%d/%d" % (
self.units,
range_for_length[0],
range_for_length[1] - 1,
length,
)
return None | Converts the object into `Content-Range` HTTP header,
based on given length | Below is the the instruction that describes the task:
### Input:
Converts the object into `Content-Range` HTTP header,
based on given length
### Response:
def to_content_range_header(self, length):
"""Converts the object into `Content-Range` HTTP header,
based on given length
"""
range_for_length = self.range_for_length(length)
if range_for_length is not None:
return "%s %d-%d/%d" % (
self.units,
range_for_length[0],
range_for_length[1] - 1,
length,
)
return None |
def get_company_user(self, email):
"""Get company user based on email.
:param email: address of contact
:type email: ``str``, ``unicode``
:rtype: ``dict`` with contact information
"""
users = self.get_company_users()
for user in users:
if user['email'] == email:
return user
msg = 'No user with email: "{email}" associated with this company.'
raise FMBaseError(msg.format(email=email)) | Get company user based on email.
:param email: address of contact
:type email: ``str``, ``unicode``
:rtype: ``dict`` with contact information | Below is the the instruction that describes the task:
### Input:
Get company user based on email.
:param email: address of contact
:type email: ``str``, ``unicode``
:rtype: ``dict`` with contact information
### Response:
def get_company_user(self, email):
"""Get company user based on email.
:param email: address of contact
:type email: ``str``, ``unicode``
:rtype: ``dict`` with contact information
"""
users = self.get_company_users()
for user in users:
if user['email'] == email:
return user
msg = 'No user with email: "{email}" associated with this company.'
raise FMBaseError(msg.format(email=email)) |
def scramble_value(self, value):
"""Duck-type value and scramble appropriately"""
try:
type, format = typeof_rave_data(value)
if type == 'float':
i, f = value.split('.')
return self.scramble_float(len(value) - 1, len(f))
elif type == 'int':
return self.scramble_int(len(value))
elif type == 'date':
return self.scramble_date(value, format)
elif type == 'time':
return self.scramble_time(format)
elif type == 'string':
return self.scramble_string(len(value))
else:
return value
except:
return "" | Duck-type value and scramble appropriately | Below is the the instruction that describes the task:
### Input:
Duck-type value and scramble appropriately
### Response:
def scramble_value(self, value):
"""Duck-type value and scramble appropriately"""
try:
type, format = typeof_rave_data(value)
if type == 'float':
i, f = value.split('.')
return self.scramble_float(len(value) - 1, len(f))
elif type == 'int':
return self.scramble_int(len(value))
elif type == 'date':
return self.scramble_date(value, format)
elif type == 'time':
return self.scramble_time(format)
elif type == 'string':
return self.scramble_string(len(value))
else:
return value
except:
return "" |
def get_list(self, **kwargs):
"""Get list of items from Model
---
get:
parameters:
- $ref: '#/components/parameters/get_list_schema'
responses:
200:
description: Items from Model
content:
application/json:
schema:
type: object
properties:
label_columns:
type: object
list_columns:
type: array
items:
type: string
description_columns:
type: object
list_title:
type: string
ids:
type: array
items:
type: string
order_columns:
type: array
items:
type: string
result:
type: array
items:
$ref: '#/components/schemas/{{self.__class__.__name__}}.get_list' # noqa
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
_response = dict()
_args = kwargs.get("rison", {})
# handle select columns
select_cols = _args.get(API_SELECT_COLUMNS_RIS_KEY, [])
_pruned_select_cols = [col for col in select_cols if col in self.list_columns]
self.set_response_key_mappings(
_response,
self.get_list,
_args,
**{API_SELECT_COLUMNS_RIS_KEY: _pruned_select_cols}
)
if _pruned_select_cols:
_list_model_schema = self.model2schemaconverter.convert(_pruned_select_cols)
else:
_list_model_schema = self.list_model_schema
# handle filters
joined_filters = self._handle_filters_args(_args)
# handle base order
order_column, order_direction = self._handle_order_args(_args)
# handle pagination
page_index, page_size = self._handle_page_args(_args)
# Make the query
query_select_columns = _pruned_select_cols or self.list_columns
count, lst = self.datamodel.query(
joined_filters,
order_column,
order_direction,
page=page_index,
page_size=page_size,
select_columns=query_select_columns,
)
pks = self.datamodel.get_keys(lst)
_response[API_RESULT_RES_KEY] = _list_model_schema.dump(lst, many=True).data
_response["ids"] = pks
_response["count"] = count
self.pre_get_list(_response)
return self.response(200, **_response) | Get list of items from Model
---
get:
parameters:
- $ref: '#/components/parameters/get_list_schema'
responses:
200:
description: Items from Model
content:
application/json:
schema:
type: object
properties:
label_columns:
type: object
list_columns:
type: array
items:
type: string
description_columns:
type: object
list_title:
type: string
ids:
type: array
items:
type: string
order_columns:
type: array
items:
type: string
result:
type: array
items:
$ref: '#/components/schemas/{{self.__class__.__name__}}.get_list' # noqa
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500' | Below is the the instruction that describes the task:
### Input:
Get list of items from Model
---
get:
parameters:
- $ref: '#/components/parameters/get_list_schema'
responses:
200:
description: Items from Model
content:
application/json:
schema:
type: object
properties:
label_columns:
type: object
list_columns:
type: array
items:
type: string
description_columns:
type: object
list_title:
type: string
ids:
type: array
items:
type: string
order_columns:
type: array
items:
type: string
result:
type: array
items:
$ref: '#/components/schemas/{{self.__class__.__name__}}.get_list' # noqa
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
### Response:
def get_list(self, **kwargs):
"""Get list of items from Model
---
get:
parameters:
- $ref: '#/components/parameters/get_list_schema'
responses:
200:
description: Items from Model
content:
application/json:
schema:
type: object
properties:
label_columns:
type: object
list_columns:
type: array
items:
type: string
description_columns:
type: object
list_title:
type: string
ids:
type: array
items:
type: string
order_columns:
type: array
items:
type: string
result:
type: array
items:
$ref: '#/components/schemas/{{self.__class__.__name__}}.get_list' # noqa
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
_response = dict()
_args = kwargs.get("rison", {})
# handle select columns
select_cols = _args.get(API_SELECT_COLUMNS_RIS_KEY, [])
_pruned_select_cols = [col for col in select_cols if col in self.list_columns]
self.set_response_key_mappings(
_response,
self.get_list,
_args,
**{API_SELECT_COLUMNS_RIS_KEY: _pruned_select_cols}
)
if _pruned_select_cols:
_list_model_schema = self.model2schemaconverter.convert(_pruned_select_cols)
else:
_list_model_schema = self.list_model_schema
# handle filters
joined_filters = self._handle_filters_args(_args)
# handle base order
order_column, order_direction = self._handle_order_args(_args)
# handle pagination
page_index, page_size = self._handle_page_args(_args)
# Make the query
query_select_columns = _pruned_select_cols or self.list_columns
count, lst = self.datamodel.query(
joined_filters,
order_column,
order_direction,
page=page_index,
page_size=page_size,
select_columns=query_select_columns,
)
pks = self.datamodel.get_keys(lst)
_response[API_RESULT_RES_KEY] = _list_model_schema.dump(lst, many=True).data
_response["ids"] = pks
_response["count"] = count
self.pre_get_list(_response)
return self.response(200, **_response) |
def _get_gradient_log_pdf(self):
"""
Method that finds gradient and its log at position
"""
sub_vec = self.variable_assignments - self.model.mean.flatten()
grad = - np.dot(self.model.precision_matrix, sub_vec)
log_pdf = 0.5 * np.dot(sub_vec, grad)
return grad, log_pdf | Method that finds gradient and its log at position | Below is the the instruction that describes the task:
### Input:
Method that finds gradient and its log at position
### Response:
def _get_gradient_log_pdf(self):
"""
Method that finds gradient and its log at position
"""
sub_vec = self.variable_assignments - self.model.mean.flatten()
grad = - np.dot(self.model.precision_matrix, sub_vec)
log_pdf = 0.5 * np.dot(sub_vec, grad)
return grad, log_pdf |
def parse_url(self):
""" Parses a URL of the form:
- ws://host[:port][path]
- wss://host[:port][path]
- ws+unix:///path/to/my.socket
"""
self.scheme = None
self.resource = None
self.host = None
self.port = None
if self.url is None:
return
scheme, url = self.url.split(":", 1)
parsed = urlsplit(url, scheme="http")
if parsed.hostname:
self.host = parsed.hostname
elif '+unix' in scheme:
self.host = 'localhost'
else:
raise ValueError("Invalid hostname from: %s", self.url)
if parsed.port:
self.port = parsed.port
if scheme == "ws":
if not self.port:
self.port = 8080
elif scheme == "wss":
if not self.port:
self.port = 443
elif scheme in ('ws+unix', 'wss+unix'):
pass
else:
raise ValueError("Invalid scheme: %s" % scheme)
if parsed.path:
resource = parsed.path
else:
resource = "/"
if '+unix' in scheme:
self.unix_socket_path = resource
resource = '/'
if parsed.query:
resource += "?" + parsed.query
self.scheme = scheme
self.resource = resource | Parses a URL of the form:
- ws://host[:port][path]
- wss://host[:port][path]
- ws+unix:///path/to/my.socket | Below is the the instruction that describes the task:
### Input:
Parses a URL of the form:
- ws://host[:port][path]
- wss://host[:port][path]
- ws+unix:///path/to/my.socket
### Response:
def parse_url(self):
""" Parses a URL of the form:
- ws://host[:port][path]
- wss://host[:port][path]
- ws+unix:///path/to/my.socket
"""
self.scheme = None
self.resource = None
self.host = None
self.port = None
if self.url is None:
return
scheme, url = self.url.split(":", 1)
parsed = urlsplit(url, scheme="http")
if parsed.hostname:
self.host = parsed.hostname
elif '+unix' in scheme:
self.host = 'localhost'
else:
raise ValueError("Invalid hostname from: %s", self.url)
if parsed.port:
self.port = parsed.port
if scheme == "ws":
if not self.port:
self.port = 8080
elif scheme == "wss":
if not self.port:
self.port = 443
elif scheme in ('ws+unix', 'wss+unix'):
pass
else:
raise ValueError("Invalid scheme: %s" % scheme)
if parsed.path:
resource = parsed.path
else:
resource = "/"
if '+unix' in scheme:
self.unix_socket_path = resource
resource = '/'
if parsed.query:
resource += "?" + parsed.query
self.scheme = scheme
self.resource = resource |
def set_state(self, state, enable):
"""Set the state."""
is_enabled = self.get_state(state)
if is_enabled == enable:
return True
key = None
desired_states = [{'state': state, 'enabled': not is_enabled}]
if state == States.FILTER_LOW_SPEED:
if not self._multi_speed_pump:
return False
# Send the FILTER key once.
# If the pump is in high speed, it wil switch to low speed.
# If the pump is off the retry mechanism will send an additional
# FILTER key to switch into low speed.
# If the pump is in low speed then we pretend the pump is off;
# the retry mechanism will send an additional FILTER key
# to switch into high speed.
key = Keys.FILTER
desired_states.append({'state': States.FILTER, 'enabled': True})
else:
# See if this state has a corresponding Key
try:
key = Keys[state.name]
except KeyError:
# TODO: send the appropriate combination of keys
# to enable the state
return False
frame = self._get_key_event_frame(key)
# Queue it to send immediately following the reception
# of a keep-alive packet in an attempt to avoid bus collisions.
self._send_queue.put({'frame': frame, 'desired_states': desired_states,
'retries': 10})
return True | Set the state. | Below is the the instruction that describes the task:
### Input:
Set the state.
### Response:
def set_state(self, state, enable):
"""Set the state."""
is_enabled = self.get_state(state)
if is_enabled == enable:
return True
key = None
desired_states = [{'state': state, 'enabled': not is_enabled}]
if state == States.FILTER_LOW_SPEED:
if not self._multi_speed_pump:
return False
# Send the FILTER key once.
# If the pump is in high speed, it wil switch to low speed.
# If the pump is off the retry mechanism will send an additional
# FILTER key to switch into low speed.
# If the pump is in low speed then we pretend the pump is off;
# the retry mechanism will send an additional FILTER key
# to switch into high speed.
key = Keys.FILTER
desired_states.append({'state': States.FILTER, 'enabled': True})
else:
# See if this state has a corresponding Key
try:
key = Keys[state.name]
except KeyError:
# TODO: send the appropriate combination of keys
# to enable the state
return False
frame = self._get_key_event_frame(key)
# Queue it to send immediately following the reception
# of a keep-alive packet in an attempt to avoid bus collisions.
self._send_queue.put({'frame': frame, 'desired_states': desired_states,
'retries': 10})
return True |
def looks_like_issubclass(obj, classname):
""" Return True if the object has a class or superclass with the given class
name.
Ignores old-style classes.
"""
t = obj
if t.__name__ == classname:
return True
for klass in t.__mro__:
if klass.__name__ == classname:
return True
return False | Return True if the object has a class or superclass with the given class
name.
Ignores old-style classes. | Below is the the instruction that describes the task:
### Input:
Return True if the object has a class or superclass with the given class
name.
Ignores old-style classes.
### Response:
def looks_like_issubclass(obj, classname):
""" Return True if the object has a class or superclass with the given class
name.
Ignores old-style classes.
"""
t = obj
if t.__name__ == classname:
return True
for klass in t.__mro__:
if klass.__name__ == classname:
return True
return False |
def index(self, collection, docs, params=None, min_rf=None, **kwargs):
"""
:param str collection: The name of the collection for the request.
:param docs list docs: List of dicts. ex: [{"title": "testing solr indexing", "id": "test1"}]
:param min_rf int min_rf: Required number of replicas to write to'
Sends supplied list of dicts to solr for indexing. ::
>>> docs = [{'id':'changeme','field1':'value1'}, {'id':'changeme1','field2':'value2'}]
>>> solr.index('SolrClient_unittest', docs)
"""
data = json.dumps(docs)
return self.index_json(collection, data, params, min_rf=min_rf, **kwargs) | :param str collection: The name of the collection for the request.
:param docs list docs: List of dicts. ex: [{"title": "testing solr indexing", "id": "test1"}]
:param min_rf int min_rf: Required number of replicas to write to'
Sends supplied list of dicts to solr for indexing. ::
>>> docs = [{'id':'changeme','field1':'value1'}, {'id':'changeme1','field2':'value2'}]
>>> solr.index('SolrClient_unittest', docs) | Below is the the instruction that describes the task:
### Input:
:param str collection: The name of the collection for the request.
:param docs list docs: List of dicts. ex: [{"title": "testing solr indexing", "id": "test1"}]
:param min_rf int min_rf: Required number of replicas to write to'
Sends supplied list of dicts to solr for indexing. ::
>>> docs = [{'id':'changeme','field1':'value1'}, {'id':'changeme1','field2':'value2'}]
>>> solr.index('SolrClient_unittest', docs)
### Response:
def index(self, collection, docs, params=None, min_rf=None, **kwargs):
"""
:param str collection: The name of the collection for the request.
:param docs list docs: List of dicts. ex: [{"title": "testing solr indexing", "id": "test1"}]
:param min_rf int min_rf: Required number of replicas to write to'
Sends supplied list of dicts to solr for indexing. ::
>>> docs = [{'id':'changeme','field1':'value1'}, {'id':'changeme1','field2':'value2'}]
>>> solr.index('SolrClient_unittest', docs)
"""
data = json.dumps(docs)
return self.index_json(collection, data, params, min_rf=min_rf, **kwargs) |
def next(self):
"""Advance the cursor."""
if self.__empty:
raise StopIteration
if len(self.__data) or self._refresh():
if self.__manipulate:
_db = self.__collection.database
return _db._fix_outgoing(self.__data.popleft(),
self.__collection)
else:
return self.__data.popleft()
else:
raise StopIteration | Advance the cursor. | Below is the the instruction that describes the task:
### Input:
Advance the cursor.
### Response:
def next(self):
"""Advance the cursor."""
if self.__empty:
raise StopIteration
if len(self.__data) or self._refresh():
if self.__manipulate:
_db = self.__collection.database
return _db._fix_outgoing(self.__data.popleft(),
self.__collection)
else:
return self.__data.popleft()
else:
raise StopIteration |
def output(data, **kwargs): # pylint: disable=unused-argument
'''
Rather basic....
'''
if not isinstance(data, six.string_types):
data = six.text_type(data)
return salt.utils.stringutils.to_unicode(data) | Rather basic.... | Below is the the instruction that describes the task:
### Input:
Rather basic....
### Response:
def output(data, **kwargs): # pylint: disable=unused-argument
'''
Rather basic....
'''
if not isinstance(data, six.string_types):
data = six.text_type(data)
return salt.utils.stringutils.to_unicode(data) |
def datetime_to_ms(d):
"""Convert a Python datetime object to a millisecond epoch (UTC) time value."""
try:
millisecond = d.microsecond // 1000
return calendar.timegm(_add_tzone(d).utctimetuple()) * 1000 + millisecond
except AttributeError:
raise TypeError('expect Python datetime object, not %s' % type(d)) | Convert a Python datetime object to a millisecond epoch (UTC) time value. | Below is the the instruction that describes the task:
### Input:
Convert a Python datetime object to a millisecond epoch (UTC) time value.
### Response:
def datetime_to_ms(d):
"""Convert a Python datetime object to a millisecond epoch (UTC) time value."""
try:
millisecond = d.microsecond // 1000
return calendar.timegm(_add_tzone(d).utctimetuple()) * 1000 + millisecond
except AttributeError:
raise TypeError('expect Python datetime object, not %s' % type(d)) |
def get_default_env(self):
"""
Vanilla Ansible local commands execute with an environment inherited
from WorkerProcess, we must emulate that.
"""
return dict_diff(
old=ansible_mitogen.process.MuxProcess.original_env,
new=os.environ,
) | Vanilla Ansible local commands execute with an environment inherited
from WorkerProcess, we must emulate that. | Below is the the instruction that describes the task:
### Input:
Vanilla Ansible local commands execute with an environment inherited
from WorkerProcess, we must emulate that.
### Response:
def get_default_env(self):
"""
Vanilla Ansible local commands execute with an environment inherited
from WorkerProcess, we must emulate that.
"""
return dict_diff(
old=ansible_mitogen.process.MuxProcess.original_env,
new=os.environ,
) |
def dispatchlist(self, *, author=None, category=None,
subcategory=None, sort='new'):
"""Find dispatches by certain criteria.
Parameters
----------
author : str
Name of the nation authoring the dispatch.
category : str
Dispatch's primary category.
subcategory : str
Dispatch's secondary category.
sort : str
Sort order, 'new' or 'best'.
Returns
-------
an :class:`ApiQuery` of a list of :class:`DispatchThumbnail`
"""
params = {'sort': sort}
if author:
params['dispatchauthor'] = author
# Here we do need to ensure that our categories are valid, cause
# NS just ignores the categories it doesn't recognise and returns
# whatever it feels like.
if category and subcategory:
if (category not in dispatch_categories or
subcategory not in dispatch_categories[category]):
raise ValueError('Invalid category/subcategory')
params['dispatchcategory'] = f'{category}:{subcategory}'
elif category:
if category not in dispatch_categories:
raise ValueError('Invalid category')
params['dispatchcategory'] = category
else:
raise ValueError('Cannot request subcategory without category')
@api_query('dispatchlist', **params)
async def result(_, root):
return [
DispatchThumbnail._from_elem(elem)
for elem in root.find('DISPATCHLIST')
]
return result(self) | Find dispatches by certain criteria.
Parameters
----------
author : str
Name of the nation authoring the dispatch.
category : str
Dispatch's primary category.
subcategory : str
Dispatch's secondary category.
sort : str
Sort order, 'new' or 'best'.
Returns
-------
an :class:`ApiQuery` of a list of :class:`DispatchThumbnail` | Below is the the instruction that describes the task:
### Input:
Find dispatches by certain criteria.
Parameters
----------
author : str
Name of the nation authoring the dispatch.
category : str
Dispatch's primary category.
subcategory : str
Dispatch's secondary category.
sort : str
Sort order, 'new' or 'best'.
Returns
-------
an :class:`ApiQuery` of a list of :class:`DispatchThumbnail`
### Response:
def dispatchlist(self, *, author=None, category=None,
subcategory=None, sort='new'):
"""Find dispatches by certain criteria.
Parameters
----------
author : str
Name of the nation authoring the dispatch.
category : str
Dispatch's primary category.
subcategory : str
Dispatch's secondary category.
sort : str
Sort order, 'new' or 'best'.
Returns
-------
an :class:`ApiQuery` of a list of :class:`DispatchThumbnail`
"""
params = {'sort': sort}
if author:
params['dispatchauthor'] = author
# Here we do need to ensure that our categories are valid, cause
# NS just ignores the categories it doesn't recognise and returns
# whatever it feels like.
if category and subcategory:
if (category not in dispatch_categories or
subcategory not in dispatch_categories[category]):
raise ValueError('Invalid category/subcategory')
params['dispatchcategory'] = f'{category}:{subcategory}'
elif category:
if category not in dispatch_categories:
raise ValueError('Invalid category')
params['dispatchcategory'] = category
else:
raise ValueError('Cannot request subcategory without category')
@api_query('dispatchlist', **params)
async def result(_, root):
return [
DispatchThumbnail._from_elem(elem)
for elem in root.find('DISPATCHLIST')
]
return result(self) |
def deps_tree(self):
"""Package dependencies image map file
"""
dependencies = self.dependencies + [self.name]
if self.repo == "sbo":
for dep in dependencies:
deps = Requires(flag="").sbo(dep)
if dep not in self.deps_dict.values():
self.deps_dict[dep] = Utils().dimensional_list(deps)
else:
for dep in dependencies:
deps = Dependencies(self.repo, self.black).binary(dep, flag="")
if dep not in self.deps_dict.values():
self.deps_dict[dep] = Utils().dimensional_list(deps) | Package dependencies image map file | Below is the the instruction that describes the task:
### Input:
Package dependencies image map file
### Response:
def deps_tree(self):
"""Package dependencies image map file
"""
dependencies = self.dependencies + [self.name]
if self.repo == "sbo":
for dep in dependencies:
deps = Requires(flag="").sbo(dep)
if dep not in self.deps_dict.values():
self.deps_dict[dep] = Utils().dimensional_list(deps)
else:
for dep in dependencies:
deps = Dependencies(self.repo, self.black).binary(dep, flag="")
if dep not in self.deps_dict.values():
self.deps_dict[dep] = Utils().dimensional_list(deps) |
def hist(hists,
stacked=True,
reverse=False,
xpadding=0, ypadding=.1,
yerror_in_padding=True,
logy=None,
snap=True,
axes=None,
**kwargs):
"""
Make a matplotlib hist plot from a ROOT histogram, stack or
list of histograms.
Parameters
----------
hists : Hist, list of Hist, HistStack
The histogram(s) to be plotted
stacked : bool, optional (default=True)
If True then stack the histograms with the first histogram on the
bottom, otherwise overlay them with the first histogram in the
background.
reverse : bool, optional (default=False)
If True then reverse the order of the stack or overlay.
xpadding : float or 2-tuple of floats, optional (default=0)
Padding to add on the left and right sides of the plot as a fraction of
the axes width after the padding has been added. Specify unique left
and right padding with a 2-tuple.
ypadding : float or 2-tuple of floats, optional (default=.1)
Padding to add on the top and bottom of the plot as a fraction of
the axes height after the padding has been added. Specify unique top
and bottom padding with a 2-tuple.
yerror_in_padding : bool, optional (default=True)
If True then make the padding inclusive of the y errors otherwise
only pad around the y values.
logy : bool, optional (default=None)
Apply special treatment of a log-scale y-axis to display the histogram
correctly. If None (the default) then automatically determine if the
y-axis is log-scale.
snap : bool, optional (default=True)
If True (the default) then the origin is an implicit lower bound of the
histogram unless the histogram has both positive and negative bins.
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
kwargs : additional keyword arguments, optional
All additional keyword arguments are passed to matplotlib's
fill_between for the filled regions and matplotlib's step function
for the edges.
Returns
-------
The return value from matplotlib's hist function, or list of such return
values if a stack or list of histograms was plotted.
"""
if axes is None:
axes = plt.gca()
if logy is None:
logy = axes.get_yscale() == 'log'
curr_xlim = axes.get_xlim()
curr_ylim = axes.get_ylim()
was_empty = not axes.has_data()
returns = []
if isinstance(hists, _Hist):
# This is a single plottable object.
returns = _hist(hists, axes=axes, logy=logy, **kwargs)
_set_bounds(hists, axes=axes,
was_empty=was_empty,
prev_xlim=curr_xlim,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logy=logy)
elif stacked:
# draw the top histogram first so its edges don't cover the histograms
# beneath it in the stack
if not reverse:
hists = list(hists)[::-1]
for i, h in enumerate(hists):
kwargs_local = kwargs.copy()
if i == len(hists) - 1:
low = h.Clone()
low.Reset()
else:
low = sum(hists[i + 1:])
high = h + low
high.alpha = getattr(h, 'alpha', None)
proxy = _hist(high, bottom=low, axes=axes, logy=logy, **kwargs)
returns.append(proxy)
if not reverse:
returns = returns[::-1]
_set_bounds(sum(hists), axes=axes,
was_empty=was_empty,
prev_xlim=curr_xlim,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logy=logy)
else:
for h in _maybe_reversed(hists, reverse):
returns.append(_hist(h, axes=axes, logy=logy, **kwargs))
if reverse:
returns = returns[::-1]
_set_bounds(hists[max(range(len(hists)), key=lambda idx: hists[idx].max())],
axes=axes,
was_empty=was_empty,
prev_xlim=curr_xlim,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logy=logy)
return returns | Make a matplotlib hist plot from a ROOT histogram, stack or
list of histograms.
Parameters
----------
hists : Hist, list of Hist, HistStack
The histogram(s) to be plotted
stacked : bool, optional (default=True)
If True then stack the histograms with the first histogram on the
bottom, otherwise overlay them with the first histogram in the
background.
reverse : bool, optional (default=False)
If True then reverse the order of the stack or overlay.
xpadding : float or 2-tuple of floats, optional (default=0)
Padding to add on the left and right sides of the plot as a fraction of
the axes width after the padding has been added. Specify unique left
and right padding with a 2-tuple.
ypadding : float or 2-tuple of floats, optional (default=.1)
Padding to add on the top and bottom of the plot as a fraction of
the axes height after the padding has been added. Specify unique top
and bottom padding with a 2-tuple.
yerror_in_padding : bool, optional (default=True)
If True then make the padding inclusive of the y errors otherwise
only pad around the y values.
logy : bool, optional (default=None)
Apply special treatment of a log-scale y-axis to display the histogram
correctly. If None (the default) then automatically determine if the
y-axis is log-scale.
snap : bool, optional (default=True)
If True (the default) then the origin is an implicit lower bound of the
histogram unless the histogram has both positive and negative bins.
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
kwargs : additional keyword arguments, optional
All additional keyword arguments are passed to matplotlib's
fill_between for the filled regions and matplotlib's step function
for the edges.
Returns
-------
The return value from matplotlib's hist function, or list of such return
values if a stack or list of histograms was plotted. | Below is the the instruction that describes the task:
### Input:
Make a matplotlib hist plot from a ROOT histogram, stack or
list of histograms.
Parameters
----------
hists : Hist, list of Hist, HistStack
The histogram(s) to be plotted
stacked : bool, optional (default=True)
If True then stack the histograms with the first histogram on the
bottom, otherwise overlay them with the first histogram in the
background.
reverse : bool, optional (default=False)
If True then reverse the order of the stack or overlay.
xpadding : float or 2-tuple of floats, optional (default=0)
Padding to add on the left and right sides of the plot as a fraction of
the axes width after the padding has been added. Specify unique left
and right padding with a 2-tuple.
ypadding : float or 2-tuple of floats, optional (default=.1)
Padding to add on the top and bottom of the plot as a fraction of
the axes height after the padding has been added. Specify unique top
and bottom padding with a 2-tuple.
yerror_in_padding : bool, optional (default=True)
If True then make the padding inclusive of the y errors otherwise
only pad around the y values.
logy : bool, optional (default=None)
Apply special treatment of a log-scale y-axis to display the histogram
correctly. If None (the default) then automatically determine if the
y-axis is log-scale.
snap : bool, optional (default=True)
If True (the default) then the origin is an implicit lower bound of the
histogram unless the histogram has both positive and negative bins.
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
kwargs : additional keyword arguments, optional
All additional keyword arguments are passed to matplotlib's
fill_between for the filled regions and matplotlib's step function
for the edges.
Returns
-------
The return value from matplotlib's hist function, or list of such return
values if a stack or list of histograms was plotted.
### Response:
def hist(hists,
stacked=True,
reverse=False,
xpadding=0, ypadding=.1,
yerror_in_padding=True,
logy=None,
snap=True,
axes=None,
**kwargs):
"""
Make a matplotlib hist plot from a ROOT histogram, stack or
list of histograms.
Parameters
----------
hists : Hist, list of Hist, HistStack
The histogram(s) to be plotted
stacked : bool, optional (default=True)
If True then stack the histograms with the first histogram on the
bottom, otherwise overlay them with the first histogram in the
background.
reverse : bool, optional (default=False)
If True then reverse the order of the stack or overlay.
xpadding : float or 2-tuple of floats, optional (default=0)
Padding to add on the left and right sides of the plot as a fraction of
the axes width after the padding has been added. Specify unique left
and right padding with a 2-tuple.
ypadding : float or 2-tuple of floats, optional (default=.1)
Padding to add on the top and bottom of the plot as a fraction of
the axes height after the padding has been added. Specify unique top
and bottom padding with a 2-tuple.
yerror_in_padding : bool, optional (default=True)
If True then make the padding inclusive of the y errors otherwise
only pad around the y values.
logy : bool, optional (default=None)
Apply special treatment of a log-scale y-axis to display the histogram
correctly. If None (the default) then automatically determine if the
y-axis is log-scale.
snap : bool, optional (default=True)
If True (the default) then the origin is an implicit lower bound of the
histogram unless the histogram has both positive and negative bins.
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
kwargs : additional keyword arguments, optional
All additional keyword arguments are passed to matplotlib's
fill_between for the filled regions and matplotlib's step function
for the edges.
Returns
-------
The return value from matplotlib's hist function, or list of such return
values if a stack or list of histograms was plotted.
"""
if axes is None:
axes = plt.gca()
if logy is None:
logy = axes.get_yscale() == 'log'
curr_xlim = axes.get_xlim()
curr_ylim = axes.get_ylim()
was_empty = not axes.has_data()
returns = []
if isinstance(hists, _Hist):
# This is a single plottable object.
returns = _hist(hists, axes=axes, logy=logy, **kwargs)
_set_bounds(hists, axes=axes,
was_empty=was_empty,
prev_xlim=curr_xlim,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logy=logy)
elif stacked:
# draw the top histogram first so its edges don't cover the histograms
# beneath it in the stack
if not reverse:
hists = list(hists)[::-1]
for i, h in enumerate(hists):
kwargs_local = kwargs.copy()
if i == len(hists) - 1:
low = h.Clone()
low.Reset()
else:
low = sum(hists[i + 1:])
high = h + low
high.alpha = getattr(h, 'alpha', None)
proxy = _hist(high, bottom=low, axes=axes, logy=logy, **kwargs)
returns.append(proxy)
if not reverse:
returns = returns[::-1]
_set_bounds(sum(hists), axes=axes,
was_empty=was_empty,
prev_xlim=curr_xlim,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logy=logy)
else:
for h in _maybe_reversed(hists, reverse):
returns.append(_hist(h, axes=axes, logy=logy, **kwargs))
if reverse:
returns = returns[::-1]
_set_bounds(hists[max(range(len(hists)), key=lambda idx: hists[idx].max())],
axes=axes,
was_empty=was_empty,
prev_xlim=curr_xlim,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logy=logy)
return returns |
def _get_related_series(parse_info):
"""get related_series from parse info.
:param parse_info: Parsed info from html soup.
"""
seriesother_tags = [x for x in parse_info.select('h5.seriesother')]
sibling_tag = [x for x in seriesother_tags if x.text == 'Related Series'][0]
siblings_tag = list(sibling_tag.next_siblings)
# filter valid tag
# valid tag is all tag before following tag
# <h5 class="seriesother">Recommendations</h5>
valid_tag = []
keypoint_found = False
for x in siblings_tag:
# change keypoint if condition match
if x.name == 'h5' and x.attrs['class'] == ['seriesother']:
keypoint_found = True
if not keypoint_found and x.strip is not None:
if x.strip():
valid_tag.append(x)
elif not keypoint_found:
valid_tag.append(x)
# only one item found and it is 'N/A
if len(valid_tag) == 1:
if valid_tag[0].strip() == 'N/A':
return None
# items are combination between bs4 and text
# merge and return them as list of text
if len(valid_tag) % 2 == 0:
zipped_list = zip(valid_tag[::2], valid_tag[1::2])
result = []
for x in zipped_list:
result.append('{} {}'.format(x[0].text, x[1]))
return result
raise ValueError("Valid tag isn't recognizeable.\n{}".format("\n".join(valid_tag))) | get related_series from parse info.
:param parse_info: Parsed info from html soup. | Below is the the instruction that describes the task:
### Input:
get related_series from parse info.
:param parse_info: Parsed info from html soup.
### Response:
def _get_related_series(parse_info):
"""get related_series from parse info.
:param parse_info: Parsed info from html soup.
"""
seriesother_tags = [x for x in parse_info.select('h5.seriesother')]
sibling_tag = [x for x in seriesother_tags if x.text == 'Related Series'][0]
siblings_tag = list(sibling_tag.next_siblings)
# filter valid tag
# valid tag is all tag before following tag
# <h5 class="seriesother">Recommendations</h5>
valid_tag = []
keypoint_found = False
for x in siblings_tag:
# change keypoint if condition match
if x.name == 'h5' and x.attrs['class'] == ['seriesother']:
keypoint_found = True
if not keypoint_found and x.strip is not None:
if x.strip():
valid_tag.append(x)
elif not keypoint_found:
valid_tag.append(x)
# only one item found and it is 'N/A
if len(valid_tag) == 1:
if valid_tag[0].strip() == 'N/A':
return None
# items are combination between bs4 and text
# merge and return them as list of text
if len(valid_tag) % 2 == 0:
zipped_list = zip(valid_tag[::2], valid_tag[1::2])
result = []
for x in zipped_list:
result.append('{} {}'.format(x[0].text, x[1]))
return result
raise ValueError("Valid tag isn't recognizeable.\n{}".format("\n".join(valid_tag))) |
def system_monitor_SFM_threshold_down_threshold(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
system_monitor = ET.SubElement(config, "system-monitor", xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
SFM = ET.SubElement(system_monitor, "SFM")
threshold = ET.SubElement(SFM, "threshold")
down_threshold = ET.SubElement(threshold, "down-threshold")
down_threshold.text = kwargs.pop('down_threshold')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def system_monitor_SFM_threshold_down_threshold(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
system_monitor = ET.SubElement(config, "system-monitor", xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
SFM = ET.SubElement(system_monitor, "SFM")
threshold = ET.SubElement(SFM, "threshold")
down_threshold = ET.SubElement(threshold, "down-threshold")
down_threshold.text = kwargs.pop('down_threshold')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def string_for_count(dictionary, count):
"""Create a random string of N=`count` words"""
string_to_print = ""
if count is not None:
if count == 0:
return ""
ranger = count
else:
ranger = 2
for index in range(ranger):
string_to_print += "{} ".format(get_random_word(dictionary))
return string_to_print.strip() | Create a random string of N=`count` words | Below is the the instruction that describes the task:
### Input:
Create a random string of N=`count` words
### Response:
def string_for_count(dictionary, count):
"""Create a random string of N=`count` words"""
string_to_print = ""
if count is not None:
if count == 0:
return ""
ranger = count
else:
ranger = 2
for index in range(ranger):
string_to_print += "{} ".format(get_random_word(dictionary))
return string_to_print.strip() |
def http_basic_auth_superuser_required(func=None):
"""Decorator. Use it to specify a RPC method is available only to logged superusers"""
wrapper = auth.set_authentication_predicate(http_basic_auth_check_user, [auth.user_is_superuser])
# If @http_basic_auth_superuser_required() is used (with parenthesis)
if func is None:
return wrapper
# If @http_basic_auth_superuser_required is used without parenthesis
return wrapper(func) | Decorator. Use it to specify a RPC method is available only to logged superusers | Below is the the instruction that describes the task:
### Input:
Decorator. Use it to specify a RPC method is available only to logged superusers
### Response:
def http_basic_auth_superuser_required(func=None):
"""Decorator. Use it to specify a RPC method is available only to logged superusers"""
wrapper = auth.set_authentication_predicate(http_basic_auth_check_user, [auth.user_is_superuser])
# If @http_basic_auth_superuser_required() is used (with parenthesis)
if func is None:
return wrapper
# If @http_basic_auth_superuser_required is used without parenthesis
return wrapper(func) |
def get_normalized_parameters(request):
"""
Returns a string that contains the parameters that must be signed.
This function is called by SignatureMethod subclass CustomSignatureMethod_HMAC_SHA1
"""
# See issues #10 and #12
if ('Content-Type' not in request.headers or \
request.headers.get('Content-Type').startswith('application/x-www-form-urlencoded')) \
and not isinstance(request.data, basestring):
data_and_params = dict(request.data.items() + request.params.items())
for key,value in data_and_params.items():
request.data_and_params[to_utf8(key)] = to_utf8(value)
if request.data_and_params.has_key('oauth_signature'):
del request.data_and_params['oauth_signature']
items = []
for key, value in request.data_and_params.iteritems():
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((key, value))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((key, value))
else:
items.extend((key, item) for item in value)
# Include any query string parameters included in the url
query_string = urlparse(request.url)[4]
items.extend([(to_utf8(k), to_utf8(v)) for k, v in OAuthHook._split_url_string(query_string).items()])
items.sort()
return urllib.urlencode(items).replace('+', '%20').replace('%7E', '~') | Returns a string that contains the parameters that must be signed.
This function is called by SignatureMethod subclass CustomSignatureMethod_HMAC_SHA1 | Below is the the instruction that describes the task:
### Input:
Returns a string that contains the parameters that must be signed.
This function is called by SignatureMethod subclass CustomSignatureMethod_HMAC_SHA1
### Response:
def get_normalized_parameters(request):
"""
Returns a string that contains the parameters that must be signed.
This function is called by SignatureMethod subclass CustomSignatureMethod_HMAC_SHA1
"""
# See issues #10 and #12
if ('Content-Type' not in request.headers or \
request.headers.get('Content-Type').startswith('application/x-www-form-urlencoded')) \
and not isinstance(request.data, basestring):
data_and_params = dict(request.data.items() + request.params.items())
for key,value in data_and_params.items():
request.data_and_params[to_utf8(key)] = to_utf8(value)
if request.data_and_params.has_key('oauth_signature'):
del request.data_and_params['oauth_signature']
items = []
for key, value in request.data_and_params.iteritems():
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((key, value))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((key, value))
else:
items.extend((key, item) for item in value)
# Include any query string parameters included in the url
query_string = urlparse(request.url)[4]
items.extend([(to_utf8(k), to_utf8(v)) for k, v in OAuthHook._split_url_string(query_string).items()])
items.sort()
return urllib.urlencode(items).replace('+', '%20').replace('%7E', '~') |
def find_globals_and_nonlocals(node, globs, nonlocals, code, version):
"""search a node of parse tree to find variable names that need a
either 'global' or 'nonlocal' statements added."""
for n in node:
if isinstance(n, SyntaxTree):
globs, nonlocals = find_globals_and_nonlocals(n, globs, nonlocals,
code, version)
elif n.kind in read_global_ops:
globs.add(n.pattr)
elif (version >= 3.0
and n.kind in nonglobal_ops
and n.pattr in code.co_freevars
and n.pattr != code.co_name
and code.co_name != '<lambda>'):
nonlocals.add(n.pattr)
return globs, nonlocals | search a node of parse tree to find variable names that need a
either 'global' or 'nonlocal' statements added. | Below is the the instruction that describes the task:
### Input:
search a node of parse tree to find variable names that need a
either 'global' or 'nonlocal' statements added.
### Response:
def find_globals_and_nonlocals(node, globs, nonlocals, code, version):
"""search a node of parse tree to find variable names that need a
either 'global' or 'nonlocal' statements added."""
for n in node:
if isinstance(n, SyntaxTree):
globs, nonlocals = find_globals_and_nonlocals(n, globs, nonlocals,
code, version)
elif n.kind in read_global_ops:
globs.add(n.pattr)
elif (version >= 3.0
and n.kind in nonglobal_ops
and n.pattr in code.co_freevars
and n.pattr != code.co_name
and code.co_name != '<lambda>'):
nonlocals.add(n.pattr)
return globs, nonlocals |
def bytes_base64(x):
"""Turn bytes into base64"""
if six.PY2:
return base64.encodestring(x).replace('\n', '')
return base64.encodebytes(bytes_encode(x)).replace(b'\n', b'') | Turn bytes into base64 | Below is the the instruction that describes the task:
### Input:
Turn bytes into base64
### Response:
def bytes_base64(x):
"""Turn bytes into base64"""
if six.PY2:
return base64.encodestring(x).replace('\n', '')
return base64.encodebytes(bytes_encode(x)).replace(b'\n', b'') |
async def change_directory(self, path=".."):
"""
:py:func:`asyncio.coroutine`
Change current directory. Goes «up» if no parameters passed.
:param path: new directory, goes «up» if omitted
:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`
"""
path = pathlib.PurePosixPath(path)
if path == pathlib.PurePosixPath(".."):
cmd = "CDUP"
else:
cmd = "CWD " + str(path)
await self.command(cmd, "2xx") | :py:func:`asyncio.coroutine`
Change current directory. Goes «up» if no parameters passed.
:param path: new directory, goes «up» if omitted
:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath` | Below is the the instruction that describes the task:
### Input:
:py:func:`asyncio.coroutine`
Change current directory. Goes «up» if no parameters passed.
:param path: new directory, goes «up» if omitted
:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`
### Response:
async def change_directory(self, path=".."):
"""
:py:func:`asyncio.coroutine`
Change current directory. Goes «up» if no parameters passed.
:param path: new directory, goes «up» if omitted
:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`
"""
path = pathlib.PurePosixPath(path)
if path == pathlib.PurePosixPath(".."):
cmd = "CDUP"
else:
cmd = "CWD " + str(path)
await self.command(cmd, "2xx") |
def __assert_false(returned):
'''
Test if an boolean is False
'''
result = "Pass"
if isinstance(returned, str):
try:
returned = bool(returned)
except ValueError:
raise
try:
assert (returned is False), "{0} not False".format(returned)
except AssertionError as err:
result = "Fail: " + six.text_type(err)
return result | Test if an boolean is False | Below is the the instruction that describes the task:
### Input:
Test if an boolean is False
### Response:
def __assert_false(returned):
'''
Test if an boolean is False
'''
result = "Pass"
if isinstance(returned, str):
try:
returned = bool(returned)
except ValueError:
raise
try:
assert (returned is False), "{0} not False".format(returned)
except AssertionError as err:
result = "Fail: " + six.text_type(err)
return result |
def set_area_geometry(w, src):
"""
Set area polygon as shapefile geometry
"""
assert "areaSource" in src.tag
geometry_node = src.nodes[get_taglist(src).index("areaGeometry")]
area_attrs = parse_area_geometry(geometry_node)
w.poly(parts=[area_attrs["polygon"].tolist()]) | Set area polygon as shapefile geometry | Below is the the instruction that describes the task:
### Input:
Set area polygon as shapefile geometry
### Response:
def set_area_geometry(w, src):
"""
Set area polygon as shapefile geometry
"""
assert "areaSource" in src.tag
geometry_node = src.nodes[get_taglist(src).index("areaGeometry")]
area_attrs = parse_area_geometry(geometry_node)
w.poly(parts=[area_attrs["polygon"].tolist()]) |
def get_stp_mst_detail_output_msti_port_admin_edge(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id_key = ET.SubElement(msti, "instance-id")
instance_id_key.text = kwargs.pop('instance_id')
port = ET.SubElement(msti, "port")
admin_edge = ET.SubElement(port, "admin-edge")
admin_edge.text = kwargs.pop('admin_edge')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_stp_mst_detail_output_msti_port_admin_edge(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id_key = ET.SubElement(msti, "instance-id")
instance_id_key.text = kwargs.pop('instance_id')
port = ET.SubElement(msti, "port")
admin_edge = ET.SubElement(port, "admin-edge")
admin_edge.text = kwargs.pop('admin_edge')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def remove_imath_operators(lines):
"""Remove mathematical expressions that require Pythons global
interpreter locking mechanism.
This is not a exhaustive test, but shows how the method works:
>>> lines = [' x += 1*1']
>>> from hydpy.cythons.modelutils import FuncConverter
>>> FuncConverter.remove_imath_operators(lines)
>>> lines
[' x = x + (1*1)']
"""
for idx, line in enumerate(lines):
for operator in ('+=', '-=', '**=', '*=', '//=', '/=', '%='):
sublines = line.split(operator)
if len(sublines) > 1:
indent = line.count(' ') - line.lstrip().count(' ')
sublines = [sl.strip() for sl in sublines]
line = ('%s%s = %s %s (%s)'
% (indent*' ', sublines[0], sublines[0],
operator[:-1], sublines[1]))
lines[idx] = line | Remove mathematical expressions that require Pythons global
interpreter locking mechanism.
This is not a exhaustive test, but shows how the method works:
>>> lines = [' x += 1*1']
>>> from hydpy.cythons.modelutils import FuncConverter
>>> FuncConverter.remove_imath_operators(lines)
>>> lines
[' x = x + (1*1)'] | Below is the the instruction that describes the task:
### Input:
Remove mathematical expressions that require Pythons global
interpreter locking mechanism.
This is not a exhaustive test, but shows how the method works:
>>> lines = [' x += 1*1']
>>> from hydpy.cythons.modelutils import FuncConverter
>>> FuncConverter.remove_imath_operators(lines)
>>> lines
[' x = x + (1*1)']
### Response:
def remove_imath_operators(lines):
"""Remove mathematical expressions that require Pythons global
interpreter locking mechanism.
This is not a exhaustive test, but shows how the method works:
>>> lines = [' x += 1*1']
>>> from hydpy.cythons.modelutils import FuncConverter
>>> FuncConverter.remove_imath_operators(lines)
>>> lines
[' x = x + (1*1)']
"""
for idx, line in enumerate(lines):
for operator in ('+=', '-=', '**=', '*=', '//=', '/=', '%='):
sublines = line.split(operator)
if len(sublines) > 1:
indent = line.count(' ') - line.lstrip().count(' ')
sublines = [sl.strip() for sl in sublines]
line = ('%s%s = %s %s (%s)'
% (indent*' ', sublines[0], sublines[0],
operator[:-1], sublines[1]))
lines[idx] = line |
def code_to_names(category, code):
"""
Given the code for a language, script, or region, get a dictionary of its
names in various languages.
"""
trie_name = '{}_to_name'.format(category)
if trie_name not in TRIES:
TRIES[trie_name] = load_trie(data_filename('trie/{}.marisa'.format(trie_name)))
trie = TRIES[trie_name]
lookup = code.lower() + '@'
possible_keys = trie.keys(lookup)
names = {}
for key in possible_keys:
target_language = key.split('@')[1]
names[target_language] = get_trie_value(trie, key)
return names | Given the code for a language, script, or region, get a dictionary of its
names in various languages. | Below is the the instruction that describes the task:
### Input:
Given the code for a language, script, or region, get a dictionary of its
names in various languages.
### Response:
def code_to_names(category, code):
"""
Given the code for a language, script, or region, get a dictionary of its
names in various languages.
"""
trie_name = '{}_to_name'.format(category)
if trie_name not in TRIES:
TRIES[trie_name] = load_trie(data_filename('trie/{}.marisa'.format(trie_name)))
trie = TRIES[trie_name]
lookup = code.lower() + '@'
possible_keys = trie.keys(lookup)
names = {}
for key in possible_keys:
target_language = key.split('@')[1]
names[target_language] = get_trie_value(trie, key)
return names |
def authorize_url(self):
"""获取授权跳转地址
:return: URL 地址
"""
redirect_uri = quote(self.redirect_uri, safe=b'')
url_list = [
self.OAUTH_BASE_URL,
'oauth2/authorize?appid=',
self.app_id,
'&redirect_uri=',
redirect_uri,
'&response_type=code&scope=',
self.scope
]
if self.state:
url_list.extend(['&state=', self.state])
url_list.append('#wechat_redirect')
return ''.join(url_list) | 获取授权跳转地址
:return: URL 地址 | Below is the the instruction that describes the task:
### Input:
获取授权跳转地址
:return: URL 地址
### Response:
def authorize_url(self):
"""获取授权跳转地址
:return: URL 地址
"""
redirect_uri = quote(self.redirect_uri, safe=b'')
url_list = [
self.OAUTH_BASE_URL,
'oauth2/authorize?appid=',
self.app_id,
'&redirect_uri=',
redirect_uri,
'&response_type=code&scope=',
self.scope
]
if self.state:
url_list.extend(['&state=', self.state])
url_list.append('#wechat_redirect')
return ''.join(url_list) |
def plot_frequencies(self, mindB=None, maxdB=None, norm=True):
"""Plot the window in the frequency domain
:param mindB: change the default lower y bound
:param maxdB: change the default upper lower bound
:param bool norm: if True, normalise the frequency response.
.. plot::
:width: 80%
:include-source:
from spectrum.window import Window
w = Window(64, name='hamming')
w.plot_frequencies()
"""
from pylab import plot, title, xlim, grid, ylim, xlabel, ylabel
# recompute the response
self.compute_response(norm=norm)
plot(self.frequencies, self.response)
title("ENBW=%2.1f" % (self.enbw))
ylabel('Frequency response (dB)')
xlabel('Fraction of sampling frequency')
# define the plot limits
xlim(-0.5, 0.5)
y0, y1 = ylim()
if mindB:
y0 = mindB
if maxdB is not None:
y1 = maxdB
else:
y1 = max(self.response)
ylim(y0, y1)
grid(True) | Plot the window in the frequency domain
:param mindB: change the default lower y bound
:param maxdB: change the default upper lower bound
:param bool norm: if True, normalise the frequency response.
.. plot::
:width: 80%
:include-source:
from spectrum.window import Window
w = Window(64, name='hamming')
w.plot_frequencies() | Below is the the instruction that describes the task:
### Input:
Plot the window in the frequency domain
:param mindB: change the default lower y bound
:param maxdB: change the default upper lower bound
:param bool norm: if True, normalise the frequency response.
.. plot::
:width: 80%
:include-source:
from spectrum.window import Window
w = Window(64, name='hamming')
w.plot_frequencies()
### Response:
def plot_frequencies(self, mindB=None, maxdB=None, norm=True):
"""Plot the window in the frequency domain
:param mindB: change the default lower y bound
:param maxdB: change the default upper lower bound
:param bool norm: if True, normalise the frequency response.
.. plot::
:width: 80%
:include-source:
from spectrum.window import Window
w = Window(64, name='hamming')
w.plot_frequencies()
"""
from pylab import plot, title, xlim, grid, ylim, xlabel, ylabel
# recompute the response
self.compute_response(norm=norm)
plot(self.frequencies, self.response)
title("ENBW=%2.1f" % (self.enbw))
ylabel('Frequency response (dB)')
xlabel('Fraction of sampling frequency')
# define the plot limits
xlim(-0.5, 0.5)
y0, y1 = ylim()
if mindB:
y0 = mindB
if maxdB is not None:
y1 = maxdB
else:
y1 = max(self.response)
ylim(y0, y1)
grid(True) |
def process_result(self, new_sia, min_sia):
"""Check if the new SIA has smaller |big_phi| than the standing
result.
"""
if new_sia.phi == 0:
self.done = True # Short-circuit
return new_sia
elif new_sia < min_sia:
return new_sia
return min_sia | Check if the new SIA has smaller |big_phi| than the standing
result. | Below is the the instruction that describes the task:
### Input:
Check if the new SIA has smaller |big_phi| than the standing
result.
### Response:
def process_result(self, new_sia, min_sia):
"""Check if the new SIA has smaller |big_phi| than the standing
result.
"""
if new_sia.phi == 0:
self.done = True # Short-circuit
return new_sia
elif new_sia < min_sia:
return new_sia
return min_sia |
async def update_api(request: web.Request) -> web.Response:
"""
This handler accepts a POST request with Content-Type: multipart/form-data
and file fields in the body named "whl", "serverlib", and "fw". The "whl"
and "serverlib" files should be valid Python wheels to be installed ("whl"
is expected generally to be the API server wheel, and "serverlib" is
expected to be the ot2serverlib wheel. The "fw" file is expected to be a
Smoothie firmware hex file. The Python files are install using pip, and the
firmware file is flashed to the Smoothie board, then the files are deleted
and a success code is returned.
"""
log.debug('Update request received')
data = await request.post()
try:
res0 = await install_py(
data['whl'], request.loop)
reslist = [res0]
if 'serverlib' in data.keys():
res1 = await install_py(
data['serverlib'], request.loop)
reslist.append(res1)
if 'fw' in data.keys():
res2 = await install_smoothie_firmware(
data['fw'], request.loop)
reslist.append(res2)
res: Dict[str, Any] = {
'message': [r['message'] for r in reslist],
'filename': [r['filename'] for r in reslist]
}
status = 200
except Exception as e:
res = {'message': 'Exception {} raised by update of {}: {}'.format(
type(e), data, e.__traceback__)}
status = 500
return web.json_response(res, status=status) | This handler accepts a POST request with Content-Type: multipart/form-data
and file fields in the body named "whl", "serverlib", and "fw". The "whl"
and "serverlib" files should be valid Python wheels to be installed ("whl"
is expected generally to be the API server wheel, and "serverlib" is
expected to be the ot2serverlib wheel. The "fw" file is expected to be a
Smoothie firmware hex file. The Python files are install using pip, and the
firmware file is flashed to the Smoothie board, then the files are deleted
and a success code is returned. | Below is the the instruction that describes the task:
### Input:
This handler accepts a POST request with Content-Type: multipart/form-data
and file fields in the body named "whl", "serverlib", and "fw". The "whl"
and "serverlib" files should be valid Python wheels to be installed ("whl"
is expected generally to be the API server wheel, and "serverlib" is
expected to be the ot2serverlib wheel. The "fw" file is expected to be a
Smoothie firmware hex file. The Python files are install using pip, and the
firmware file is flashed to the Smoothie board, then the files are deleted
and a success code is returned.
### Response:
async def update_api(request: web.Request) -> web.Response:
"""
This handler accepts a POST request with Content-Type: multipart/form-data
and file fields in the body named "whl", "serverlib", and "fw". The "whl"
and "serverlib" files should be valid Python wheels to be installed ("whl"
is expected generally to be the API server wheel, and "serverlib" is
expected to be the ot2serverlib wheel. The "fw" file is expected to be a
Smoothie firmware hex file. The Python files are install using pip, and the
firmware file is flashed to the Smoothie board, then the files are deleted
and a success code is returned.
"""
log.debug('Update request received')
data = await request.post()
try:
res0 = await install_py(
data['whl'], request.loop)
reslist = [res0]
if 'serverlib' in data.keys():
res1 = await install_py(
data['serverlib'], request.loop)
reslist.append(res1)
if 'fw' in data.keys():
res2 = await install_smoothie_firmware(
data['fw'], request.loop)
reslist.append(res2)
res: Dict[str, Any] = {
'message': [r['message'] for r in reslist],
'filename': [r['filename'] for r in reslist]
}
status = 200
except Exception as e:
res = {'message': 'Exception {} raised by update of {}: {}'.format(
type(e), data, e.__traceback__)}
status = 500
return web.json_response(res, status=status) |
def SendSms(self, *TargetNumbers, **Properties):
"""Creates and sends an SMS message.
:Parameters:
TargetNumbers : str
One or more target SMS numbers.
Properties
Message properties. Properties available are same as `SmsMessage` object properties.
:return: An sms message object. The message is already sent at this point.
:rtype: `SmsMessage`
"""
sms = self.CreateSms(smsMessageTypeOutgoing, *TargetNumbers)
for name, value in Properties.items():
if isinstance(getattr(sms.__class__, name, None), property):
setattr(sms, name, value)
else:
raise TypeError('Unknown property: %s' % prop)
sms.Send()
return sms | Creates and sends an SMS message.
:Parameters:
TargetNumbers : str
One or more target SMS numbers.
Properties
Message properties. Properties available are same as `SmsMessage` object properties.
:return: An sms message object. The message is already sent at this point.
:rtype: `SmsMessage` | Below is the the instruction that describes the task:
### Input:
Creates and sends an SMS message.
:Parameters:
TargetNumbers : str
One or more target SMS numbers.
Properties
Message properties. Properties available are same as `SmsMessage` object properties.
:return: An sms message object. The message is already sent at this point.
:rtype: `SmsMessage`
### Response:
def SendSms(self, *TargetNumbers, **Properties):
"""Creates and sends an SMS message.
:Parameters:
TargetNumbers : str
One or more target SMS numbers.
Properties
Message properties. Properties available are same as `SmsMessage` object properties.
:return: An sms message object. The message is already sent at this point.
:rtype: `SmsMessage`
"""
sms = self.CreateSms(smsMessageTypeOutgoing, *TargetNumbers)
for name, value in Properties.items():
if isinstance(getattr(sms.__class__, name, None), property):
setattr(sms, name, value)
else:
raise TypeError('Unknown property: %s' % prop)
sms.Send()
return sms |
def format_unix_var(text):
"""
Example::
this_is_very_good
"""
text = text.strip()
if len(text) == 0: # if empty string, return it
raise ValueError("can not be empty string!")
else:
if text[0] in string.digits:
raise ValueError("variable can not start with digits!")
text = text.lower()
# delete redundant empty space
words = list()
word = list()
for char in text:
if char in ALPHA_DIGITS:
word.append(char)
else:
if len(word):
words.append("".join(word))
word = list()
if len(word):
words.append("".join(word))
return "_".join(words) | Example::
this_is_very_good | Below is the the instruction that describes the task:
### Input:
Example::
this_is_very_good
### Response:
def format_unix_var(text):
"""
Example::
this_is_very_good
"""
text = text.strip()
if len(text) == 0: # if empty string, return it
raise ValueError("can not be empty string!")
else:
if text[0] in string.digits:
raise ValueError("variable can not start with digits!")
text = text.lower()
# delete redundant empty space
words = list()
word = list()
for char in text:
if char in ALPHA_DIGITS:
word.append(char)
else:
if len(word):
words.append("".join(word))
word = list()
if len(word):
words.append("".join(word))
return "_".join(words) |
def process_directory(source, target, apikey, handler, overwrite=False):
"""Optimize and save png files form source to target directory.
@param source: path to input directory
@param target: path to output directory
@param handler: callback holder, instance of handlers.BaseHandler
@param overwrite: boolean flag to allow overwrite already existing
files in output directory.
"""
handler.on_start()
attempts = defaultdict(lambda: 0)
input_files = files_with_exts(source, suffix='.png')
next_ = lambda: next(input_files, None)
current_file = next_()
response = None
last_processed = None
while current_file:
output_file = target_path(source, target, current_file)
if os.path.exists(output_file) and not overwrite:
handler.on_skip(current_file, source=source)
current_file = next_()
continue
try:
handler.on_pre_item(current_file)
last_processed = current_file
response = _process_file(current_file, output_file, apikey)
current_file = next_()
except StopProcessing as e:
# Unauthorized or exceed number of allowed monthly calls
response = e.response
handler.on_stop(response.errmsg)
break
except RetryProcessing as e:
# handle InternalServerError on tinypng side
response = e.response
if attempts[current_file] < 9:
handler.on_retry(current_file)
time.sleep(TINYPNG_SLEEP_SEC)
attempts[current_file] += 1
else:
current_file = next_()
finally:
handler.on_post_item(response, input_file=last_processed, source=source)
handler.on_finish(output_dir=target) | Optimize and save png files form source to target directory.
@param source: path to input directory
@param target: path to output directory
@param handler: callback holder, instance of handlers.BaseHandler
@param overwrite: boolean flag to allow overwrite already existing
files in output directory. | Below is the the instruction that describes the task:
### Input:
Optimize and save png files form source to target directory.
@param source: path to input directory
@param target: path to output directory
@param handler: callback holder, instance of handlers.BaseHandler
@param overwrite: boolean flag to allow overwrite already existing
files in output directory.
### Response:
def process_directory(source, target, apikey, handler, overwrite=False):
    """Optimize and save png files from source to target directory.
    @param source: path to input directory
    @param target: path to output directory
    @param apikey: TinyPNG API key passed through to ``_process_file``
    @param handler: callback holder, instance of handlers.BaseHandler
    @param overwrite: boolean flag to allow overwrite already existing
    files in output directory.
    """
    handler.on_start()
    # Per-file retry counter; a file is abandoned after 9 retries.
    attempts = defaultdict(lambda: 0)
    input_files = files_with_exts(source, suffix='.png')
    # Yields None once the generator is exhausted, which ends the while loop.
    next_ = lambda: next(input_files, None)
    current_file = next_()
    response = None
    last_processed = None
    while current_file:
        output_file = target_path(source, target, current_file)
        if os.path.exists(output_file) and not overwrite:
            # Output already present and overwrite disabled: skip without
            # contacting the API (and without triggering the finally hook).
            handler.on_skip(current_file, source=source)
            current_file = next_()
            continue
        try:
            handler.on_pre_item(current_file)
            last_processed = current_file
            response = _process_file(current_file, output_file, apikey)
            current_file = next_()
        except StopProcessing as e:
            # Unauthorized or exceed number of allowed monthly calls
            response = e.response
            handler.on_stop(response.errmsg)
            break
        except RetryProcessing as e:
            # handle InternalServerError on tinypng side
            response = e.response
            if attempts[current_file] < 9:
                # Back off, then retry the same file: current_file is left
                # unchanged so the loop processes it again.
                handler.on_retry(current_file)
                time.sleep(TINYPNG_SLEEP_SEC)
                attempts[current_file] += 1
            else:
                # Retry budget exhausted: give up on this file and move on.
                current_file = next_()
        finally:
            # Runs after every attempt (success, retry, or stop), so the
            # handler always sees the latest response for the file last tried.
            handler.on_post_item(response, input_file=last_processed, source=source)
    handler.on_finish(output_dir=target)
def list_names():
""" List all known color names. """
names = get_all_names()
# This is 375 right now. Probably won't ever change, but I'm not sure.
nameslen = len(names)
print('\nListing {} names:\n'.format(nameslen))
# Using 3 columns of names, still alphabetically sorted from the top down.
# Longest name so far: lightgoldenrodyellow (20 chars)
namewidth = 20
# namewidth * columns == 60, colorwidth * columns == 18, final == 78.
swatch = ' ' * 9
third = nameslen // 3
lastthird = third * 2
cols = (
names[0: third],
names[third: lastthird],
names[lastthird:],
)
# Exactly enough spaces to fill in a blank item (+2 for ': ').
# This may not ever be used, unless another 'known name' is added.
blankitem = ' ' * (namewidth + len(swatch) + 2)
for i in range(third):
nameset = []
for colset in cols:
try:
nameset.append(colset[i])
except IndexError:
nameset.append(None)
continue
line = C('').join(
C(': ').join(
C(name.rjust(namewidth)),
C(swatch, back=name),
) if name else blankitem
for name in nameset
)
print(line)
return 0 | List all known color names. | Below is the the instruction that describes the task:
### Input:
List all known color names.
### Response:
def list_names():
    """ List all known color names.
    Prints the names in three alphabetical columns, each name next to a
    swatch rendered in that color. Returns 0 (shell-style exit status).
    """
    names = get_all_names()
    # This is 375 right now. Probably won't ever change, but I'm not sure.
    nameslen = len(names)
    print('\nListing {} names:\n'.format(nameslen))
    # Using 3 columns of names, still alphabetically sorted from the top down.
    # Longest name so far: lightgoldenrodyellow (20 chars)
    namewidth = 20
    # namewidth * columns == 60, colorwidth * columns == 18, final == 78.
    swatch = ' ' * 9
    third = nameslen // 3
    lastthird = third * 2
    cols = (
        names[0: third],
        names[third: lastthird],
        names[lastthird:],
    )
    # Exactly enough spaces to fill in a blank item (+2 for ': ').
    # Used whenever the columns are uneven (nameslen not divisible by 3).
    blankitem = ' ' * (namewidth + len(swatch) + 2)
    # Iterate over the tallest column: the last column can hold up to two
    # extra names, and iterating only range(third) would silently drop them
    # (and the IndexError fallback below would never fire).
    for i in range(max(len(colset) for colset in cols)):
        nameset = []
        for colset in cols:
            try:
                nameset.append(colset[i])
            except IndexError:
                nameset.append(None)
                continue
        line = C('').join(
            C(': ').join(
                C(name.rjust(namewidth)),
                C(swatch, back=name),
            ) if name else blankitem
            for name in nameset
        )
        print(line)
    return 0
def _resolve(value, model_instance=None, context=None):
""" Resolves any template references in the given value.
"""
if isinstance(value, basestring) and "{" in value:
if context is None:
context = Context()
if model_instance is not None:
context[model_instance._meta.module_name] = model_instance
value = Template(value).render(context)
return value | Resolves any template references in the given value. | Below is the the instruction that describes the task:
### Input:
Resolves any template references in the given value.
### Response:
def _resolve(value, model_instance=None, context=None):
    """ Render any template references embedded in the given value.
    Non-string values, and strings without a "{", pass through unchanged.
    """
    if not (isinstance(value, basestring) and "{" in value):
        return value
    ctx = Context() if context is None else context
    if model_instance is not None:
        # Expose the instance under its model name, e.g. {{ mymodel.pk }}.
        ctx[model_instance._meta.module_name] = model_instance
    return Template(value).render(ctx)
def power_chisq_at_points_from_precomputed(corr, snr, snr_norm, bins, indices):
"""Calculate the chisq timeseries from precomputed values for only select points.
This function calculates the chisq at each point by explicitly time shifting
and summing each bin. No FFT is involved.
Parameters
----------
corr: FrequencySeries
The product of the template and data in the frequency domain.
snr: numpy.ndarray
The unnormalized array of snr values at only the selected points in `indices`.
snr_norm: float
The normalization of the snr (EXPLAINME : refer to Findchirp paper?)
bins: List of integers
The edges of the equal power bins
indices: Array
The indices where we will calculate the chisq. These must be relative
to the given `corr` series.
Returns
-------
chisq: Array
An array containing only the chisq at the selected points.
"""
num_bins = len(bins) - 1
chisq = shift_sum(corr, indices, bins) # pylint:disable=assignment-from-no-return
return (chisq * num_bins - (snr.conj() * snr).real) * (snr_norm ** 2.0) | Calculate the chisq timeseries from precomputed values for only select points.
This function calculates the chisq at each point by explicitly time shifting
and summing each bin. No FFT is involved.
Parameters
----------
corr: FrequencySeries
The product of the template and data in the frequency domain.
snr: numpy.ndarray
The unnormalized array of snr values at only the selected points in `indices`.
snr_norm: float
The normalization of the snr (EXPLAINME : refer to Findchirp paper?)
bins: List of integers
The edges of the equal power bins
indices: Array
The indices where we will calculate the chisq. These must be relative
to the given `corr` series.
Returns
-------
chisq: Array
        An array containing only the chisq at the selected points. | Below is the instruction that describes the task:
### Input:
Calculate the chisq timeseries from precomputed values for only select points.
This function calculates the chisq at each point by explicitly time shifting
and summing each bin. No FFT is involved.
Parameters
----------
corr: FrequencySeries
The product of the template and data in the frequency domain.
snr: numpy.ndarray
The unnormalized array of snr values at only the selected points in `indices`.
snr_norm: float
The normalization of the snr (EXPLAINME : refer to Findchirp paper?)
bins: List of integers
The edges of the equal power bins
indices: Array
The indices where we will calculate the chisq. These must be relative
to the given `corr` series.
Returns
-------
chisq: Array
An array containing only the chisq at the selected points.
### Response:
def power_chisq_at_points_from_precomputed(corr, snr, snr_norm, bins, indices):
    """Calculate the chisq timeseries from precomputed values for only select points.
    Each requested point is handled by explicitly time-shifting and summing
    every frequency bin; no FFT is involved.
    Parameters
    ----------
    corr: FrequencySeries
        The product of the template and data in the frequency domain.
    snr: numpy.ndarray
        The unnormalized snr values at only the selected points in `indices`.
    snr_norm: float
        The normalization of the snr.
    bins: List of integers
        The edges of the equal power bins.
    indices: Array
        The indices where the chisq is calculated, relative to the given
        `corr` series.
    Returns
    -------
    chisq: Array
        An array containing only the chisq at the selected points.
    """
    bin_count = len(bins) - 1
    binned = shift_sum(corr, indices, bins)  # pylint:disable=assignment-from-no-return
    snr_sq = (snr.conj() * snr).real
    return (binned * bin_count - snr_sq) * (snr_norm ** 2.0)
def dipole(src, rec, depth, res, freqtime, signal=None, ab=11, aniso=None,
           epermH=None, epermV=None, mpermH=None, mpermV=None, xdirect=False,
           ht='fht', htarg=None, ft='sin', ftarg=None, opt=None, loop=None,
           verb=2):
    r"""Return the electromagnetic field due to a dipole source.
    Calculate the electromagnetic frequency- or time-domain field due to
    infinitesimal small electric or magnetic dipole source(s), measured by
    infinitesimal small electric or magnetic dipole receiver(s); sources and
    receivers are directed along the principal directions x, y, or z, and all
    sources are at the same depth, as well as all receivers are at the same
    depth.
    Use the functions ``bipole`` to calculate dipoles with arbitrary angles or
    bipoles of finite length and arbitrary angle.
    The function ``dipole`` could be replaced by ``bipole`` (all there is to do
    is translate ``ab`` into ``msrc``, ``mrec``, ``azimuth``'s and ``dip``'s).
    However, ``dipole`` is kept separately to serve as an example of a simple
    modelling routine that can serve as a template.
    See Also
    --------
    bipole : Electromagnetic field due to an electromagnetic source.
    fem : Electromagnetic frequency-domain response.
    tem : Electromagnetic time-domain response.
    Parameters
    ----------
    src, rec : list of floats or arrays
        Source and receiver coordinates (m): [x, y, z].
        The x- and y-coordinates can be arrays, z is a single value.
        The x- and y-coordinates must have the same dimension.
        Sources or receivers placed on a layer interface are considered in the
        upper layer.
    depth : list
        Absolute layer interfaces z (m); #depth = #res - 1
        (excluding +/- infinity).
    res : array_like
        Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.
        Alternatively, res can be a dictionary. See the main manual of empymod
        too see how to exploit this hook to re-calculate etaH, etaV, zetaH, and
        zetaV, which can be used to, for instance, use the Cole-Cole model for
        IP.
    freqtime : array_like
        Frequencies f (Hz) if ``signal`` == None, else times t (s); (f, t > 0).
    signal : {None, 0, 1, -1}, optional
        Source signal, default is None:
        - None: Frequency-domain response
        - -1 : Switch-off time-domain response
        - 0 : Impulse time-domain response
        - +1 : Switch-on time-domain response
    ab : int, optional
        Source-receiver configuration, defaults to 11.
        +---------------+-------+------+------+------+------+------+------+
        |                       | electric  source   | magnetic  source   |
        +===============+=======+======+======+======+======+======+======+
        |                       | **x**| **y**| **z**| **x**| **y**| **z**|
        +---------------+-------+------+------+------+------+------+------+
        |               | **x** |  11  |  12  |  13  |  14  |  15  |  16  |
        + **electric**  +-------+------+------+------+------+------+------+
        |               | **y** |  21  |  22  |  23  |  24  |  25  |  26  |
        + **receiver**  +-------+------+------+------+------+------+------+
        |               | **z** |  31  |  32  |  33  |  34  |  35  |  36  |
        +---------------+-------+------+------+------+------+------+------+
        |               | **x** |  41  |  42  |  43  |  44  |  45  |  46  |
        + **magnetic**  +-------+------+------+------+------+------+------+
        |               | **y** |  51  |  52  |  53  |  54  |  55  |  56  |
        + **receiver**  +-------+------+------+------+------+------+------+
        |               | **z** |  61  |  62  |  63  |  64  |  65  |  66  |
        +---------------+-------+------+------+------+------+------+------+
    aniso : array_like, optional
        Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.
        Defaults to ones.
    epermH, epermV : array_like, optional
        Relative horizontal/vertical electric permittivities
        epsilon_h/epsilon_v (-);
        #epermH = #epermV = #res. Default is ones.
    mpermH, mpermV : array_like, optional
        Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-);
        #mpermH = #mpermV = #res. Default is ones.
    xdirect : bool or None, optional
        Direct field calculation (only if src and rec are in the same layer):
        - If True, direct field is calculated analytically in the frequency
          domain.
        - If False, direct field is calculated in the wavenumber domain.
        - If None, direct field is excluded from the calculation, and only
          reflected fields are returned (secondary field).
        Defaults to False.
    ht : {'fht', 'qwe', 'quad'}, optional
        Flag to choose either the *Digital Linear Filter* method (FHT, *Fast
        Hankel Transform*), the *Quadrature-With-Extrapolation* (QWE), or a
        simple *Quadrature* (QUAD) for the Hankel transform.  Defaults to
        'fht'.
    htarg : dict or list, optional
        Depends on the value for ``ht``:
        - If ``ht`` = 'fht': [fhtfilt, pts_per_dec]:
            - fhtfilt: string of filter name in ``empymod.filters`` or
              the filter method itself.
              (default: ``empymod.filters.key_201_2009()``)
            - pts_per_dec: points per decade; (default: 0)
                - If 0: Standard DLF.
                - If < 0: Lagged Convolution DLF.
                - If > 0: Splined DLF
        - If ``ht`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec,
          diff_quad, a, b, limit]:
            - rtol: relative tolerance (default: 1e-12)
            - atol: absolute tolerance (default: 1e-30)
            - nquad: order of Gaussian quadrature (default: 51)
            - maxint: maximum number of partial integral intervals
              (default: 40)
            - pts_per_dec: points per decade; (default: 0)
                - If 0, no interpolation is used.
                - If > 0, interpolation is used.
            - diff_quad: criteria when to swap to QUAD (only relevant if
              opt='spline') (default: 100)
            - a: lower limit for QUAD (default: first interval from QWE)
            - b: upper limit for QUAD (default: last interval from QWE)
            - limit: limit for quad (default: maxint)
        - If ``ht`` = 'quad': [atol, rtol, limit, lmin, lmax, pts_per_dec]:
            - rtol: relative tolerance (default: 1e-12)
            - atol: absolute tolerance (default: 1e-20)
            - limit: An upper bound on the number of subintervals used in
              the adaptive algorithm (default: 500)
            - lmin: Minimum wavenumber (default 1e-6)
            - lmax: Maximum wavenumber (default 0.1)
            - pts_per_dec: points per decade (default: 40)
        The values can be provided as dict with the keywords, or as list.
        However, if provided as list, you have to follow the order given above.
        A few examples, assuming ``ht`` = ``qwe``:
        - Only changing rtol:
          {'rtol': 1e-4} or [1e-4] or 1e-4
        - Changing rtol and nquad:
          {'rtol': 1e-4, 'nquad': 101} or [1e-4, '', 101]
        - Only changing diff_quad:
          {'diffquad': 10} or ['', '', '', '', '', 10]
    ft : {'sin', 'cos', 'qwe', 'fftlog', 'fft'}, optional
        Only used if ``signal`` != None. Flag to choose either the Digital
        Linear Filter method (Sine- or Cosine-Filter), the
        Quadrature-With-Extrapolation (QWE), the FFTLog, or the FFT for the
        Fourier transform.  Defaults to 'sin'.
    ftarg : dict or list, optional
        Only used if ``signal`` !=None. Depends on the value for ``ft``:
        - If ``ft`` = 'sin' or 'cos': [fftfilt, pts_per_dec]:
            - fftfilt: string of filter name in ``empymod.filters`` or
              the filter method itself.
              (Default: ``empymod.filters.key_201_CosSin_2012()``)
            - pts_per_dec: points per decade; (default: -1)
                - If 0: Standard DLF.
                - If < 0: Lagged Convolution DLF.
                - If > 0: Splined DLF
        - If ``ft`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec]:
            - rtol: relative tolerance (default: 1e-8)
            - atol: absolute tolerance (default: 1e-20)
            - nquad: order of Gaussian quadrature (default: 21)
            - maxint: maximum number of partial integral intervals
              (default: 200)
            - pts_per_dec: points per decade (default: 20)
            - diff_quad: criteria when to swap to QUAD (default: 100)
            - a: lower limit for QUAD (default: first interval from QWE)
            - b: upper limit for QUAD (default: last interval from QWE)
            - limit: limit for quad (default: maxint)
        - If ``ft`` = 'fftlog': [pts_per_dec, add_dec, q]:
            - pts_per_dec: sampels per decade (default: 10)
            - add_dec: additional decades [left, right] (default: [-2, 1])
            - q: exponent of power law bias (default: 0); -1 <= q <= 1
        - If ``ft`` = 'fft': [dfreq, nfreq, ntot]:
            - dfreq: Linear step-size of frequencies (default: 0.002)
            - nfreq: Number of frequencies (default: 2048)
            - ntot: Total number for FFT; difference between nfreq and
              ntot is padded with zeroes. This number is ideally a
              power of 2, e.g. 2048 or 4096 (default: nfreq).
            - pts_per_dec : points per decade (default: None)
            Padding can sometimes improve the result, not always. The
            default samples from 0.002 Hz - 4.096 Hz. If pts_per_dec is set
            to an integer, calculated frequencies are logarithmically
            spaced with the given number per decade, and then interpolated
            to yield the required frequencies for the FFT.
        The values can be provided as dict with the keywords, or as list.
        However, if provided as list, you have to follow the order given above.
        See ``htarg`` for a few examples.
    opt : {None, 'parallel'}, optional
        Optimization flag. Defaults to None:
        - None: Normal case, no parallelization nor interpolation is used.
        - If 'parallel', the package ``numexpr`` is used to evaluate the
          most expensive statements. Always check if it actually improves
          performance for a specific problem. It can speed up the
          calculation for big arrays, but will most likely be slower for
          small arrays. It will use all available cores for these specific
          statements, which all contain ``Gamma`` in one way or another,
          which has dimensions (#frequencies, #offsets, #layers, #lambdas),
          therefore can grow pretty big. The module ``numexpr`` uses by
          default all available cores up to a maximum of 8. You can change
          this behaviour to your desired number of threads ``nthreads``
          with ``numexpr.set_num_threads(nthreads)``.
        - The value 'spline' is deprecated and will be removed. See
          ``htarg`` instead for the interpolated versions.
        The option 'parallel' only affects speed and memory usage, whereas
        'spline' also affects precision!  Please read the note in the *README*
        documentation for more information.
    loop : {None, 'freq', 'off'}, optional
        Define if to calculate everything vectorized or if to loop over
        frequencies ('freq') or over offsets ('off'), default is None. It
        always loops over frequencies if ``ht = 'qwe'`` or if ``opt =
        'spline'``. Calculating everything vectorized is fast for few offsets
        OR for few frequencies. However, if you calculate many frequencies for
        many offsets, it might be faster to loop over frequencies. Only
        comparing the different versions will yield the answer for your
        specific problem at hand!
    verb : {0, 1, 2, 3, 4}, optional
        Level of verbosity, default is 2:
        - 0: Print nothing.
        - 1: Print warnings.
        - 2: Print additional runtime and kernel calls
        - 3: Print additional start/stop, condensed parameter information.
        - 4: Print additional full parameter information
    Returns
    -------
    EM : ndarray, (nfreq, nrec, nsrc)
        Frequency- or time-domain EM field (depending on ``signal``):
        - If rec is electric, returns E [V/m].
        - If rec is magnetic, returns B [T] (not H [A/m]!).
        However, source and receiver are normalised. So for instance in the
        electric case the source strength is 1 A and its length is 1 m. So the
        electric field could also be written as [V/(A.m2)].
        The shape of EM is (nfreq, nrec, nsrc). However, single dimensions
        are removed.
    Examples
    --------
    >>> import numpy as np
    >>> from empymod import dipole
    >>> src = [0, 0, 100]
    >>> rec = [np.arange(1, 11)*500, np.zeros(10), 200]
    >>> depth = [0, 300, 1000, 1050]
    >>> res = [1e20, .3, 1, 50, 1]
    >>> EMfield = dipole(src, rec, depth, res, freqtime=1, verb=0)
    >>> print(EMfield)
    [  1.68809346e-10 -3.08303130e-10j  -8.77189179e-12 -3.76920235e-11j
      -3.46654704e-12 -4.87133683e-12j  -3.60159726e-13 -1.12434417e-12j
       1.87807271e-13 -6.21669759e-13j   1.97200208e-13 -4.38210489e-13j
       1.44134842e-13 -3.17505260e-13j   9.92770406e-14 -2.33950871e-13j
       6.75287598e-14 -1.74922886e-13j   4.62724887e-14 -1.32266600e-13j]
    """
    # === 1. LET'S START ============
    t0 = printstartfinish(verb)
    # === 2. CHECK INPUT ============
    # Backwards compatibility
    htarg, opt = spline_backwards_hankel(ht, htarg, opt)
    # Check times and Fourier Transform arguments, get required frequencies
    # (freq = freqtime if ``signal=None``)
    if signal is not None:
        time, freq, ft, ftarg = check_time(freqtime, signal, ft, ftarg, verb)
    else:
        freq = freqtime
    # Check layer parameters
    model = check_model(depth, res, aniso, epermH, epermV, mpermH, mpermV,
                        xdirect, verb)
    depth, res, aniso, epermH, epermV, mpermH, mpermV, isfullspace = model
    # Check frequency => get etaH, etaV, zetaH, and zetaV
    frequency = check_frequency(freq, res, aniso, epermH, epermV, mpermH,
                                mpermV, verb)
    freq, etaH, etaV, zetaH, zetaV = frequency
    # Update etaH/etaV and zetaH/zetaV according to user-provided model
    # NOTE: the hooks receive locals() by name, so the local variable names
    # in this function (freq, etaH, ...) are part of the hook contract.
    if isinstance(res, dict) and 'func_eta' in res:
        etaH, etaV = res['func_eta'](res, locals())
    if isinstance(res, dict) and 'func_zeta' in res:
        zetaH, zetaV = res['func_zeta'](res, locals())
    # Check Hankel transform parameters
    ht, htarg = check_hankel(ht, htarg, verb)
    # Check optimization
    use_ne_eval, loop_freq, loop_off = check_opt(opt, loop, ht, htarg, verb)
    # Check src-rec configuration
    # => Get flags if src or rec or both are magnetic (msrc, mrec)
    ab_calc, msrc, mrec = check_ab(ab, verb)
    # Check src and rec
    src, nsrc = check_dipole(src, 'src', verb)
    rec, nrec = check_dipole(rec, 'rec', verb)
    # Get offsets and angles (off, angle)
    off, angle = get_off_ang(src, rec, nsrc, nrec, verb)
    # Get layer number in which src and rec reside (lsrc/lrec)
    lsrc, zsrc = get_layer_nr(src, depth)
    lrec, zrec = get_layer_nr(rec, depth)
    # === 3. EM-FIELD CALCULATION ============
    # Collect variables for fem
    inp = (ab_calc, off, angle, zsrc, zrec, lsrc, lrec, depth, freq, etaH,
           etaV, zetaH, zetaV, xdirect, isfullspace, ht, htarg, use_ne_eval,
           msrc, mrec, loop_freq, loop_off)
    EM, kcount, conv = fem(*inp)
    # In case of QWE/QUAD, print Warning if not converged
    conv_warning(conv, htarg, 'Hankel', verb)
    # Do f->t transform if required
    if signal is not None:
        EM, conv = tem(EM, off, freq, time, signal, ft, ftarg)
        # In case of QWE/QUAD, print Warning if not converged
        conv_warning(conv, ftarg, 'Fourier', verb)
    # Reshape for number of sources
    EM = np.squeeze(EM.reshape((-1, nrec, nsrc), order='F'))
    # === 4. FINISHED ============
    printstartfinish(verb, t0, kcount)
    return EM | r"""Return the electromagnetic field due to a dipole source.
Calculate the electromagnetic frequency- or time-domain field due to
infinitesimal small electric or magnetic dipole source(s), measured by
infinitesimal small electric or magnetic dipole receiver(s); sources and
receivers are directed along the principal directions x, y, or z, and all
sources are at the same depth, as well as all receivers are at the same
depth.
Use the functions ``bipole`` to calculate dipoles with arbitrary angles or
bipoles of finite length and arbitrary angle.
The function ``dipole`` could be replaced by ``bipole`` (all there is to do
is translate ``ab`` into ``msrc``, ``mrec``, ``azimuth``'s and ``dip``'s).
However, ``dipole`` is kept separately to serve as an example of a simple
modelling routine that can serve as a template.
See Also
--------
bipole : Electromagnetic field due to an electromagnetic source.
fem : Electromagnetic frequency-domain response.
tem : Electromagnetic time-domain response.
Parameters
----------
src, rec : list of floats or arrays
Source and receiver coordinates (m): [x, y, z].
The x- and y-coordinates can be arrays, z is a single value.
The x- and y-coordinates must have the same dimension.
Sources or receivers placed on a layer interface are considered in the
upper layer.
depth : list
Absolute layer interfaces z (m); #depth = #res - 1
(excluding +/- infinity).
res : array_like
Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.
Alternatively, res can be a dictionary. See the main manual of empymod
too see how to exploit this hook to re-calculate etaH, etaV, zetaH, and
zetaV, which can be used to, for instance, use the Cole-Cole model for
IP.
freqtime : array_like
Frequencies f (Hz) if ``signal`` == None, else times t (s); (f, t > 0).
signal : {None, 0, 1, -1}, optional
Source signal, default is None:
- None: Frequency-domain response
- -1 : Switch-off time-domain response
- 0 : Impulse time-domain response
- +1 : Switch-on time-domain response
ab : int, optional
Source-receiver configuration, defaults to 11.
+---------------+-------+------+------+------+------+------+------+
| | electric source | magnetic source |
+===============+=======+======+======+======+======+======+======+
| | **x**| **y**| **z**| **x**| **y**| **z**|
+---------------+-------+------+------+------+------+------+------+
| | **x** | 11 | 12 | 13 | 14 | 15 | 16 |
+ **electric** +-------+------+------+------+------+------+------+
| | **y** | 21 | 22 | 23 | 24 | 25 | 26 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 31 | 32 | 33 | 34 | 35 | 36 |
+---------------+-------+------+------+------+------+------+------+
| | **x** | 41 | 42 | 43 | 44 | 45 | 46 |
+ **magnetic** +-------+------+------+------+------+------+------+
| | **y** | 51 | 52 | 53 | 54 | 55 | 56 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 61 | 62 | 63 | 64 | 65 | 66 |
+---------------+-------+------+------+------+------+------+------+
aniso : array_like, optional
Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.
Defaults to ones.
epermH, epermV : array_like, optional
Relative horizontal/vertical electric permittivities
epsilon_h/epsilon_v (-);
#epermH = #epermV = #res. Default is ones.
mpermH, mpermV : array_like, optional
Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-);
#mpermH = #mpermV = #res. Default is ones.
xdirect : bool or None, optional
Direct field calculation (only if src and rec are in the same layer):
- If True, direct field is calculated analytically in the frequency
domain.
- If False, direct field is calculated in the wavenumber domain.
- If None, direct field is excluded from the calculation, and only
reflected fields are returned (secondary field).
Defaults to False.
ht : {'fht', 'qwe', 'quad'}, optional
Flag to choose either the *Digital Linear Filter* method (FHT, *Fast
Hankel Transform*), the *Quadrature-With-Extrapolation* (QWE), or a
simple *Quadrature* (QUAD) for the Hankel transform. Defaults to
'fht'.
htarg : dict or list, optional
Depends on the value for ``ht``:
- If ``ht`` = 'fht': [fhtfilt, pts_per_dec]:
- fhtfilt: string of filter name in ``empymod.filters`` or
the filter method itself.
(default: ``empymod.filters.key_201_2009()``)
- pts_per_dec: points per decade; (default: 0)
- If 0: Standard DLF.
- If < 0: Lagged Convolution DLF.
- If > 0: Splined DLF
- If ``ht`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec,
diff_quad, a, b, limit]:
- rtol: relative tolerance (default: 1e-12)
- atol: absolute tolerance (default: 1e-30)
- nquad: order of Gaussian quadrature (default: 51)
- maxint: maximum number of partial integral intervals
(default: 40)
- pts_per_dec: points per decade; (default: 0)
- If 0, no interpolation is used.
- If > 0, interpolation is used.
- diff_quad: criteria when to swap to QUAD (only relevant if
opt='spline') (default: 100)
- a: lower limit for QUAD (default: first interval from QWE)
- b: upper limit for QUAD (default: last interval from QWE)
- limit: limit for quad (default: maxint)
- If ``ht`` = 'quad': [atol, rtol, limit, lmin, lmax, pts_per_dec]:
- rtol: relative tolerance (default: 1e-12)
- atol: absolute tolerance (default: 1e-20)
- limit: An upper bound on the number of subintervals used in
the adaptive algorithm (default: 500)
- lmin: Minimum wavenumber (default 1e-6)
- lmax: Maximum wavenumber (default 0.1)
- pts_per_dec: points per decade (default: 40)
The values can be provided as dict with the keywords, or as list.
However, if provided as list, you have to follow the order given above.
A few examples, assuming ``ht`` = ``qwe``:
- Only changing rtol:
{'rtol': 1e-4} or [1e-4] or 1e-4
- Changing rtol and nquad:
{'rtol': 1e-4, 'nquad': 101} or [1e-4, '', 101]
- Only changing diff_quad:
{'diffquad': 10} or ['', '', '', '', '', 10]
ft : {'sin', 'cos', 'qwe', 'fftlog', 'fft'}, optional
Only used if ``signal`` != None. Flag to choose either the Digital
Linear Filter method (Sine- or Cosine-Filter), the
Quadrature-With-Extrapolation (QWE), the FFTLog, or the FFT for the
Fourier transform. Defaults to 'sin'.
ftarg : dict or list, optional
Only used if ``signal`` !=None. Depends on the value for ``ft``:
- If ``ft`` = 'sin' or 'cos': [fftfilt, pts_per_dec]:
- fftfilt: string of filter name in ``empymod.filters`` or
the filter method itself.
(Default: ``empymod.filters.key_201_CosSin_2012()``)
- pts_per_dec: points per decade; (default: -1)
- If 0: Standard DLF.
- If < 0: Lagged Convolution DLF.
- If > 0: Splined DLF
- If ``ft`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec]:
- rtol: relative tolerance (default: 1e-8)
- atol: absolute tolerance (default: 1e-20)
- nquad: order of Gaussian quadrature (default: 21)
- maxint: maximum number of partial integral intervals
(default: 200)
- pts_per_dec: points per decade (default: 20)
- diff_quad: criteria when to swap to QUAD (default: 100)
- a: lower limit for QUAD (default: first interval from QWE)
- b: upper limit for QUAD (default: last interval from QWE)
- limit: limit for quad (default: maxint)
- If ``ft`` = 'fftlog': [pts_per_dec, add_dec, q]:
- pts_per_dec: sampels per decade (default: 10)
- add_dec: additional decades [left, right] (default: [-2, 1])
- q: exponent of power law bias (default: 0); -1 <= q <= 1
- If ``ft`` = 'fft': [dfreq, nfreq, ntot]:
- dfreq: Linear step-size of frequencies (default: 0.002)
- nfreq: Number of frequencies (default: 2048)
- ntot: Total number for FFT; difference between nfreq and
ntot is padded with zeroes. This number is ideally a
power of 2, e.g. 2048 or 4096 (default: nfreq).
- pts_per_dec : points per decade (default: None)
Padding can sometimes improve the result, not always. The
default samples from 0.002 Hz - 4.096 Hz. If pts_per_dec is set
to an integer, calculated frequencies are logarithmically
spaced with the given number per decade, and then interpolated
to yield the required frequencies for the FFT.
The values can be provided as dict with the keywords, or as list.
However, if provided as list, you have to follow the order given above.
See ``htarg`` for a few examples.
opt : {None, 'parallel'}, optional
Optimization flag. Defaults to None:
- None: Normal case, no parallelization nor interpolation is used.
- If 'parallel', the package ``numexpr`` is used to evaluate the
most expensive statements. Always check if it actually improves
performance for a specific problem. It can speed up the
calculation for big arrays, but will most likely be slower for
small arrays. It will use all available cores for these specific
statements, which all contain ``Gamma`` in one way or another,
which has dimensions (#frequencies, #offsets, #layers, #lambdas),
therefore can grow pretty big. The module ``numexpr`` uses by
default all available cores up to a maximum of 8. You can change
this behaviour to your desired number of threads ``nthreads``
with ``numexpr.set_num_threads(nthreads)``.
- The value 'spline' is deprecated and will be removed. See
``htarg`` instead for the interpolated versions.
The option 'parallel' only affects speed and memory usage, whereas
'spline' also affects precision! Please read the note in the *README*
documentation for more information.
loop : {None, 'freq', 'off'}, optional
Define if to calculate everything vectorized or if to loop over
frequencies ('freq') or over offsets ('off'), default is None. It
always loops over frequencies if ``ht = 'qwe'`` or if ``opt =
'spline'``. Calculating everything vectorized is fast for few offsets
OR for few frequencies. However, if you calculate many frequencies for
many offsets, it might be faster to loop over frequencies. Only
comparing the different versions will yield the answer for your
specific problem at hand!
verb : {0, 1, 2, 3, 4}, optional
Level of verbosity, default is 2:
- 0: Print nothing.
- 1: Print warnings.
- 2: Print additional runtime and kernel calls
- 3: Print additional start/stop, condensed parameter information.
- 4: Print additional full parameter information
Returns
-------
EM : ndarray, (nfreq, nrec, nsrc)
Frequency- or time-domain EM field (depending on ``signal``):
- If rec is electric, returns E [V/m].
- If rec is magnetic, returns B [T] (not H [A/m]!).
However, source and receiver are normalised. So for instance in the
electric case the source strength is 1 A and its length is 1 m. So the
electric field could also be written as [V/(A.m2)].
The shape of EM is (nfreq, nrec, nsrc). However, single dimensions
are removed.
Examples
--------
>>> import numpy as np
>>> from empymod import dipole
>>> src = [0, 0, 100]
>>> rec = [np.arange(1, 11)*500, np.zeros(10), 200]
>>> depth = [0, 300, 1000, 1050]
>>> res = [1e20, .3, 1, 50, 1]
>>> EMfield = dipole(src, rec, depth, res, freqtime=1, verb=0)
>>> print(EMfield)
[ 1.68809346e-10 -3.08303130e-10j -8.77189179e-12 -3.76920235e-11j
-3.46654704e-12 -4.87133683e-12j -3.60159726e-13 -1.12434417e-12j
1.87807271e-13 -6.21669759e-13j 1.97200208e-13 -4.38210489e-13j
1.44134842e-13 -3.17505260e-13j 9.92770406e-14 -2.33950871e-13j
6.75287598e-14 -1.74922886e-13j 4.62724887e-14 -1.32266600e-13j] | Below is the the instruction that describes the task:
### Input:
r"""Return the electromagnetic field due to a dipole source.
Calculate the electromagnetic frequency- or time-domain field due to
infinitesimal small electric or magnetic dipole source(s), measured by
infinitesimal small electric or magnetic dipole receiver(s); sources and
receivers are directed along the principal directions x, y, or z, and all
sources are at the same depth, as well as all receivers are at the same
depth.
Use the functions ``bipole`` to calculate dipoles with arbitrary angles or
bipoles of finite length and arbitrary angle.
The function ``dipole`` could be replaced by ``bipole`` (all there is to do
is translate ``ab`` into ``msrc``, ``mrec``, ``azimuth``'s and ``dip``'s).
However, ``dipole`` is kept separately to serve as an example of a simple
modelling routine that can serve as a template.
See Also
--------
bipole : Electromagnetic field due to an electromagnetic source.
fem : Electromagnetic frequency-domain response.
tem : Electromagnetic time-domain response.
Parameters
----------
src, rec : list of floats or arrays
Source and receiver coordinates (m): [x, y, z].
The x- and y-coordinates can be arrays, z is a single value.
The x- and y-coordinates must have the same dimension.
Sources or receivers placed on a layer interface are considered in the
upper layer.
depth : list
Absolute layer interfaces z (m); #depth = #res - 1
(excluding +/- infinity).
res : array_like
Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.
Alternatively, res can be a dictionary. See the main manual of empymod
too see how to exploit this hook to re-calculate etaH, etaV, zetaH, and
zetaV, which can be used to, for instance, use the Cole-Cole model for
IP.
freqtime : array_like
Frequencies f (Hz) if ``signal`` == None, else times t (s); (f, t > 0).
signal : {None, 0, 1, -1}, optional
Source signal, default is None:
- None: Frequency-domain response
- -1 : Switch-off time-domain response
- 0 : Impulse time-domain response
- +1 : Switch-on time-domain response
ab : int, optional
Source-receiver configuration, defaults to 11.
+---------------+-------+------+------+------+------+------+------+
| | electric source | magnetic source |
+===============+=======+======+======+======+======+======+======+
| | **x**| **y**| **z**| **x**| **y**| **z**|
+---------------+-------+------+------+------+------+------+------+
| | **x** | 11 | 12 | 13 | 14 | 15 | 16 |
+ **electric** +-------+------+------+------+------+------+------+
| | **y** | 21 | 22 | 23 | 24 | 25 | 26 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 31 | 32 | 33 | 34 | 35 | 36 |
+---------------+-------+------+------+------+------+------+------+
| | **x** | 41 | 42 | 43 | 44 | 45 | 46 |
+ **magnetic** +-------+------+------+------+------+------+------+
| | **y** | 51 | 52 | 53 | 54 | 55 | 56 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 61 | 62 | 63 | 64 | 65 | 66 |
+---------------+-------+------+------+------+------+------+------+
aniso : array_like, optional
Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.
Defaults to ones.
epermH, epermV : array_like, optional
Relative horizontal/vertical electric permittivities
epsilon_h/epsilon_v (-);
#epermH = #epermV = #res. Default is ones.
mpermH, mpermV : array_like, optional
Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-);
#mpermH = #mpermV = #res. Default is ones.
xdirect : bool or None, optional
Direct field calculation (only if src and rec are in the same layer):
- If True, direct field is calculated analytically in the frequency
domain.
- If False, direct field is calculated in the wavenumber domain.
- If None, direct field is excluded from the calculation, and only
reflected fields are returned (secondary field).
Defaults to False.
ht : {'fht', 'qwe', 'quad'}, optional
Flag to choose either the *Digital Linear Filter* method (FHT, *Fast
Hankel Transform*), the *Quadrature-With-Extrapolation* (QWE), or a
simple *Quadrature* (QUAD) for the Hankel transform. Defaults to
'fht'.
htarg : dict or list, optional
Depends on the value for ``ht``:
- If ``ht`` = 'fht': [fhtfilt, pts_per_dec]:
- fhtfilt: string of filter name in ``empymod.filters`` or
the filter method itself.
(default: ``empymod.filters.key_201_2009()``)
- pts_per_dec: points per decade; (default: 0)
- If 0: Standard DLF.
- If < 0: Lagged Convolution DLF.
- If > 0: Splined DLF
- If ``ht`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec,
diff_quad, a, b, limit]:
- rtol: relative tolerance (default: 1e-12)
- atol: absolute tolerance (default: 1e-30)
- nquad: order of Gaussian quadrature (default: 51)
- maxint: maximum number of partial integral intervals
(default: 40)
- pts_per_dec: points per decade; (default: 0)
- If 0, no interpolation is used.
- If > 0, interpolation is used.
- diff_quad: criteria when to swap to QUAD (only relevant if
opt='spline') (default: 100)
- a: lower limit for QUAD (default: first interval from QWE)
- b: upper limit for QUAD (default: last interval from QWE)
- limit: limit for quad (default: maxint)
- If ``ht`` = 'quad': [atol, rtol, limit, lmin, lmax, pts_per_dec]:
- rtol: relative tolerance (default: 1e-12)
- atol: absolute tolerance (default: 1e-20)
- limit: An upper bound on the number of subintervals used in
the adaptive algorithm (default: 500)
- lmin: Minimum wavenumber (default 1e-6)
- lmax: Maximum wavenumber (default 0.1)
- pts_per_dec: points per decade (default: 40)
The values can be provided as dict with the keywords, or as list.
However, if provided as list, you have to follow the order given above.
A few examples, assuming ``ht`` = ``qwe``:
- Only changing rtol:
{'rtol': 1e-4} or [1e-4] or 1e-4
- Changing rtol and nquad:
{'rtol': 1e-4, 'nquad': 101} or [1e-4, '', 101]
- Only changing diff_quad:
{'diffquad': 10} or ['', '', '', '', '', 10]
ft : {'sin', 'cos', 'qwe', 'fftlog', 'fft'}, optional
Only used if ``signal`` != None. Flag to choose either the Digital
Linear Filter method (Sine- or Cosine-Filter), the
Quadrature-With-Extrapolation (QWE), the FFTLog, or the FFT for the
Fourier transform. Defaults to 'sin'.
ftarg : dict or list, optional
Only used if ``signal`` != None. Depends on the value for ``ft``:
- If ``ft`` = 'sin' or 'cos': [fftfilt, pts_per_dec]:
- fftfilt: string of filter name in ``empymod.filters`` or
the filter method itself.
(Default: ``empymod.filters.key_201_CosSin_2012()``)
- pts_per_dec: points per decade; (default: -1)
- If 0: Standard DLF.
- If < 0: Lagged Convolution DLF.
- If > 0: Splined DLF
- If ``ft`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec]:
- rtol: relative tolerance (default: 1e-8)
- atol: absolute tolerance (default: 1e-20)
- nquad: order of Gaussian quadrature (default: 21)
- maxint: maximum number of partial integral intervals
(default: 200)
- pts_per_dec: points per decade (default: 20)
- diff_quad: criteria when to swap to QUAD (default: 100)
- a: lower limit for QUAD (default: first interval from QWE)
- b: upper limit for QUAD (default: last interval from QWE)
- limit: limit for quad (default: maxint)
- If ``ft`` = 'fftlog': [pts_per_dec, add_dec, q]:
- pts_per_dec: samples per decade (default: 10)
- add_dec: additional decades [left, right] (default: [-2, 1])
- q: exponent of power law bias (default: 0); -1 <= q <= 1
- If ``ft`` = 'fft': [dfreq, nfreq, ntot]:
- dfreq: Linear step-size of frequencies (default: 0.002)
- nfreq: Number of frequencies (default: 2048)
- ntot: Total number for FFT; difference between nfreq and
ntot is padded with zeroes. This number is ideally a
power of 2, e.g. 2048 or 4096 (default: nfreq).
- pts_per_dec : points per decade (default: None)
Padding can sometimes improve the result, not always. The
default samples from 0.002 Hz - 4.096 Hz. If pts_per_dec is set
to an integer, calculated frequencies are logarithmically
spaced with the given number per decade, and then interpolated
to yield the required frequencies for the FFT.
The values can be provided as dict with the keywords, or as list.
However, if provided as list, you have to follow the order given above.
See ``htarg`` for a few examples.
opt : {None, 'parallel'}, optional
Optimization flag. Defaults to None:
- None: Normal case, no parallelization nor interpolation is used.
- If 'parallel', the package ``numexpr`` is used to evaluate the
most expensive statements. Always check if it actually improves
performance for a specific problem. It can speed up the
calculation for big arrays, but will most likely be slower for
small arrays. It will use all available cores for these specific
statements, which all contain ``Gamma`` in one way or another,
which has dimensions (#frequencies, #offsets, #layers, #lambdas),
therefore can grow pretty big. The module ``numexpr`` uses by
default all available cores up to a maximum of 8. You can change
this behaviour to your desired number of threads ``nthreads``
with ``numexpr.set_num_threads(nthreads)``.
- The value 'spline' is deprecated and will be removed. See
``htarg`` instead for the interpolated versions.
The option 'parallel' only affects speed and memory usage, whereas
'spline' also affects precision! Please read the note in the *README*
documentation for more information.
loop : {None, 'freq', 'off'}, optional
Define whether to calculate everything vectorized or to loop over
frequencies ('freq') or over offsets ('off'), default is None. It
always loops over frequencies if ``ht = 'qwe'`` or if ``opt =
'spline'``. Calculating everything vectorized is fast for few offsets
OR for few frequencies. However, if you calculate many frequencies for
many offsets, it might be faster to loop over frequencies. Only
comparing the different versions will yield the answer for your
specific problem at hand!
verb : {0, 1, 2, 3, 4}, optional
Level of verbosity, default is 2:
- 0: Print nothing.
- 1: Print warnings.
- 2: Print additional runtime and kernel calls
- 3: Print additional start/stop, condensed parameter information.
- 4: Print additional full parameter information
Returns
-------
EM : ndarray, (nfreq, nrec, nsrc)
Frequency- or time-domain EM field (depending on ``signal``):
- If rec is electric, returns E [V/m].
- If rec is magnetic, returns B [T] (not H [A/m]!).
However, source and receiver are normalised. So for instance in the
electric case the source strength is 1 A and its length is 1 m. So the
electric field could also be written as [V/(A.m2)].
The shape of EM is (nfreq, nrec, nsrc). However, single dimensions
are removed.
Examples
--------
>>> import numpy as np
>>> from empymod import dipole
>>> src = [0, 0, 100]
>>> rec = [np.arange(1, 11)*500, np.zeros(10), 200]
>>> depth = [0, 300, 1000, 1050]
>>> res = [1e20, .3, 1, 50, 1]
>>> EMfield = dipole(src, rec, depth, res, freqtime=1, verb=0)
>>> print(EMfield)
[ 1.68809346e-10 -3.08303130e-10j -8.77189179e-12 -3.76920235e-11j
-3.46654704e-12 -4.87133683e-12j -3.60159726e-13 -1.12434417e-12j
1.87807271e-13 -6.21669759e-13j 1.97200208e-13 -4.38210489e-13j
1.44134842e-13 -3.17505260e-13j 9.92770406e-14 -2.33950871e-13j
6.75287598e-14 -1.74922886e-13j 4.62724887e-14 -1.32266600e-13j]
### Response:
def dipole(src, rec, depth, res, freqtime, signal=None, ab=11, aniso=None,
epermH=None, epermV=None, mpermH=None, mpermV=None, xdirect=False,
ht='fht', htarg=None, ft='sin', ftarg=None, opt=None, loop=None,
verb=2):
r"""Return the electromagnetic field due to a dipole source.
Calculate the electromagnetic frequency- or time-domain field due to
infinitesimal small electric or magnetic dipole source(s), measured by
infinitesimal small electric or magnetic dipole receiver(s); sources and
receivers are directed along the principal directions x, y, or z, and all
sources are at the same depth, as well as all receivers are at the same
depth.
Use the functions ``bipole`` to calculate dipoles with arbitrary angles or
bipoles of finite length and arbitrary angle.
The function ``dipole`` could be replaced by ``bipole`` (all there is to do
is translate ``ab`` into ``msrc``, ``mrec``, ``azimuth``'s and ``dip``'s).
However, ``dipole`` is kept separately to serve as an example of a simple
modelling routine that can serve as a template.
See Also
--------
bipole : Electromagnetic field due to an electromagnetic source.
fem : Electromagnetic frequency-domain response.
tem : Electromagnetic time-domain response.
Parameters
----------
src, rec : list of floats or arrays
Source and receiver coordinates (m): [x, y, z].
The x- and y-coordinates can be arrays, z is a single value.
The x- and y-coordinates must have the same dimension.
Sources or receivers placed on a layer interface are considered in the
upper layer.
depth : list
Absolute layer interfaces z (m); #depth = #res - 1
(excluding +/- infinity).
res : array_like
Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.
Alternatively, res can be a dictionary. See the main manual of empymod
to see how to exploit this hook to re-calculate etaH, etaV, zetaH, and
zetaV, which can be used to, for instance, use the Cole-Cole model for
IP.
freqtime : array_like
Frequencies f (Hz) if ``signal`` == None, else times t (s); (f, t > 0).
signal : {None, 0, 1, -1}, optional
Source signal, default is None:
- None: Frequency-domain response
- -1 : Switch-off time-domain response
- 0 : Impulse time-domain response
- +1 : Switch-on time-domain response
ab : int, optional
Source-receiver configuration, defaults to 11.
+---------------+-------+------+------+------+------+------+------+
| | electric source | magnetic source |
+===============+=======+======+======+======+======+======+======+
| | **x**| **y**| **z**| **x**| **y**| **z**|
+---------------+-------+------+------+------+------+------+------+
| | **x** | 11 | 12 | 13 | 14 | 15 | 16 |
+ **electric** +-------+------+------+------+------+------+------+
| | **y** | 21 | 22 | 23 | 24 | 25 | 26 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 31 | 32 | 33 | 34 | 35 | 36 |
+---------------+-------+------+------+------+------+------+------+
| | **x** | 41 | 42 | 43 | 44 | 45 | 46 |
+ **magnetic** +-------+------+------+------+------+------+------+
| | **y** | 51 | 52 | 53 | 54 | 55 | 56 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 61 | 62 | 63 | 64 | 65 | 66 |
+---------------+-------+------+------+------+------+------+------+
aniso : array_like, optional
Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.
Defaults to ones.
epermH, epermV : array_like, optional
Relative horizontal/vertical electric permittivities
epsilon_h/epsilon_v (-);
#epermH = #epermV = #res. Default is ones.
mpermH, mpermV : array_like, optional
Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-);
#mpermH = #mpermV = #res. Default is ones.
xdirect : bool or None, optional
Direct field calculation (only if src and rec are in the same layer):
- If True, direct field is calculated analytically in the frequency
domain.
- If False, direct field is calculated in the wavenumber domain.
- If None, direct field is excluded from the calculation, and only
reflected fields are returned (secondary field).
Defaults to False.
ht : {'fht', 'qwe', 'quad'}, optional
Flag to choose either the *Digital Linear Filter* method (FHT, *Fast
Hankel Transform*), the *Quadrature-With-Extrapolation* (QWE), or a
simple *Quadrature* (QUAD) for the Hankel transform. Defaults to
'fht'.
htarg : dict or list, optional
Depends on the value for ``ht``:
- If ``ht`` = 'fht': [fhtfilt, pts_per_dec]:
- fhtfilt: string of filter name in ``empymod.filters`` or
the filter method itself.
(default: ``empymod.filters.key_201_2009()``)
- pts_per_dec: points per decade; (default: 0)
- If 0: Standard DLF.
- If < 0: Lagged Convolution DLF.
- If > 0: Splined DLF
- If ``ht`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec,
diff_quad, a, b, limit]:
- rtol: relative tolerance (default: 1e-12)
- atol: absolute tolerance (default: 1e-30)
- nquad: order of Gaussian quadrature (default: 51)
- maxint: maximum number of partial integral intervals
(default: 40)
- pts_per_dec: points per decade; (default: 0)
- If 0, no interpolation is used.
- If > 0, interpolation is used.
- diff_quad: criteria when to swap to QUAD (only relevant if
opt='spline') (default: 100)
- a: lower limit for QUAD (default: first interval from QWE)
- b: upper limit for QUAD (default: last interval from QWE)
- limit: limit for quad (default: maxint)
- If ``ht`` = 'quad': [atol, rtol, limit, lmin, lmax, pts_per_dec]:
- rtol: relative tolerance (default: 1e-12)
- atol: absolute tolerance (default: 1e-20)
- limit: An upper bound on the number of subintervals used in
the adaptive algorithm (default: 500)
- lmin: Minimum wavenumber (default 1e-6)
- lmax: Maximum wavenumber (default 0.1)
- pts_per_dec: points per decade (default: 40)
The values can be provided as dict with the keywords, or as list.
However, if provided as list, you have to follow the order given above.
A few examples, assuming ``ht`` = ``qwe``:
- Only changing rtol:
{'rtol': 1e-4} or [1e-4] or 1e-4
- Changing rtol and nquad:
{'rtol': 1e-4, 'nquad': 101} or [1e-4, '', 101]
- Only changing diff_quad:
{'diffquad': 10} or ['', '', '', '', '', 10]
ft : {'sin', 'cos', 'qwe', 'fftlog', 'fft'}, optional
Only used if ``signal`` != None. Flag to choose either the Digital
Linear Filter method (Sine- or Cosine-Filter), the
Quadrature-With-Extrapolation (QWE), the FFTLog, or the FFT for the
Fourier transform. Defaults to 'sin'.
ftarg : dict or list, optional
Only used if ``signal`` != None. Depends on the value for ``ft``:
- If ``ft`` = 'sin' or 'cos': [fftfilt, pts_per_dec]:
- fftfilt: string of filter name in ``empymod.filters`` or
the filter method itself.
(Default: ``empymod.filters.key_201_CosSin_2012()``)
- pts_per_dec: points per decade; (default: -1)
- If 0: Standard DLF.
- If < 0: Lagged Convolution DLF.
- If > 0: Splined DLF
- If ``ft`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec]:
- rtol: relative tolerance (default: 1e-8)
- atol: absolute tolerance (default: 1e-20)
- nquad: order of Gaussian quadrature (default: 21)
- maxint: maximum number of partial integral intervals
(default: 200)
- pts_per_dec: points per decade (default: 20)
- diff_quad: criteria when to swap to QUAD (default: 100)
- a: lower limit for QUAD (default: first interval from QWE)
- b: upper limit for QUAD (default: last interval from QWE)
- limit: limit for quad (default: maxint)
- If ``ft`` = 'fftlog': [pts_per_dec, add_dec, q]:
- pts_per_dec: samples per decade (default: 10)
- add_dec: additional decades [left, right] (default: [-2, 1])
- q: exponent of power law bias (default: 0); -1 <= q <= 1
- If ``ft`` = 'fft': [dfreq, nfreq, ntot]:
- dfreq: Linear step-size of frequencies (default: 0.002)
- nfreq: Number of frequencies (default: 2048)
- ntot: Total number for FFT; difference between nfreq and
ntot is padded with zeroes. This number is ideally a
power of 2, e.g. 2048 or 4096 (default: nfreq).
- pts_per_dec : points per decade (default: None)
Padding can sometimes improve the result, not always. The
default samples from 0.002 Hz - 4.096 Hz. If pts_per_dec is set
to an integer, calculated frequencies are logarithmically
spaced with the given number per decade, and then interpolated
to yield the required frequencies for the FFT.
The values can be provided as dict with the keywords, or as list.
However, if provided as list, you have to follow the order given above.
See ``htarg`` for a few examples.
opt : {None, 'parallel'}, optional
Optimization flag. Defaults to None:
- None: Normal case, no parallelization nor interpolation is used.
- If 'parallel', the package ``numexpr`` is used to evaluate the
most expensive statements. Always check if it actually improves
performance for a specific problem. It can speed up the
calculation for big arrays, but will most likely be slower for
small arrays. It will use all available cores for these specific
statements, which all contain ``Gamma`` in one way or another,
which has dimensions (#frequencies, #offsets, #layers, #lambdas),
therefore can grow pretty big. The module ``numexpr`` uses by
default all available cores up to a maximum of 8. You can change
this behaviour to your desired number of threads ``nthreads``
with ``numexpr.set_num_threads(nthreads)``.
- The value 'spline' is deprecated and will be removed. See
``htarg`` instead for the interpolated versions.
The option 'parallel' only affects speed and memory usage, whereas
'spline' also affects precision! Please read the note in the *README*
documentation for more information.
loop : {None, 'freq', 'off'}, optional
Define whether to calculate everything vectorized or to loop over
frequencies ('freq') or over offsets ('off'), default is None. It
always loops over frequencies if ``ht = 'qwe'`` or if ``opt =
'spline'``. Calculating everything vectorized is fast for few offsets
OR for few frequencies. However, if you calculate many frequencies for
many offsets, it might be faster to loop over frequencies. Only
comparing the different versions will yield the answer for your
specific problem at hand!
verb : {0, 1, 2, 3, 4}, optional
Level of verbosity, default is 2:
- 0: Print nothing.
- 1: Print warnings.
- 2: Print additional runtime and kernel calls
- 3: Print additional start/stop, condensed parameter information.
- 4: Print additional full parameter information
Returns
-------
EM : ndarray, (nfreq, nrec, nsrc)
Frequency- or time-domain EM field (depending on ``signal``):
- If rec is electric, returns E [V/m].
- If rec is magnetic, returns B [T] (not H [A/m]!).
However, source and receiver are normalised. So for instance in the
electric case the source strength is 1 A and its length is 1 m. So the
electric field could also be written as [V/(A.m2)].
The shape of EM is (nfreq, nrec, nsrc). However, single dimensions
are removed.
Examples
--------
>>> import numpy as np
>>> from empymod import dipole
>>> src = [0, 0, 100]
>>> rec = [np.arange(1, 11)*500, np.zeros(10), 200]
>>> depth = [0, 300, 1000, 1050]
>>> res = [1e20, .3, 1, 50, 1]
>>> EMfield = dipole(src, rec, depth, res, freqtime=1, verb=0)
>>> print(EMfield)
[ 1.68809346e-10 -3.08303130e-10j -8.77189179e-12 -3.76920235e-11j
-3.46654704e-12 -4.87133683e-12j -3.60159726e-13 -1.12434417e-12j
1.87807271e-13 -6.21669759e-13j 1.97200208e-13 -4.38210489e-13j
1.44134842e-13 -3.17505260e-13j 9.92770406e-14 -2.33950871e-13j
6.75287598e-14 -1.74922886e-13j 4.62724887e-14 -1.32266600e-13j]
"""
# === 1. LET'S START ============
t0 = printstartfinish(verb)
# === 2. CHECK INPUT ============
# Backwards compatibility
htarg, opt = spline_backwards_hankel(ht, htarg, opt)
# Check times and Fourier Transform arguments, get required frequencies
# (freq = freqtime if ``signal=None``)
if signal is not None:
time, freq, ft, ftarg = check_time(freqtime, signal, ft, ftarg, verb)
else:
freq = freqtime
# Check layer parameters
model = check_model(depth, res, aniso, epermH, epermV, mpermH, mpermV,
xdirect, verb)
depth, res, aniso, epermH, epermV, mpermH, mpermV, isfullspace = model
# Check frequency => get etaH, etaV, zetaH, and zetaV
frequency = check_frequency(freq, res, aniso, epermH, epermV, mpermH,
mpermV, verb)
freq, etaH, etaV, zetaH, zetaV = frequency
# Update etaH/etaV and zetaH/zetaV according to user-provided model
if isinstance(res, dict) and 'func_eta' in res:
etaH, etaV = res['func_eta'](res, locals())
if isinstance(res, dict) and 'func_zeta' in res:
zetaH, zetaV = res['func_zeta'](res, locals())
# Check Hankel transform parameters
ht, htarg = check_hankel(ht, htarg, verb)
# Check optimization
use_ne_eval, loop_freq, loop_off = check_opt(opt, loop, ht, htarg, verb)
# Check src-rec configuration
# => Get flags if src or rec or both are magnetic (msrc, mrec)
ab_calc, msrc, mrec = check_ab(ab, verb)
# Check src and rec
src, nsrc = check_dipole(src, 'src', verb)
rec, nrec = check_dipole(rec, 'rec', verb)
# Get offsets and angles (off, angle)
off, angle = get_off_ang(src, rec, nsrc, nrec, verb)
# Get layer number in which src and rec reside (lsrc/lrec)
lsrc, zsrc = get_layer_nr(src, depth)
lrec, zrec = get_layer_nr(rec, depth)
# === 3. EM-FIELD CALCULATION ============
# Collect variables for fem
inp = (ab_calc, off, angle, zsrc, zrec, lsrc, lrec, depth, freq, etaH,
etaV, zetaH, zetaV, xdirect, isfullspace, ht, htarg, use_ne_eval,
msrc, mrec, loop_freq, loop_off)
EM, kcount, conv = fem(*inp)
# In case of QWE/QUAD, print Warning if not converged
conv_warning(conv, htarg, 'Hankel', verb)
# Do f->t transform if required
if signal is not None:
EM, conv = tem(EM, off, freq, time, signal, ft, ftarg)
# In case of QWE/QUAD, print Warning if not converged
conv_warning(conv, ftarg, 'Fourier', verb)
# Reshape for number of sources
EM = np.squeeze(EM.reshape((-1, nrec, nsrc), order='F'))
# === 4. FINISHED ============
printstartfinish(verb, t0, kcount)
return EM |
def shutdown(name, message=None, timeout=5, force_close=True, reboot=False,
in_seconds=False, only_on_pending_reboot=False):
'''
Shutdown the computer
:param str message:
An optional message to display to users. It will also be used as a
comment in the event log entry.
The default value is None.
:param int timeout:
The number of minutes or seconds before a shutdown will occur. Whether
this number represents minutes or seconds depends on the value of
``in_seconds``.
The default value is 5.
:param bool in_seconds:
If this is True, the value of ``timeout`` will be treated as a number
of seconds. If this is False, the value of ``timeout`` will be treated
as a number of minutes.
The default value is False.
:param bool force_close:
If this is True, running applications will be forced to close without
warning. If this is False, running applications will not get the
opportunity to prompt users about unsaved data.
The default value is True.
:param bool reboot:
If this is True, the computer will restart immediately after shutting
down. If False the system flushes all caches to disk and safely powers
down the system.
The default value is False.
:param bool only_on_pending_reboot:
If this is True, the shutdown will only occur if the system reports a
pending reboot. If this is False, the shutdown will always occur.
The default value is False.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if reboot:
action = 'reboot'
else:
action = 'shutdown'
if only_on_pending_reboot and not __salt__['system.get_pending_reboot']():
if __opts__['test']:
ret['comment'] = ('System {0} will be skipped because '
'no reboot is pending').format(action)
else:
ret['comment'] = ('System {0} has been skipped because '
'no reboot was pending').format(action)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Will attempt to schedule a {0}'.format(action)
return ret
ret['result'] = __salt__['system.shutdown'](message=message,
timeout=timeout,
force_close=force_close,
reboot=reboot,
in_seconds=in_seconds,
only_on_pending_reboot=False)
if ret['result']:
ret['changes'] = {'old': 'No reboot or shutdown was scheduled',
'new': 'A {0} has been scheduled'.format(action)}
ret['comment'] = 'Request to {0} was successful'.format(action)
else:
ret['comment'] = 'Request to {0} failed'.format(action)
return ret | Shutdown the computer
:param str message:
An optional message to display to users. It will also be used as a
comment in the event log entry.
The default value is None.
:param int timeout:
The number of minutes or seconds before a shutdown will occur. Whether
this number represents minutes or seconds depends on the value of
``in_seconds``.
The default value is 5.
:param bool in_seconds:
If this is True, the value of ``timeout`` will be treated as a number
of seconds. If this is False, the value of ``timeout`` will be treated
as a number of minutes.
The default value is False.
:param bool force_close:
If this is True, running applications will be forced to close without
warning. If this is False, running applications will not get the
opportunity to prompt users about unsaved data.
The default value is True.
:param bool reboot:
If this is True, the computer will restart immediately after shutting
down. If False the system flushes all caches to disk and safely powers
down the system.
The default value is False.
:param bool only_on_pending_reboot:
If this is True, the shutdown will only occur if the system reports a
pending reboot. If this is False, the shutdown will always occur.
The default value is False. | Below is the the instruction that describes the task:
### Input:
Shutdown the computer
:param str message:
An optional message to display to users. It will also be used as a
comment in the event log entry.
The default value is None.
:param int timeout:
The number of minutes or seconds before a shutdown will occur. Whether
this number represents minutes or seconds depends on the value of
``in_seconds``.
The default value is 5.
:param bool in_seconds:
If this is True, the value of ``timeout`` will be treated as a number
of seconds. If this is False, the value of ``timeout`` will be treated
as a number of minutes.
The default value is False.
:param bool force_close:
If this is True, running applications will be forced to close without
warning. If this is False, running applications will not get the
opportunity to prompt users about unsaved data.
The default value is True.
:param bool reboot:
If this is True, the computer will restart immediately after shutting
down. If False the system flushes all caches to disk and safely powers
down the system.
The default value is False.
:param bool only_on_pending_reboot:
If this is True, the shutdown will only occur if the system reports a
pending reboot. If this is False, the shutdown will always occur.
The default value is False.
### Response:
def shutdown(name, message=None, timeout=5, force_close=True, reboot=False,
in_seconds=False, only_on_pending_reboot=False):
'''
Shutdown the computer
:param str message:
An optional message to display to users. It will also be used as a
comment in the event log entry.
The default value is None.
:param int timeout:
The number of minutes or seconds before a shutdown will occur. Whether
this number represents minutes or seconds depends on the value of
``in_seconds``.
The default value is 5.
:param bool in_seconds:
If this is True, the value of ``timeout`` will be treated as a number
of seconds. If this is False, the value of ``timeout`` will be treated
as a number of minutes.
The default value is False.
:param bool force_close:
If this is True, running applications will be forced to close without
warning. If this is False, running applications will not get the
opportunity to prompt users about unsaved data.
The default value is True.
:param bool reboot:
If this is True, the computer will restart immediately after shutting
down. If False the system flushes all caches to disk and safely powers
down the system.
The default value is False.
:param bool only_on_pending_reboot:
If this is True, the shutdown will only occur if the system reports a
pending reboot. If this is False, the shutdown will always occur.
The default value is False.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if reboot:
action = 'reboot'
else:
action = 'shutdown'
if only_on_pending_reboot and not __salt__['system.get_pending_reboot']():
if __opts__['test']:
ret['comment'] = ('System {0} will be skipped because '
'no reboot is pending').format(action)
else:
ret['comment'] = ('System {0} has been skipped because '
'no reboot was pending').format(action)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Will attempt to schedule a {0}'.format(action)
return ret
ret['result'] = __salt__['system.shutdown'](message=message,
timeout=timeout,
force_close=force_close,
reboot=reboot,
in_seconds=in_seconds,
only_on_pending_reboot=False)
if ret['result']:
ret['changes'] = {'old': 'No reboot or shutdown was scheduled',
'new': 'A {0} has been scheduled'.format(action)}
ret['comment'] = 'Request to {0} was successful'.format(action)
else:
ret['comment'] = 'Request to {0} failed'.format(action)
return ret |
def _parse_s3_file(original_file):
"""
Convert `s3://bucketname/path/to/file.txt` to ('bucketname', 'path/to/file.txt')
"""
bits = original_file.replace('s3://', '').split("/")
bucket = bits[0]
object_key = "/".join(bits[1:])
return bucket, object_key | Convert `s3://bucketname/path/to/file.txt` to ('bucketname', 'path/to/file.txt') | Below is the the instruction that describes the task:
### Input:
Convert `s3://bucketname/path/to/file.txt` to ('bucketname', 'path/to/file.txt')
### Response:
def _parse_s3_file(original_file):
"""
Convert `s3://bucketname/path/to/file.txt` to ('bucketname', 'path/to/file.txt')
"""
bits = original_file.replace('s3://', '').split("/")
bucket = bits[0]
object_key = "/".join(bits[1:])
return bucket, object_key |
def _full_code_scan(self):
"""
Perform a full code scan on the target binary.
"""
# We gotta time this function
start_time = datetime.now()
traced_address = set()
self.functions = set()
self.call_map = networkx.DiGraph()
self.cfg = networkx.DiGraph()
initial_state = self.project.factory.blank_state(mode="fastpath")
initial_options = initial_state.options - {o.TRACK_CONSTRAINTS} - o.refs
initial_options |= {o.SUPER_FASTPATH}
# initial_options.remove(o.COW_STATES)
initial_state.options = initial_options
# Sadly, not all calls to functions are explicitly made by call
# instruction - they could be a jmp or b, or something else. So we
# should record all exits from a single function, and then add
# necessary calling edges in our call map during the post-processing
# phase.
function_exits = defaultdict(set)
widgets = [progressbar.Percentage(),
' ',
progressbar.Bar(marker=progressbar.RotatingMarker()),
' ',
progressbar.Timer(),
' ',
progressbar.ETA()
]
pb = progressbar.ProgressBar(widgets=widgets, maxval=10000 * 100).start()
while True:
next_addr = self._get_next_code_addr(initial_state)
percentage = self._seg_list.occupied_size * 100.0 / (self._valid_memory_region_size)
if percentage > 100.0: percentage = 100.0
pb.update(percentage * 10000)
if next_addr is not None:
l.info("Analyzing %xh, progress %0.04f%%", next_addr, percentage)
else:
l.info('No more addr to analyze. Progress %0.04f%%', percentage)
break
self.call_map.add_node(next_addr)
self._scan_code(traced_address, function_exits, initial_state, next_addr)
pb.finish()
end_time = datetime.now()
l.info("A full code scan takes %d seconds.", (end_time - start_time).seconds) | Perform a full code scan on the target binary. | Below is the the instruction that describes the task:
### Input:
Perform a full code scan on the target binary.
### Response:
def _full_code_scan(self):
"""
Perform a full code scan on the target binary.
"""
# We gotta time this function
start_time = datetime.now()
traced_address = set()
self.functions = set()
self.call_map = networkx.DiGraph()
self.cfg = networkx.DiGraph()
initial_state = self.project.factory.blank_state(mode="fastpath")
initial_options = initial_state.options - {o.TRACK_CONSTRAINTS} - o.refs
initial_options |= {o.SUPER_FASTPATH}
# initial_options.remove(o.COW_STATES)
initial_state.options = initial_options
# Sadly, not all calls to functions are explicitly made by call
# instruction - they could be a jmp or b, or something else. So we
# should record all exits from a single function, and then add
# necessary calling edges in our call map during the post-processing
# phase.
function_exits = defaultdict(set)
widgets = [progressbar.Percentage(),
' ',
progressbar.Bar(marker=progressbar.RotatingMarker()),
' ',
progressbar.Timer(),
' ',
progressbar.ETA()
]
pb = progressbar.ProgressBar(widgets=widgets, maxval=10000 * 100).start()
while True:
next_addr = self._get_next_code_addr(initial_state)
percentage = self._seg_list.occupied_size * 100.0 / (self._valid_memory_region_size)
if percentage > 100.0: percentage = 100.0
pb.update(percentage * 10000)
if next_addr is not None:
l.info("Analyzing %xh, progress %0.04f%%", next_addr, percentage)
else:
l.info('No more addr to analyze. Progress %0.04f%%', percentage)
break
self.call_map.add_node(next_addr)
self._scan_code(traced_address, function_exits, initial_state, next_addr)
pb.finish()
end_time = datetime.now()
l.info("A full code scan takes %d seconds.", (end_time - start_time).seconds) |
def createCategoryFilter(self, retina_name, filter_name, body, ):
"""get filter for classifier
Args:
filter_name, str: A unique name for the filter. (required)
body, FilterTrainingObject: The list of positive and negative (optional) example items. (required)
retina_name, str: The retina name (required)
Returns: CategoryFilter
"""
resourcePath = '/classify/create_category_filter'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['filter_name'] = filter_name
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return category_filter.CategoryFilter(**response.json()) | get filter for classifier
Args:
filter_name, str: A unique name for the filter. (required)
body, FilterTrainingObject: The list of positive and negative (optional) example items. (required)
retina_name, str: The retina name (required)
Returns: CategoryFilter | Below is the instruction that describes the task:
### Input:
get filter for classifier
Args:
filter_name, str: A unique name for the filter. (required)
body, FilterTrainingObject: The list of positive and negative (optional) example items. (required)
retina_name, str: The retina name (required)
Returns: CategoryFilter
### Response:
def createCategoryFilter(self, retina_name, filter_name, body, ):
"""get filter for classifier
Args:
filter_name, str: A unique name for the filter. (required)
body, FilterTrainingObject: The list of positive and negative (optional) example items. (required)
retina_name, str: The retina name (required)
Returns: CategoryFilter
"""
resourcePath = '/classify/create_category_filter'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['filter_name'] = filter_name
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return category_filter.CategoryFilter(**response.json()) |
async def send_job_result(self, job_id: BackendJobId, result: str, text: str = "", grade: float = None, problems: Dict[str, SPResult] = None,
tests: Dict[str, Any] = None, custom: Dict[str, Any] = None, state: str = "", archive: Optional[bytes] = None,
stdout: Optional[str] = None, stderr: Optional[str] = None):
"""
Send the result of a job back to the backend. Must be called *once and only once* for each job
:exception JobNotRunningException: is raised when send_job_result is called more than once for a given job_id
"""
if job_id not in self.__running_job:
raise JobNotRunningException()
del self.__running_job[job_id]
if grade is None:
if result == "success":
grade = 100.0
else:
grade = 0.0
if problems is None:
problems = {}
if custom is None:
custom = {}
if tests is None:
tests = {}
await ZMQUtils.send(self.__backend_socket, AgentJobDone(job_id, (result, text), round(grade, 2), problems, tests, custom, state, archive, stdout, stderr)) | Send the result of a job back to the backend. Must be called *once and only once* for each job
:exception JobNotRunningException: is raised when send_job_result is called more than once for a given job_id | Below is the instruction that describes the task:
### Input:
Send the result of a job back to the backend. Must be called *once and only once* for each job
:exception JobNotRunningException: is raised when send_job_result is called more than once for a given job_id
### Response:
async def send_job_result(self, job_id: BackendJobId, result: str, text: str = "", grade: float = None, problems: Dict[str, SPResult] = None,
tests: Dict[str, Any] = None, custom: Dict[str, Any] = None, state: str = "", archive: Optional[bytes] = None,
stdout: Optional[str] = None, stderr: Optional[str] = None):
"""
Send the result of a job back to the backend. Must be called *once and only once* for each job
:exception JobNotRunningException: is raised when send_job_result is called more than once for a given job_id
"""
if job_id not in self.__running_job:
raise JobNotRunningException()
del self.__running_job[job_id]
if grade is None:
if result == "success":
grade = 100.0
else:
grade = 0.0
if problems is None:
problems = {}
if custom is None:
custom = {}
if tests is None:
tests = {}
await ZMQUtils.send(self.__backend_socket, AgentJobDone(job_id, (result, text), round(grade, 2), problems, tests, custom, state, archive, stdout, stderr)) |
def from_labels(labels):
"""
Creates list of cell features based on label image (1-oo pixel values)
@return: list of cell features in the same order as labels
"""
labels = labels.astype(int)
areas = scipy.ndimage.measurements.sum(labels != 0, labels, list(range(1, numpy.max(labels) + 1)))
existing_labels = [i for (i, a) in enumerate(areas, 1) if a > 0]
existing_areas = [a for a in areas if a > 0]
existing_centers = scipy.ndimage.measurements.center_of_mass(labels != 0, labels, existing_labels)
zipped = zip(existing_labels, existing_centers, existing_areas)
features = [CellFeatures(c, a, i, labels.shape)
for i, c, a in zipped if a != 0]
return features | Creates list of cell features based on label image (1-oo pixel values)
@return: list of cell features in the same order as labels | Below is the instruction that describes the task:
### Input:
Creates list of cell features based on label image (1-oo pixel values)
@return: list of cell features in the same order as labels
### Response:
def from_labels(labels):
"""
Creates list of cell features based on label image (1-oo pixel values)
@return: list of cell features in the same order as labels
"""
labels = labels.astype(int)
areas = scipy.ndimage.measurements.sum(labels != 0, labels, list(range(1, numpy.max(labels) + 1)))
existing_labels = [i for (i, a) in enumerate(areas, 1) if a > 0]
existing_areas = [a for a in areas if a > 0]
existing_centers = scipy.ndimage.measurements.center_of_mass(labels != 0, labels, existing_labels)
zipped = zip(existing_labels, existing_centers, existing_areas)
features = [CellFeatures(c, a, i, labels.shape)
for i, c, a in zipped if a != 0]
return features |
def get_call_function_name(frame):
"""If f_back is looking at a call function, return
the name for it. Otherwise return None"""
f_back = frame.f_back
if not f_back: return None
if 'CALL_FUNCTION' != Mbytecode.op_at_frame(f_back): return None
co = f_back.f_code
code = co.co_code
# labels = dis.findlabels(code)
linestarts = dict(dis.findlinestarts(co))
offset = f_back.f_lasti
while offset >= 0:
if offset in linestarts:
op = code[offset]
offset += 1
arg = code[offset]
# FIXME: put this code in xdis
extended_arg = 0
while True:
if PYTHON_VERSION >= 3.6:
if op == opc.EXTENDED_ARG:
extended_arg += (arg << 8)
continue
arg = code[offset] + extended_arg
# FIXME: Python 3.6.0a1 is 2, for 3.6.a3 we have 1
else:
if op == opc.EXTENDED_ARG:
extended_arg += (arg << 256)
continue
arg = code[offset] + code[offset+1]*256 + extended_arg
break
return co.co_names[arg]
offset -= 1
pass
return None | If f_back is looking at a call function, return
the name for it. Otherwise return None | Below is the instruction that describes the task:
### Input:
If f_back is looking at a call function, return
the name for it. Otherwise return None
### Response:
def get_call_function_name(frame):
"""If f_back is looking at a call function, return
the name for it. Otherwise return None"""
f_back = frame.f_back
if not f_back: return None
if 'CALL_FUNCTION' != Mbytecode.op_at_frame(f_back): return None
co = f_back.f_code
code = co.co_code
# labels = dis.findlabels(code)
linestarts = dict(dis.findlinestarts(co))
offset = f_back.f_lasti
while offset >= 0:
if offset in linestarts:
op = code[offset]
offset += 1
arg = code[offset]
# FIXME: put this code in xdis
extended_arg = 0
while True:
if PYTHON_VERSION >= 3.6:
if op == opc.EXTENDED_ARG:
extended_arg += (arg << 8)
continue
arg = code[offset] + extended_arg
# FIXME: Python 3.6.0a1 is 2, for 3.6.a3 we have 1
else:
if op == opc.EXTENDED_ARG:
extended_arg += (arg << 256)
continue
arg = code[offset] + code[offset+1]*256 + extended_arg
break
return co.co_names[arg]
offset -= 1
pass
return None |
def _vertex_list_to_dataframe(ls, id_column_name):
"""
Convert a list of vertices into dataframe.
"""
assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.'
cols = reduce(set.union, (set(v.attr.keys()) for v in ls))
df = pd.DataFrame({id_column_name: [v.vid for v in ls]})
for c in cols:
df[c] = [v.attr.get(c) for v in ls]
return df | Convert a list of vertices into dataframe. | Below is the instruction that describes the task:
### Input:
Convert a list of vertices into dataframe.
### Response:
def _vertex_list_to_dataframe(ls, id_column_name):
"""
Convert a list of vertices into dataframe.
"""
assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.'
cols = reduce(set.union, (set(v.attr.keys()) for v in ls))
df = pd.DataFrame({id_column_name: [v.vid for v in ls]})
for c in cols:
df[c] = [v.attr.get(c) for v in ls]
return df |
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names]) | Parse a single file. | Below is the instruction that describes the task:
### Input:
Parse a single file.
### Response:
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names]) |
def fap_davies(Z, fmax, t, y, dy, normalization='standard'):
"""Davies upper-bound to the false alarm probability
(Eqn 5 of Baluev 2008)
"""
N = len(t)
fap_s = fap_single(Z, N, normalization=normalization)
tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
return fap_s + tau | Davies upper-bound to the false alarm probability
(Eqn 5 of Baluev 2008) | Below is the instruction that describes the task:
### Input:
Davies upper-bound to the false alarm probability
(Eqn 5 of Baluev 2008)
### Response:
def fap_davies(Z, fmax, t, y, dy, normalization='standard'):
"""Davies upper-bound to the false alarm probability
(Eqn 5 of Baluev 2008)
"""
N = len(t)
fap_s = fap_single(Z, N, normalization=normalization)
tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
return fap_s + tau |
def load_gene(ensembl, gene_id, de_novos=[]):
""" sort out all the necessary sequences and positions for a gene
Args:
ensembl: EnsemblRequest object to request data from ensembl
gene_id: HGNC symbol for gene
de_novos: list of de novo positions, so we can check they all fit in
the gene transcript
Returns:
list of Transcript objects for gene, including genomic ranges and sequences
"""
transcripts = minimise_transcripts(ensembl, gene_id, de_novos)
genes = []
for transcript_id in transcripts:
gene = construct_gene_object(ensembl, transcript_id)
genes.append(gene)
if len(genes) == 0:
raise IndexError("{0}: no suitable transcripts".format(gene_id))
return genes | sort out all the necessary sequences and positions for a gene
Args:
ensembl: EnsemblRequest object to request data from ensembl
gene_id: HGNC symbol for gene
de_novos: list of de novo positions, so we can check they all fit in
the gene transcript
Returns:
list of Transcript objects for gene, including genomic ranges and sequences | Below is the instruction that describes the task:
### Input:
sort out all the necessary sequences and positions for a gene
Args:
ensembl: EnsemblRequest object to request data from ensembl
gene_id: HGNC symbol for gene
de_novos: list of de novo positions, so we can check they all fit in
the gene transcript
Returns:
list of Transcript objects for gene, including genomic ranges and sequences
### Response:
def load_gene(ensembl, gene_id, de_novos=[]):
""" sort out all the necessary sequences and positions for a gene
Args:
ensembl: EnsemblRequest object to request data from ensembl
gene_id: HGNC symbol for gene
de_novos: list of de novo positions, so we can check they all fit in
the gene transcript
Returns:
list of Transcript objects for gene, including genomic ranges and sequences
"""
transcripts = minimise_transcripts(ensembl, gene_id, de_novos)
genes = []
for transcript_id in transcripts:
gene = construct_gene_object(ensembl, transcript_id)
genes.append(gene)
if len(genes) == 0:
raise IndexError("{0}: no suitable transcripts".format(gene_id))
return genes |
def t_IDENTIFER(self, t):
r'\#?[a-zA-Z_][a-zA-Z_0-9]*'
t.type = SpecParser.reserved.get(t.value, 'IDENTIFIER')
return t | r'\#?[a-zA-Z_][a-zA-Z_0-9]* | Below is the instruction that describes the task:
### Input:
r'\#?[a-zA-Z_][a-zA-Z_0-9]*
### Response:
def t_IDENTIFER(self, t):
r'\#?[a-zA-Z_][a-zA-Z_0-9]*'
t.type = SpecParser.reserved.get(t.value, 'IDENTIFIER')
return t |
async def spawn_slaves(self, slave_addrs, slave_env_cls, slave_mgr_cls,
slave_kwargs=None):
"""Spawn slave environments.
:param slave_addrs:
List of (HOST, PORT) addresses for the slave-environments.
:param slave_env_cls: Class for the slave environments.
:param slave_kwargs:
If not None, must be a list of the same size as *addrs*. Each item
in the list containing parameter values for one slave environment.
:param slave_mgr_cls:
Class of the slave environment managers.
"""
pool, r = spawn_containers(slave_addrs, env_cls=slave_env_cls,
env_params=slave_kwargs,
mgr_cls=slave_mgr_cls)
self._pool = pool
self._r = r
self._manager_addrs = ["{}{}".format(_get_base_url(a), 0) for
a in slave_addrs] | Spawn slave environments.
:param slave_addrs:
List of (HOST, PORT) addresses for the slave-environments.
:param slave_env_cls: Class for the slave environments.
:param slave_kwargs:
If not None, must be a list of the same size as *addrs*. Each item
in the list containing parameter values for one slave environment.
:param slave_mgr_cls:
Class of the slave environment managers. | Below is the instruction that describes the task:
### Input:
Spawn slave environments.
:param slave_addrs:
List of (HOST, PORT) addresses for the slave-environments.
:param slave_env_cls: Class for the slave environments.
:param slave_kwargs:
If not None, must be a list of the same size as *addrs*. Each item
in the list containing parameter values for one slave environment.
:param slave_mgr_cls:
Class of the slave environment managers.
### Response:
async def spawn_slaves(self, slave_addrs, slave_env_cls, slave_mgr_cls,
slave_kwargs=None):
"""Spawn slave environments.
:param slave_addrs:
List of (HOST, PORT) addresses for the slave-environments.
:param slave_env_cls: Class for the slave environments.
:param slave_kwargs:
If not None, must be a list of the same size as *addrs*. Each item
in the list containing parameter values for one slave environment.
:param slave_mgr_cls:
Class of the slave environment managers.
"""
pool, r = spawn_containers(slave_addrs, env_cls=slave_env_cls,
env_params=slave_kwargs,
mgr_cls=slave_mgr_cls)
self._pool = pool
self._r = r
self._manager_addrs = ["{}{}".format(_get_base_url(a), 0) for
a in slave_addrs] |
def get_task_instance(dag_id, task_id, execution_date):
"""Return the task object identified by the given dag_id and task_id."""
dagbag = DagBag()
# Check DAG exists.
if dag_id not in dagbag.dags:
error_message = "Dag id {} not found".format(dag_id)
raise DagNotFound(error_message)
# Get DAG object and check Task Exists
dag = dagbag.get_dag(dag_id)
if not dag.has_task(task_id):
error_message = 'Task {} not found in dag {}'.format(task_id, dag_id)
raise TaskNotFound(error_message)
# Get DagRun object and check that it exists
dagrun = dag.get_dagrun(execution_date=execution_date)
if not dagrun:
error_message = ('Dag Run for date {} not found in dag {}'
.format(execution_date, dag_id))
raise DagRunNotFound(error_message)
# Get task instance object and check that it exists
task_instance = dagrun.get_task_instance(task_id)
if not task_instance:
error_message = ('Task {} instance for date {} not found'
.format(task_id, execution_date))
raise TaskInstanceNotFound(error_message)
return task_instance | Return the task object identified by the given dag_id and task_id. | Below is the instruction that describes the task:
### Input:
Return the task object identified by the given dag_id and task_id.
### Response:
def get_task_instance(dag_id, task_id, execution_date):
"""Return the task object identified by the given dag_id and task_id."""
dagbag = DagBag()
# Check DAG exists.
if dag_id not in dagbag.dags:
error_message = "Dag id {} not found".format(dag_id)
raise DagNotFound(error_message)
# Get DAG object and check Task Exists
dag = dagbag.get_dag(dag_id)
if not dag.has_task(task_id):
error_message = 'Task {} not found in dag {}'.format(task_id, dag_id)
raise TaskNotFound(error_message)
# Get DagRun object and check that it exists
dagrun = dag.get_dagrun(execution_date=execution_date)
if not dagrun:
error_message = ('Dag Run for date {} not found in dag {}'
.format(execution_date, dag_id))
raise DagRunNotFound(error_message)
# Get task instance object and check that it exists
task_instance = dagrun.get_task_instance(task_id)
if not task_instance:
error_message = ('Task {} instance for date {} not found'
.format(task_id, execution_date))
raise TaskInstanceNotFound(error_message)
return task_instance |
def exists(Name, region=None, key=None, keyid=None, profile=None):
'''
Given a rule name, check to see if the given rule exists.
Returns True if the given rule exists and returns False if the given
rule does not exist.
CLI example::
salt myminion boto_cloudwatch_event.exists myevent region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
events = conn.list_rules(NamePrefix=Name)
if not events:
return {'exists': False}
for rule in events.get('Rules', []):
if rule.get('Name', None) == Name:
return {'exists': True}
return {'exists': False}
except ClientError as e:
err = __utils__['boto3.get_error'](e)
return {'error': err} | Given a rule name, check to see if the given rule exists.
Returns True if the given rule exists and returns False if the given
rule does not exist.
CLI example::
salt myminion boto_cloudwatch_event.exists myevent region=us-east-1 | Below is the instruction that describes the task:
### Input:
Given a rule name, check to see if the given rule exists.
Returns True if the given rule exists and returns False if the given
rule does not exist.
CLI example::
salt myminion boto_cloudwatch_event.exists myevent region=us-east-1
### Response:
def exists(Name, region=None, key=None, keyid=None, profile=None):
'''
Given a rule name, check to see if the given rule exists.
Returns True if the given rule exists and returns False if the given
rule does not exist.
CLI example::
salt myminion boto_cloudwatch_event.exists myevent region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
events = conn.list_rules(NamePrefix=Name)
if not events:
return {'exists': False}
for rule in events.get('Rules', []):
if rule.get('Name', None) == Name:
return {'exists': True}
return {'exists': False}
except ClientError as e:
err = __utils__['boto3.get_error'](e)
return {'error': err} |
def create(self, stage_url, tileset, name=None, patch=False, bypass=False):
"""Create a tileset
Note: this step is refered to as "upload" in the API docs;
This class's upload() method is a high-level function
which acts like the Studio upload form.
Returns a response object where the json() contents are
an upload dict. Completion of the tileset may take several
seconds or minutes depending on size of the data. The status()
method of this class may be used to poll the API endpoint for
tileset creation status.
Parameters
----------
stage_url: str
URL to resource on S3, typically provided in the response
of this class's stage() method.
tileset: str
The id of the tileset set to be created. Username will be
prefixed if not present. For example, 'my-tileset' becomes
'{username}.my-tileset'.
name: str
A short name for the tileset that will appear in Mapbox
studio.
patch: bool
Optional patch mode which requires a flag on the owner's
account.
bypass: bool
Optional bypass validation mode for MBTiles which requires
a flag on the owner's account.
Returns
-------
requests.Response
"""
tileset = self._validate_tileset(tileset)
username, _name = tileset.split(".")
msg = {'tileset': tileset,
'url': stage_url}
if patch:
msg['patch'] = patch
if bypass:
msg['bypass_mbtiles_validation'] = bypass
msg['name'] = name if name else _name
uri = URITemplate(self.baseuri + '/{username}').expand(
username=username)
resp = self.session.post(uri, json=msg)
self.handle_http_error(resp)
return resp | Create a tileset
Note: this step is refered to as "upload" in the API docs;
This class's upload() method is a high-level function
which acts like the Studio upload form.
Returns a response object where the json() contents are
an upload dict. Completion of the tileset may take several
seconds or minutes depending on size of the data. The status()
method of this class may be used to poll the API endpoint for
tileset creation status.
Parameters
----------
stage_url: str
URL to resource on S3, typically provided in the response
of this class's stage() method.
tileset: str
The id of the tileset set to be created. Username will be
prefixed if not present. For example, 'my-tileset' becomes
'{username}.my-tileset'.
name: str
A short name for the tileset that will appear in Mapbox
studio.
patch: bool
Optional patch mode which requires a flag on the owner's
account.
bypass: bool
Optional bypass validation mode for MBTiles which requires
a flag on the owner's account.
Returns
-------
requests.Response | Below is the instruction that describes the task:
### Input:
Create a tileset
Note: this step is refered to as "upload" in the API docs;
This class's upload() method is a high-level function
which acts like the Studio upload form.
Returns a response object where the json() contents are
an upload dict. Completion of the tileset may take several
seconds or minutes depending on size of the data. The status()
method of this class may be used to poll the API endpoint for
tileset creation status.
Parameters
----------
stage_url: str
URL to resource on S3, typically provided in the response
of this class's stage() method.
tileset: str
The id of the tileset set to be created. Username will be
prefixed if not present. For example, 'my-tileset' becomes
'{username}.my-tileset'.
name: str
A short name for the tileset that will appear in Mapbox
studio.
patch: bool
Optional patch mode which requires a flag on the owner's
account.
bypass: bool
Optional bypass validation mode for MBTiles which requires
a flag on the owner's account.
Returns
-------
requests.Response
### Response:
def create(self, stage_url, tileset, name=None, patch=False, bypass=False):
"""Create a tileset
Note: this step is refered to as "upload" in the API docs;
This class's upload() method is a high-level function
which acts like the Studio upload form.
Returns a response object where the json() contents are
an upload dict. Completion of the tileset may take several
seconds or minutes depending on size of the data. The status()
method of this class may be used to poll the API endpoint for
tileset creation status.
Parameters
----------
stage_url: str
URL to resource on S3, typically provided in the response
of this class's stage() method.
tileset: str
The id of the tileset set to be created. Username will be
prefixed if not present. For example, 'my-tileset' becomes
'{username}.my-tileset'.
name: str
A short name for the tileset that will appear in Mapbox
studio.
patch: bool
Optional patch mode which requires a flag on the owner's
account.
bypass: bool
Optional bypass validation mode for MBTiles which requires
a flag on the owner's account.
Returns
-------
requests.Response
"""
tileset = self._validate_tileset(tileset)
username, _name = tileset.split(".")
msg = {'tileset': tileset,
'url': stage_url}
if patch:
msg['patch'] = patch
if bypass:
msg['bypass_mbtiles_validation'] = bypass
msg['name'] = name if name else _name
uri = URITemplate(self.baseuri + '/{username}').expand(
username=username)
resp = self.session.post(uri, json=msg)
self.handle_http_error(resp)
return resp |
async def open(self) -> 'HolderProver':
"""
Explicit entry. Perform ancestor opening operations,
then parse cache from archive if so configured, and
synchronize revocation registry to tails tree content.
:return: current object
"""
LOGGER.debug('Verifier.open >>>')
await super().open()
if self.cfg.get('parse-cache-on-open', False):
Caches.parse(self.dir_cache)
LOGGER.debug('Verifier.open <<<')
return self | Explicit entry. Perform ancestor opening operations,
then parse cache from archive if so configured, and
synchronize revocation registry to tails tree content.
:return: current object | Below is the instruction that describes the task:
### Input:
Explicit entry. Perform ancestor opening operations,
then parse cache from archive if so configured, and
synchronize revocation registry to tails tree content.
:return: current object
### Response:
async def open(self) -> 'HolderProver':
"""
Explicit entry. Perform ancestor opening operations,
then parse cache from archive if so configured, and
synchronize revocation registry to tails tree content.
:return: current object
"""
LOGGER.debug('Verifier.open >>>')
await super().open()
if self.cfg.get('parse-cache-on-open', False):
Caches.parse(self.dir_cache)
LOGGER.debug('Verifier.open <<<')
return self |
def rpc_put_zonefiles( self, zonefile_datas, **con_info ):
"""
Replicate one or more zonefiles, given as serialized strings.
Only stores zone files whose zone file hashes were announced on the blockchain (i.e. not subdomain zone files)
Returns {'status': True, 'saved': [0|1]'} on success ('saved' is a vector of success/failure)
Returns {'error': ...} on error
Takes at most 5 zonefiles
"""
conf = get_blockstack_opts()
if not is_atlas_enabled(conf):
return {'error': 'No data', 'http_status': 400}
if 'zonefiles' not in conf:
return {'error': 'No zonefiles directory (likely a configuration error)', 'http_status': 400}
if type(zonefile_datas) != list:
return {'error': 'Invalid data', 'http_status': 400}
if len(zonefile_datas) > 5:
return {'error': 'Too many zonefiles', 'http_status': 400}
for zfd in zonefile_datas:
if not check_string(zfd, max_length=((4 * RPC_MAX_ZONEFILE_LEN) / 3) + 3, pattern=OP_BASE64_EMPTY_PATTERN):
return {'error': 'Invalid zone file payload (exceeds {} bytes and/or not base64-encoded)'.format(RPC_MAX_ZONEFILE_LEN)}
zonefile_dir = conf.get("zonefiles", None)
saved = []
for zonefile_data in zonefile_datas:
# decode
try:
zonefile_data = base64.b64decode( zonefile_data )
except:
log.debug("Invalid base64 zonefile")
saved.append(0)
continue
if len(zonefile_data) > RPC_MAX_ZONEFILE_LEN:
log.debug("Zonefile too long")
saved.append(0)
continue
# is this zone file already discovered?
zonefile_hash = get_zonefile_data_hash(str(zonefile_data))
zfinfos = atlasdb_get_zonefiles_by_hash(zonefile_hash, path=conf['atlasdb_path'])
if not zfinfos:
# nope
log.debug("Unknown zonefile hash {}".format(zonefile_hash))
saved.append(0)
continue
# keep this zone file
rc = store_atlas_zonefile_data( str(zonefile_data), zonefile_dir )
if not rc:
log.error("Failed to store zonefile {}".format(zonefile_hash))
saved.append(0)
continue
# mark this zone file as present, so we don't ask anyone else for it
was_present = atlasdb_set_zonefile_present(zonefile_hash, True, path=conf['atlasdb_path'])
if was_present:
# we already got this zone file
# only process it if it's outside our recovery range
recovery_start, recovery_end = get_recovery_range(self.working_dir)
current_block = virtualchain_hooks.get_last_block(self.working_dir)
if recovery_start is not None and recovery_end is not None and recovery_end < current_block:
# no need to process
log.debug("Already have zonefile {}".format(zonefile_hash))
saved.append(1)
continue
if self.subdomain_index:
# got new zonefile
# let the subdomain indexer know, along with giving it the minimum block height
min_block_height = min([zfi['block_height'] for zfi in zfinfos])
log.debug("Enqueue {} from {} for subdomain processing".format(zonefile_hash, min_block_height))
self.subdomain_index.enqueue_zonefile(zonefile_hash, min_block_height)
log.debug("Stored new zonefile {}".format(zonefile_hash))
saved.append(1)
log.debug("Saved {} zonefile(s)".format(sum(saved)))
log.debug("Reply: {}".format({'saved': saved}))
return self.success_response( {'saved': saved} ) | Replicate one or more zonefiles, given as serialized strings.
Only stores zone files whose zone file hashes were announced on the blockchain (i.e. not subdomain zone files)
Returns {'status': True, 'saved': [0|1]'} on success ('saved' is a vector of success/failure)
Returns {'error': ...} on error
Takes at most 5 zonefiles | Below is the instruction that describes the task:
### Input:
Replicate one or more zonefiles, given as serialized strings.
Only stores zone files whose zone file hashes were announced on the blockchain (i.e. not subdomain zone files)
Returns {'status': True, 'saved': [0|1]'} on success ('saved' is a vector of success/failure)
Returns {'error': ...} on error
Takes at most 5 zonefiles
### Response:
def rpc_put_zonefiles( self, zonefile_datas, **con_info ):
"""
Replicate one or more zonefiles, given as serialized strings.
Only stores zone files whose zone file hashes were announced on the blockchain (i.e. not subdomain zone files)
Returns {'status': True, 'saved': [0|1]'} on success ('saved' is a vector of success/failure)
Returns {'error': ...} on error
Takes at most 5 zonefiles
"""
conf = get_blockstack_opts()
if not is_atlas_enabled(conf):
return {'error': 'No data', 'http_status': 400}
if 'zonefiles' not in conf:
return {'error': 'No zonefiles directory (likely a configuration error)', 'http_status': 400}
if type(zonefile_datas) != list:
return {'error': 'Invalid data', 'http_status': 400}
if len(zonefile_datas) > 5:
return {'error': 'Too many zonefiles', 'http_status': 400}
for zfd in zonefile_datas:
if not check_string(zfd, max_length=((4 * RPC_MAX_ZONEFILE_LEN) / 3) + 3, pattern=OP_BASE64_EMPTY_PATTERN):
return {'error': 'Invalid zone file payload (exceeds {} bytes and/or not base64-encoded)'.format(RPC_MAX_ZONEFILE_LEN)}
zonefile_dir = conf.get("zonefiles", None)
saved = []
for zonefile_data in zonefile_datas:
# decode
try:
zonefile_data = base64.b64decode( zonefile_data )
except:
log.debug("Invalid base64 zonefile")
saved.append(0)
continue
if len(zonefile_data) > RPC_MAX_ZONEFILE_LEN:
log.debug("Zonefile too long")
saved.append(0)
continue
# is this zone file already discovered?
zonefile_hash = get_zonefile_data_hash(str(zonefile_data))
zfinfos = atlasdb_get_zonefiles_by_hash(zonefile_hash, path=conf['atlasdb_path'])
if not zfinfos:
# nope
log.debug("Unknown zonefile hash {}".format(zonefile_hash))
saved.append(0)
continue
# keep this zone file
rc = store_atlas_zonefile_data( str(zonefile_data), zonefile_dir )
if not rc:
log.error("Failed to store zonefile {}".format(zonefile_hash))
saved.append(0)
continue
# mark this zone file as present, so we don't ask anyone else for it
was_present = atlasdb_set_zonefile_present(zonefile_hash, True, path=conf['atlasdb_path'])
if was_present:
# we already got this zone file
# only process it if it's outside our recovery range
recovery_start, recovery_end = get_recovery_range(self.working_dir)
current_block = virtualchain_hooks.get_last_block(self.working_dir)
if recovery_start is not None and recovery_end is not None and recovery_end < current_block:
# no need to process
log.debug("Already have zonefile {}".format(zonefile_hash))
saved.append(1)
continue
if self.subdomain_index:
# got new zonefile
# let the subdomain indexer know, along with giving it the minimum block height
min_block_height = min([zfi['block_height'] for zfi in zfinfos])
log.debug("Enqueue {} from {} for subdomain processing".format(zonefile_hash, min_block_height))
self.subdomain_index.enqueue_zonefile(zonefile_hash, min_block_height)
log.debug("Stored new zonefile {}".format(zonefile_hash))
saved.append(1)
log.debug("Saved {} zonefile(s)".format(sum(saved)))
log.debug("Reply: {}".format({'saved': saved}))
return self.success_response( {'saved': saved} ) |
def parse(f, encoding='utf-8'):
"""
Parse the TDL file *f* and yield the interpreted contents.
If *f* is a filename, the file is opened and closed when the
generator has finished, otherwise *f* is an open file object and
will not be closed when the generator has finished.
Args:
f (str, file): a filename or open file object
encoding (str): the encoding of the file (default: `"utf-8"`;
ignored if *f* is an open file)
"""
if hasattr(f, 'read'):
for event in _parse(f):
yield event
else:
with io.open(f, encoding=encoding) as fh:
for event in _parse(fh):
yield event | Parse the TDL file *f* and yield the interpreted contents.
If *f* is a filename, the file is opened and closed when the
generator has finished, otherwise *f* is an open file object and
will not be closed when the generator has finished.
Args:
f (str, file): a filename or open file object
encoding (str): the encoding of the file (default: `"utf-8"`;
ignored if *f* is an open file) | Below is the the instruction that describes the task:
### Input:
Parse the TDL file *f* and yield the interpreted contents.
If *f* is a filename, the file is opened and closed when the
generator has finished, otherwise *f* is an open file object and
will not be closed when the generator has finished.
Args:
f (str, file): a filename or open file object
encoding (str): the encoding of the file (default: `"utf-8"`;
ignored if *f* is an open file)
### Response:
def parse(f, encoding='utf-8'):
"""
Parse the TDL file *f* and yield the interpreted contents.
If *f* is a filename, the file is opened and closed when the
generator has finished, otherwise *f* is an open file object and
will not be closed when the generator has finished.
Args:
f (str, file): a filename or open file object
encoding (str): the encoding of the file (default: `"utf-8"`;
ignored if *f* is an open file)
"""
if hasattr(f, 'read'):
for event in _parse(f):
yield event
else:
with io.open(f, encoding=encoding) as fh:
for event in _parse(fh):
yield event |
def get_objective_bank_lookup_session(self, proxy):
"""Gets the OsidSession associated with the objective bank lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ObjectiveBankLookupSession) - an
``ObjectiveBankLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_objective_bank_lookup() is
false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_lookup()`` is true.*
"""
if not self.supports_objective_bank_lookup():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ObjectiveBankLookupSession(proxy=proxy, runtime=self._runtime) | Gets the OsidSession associated with the objective bank lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ObjectiveBankLookupSession) - an
``ObjectiveBankLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_objective_bank_lookup() is
false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_lookup()`` is true.* | Below is the the instruction that describes the task:
### Input:
Gets the OsidSession associated with the objective bank lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ObjectiveBankLookupSession) - an
``ObjectiveBankLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_objective_bank_lookup() is
false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_lookup()`` is true.*
### Response:
def get_objective_bank_lookup_session(self, proxy):
"""Gets the OsidSession associated with the objective bank lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ObjectiveBankLookupSession) - an
``ObjectiveBankLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_objective_bank_lookup() is
false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_lookup()`` is true.*
"""
if not self.supports_objective_bank_lookup():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ObjectiveBankLookupSession(proxy=proxy, runtime=self._runtime) |
def backtrace_on_usr1 ():
"""Install a signal handler such that this program prints a Python traceback
upon receipt of SIGUSR1. This could be useful for checking that
long-running programs are behaving properly, or for discovering where an
infinite loop is occurring.
Note, however, that the Python interpreter does not invoke Python signal
handlers exactly when the process is signaled. For instance, a signal
delivered in the midst of a time.sleep() call will only be seen by Python
code after that call completes. This means that this feature may not be as
helpful as one might like for debugging certain kinds of problems.
"""
import signal
try:
signal.signal (signal.SIGUSR1, _print_backtrace_signal_handler)
except Exception as e:
warn ('failed to set up Python backtraces on SIGUSR1: %s', e) | Install a signal handler such that this program prints a Python traceback
upon receipt of SIGUSR1. This could be useful for checking that
long-running programs are behaving properly, or for discovering where an
infinite loop is occurring.
Note, however, that the Python interpreter does not invoke Python signal
handlers exactly when the process is signaled. For instance, a signal
delivered in the midst of a time.sleep() call will only be seen by Python
code after that call completes. This means that this feature may not be as
helpful as one might like for debugging certain kinds of problems. | Below is the the instruction that describes the task:
### Input:
Install a signal handler such that this program prints a Python traceback
upon receipt of SIGUSR1. This could be useful for checking that
long-running programs are behaving properly, or for discovering where an
infinite loop is occurring.
Note, however, that the Python interpreter does not invoke Python signal
handlers exactly when the process is signaled. For instance, a signal
delivered in the midst of a time.sleep() call will only be seen by Python
code after that call completes. This means that this feature may not be as
helpful as one might like for debugging certain kinds of problems.
### Response:
def backtrace_on_usr1 ():
"""Install a signal handler such that this program prints a Python traceback
upon receipt of SIGUSR1. This could be useful for checking that
long-running programs are behaving properly, or for discovering where an
infinite loop is occurring.
Note, however, that the Python interpreter does not invoke Python signal
handlers exactly when the process is signaled. For instance, a signal
delivered in the midst of a time.sleep() call will only be seen by Python
code after that call completes. This means that this feature may not be as
helpful as one might like for debugging certain kinds of problems.
"""
import signal
try:
signal.signal (signal.SIGUSR1, _print_backtrace_signal_handler)
except Exception as e:
warn ('failed to set up Python backtraces on SIGUSR1: %s', e) |
def detectTizenTV(self):
"""Return detection of a Tizen smart TV
Detects if the current browser is on a Tizen smart TV.
"""
return UAgentInfo.deviceTizen in self.__userAgent \
and UAgentInfo.smartTV1 in self.__userAgent | Return detection of a Tizen smart TV
Detects if the current browser is on a Tizen smart TV. | Below is the the instruction that describes the task:
### Input:
Return detection of a Tizen smart TV
Detects if the current browser is on a Tizen smart TV.
### Response:
def detectTizenTV(self):
"""Return detection of a Tizen smart TV
Detects if the current browser is on a Tizen smart TV.
"""
return UAgentInfo.deviceTizen in self.__userAgent \
and UAgentInfo.smartTV1 in self.__userAgent |
def add_schema(self, schema):
"""
Merges in an existing schema.
arguments:
* `schema` (required - `dict` or `SchemaNode`):
an existing JSON Schema to merge.
"""
# serialize instances of SchemaNode before parsing
if isinstance(schema, SchemaNode):
schema = schema.to_schema()
for subschema in self._get_subschemas(schema):
# delegate to SchemaType object
schema_generator = self._get_generator_for_schema(subschema)
schema_generator.add_schema(subschema)
# return self for easy method chaining
return self | Merges in an existing schema.
arguments:
* `schema` (required - `dict` or `SchemaNode`):
an existing JSON Schema to merge. | Below is the the instruction that describes the task:
### Input:
Merges in an existing schema.
arguments:
* `schema` (required - `dict` or `SchemaNode`):
an existing JSON Schema to merge.
### Response:
def add_schema(self, schema):
"""
Merges in an existing schema.
arguments:
* `schema` (required - `dict` or `SchemaNode`):
an existing JSON Schema to merge.
"""
# serialize instances of SchemaNode before parsing
if isinstance(schema, SchemaNode):
schema = schema.to_schema()
for subschema in self._get_subschemas(schema):
# delegate to SchemaType object
schema_generator = self._get_generator_for_schema(subschema)
schema_generator.add_schema(subschema)
# return self for easy method chaining
return self |
def handle_request(self):
"""simply collect requests and put them on the queue for the workers."""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
self.requests.put((request, client_address)) | simply collect requests and put them on the queue for the workers. | Below is the the instruction that describes the task:
### Input:
simply collect requests and put them on the queue for the workers.
### Response:
def handle_request(self):
"""simply collect requests and put them on the queue for the workers."""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
self.requests.put((request, client_address)) |
def predict(self, dataset, confidence_threshold=0.25, iou_threshold=None, verbose=True):
"""
Predict object instances in an sframe of images.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image
The images on which to perform object detection.
If dataset is an SFrame, it must have a column with the same name
as the feature column during training. Additional columns are
ignored.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
verbose : bool
If True, prints prediction progress.
Returns
-------
out : SArray
An SArray with model predictions. Each element corresponds to
an image and contains a list of dictionaries. Each dictionary
describes an object instances that was found in the image. If
`dataset` is a single image, the return value will be a single
prediction.
See Also
--------
evaluate
Examples
--------
.. sourcecode:: python
# Make predictions
>>> pred = model.predict(data)
# Stack predictions, for a better overview
>>> turicreate.object_detector.util.stack_annotations(pred)
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns]
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
"""
_numeric_param_check_range('confidence_threshold', confidence_threshold, 0.0, 1.0)
dataset, unpack = self._canonize_input(dataset)
stacked_pred = self._predict_with_options(dataset, with_ground_truth=False,
confidence_threshold=confidence_threshold,
iou_threshold=iou_threshold,
verbose=verbose)
from . import util
return unpack(util.unstack_annotations(stacked_pred, num_rows=len(dataset))) | Predict object instances in an sframe of images.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image
The images on which to perform object detection.
If dataset is an SFrame, it must have a column with the same name
as the feature column during training. Additional columns are
ignored.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
verbose : bool
If True, prints prediction progress.
Returns
-------
out : SArray
An SArray with model predictions. Each element corresponds to
an image and contains a list of dictionaries. Each dictionary
describes an object instances that was found in the image. If
`dataset` is a single image, the return value will be a single
prediction.
See Also
--------
evaluate
Examples
--------
.. sourcecode:: python
# Make predictions
>>> pred = model.predict(data)
# Stack predictions, for a better overview
>>> turicreate.object_detector.util.stack_annotations(pred)
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns]
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions']) | Below is the the instruction that describes the task:
### Input:
Predict object instances in an sframe of images.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image
The images on which to perform object detection.
If dataset is an SFrame, it must have a column with the same name
as the feature column during training. Additional columns are
ignored.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
verbose : bool
If True, prints prediction progress.
Returns
-------
out : SArray
An SArray with model predictions. Each element corresponds to
an image and contains a list of dictionaries. Each dictionary
describes an object instances that was found in the image. If
`dataset` is a single image, the return value will be a single
prediction.
See Also
--------
evaluate
Examples
--------
.. sourcecode:: python
# Make predictions
>>> pred = model.predict(data)
# Stack predictions, for a better overview
>>> turicreate.object_detector.util.stack_annotations(pred)
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns]
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
### Response:
def predict(self, dataset, confidence_threshold=0.25, iou_threshold=None, verbose=True):
"""
Predict object instances in an sframe of images.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image
The images on which to perform object detection.
If dataset is an SFrame, it must have a column with the same name
as the feature column during training. Additional columns are
ignored.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
verbose : bool
If True, prints prediction progress.
Returns
-------
out : SArray
An SArray with model predictions. Each element corresponds to
an image and contains a list of dictionaries. Each dictionary
describes an object instances that was found in the image. If
`dataset` is a single image, the return value will be a single
prediction.
See Also
--------
evaluate
Examples
--------
.. sourcecode:: python
# Make predictions
>>> pred = model.predict(data)
# Stack predictions, for a better overview
>>> turicreate.object_detector.util.stack_annotations(pred)
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns]
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
"""
_numeric_param_check_range('confidence_threshold', confidence_threshold, 0.0, 1.0)
dataset, unpack = self._canonize_input(dataset)
stacked_pred = self._predict_with_options(dataset, with_ground_truth=False,
confidence_threshold=confidence_threshold,
iou_threshold=iou_threshold,
verbose=verbose)
from . import util
return unpack(util.unstack_annotations(stacked_pred, num_rows=len(dataset))) |
def construct_graph(sakefile, settings):
"""
Takes the sakefile dictionary and builds a NetworkX graph
Args:
A dictionary that is the parsed Sakefile (from sake.py)
The settings dictionary
Returns:
A NetworkX graph
"""
verbose = settings["verbose"]
sprint = settings["sprint"]
G = nx.DiGraph()
sprint("Going to construct Graph", level="verbose")
for target in sakefile:
if target == "all":
# we don't want this node
continue
if "formula" not in sakefile[target]:
# that means this is a meta target
for atomtarget in sakefile[target]:
if atomtarget == "help":
continue
sprint("Adding '{}'".format(atomtarget), level="verbose")
data_dict = sakefile[target][atomtarget]
data_dict["parent"] = target
G.add_node(atomtarget, **data_dict)
else:
sprint("Adding '{}'".format(target), level="verbose")
G.add_node(target, **sakefile[target])
sprint("Nodes are built\nBuilding connections", level="verbose")
for node in G.nodes(data=True):
sprint("checking node {} for dependencies".format(node[0]),
level="verbose")
# normalize all paths in output
for k, v in node[1].items():
if v is None: node[1][k] = []
if "output" in node[1]:
for index, out in enumerate(node[1]['output']):
node[1]['output'][index] = clean_path(node[1]['output'][index])
if "dependencies" not in node[1]:
continue
sprint("it has dependencies", level="verbose")
connects = []
# normalize all paths in dependencies
for index, dep in enumerate(node[1]['dependencies']):
dep = os.path.normpath(dep)
shrt = "dependencies"
node[1]['dependencies'][index] = clean_path(node[1][shrt][index])
for node in G.nodes(data=True):
connects = []
if "dependencies" not in node[1]:
continue
for dep in node[1]['dependencies']:
matches = check_for_dep_in_outputs(dep, verbose, G)
if not matches:
continue
for match in matches:
sprint("Appending {} to matches".format(match), level="verbose")
connects.append(match)
if connects:
for connect in connects:
G.add_edge(connect, node[0])
return G | Takes the sakefile dictionary and builds a NetworkX graph
Args:
A dictionary that is the parsed Sakefile (from sake.py)
The settings dictionary
Returns:
A NetworkX graph | Below is the the instruction that describes the task:
### Input:
Takes the sakefile dictionary and builds a NetworkX graph
Args:
A dictionary that is the parsed Sakefile (from sake.py)
The settings dictionary
Returns:
A NetworkX graph
### Response:
def construct_graph(sakefile, settings):
"""
Takes the sakefile dictionary and builds a NetworkX graph
Args:
A dictionary that is the parsed Sakefile (from sake.py)
The settings dictionary
Returns:
A NetworkX graph
"""
verbose = settings["verbose"]
sprint = settings["sprint"]
G = nx.DiGraph()
sprint("Going to construct Graph", level="verbose")
for target in sakefile:
if target == "all":
# we don't want this node
continue
if "formula" not in sakefile[target]:
# that means this is a meta target
for atomtarget in sakefile[target]:
if atomtarget == "help":
continue
sprint("Adding '{}'".format(atomtarget), level="verbose")
data_dict = sakefile[target][atomtarget]
data_dict["parent"] = target
G.add_node(atomtarget, **data_dict)
else:
sprint("Adding '{}'".format(target), level="verbose")
G.add_node(target, **sakefile[target])
sprint("Nodes are built\nBuilding connections", level="verbose")
for node in G.nodes(data=True):
sprint("checking node {} for dependencies".format(node[0]),
level="verbose")
# normalize all paths in output
for k, v in node[1].items():
if v is None: node[1][k] = []
if "output" in node[1]:
for index, out in enumerate(node[1]['output']):
node[1]['output'][index] = clean_path(node[1]['output'][index])
if "dependencies" not in node[1]:
continue
sprint("it has dependencies", level="verbose")
connects = []
# normalize all paths in dependencies
for index, dep in enumerate(node[1]['dependencies']):
dep = os.path.normpath(dep)
shrt = "dependencies"
node[1]['dependencies'][index] = clean_path(node[1][shrt][index])
for node in G.nodes(data=True):
connects = []
if "dependencies" not in node[1]:
continue
for dep in node[1]['dependencies']:
matches = check_for_dep_in_outputs(dep, verbose, G)
if not matches:
continue
for match in matches:
sprint("Appending {} to matches".format(match), level="verbose")
connects.append(match)
if connects:
for connect in connects:
G.add_edge(connect, node[0])
return G |
def _isbtaddr(address):
"""
Returns whether the given address is a valid bluetooth address.
For example, "00:0e:6d:7b:a2:0a" is a valid address.
Returns False if the argument is None or is not a string.
"""
# Define validity regex. Accept either ":" or "-" as separators.
global _validbtaddr
if _validbtaddr is None:
import re
_validbtaddr = re.compile("((\d|[a-f]){2}(:|-)){5}(\d|[a-f]){2}",
re.IGNORECASE)
import types
if not isinstance(address, str):
return False
return _validbtaddr.match(address) is not None | Returns whether the given address is a valid bluetooth address.
For example, "00:0e:6d:7b:a2:0a" is a valid address.
Returns False if the argument is None or is not a string. | Below is the the instruction that describes the task:
### Input:
Returns whether the given address is a valid bluetooth address.
For example, "00:0e:6d:7b:a2:0a" is a valid address.
Returns False if the argument is None or is not a string.
### Response:
def _isbtaddr(address):
"""
Returns whether the given address is a valid bluetooth address.
For example, "00:0e:6d:7b:a2:0a" is a valid address.
Returns False if the argument is None or is not a string.
"""
# Define validity regex. Accept either ":" or "-" as separators.
global _validbtaddr
if _validbtaddr is None:
import re
_validbtaddr = re.compile("((\d|[a-f]){2}(:|-)){5}(\d|[a-f]){2}",
re.IGNORECASE)
import types
if not isinstance(address, str):
return False
return _validbtaddr.match(address) is not None |
def compute_symm_block_tridiag_covariances(H_diag, H_upper_diag):
"""
use the info smoother to solve a symmetric block tridiagonal system
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = np.zeros((T, D))
_, _, sigmas, E_xt_xtp1 = \
info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return sigmas, E_xt_xtp1 | use the info smoother to solve a symmetric block tridiagonal system | Below is the the instruction that describes the task:
### Input:
use the info smoother to solve a symmetric block tridiagonal system
### Response:
def compute_symm_block_tridiag_covariances(H_diag, H_upper_diag):
"""
use the info smoother to solve a symmetric block tridiagonal system
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = np.zeros((T, D))
_, _, sigmas, E_xt_xtp1 = \
info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return sigmas, E_xt_xtp1 |
def _logprob_obs(self, data, mean_pat, var):
"""Log probability of observing each timepoint under each event model
Computes the log probability of each observed timepoint being
generated by the Gaussian distribution for each event pattern
Parameters
----------
data: voxel by time ndarray
fMRI data on which to compute log probabilities
mean_pat: voxel by event ndarray
Centers of the Gaussians for each event
var: float or 1D array of length equal to the number of events
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance
Returns
-------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian
"""
n_vox = data.shape[0]
t = data.shape[1]
# z-score both data and mean patterns in space, so that Gaussians
# are measuring Pearson correlations and are insensitive to overall
# activity changes
data_z = stats.zscore(data, axis=0, ddof=1)
mean_pat_z = stats.zscore(mean_pat, axis=0, ddof=1)
logprob = np.empty((t, self.n_events))
if type(var) is not np.ndarray:
var = var * np.ones(self.n_events)
for k in range(self.n_events):
logprob[:, k] = -0.5 * n_vox * np.log(
2 * np.pi * var[k]) - 0.5 * np.sum(
(data_z.T - mean_pat_z[:, k]).T ** 2, axis=0) / var[k]
logprob /= n_vox
return logprob | Log probability of observing each timepoint under each event model
Computes the log probability of each observed timepoint being
generated by the Gaussian distribution for each event pattern
Parameters
----------
data: voxel by time ndarray
fMRI data on which to compute log probabilities
mean_pat: voxel by event ndarray
Centers of the Gaussians for each event
var: float or 1D array of length equal to the number of events
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance
Returns
-------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian | Below is the the instruction that describes the task:
### Input:
Log probability of observing each timepoint under each event model
Computes the log probability of each observed timepoint being
generated by the Gaussian distribution for each event pattern
Parameters
----------
data: voxel by time ndarray
fMRI data on which to compute log probabilities
mean_pat: voxel by event ndarray
Centers of the Gaussians for each event
var: float or 1D array of length equal to the number of events
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance
Returns
-------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian
### Response:
def _logprob_obs(self, data, mean_pat, var):
"""Log probability of observing each timepoint under each event model
Computes the log probability of each observed timepoint being
generated by the Gaussian distribution for each event pattern
Parameters
----------
data: voxel by time ndarray
fMRI data on which to compute log probabilities
mean_pat: voxel by event ndarray
Centers of the Gaussians for each event
var: float or 1D array of length equal to the number of events
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance
Returns
-------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian
"""
n_vox = data.shape[0]
t = data.shape[1]
# z-score both data and mean patterns in space, so that Gaussians
# are measuring Pearson correlations and are insensitive to overall
# activity changes
data_z = stats.zscore(data, axis=0, ddof=1)
mean_pat_z = stats.zscore(mean_pat, axis=0, ddof=1)
logprob = np.empty((t, self.n_events))
if type(var) is not np.ndarray:
var = var * np.ones(self.n_events)
for k in range(self.n_events):
logprob[:, k] = -0.5 * n_vox * np.log(
2 * np.pi * var[k]) - 0.5 * np.sum(
(data_z.T - mean_pat_z[:, k]).T ** 2, axis=0) / var[k]
logprob /= n_vox
return logprob |
def join(rasters):
"""
This method takes a list of rasters and returns a raster that is constructed of all of them
"""
raster = rasters[0] # using the first raster to understand what is the type of data we have
mask_band = None
nodata = None
with raster._raster_opener(raster.source_file) as r:
nodata = r.nodata
mask_flags = r.mask_flag_enums
per_dataset_mask = all([rasterio.enums.MaskFlags.per_dataset in flags for flags in mask_flags])
if per_dataset_mask and nodata is None:
mask_band = 0
return GeoRaster2.from_rasters(rasters, relative_to_vrt=False, nodata=nodata, mask_band=mask_band) | This method takes a list of rasters and returns a raster that is constructed of all of them | Below is the instruction that describes the task:
### Input:
This method takes a list of rasters and returns a raster that is constructed of all of them
### Response:
def join(rasters):
"""
This method takes a list of rasters and returns a raster that is constructed of all of them
"""
raster = rasters[0] # using the first raster to understand what is the type of data we have
mask_band = None
nodata = None
with raster._raster_opener(raster.source_file) as r:
nodata = r.nodata
mask_flags = r.mask_flag_enums
per_dataset_mask = all([rasterio.enums.MaskFlags.per_dataset in flags for flags in mask_flags])
if per_dataset_mask and nodata is None:
mask_band = 0
return GeoRaster2.from_rasters(rasters, relative_to_vrt=False, nodata=nodata, mask_band=mask_band) |
def convert_to_merged_ids(self, id_run):
"""
Converts any identified phrases in the run into phrase_ids. The dictionary provides all acceptable phrases
:param id_run: a run of token ids
:param dictionary: a dictionary of acceptable phrases described as there component token ids
:return: a run of token and phrase ids.
"""
i = 0
rv = []
while i < len(id_run):
phrase_id, offset = self.max_phrase(id_run, i)
if phrase_id:
rv.append(phrase_id)
i = offset
else:
rv.append(id_run[i])
i += 1
return rv | Converts any identified phrases in the run into phrase_ids. The dictionary provides all acceptable phrases
:param id_run: a run of token ids
:param dictionary: a dictionary of acceptable phrases described as there component token ids
:return: a run of token and phrase ids. | Below is the instruction that describes the task:
### Input:
Converts any identified phrases in the run into phrase_ids. The dictionary provides all acceptable phrases
:param id_run: a run of token ids
:param dictionary: a dictionary of acceptable phrases described as there component token ids
:return: a run of token and phrase ids.
### Response:
def convert_to_merged_ids(self, id_run):
"""
Converts any identified phrases in the run into phrase_ids. The dictionary provides all acceptable phrases
:param id_run: a run of token ids
:param dictionary: a dictionary of acceptable phrases described as there component token ids
:return: a run of token and phrase ids.
"""
i = 0
rv = []
while i < len(id_run):
phrase_id, offset = self.max_phrase(id_run, i)
if phrase_id:
rv.append(phrase_id)
i = offset
else:
rv.append(id_run[i])
i += 1
return rv |
def remove_listener(self, listener):
"""Remove the given listener from the wrapped client.
:param listener: A listener previously passed to :meth:`add_listener`.
"""
internal_listener = self._internal_listeners.pop(listener)
return self._client.remove_listener(internal_listener) | Remove the given listener from the wrapped client.
:param listener: A listener previously passed to :meth:`add_listener`. | Below is the instruction that describes the task:
### Input:
Remove the given listener from the wrapped client.
:param listener: A listener previously passed to :meth:`add_listener`.
### Response:
def remove_listener(self, listener):
"""Remove the given listener from the wrapped client.
:param listener: A listener previously passed to :meth:`add_listener`.
"""
internal_listener = self._internal_listeners.pop(listener)
return self._client.remove_listener(internal_listener) |
def job(self, func_or_queue=None, timeout=None, result_ttl=None, ttl=None,
depends_on=None, at_front=None, meta=None, description=None):
"""
Decorator to mark functions for queuing via RQ, e.g.::
rq = RQ()
@rq.job
def add(x, y):
return x + y
or::
@rq.job(timeout=60, result_ttl=60 * 60)
def add(x, y):
return x + y
Adds various functions to the job as documented in
:class:`~flask_rq2.functions.JobFunctions`.
.. versionchanged:: 18.0
Adds ``depends_on``, ``at_front``, ``meta`` and ``description``
parameters.
:param queue: Name of the queue to add job to, defaults to
:attr:`flask_rq2.app.RQ.default_queue`.
:type queue: str
:param timeout: The maximum runtime in seconds of the job before it's
considered 'lost', defaults to 180.
:type timeout: int
:param result_ttl: Time to persist the job results in Redis,
in seconds.
:type result_ttl: int
:param ttl: The maximum queued time of the job before it'll be
cancelled.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:param description: Description of the job.
:type description: str
"""
if callable(func_or_queue):
func = func_or_queue
queue_name = None
else:
func = None
queue_name = func_or_queue
def wrapper(wrapped):
self._jobs.append(wrapped)
helper = self._functions_cls(
rq=self,
wrapped=wrapped,
queue_name=queue_name,
timeout=timeout,
result_ttl=result_ttl,
ttl=ttl,
depends_on=depends_on,
at_front=at_front,
meta=meta,
description=description,
)
wrapped.helper = helper
for function in helper.functions:
callback = getattr(helper, function, None)
setattr(wrapped, function, callback)
return wrapped
if func is None:
return wrapper
else:
return wrapper(func) | Decorator to mark functions for queuing via RQ, e.g.::
rq = RQ()
@rq.job
def add(x, y):
return x + y
or::
@rq.job(timeout=60, result_ttl=60 * 60)
def add(x, y):
return x + y
Adds various functions to the job as documented in
:class:`~flask_rq2.functions.JobFunctions`.
.. versionchanged:: 18.0
Adds ``depends_on``, ``at_front``, ``meta`` and ``description``
parameters.
:param queue: Name of the queue to add job to, defaults to
:attr:`flask_rq2.app.RQ.default_queue`.
:type queue: str
:param timeout: The maximum runtime in seconds of the job before it's
considered 'lost', defaults to 180.
:type timeout: int
:param result_ttl: Time to persist the job results in Redis,
in seconds.
:type result_ttl: int
:param ttl: The maximum queued time of the job before it'll be
cancelled.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:param description: Description of the job.
:type description: str | Below is the instruction that describes the task:
### Input:
Decorator to mark functions for queuing via RQ, e.g.::
rq = RQ()
@rq.job
def add(x, y):
return x + y
or::
@rq.job(timeout=60, result_ttl=60 * 60)
def add(x, y):
return x + y
Adds various functions to the job as documented in
:class:`~flask_rq2.functions.JobFunctions`.
.. versionchanged:: 18.0
Adds ``depends_on``, ``at_front``, ``meta`` and ``description``
parameters.
:param queue: Name of the queue to add job to, defaults to
:attr:`flask_rq2.app.RQ.default_queue`.
:type queue: str
:param timeout: The maximum runtime in seconds of the job before it's
considered 'lost', defaults to 180.
:type timeout: int
:param result_ttl: Time to persist the job results in Redis,
in seconds.
:type result_ttl: int
:param ttl: The maximum queued time of the job before it'll be
cancelled.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:param description: Description of the job.
:type description: str
### Response:
def job(self, func_or_queue=None, timeout=None, result_ttl=None, ttl=None,
depends_on=None, at_front=None, meta=None, description=None):
"""
Decorator to mark functions for queuing via RQ, e.g.::
rq = RQ()
@rq.job
def add(x, y):
return x + y
or::
@rq.job(timeout=60, result_ttl=60 * 60)
def add(x, y):
return x + y
Adds various functions to the job as documented in
:class:`~flask_rq2.functions.JobFunctions`.
.. versionchanged:: 18.0
Adds ``depends_on``, ``at_front``, ``meta`` and ``description``
parameters.
:param queue: Name of the queue to add job to, defaults to
:attr:`flask_rq2.app.RQ.default_queue`.
:type queue: str
:param timeout: The maximum runtime in seconds of the job before it's
considered 'lost', defaults to 180.
:type timeout: int
:param result_ttl: Time to persist the job results in Redis,
in seconds.
:type result_ttl: int
:param ttl: The maximum queued time of the job before it'll be
cancelled.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:param description: Description of the job.
:type description: str
"""
if callable(func_or_queue):
func = func_or_queue
queue_name = None
else:
func = None
queue_name = func_or_queue
def wrapper(wrapped):
self._jobs.append(wrapped)
helper = self._functions_cls(
rq=self,
wrapped=wrapped,
queue_name=queue_name,
timeout=timeout,
result_ttl=result_ttl,
ttl=ttl,
depends_on=depends_on,
at_front=at_front,
meta=meta,
description=description,
)
wrapped.helper = helper
for function in helper.functions:
callback = getattr(helper, function, None)
setattr(wrapped, function, callback)
return wrapped
if func is None:
return wrapper
else:
return wrapper(func) |
def _try_get_string(dev, index, langid = None, default_str_i0 = "",
default_access_error = "Error Accessing String"):
""" try to get a string, but return a string no matter what
"""
if index == 0 :
string = default_str_i0
else:
try:
if langid is None:
string = util.get_string(dev, index)
else:
string = util.get_string(dev, index, langid)
except :
string = default_access_error
return string | try to get a string, but return a string no matter what | Below is the instruction that describes the task:
### Input:
try to get a string, but return a string no matter what
### Response:
def _try_get_string(dev, index, langid = None, default_str_i0 = "",
default_access_error = "Error Accessing String"):
""" try to get a string, but return a string no matter what
"""
if index == 0 :
string = default_str_i0
else:
try:
if langid is None:
string = util.get_string(dev, index)
else:
string = util.get_string(dev, index, langid)
except :
string = default_access_error
return string |
def get_vcenter_version(kwargs=None, call=None):
'''
Show the vCenter Server version with build number.
CLI Example:
.. code-block:: bash
salt-cloud -f get_vcenter_version my-vmware-config
'''
if call != 'function':
raise SaltCloudSystemExit(
'The get_vcenter_version function must be called with '
'-f or --function.'
)
# Get the inventory
inv = salt.utils.vmware.get_inventory(_get_si())
return inv.about.fullName | Show the vCenter Server version with build number.
CLI Example:
.. code-block:: bash
salt-cloud -f get_vcenter_version my-vmware-config | Below is the instruction that describes the task:
### Input:
Show the vCenter Server version with build number.
CLI Example:
.. code-block:: bash
salt-cloud -f get_vcenter_version my-vmware-config
### Response:
def get_vcenter_version(kwargs=None, call=None):
'''
Show the vCenter Server version with build number.
CLI Example:
.. code-block:: bash
salt-cloud -f get_vcenter_version my-vmware-config
'''
if call != 'function':
raise SaltCloudSystemExit(
'The get_vcenter_version function must be called with '
'-f or --function.'
)
# Get the inventory
inv = salt.utils.vmware.get_inventory(_get_si())
return inv.about.fullName |
def sync(self, api_token, sync_token, resource_types='["all"]', **kwargs):
"""Update and retrieve Todoist data.
:param api_token: The user's login api_token.
:type api_token: str
:param seq_no: The request sequence number. On initial request pass
``0``. On all others pass the last seq_no you received.
:type seq_no: int
:param seq_no_global: The request sequence number. On initial request
pass ``0``. On all others pass the last seq_no you received.
:type seq_no_global: int
:param resource_types: Specifies which subset of data you want to
receive e.g. only projects. Defaults to all data.
:type resources_types: str
:param commands: A list of JSON commands to perform.
:type commands: list (str)
:return: The HTTP response to the request.
:rtype: :class:`requests.Response`
>>> from pytodoist.api import TodoistAPI
>>> api = TodoistAPI()
>>> response = api.register('john.doe@gmail.com', 'John Doe',
... 'password')
>>> user_info = response.json()
>>> api_token = user_info['api_token']
>>> response = api.sync(api_token, 0, 0, '["projects"]')
>>> print(response.json())
{'seq_no_global': 3848029654, 'seq_no': 3848029654, 'Projects': ...}
"""
params = {
'token': api_token,
'sync_token': sync_token,
}
req_func = self._post
if 'commands' not in kwargs: # GET if we're not changing data.
req_func = self._get
params['resource_types'] = resource_types
return req_func('sync', params, **kwargs) | Update and retrieve Todoist data.
:param api_token: The user's login api_token.
:type api_token: str
:param seq_no: The request sequence number. On initial request pass
``0``. On all others pass the last seq_no you received.
:type seq_no: int
:param seq_no_global: The request sequence number. On initial request
pass ``0``. On all others pass the last seq_no you received.
:type seq_no_global: int
:param resource_types: Specifies which subset of data you want to
receive e.g. only projects. Defaults to all data.
:type resources_types: str
:param commands: A list of JSON commands to perform.
:type commands: list (str)
:return: The HTTP response to the request.
:rtype: :class:`requests.Response`
>>> from pytodoist.api import TodoistAPI
>>> api = TodoistAPI()
>>> response = api.register('john.doe@gmail.com', 'John Doe',
... 'password')
>>> user_info = response.json()
>>> api_token = user_info['api_token']
>>> response = api.sync(api_token, 0, 0, '["projects"]')
>>> print(response.json())
{'seq_no_global': 3848029654, 'seq_no': 3848029654, 'Projects': ...} | Below is the instruction that describes the task:
### Input:
Update and retrieve Todoist data.
:param api_token: The user's login api_token.
:type api_token: str
:param seq_no: The request sequence number. On initial request pass
``0``. On all others pass the last seq_no you received.
:type seq_no: int
:param seq_no_global: The request sequence number. On initial request
pass ``0``. On all others pass the last seq_no you received.
:type seq_no_global: int
:param resource_types: Specifies which subset of data you want to
receive e.g. only projects. Defaults to all data.
:type resources_types: str
:param commands: A list of JSON commands to perform.
:type commands: list (str)
:return: The HTTP response to the request.
:rtype: :class:`requests.Response`
>>> from pytodoist.api import TodoistAPI
>>> api = TodoistAPI()
>>> response = api.register('john.doe@gmail.com', 'John Doe',
... 'password')
>>> user_info = response.json()
>>> api_token = user_info['api_token']
>>> response = api.sync(api_token, 0, 0, '["projects"]')
>>> print(response.json())
{'seq_no_global': 3848029654, 'seq_no': 3848029654, 'Projects': ...}
### Response:
def sync(self, api_token, sync_token, resource_types='["all"]', **kwargs):
"""Update and retrieve Todoist data.
:param api_token: The user's login api_token.
:type api_token: str
:param seq_no: The request sequence number. On initial request pass
``0``. On all others pass the last seq_no you received.
:type seq_no: int
:param seq_no_global: The request sequence number. On initial request
pass ``0``. On all others pass the last seq_no you received.
:type seq_no_global: int
:param resource_types: Specifies which subset of data you want to
receive e.g. only projects. Defaults to all data.
:type resources_types: str
:param commands: A list of JSON commands to perform.
:type commands: list (str)
:return: The HTTP response to the request.
:rtype: :class:`requests.Response`
>>> from pytodoist.api import TodoistAPI
>>> api = TodoistAPI()
>>> response = api.register('john.doe@gmail.com', 'John Doe',
... 'password')
>>> user_info = response.json()
>>> api_token = user_info['api_token']
>>> response = api.sync(api_token, 0, 0, '["projects"]')
>>> print(response.json())
{'seq_no_global': 3848029654, 'seq_no': 3848029654, 'Projects': ...}
"""
params = {
'token': api_token,
'sync_token': sync_token,
}
req_func = self._post
if 'commands' not in kwargs: # GET if we're not changing data.
req_func = self._get
params['resource_types'] = resource_types
return req_func('sync', params, **kwargs) |
def ports_open(name, ports, proto='tcp', direction='in'):
'''
Ensure ports are open for a protocol, in a direction.
e.g. - proto='tcp', direction='in' would set the values
for TCP_IN in the csf.conf file.
ports
A list of ports that should be open.
proto
The protocol. May be one of 'tcp', 'udp',
'tcp6', or 'udp6'.
direction
Choose 'in', 'out', or both to indicate the port
should be opened for inbound traffic, outbound
traffic, or both.
'''
ports = list(six.moves.map(six.text_type, ports))
diff = False
ret = {'name': ','.join(ports),
'changes': {},
'result': True,
'comment': 'Ports open.'}
current_ports = __salt__['csf.get_ports'](proto=proto, direction=direction)
direction = direction.upper()
directions = __salt__['csf.build_directions'](direction)
for direction in directions:
log.trace('current_ports[direction]: %s', current_ports[direction])
log.trace('ports: %s', ports)
if current_ports[direction] != ports:
diff = True
if diff:
result = __salt__['csf.allow_ports'](ports, proto=proto, direction=direction)
ret['changes']['Ports'] = 'Changed'
ret['comment'] = result
return ret | Ensure ports are open for a protocol, in a direction.
e.g. - proto='tcp', direction='in' would set the values
for TCP_IN in the csf.conf file.
ports
A list of ports that should be open.
proto
The protocol. May be one of 'tcp', 'udp',
'tcp6', or 'udp6'.
direction
Choose 'in', 'out', or both to indicate the port
should be opened for inbound traffic, outbound
traffic, or both. | Below is the instruction that describes the task:
### Input:
Ensure ports are open for a protocol, in a direction.
e.g. - proto='tcp', direction='in' would set the values
for TCP_IN in the csf.conf file.
ports
A list of ports that should be open.
proto
The protocol. May be one of 'tcp', 'udp',
'tcp6', or 'udp6'.
direction
Choose 'in', 'out', or both to indicate the port
should be opened for inbound traffic, outbound
traffic, or both.
### Response:
def ports_open(name, ports, proto='tcp', direction='in'):
'''
Ensure ports are open for a protocol, in a direction.
e.g. - proto='tcp', direction='in' would set the values
for TCP_IN in the csf.conf file.
ports
A list of ports that should be open.
proto
The protocol. May be one of 'tcp', 'udp',
'tcp6', or 'udp6'.
direction
Choose 'in', 'out', or both to indicate the port
should be opened for inbound traffic, outbound
traffic, or both.
'''
ports = list(six.moves.map(six.text_type, ports))
diff = False
ret = {'name': ','.join(ports),
'changes': {},
'result': True,
'comment': 'Ports open.'}
current_ports = __salt__['csf.get_ports'](proto=proto, direction=direction)
direction = direction.upper()
directions = __salt__['csf.build_directions'](direction)
for direction in directions:
log.trace('current_ports[direction]: %s', current_ports[direction])
log.trace('ports: %s', ports)
if current_ports[direction] != ports:
diff = True
if diff:
result = __salt__['csf.allow_ports'](ports, proto=proto, direction=direction)
ret['changes']['Ports'] = 'Changed'
ret['comment'] = result
return ret |
def buildlist(self, enabled):
"""Run dialog buildlist
"""
choice = []
for item in self.data:
choice.append((item, False))
for item in enabled:
choice.append((item, True))
items = [(tag, tag, sta) for (tag, sta) in choice]
code, self.tags = self.d.buildlist(
text=self.text, items=items, visit_items=True, item_help=False,
title=self.title)
if code == "ok":
self.unicode_to_string()
return self.ununicode
if code in ["cancel", "esc"]:
self.exit() | Run dialog buildlist | Below is the instruction that describes the task:
### Input:
Run dialog buildlist
### Response:
def buildlist(self, enabled):
"""Run dialog buildlist
"""
choice = []
for item in self.data:
choice.append((item, False))
for item in enabled:
choice.append((item, True))
items = [(tag, tag, sta) for (tag, sta) in choice]
code, self.tags = self.d.buildlist(
text=self.text, items=items, visit_items=True, item_help=False,
title=self.title)
if code == "ok":
self.unicode_to_string()
return self.ununicode
if code in ["cancel", "esc"]:
self.exit() |
def delete_secret(self, path, mount_point=DEFAULT_MOUNT_POINT):
"""Delete the secret at the specified location.
Supported methods:
DELETE: /{mount_point}/{path}. Produces: 204 (empty body)
:param path: Specifies the path of the secret to delete.
This is specified as part of the URL.
:type path: str | unicode
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The response of the delete_secret request.
:rtype: requests.Response
"""
api_path = '/v1/{mount_point}/{path}'.format(mount_point=mount_point, path=path)
return self._adapter.delete(
url=api_path,
) | Delete the secret at the specified location.
Supported methods:
DELETE: /{mount_point}/{path}. Produces: 204 (empty body)
:param path: Specifies the path of the secret to delete.
This is specified as part of the URL.
:type path: str | unicode
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The response of the delete_secret request.
:rtype: requests.Response | Below is the instruction that describes the task:
### Input:
Delete the secret at the specified location.
Supported methods:
DELETE: /{mount_point}/{path}. Produces: 204 (empty body)
:param path: Specifies the path of the secret to delete.
This is specified as part of the URL.
:type path: str | unicode
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The response of the delete_secret request.
:rtype: requests.Response
### Response:
def delete_secret(self, path, mount_point=DEFAULT_MOUNT_POINT):
"""Delete the secret at the specified location.
Supported methods:
DELETE: /{mount_point}/{path}. Produces: 204 (empty body)
:param path: Specifies the path of the secret to delete.
This is specified as part of the URL.
:type path: str | unicode
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The response of the delete_secret request.
:rtype: requests.Response
"""
api_path = '/v1/{mount_point}/{path}'.format(mount_point=mount_point, path=path)
return self._adapter.delete(
url=api_path,
) |
def get_icon(brain_or_object, html_tag=True):
"""Get the icon of the content object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param html_tag: A value of 'True' returns the HTML tag, else the image url
:type html_tag: bool
:returns: HTML '<img>' tag if 'html_tag' is True else the image url
:rtype: string
"""
# Manual approach, because `plone.app.layout.getIcon` does not reliable
# work for Bika Contents coming from other catalogs than the
# `portal_catalog`
portal_types = get_tool("portal_types")
fti = portal_types.getTypeInfo(brain_or_object.portal_type)
icon = fti.getIcon()
if not icon:
return ""
url = "%s/%s" % (get_url(get_portal()), icon)
if not html_tag:
return url
tag = '<img width="16" height="16" src="{url}" title="{title}" />'.format(
url=url, title=get_title(brain_or_object))
return tag | Get the icon of the content object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param html_tag: A value of 'True' returns the HTML tag, else the image url
:type html_tag: bool
:returns: HTML '<img>' tag if 'html_tag' is True else the image url
:rtype: string | Below is the instruction that describes the task:
### Input:
Get the icon of the content object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param html_tag: A value of 'True' returns the HTML tag, else the image url
:type html_tag: bool
:returns: HTML '<img>' tag if 'html_tag' is True else the image url
:rtype: string
### Response:
def get_icon(brain_or_object, html_tag=True):
"""Get the icon of the content object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param html_tag: A value of 'True' returns the HTML tag, else the image url
:type html_tag: bool
:returns: HTML '<img>' tag if 'html_tag' is True else the image url
:rtype: string
"""
# Manual approach, because `plone.app.layout.getIcon` does not reliable
# work for Bika Contents coming from other catalogs than the
# `portal_catalog`
portal_types = get_tool("portal_types")
fti = portal_types.getTypeInfo(brain_or_object.portal_type)
icon = fti.getIcon()
if not icon:
return ""
url = "%s/%s" % (get_url(get_portal()), icon)
if not html_tag:
return url
tag = '<img width="16" height="16" src="{url}" title="{title}" />'.format(
url=url, title=get_title(brain_or_object))
return tag |
def MessageReceived(self, m):
"""
Process a message.
Args:
m (neo.Network.Message):
"""
if m.Command == 'verack':
# only respond with a verack when we connect to another client, not when a client connected to us or
# we might end up in a verack loop
if self.incoming_client:
if self.expect_verack_next:
self.expect_verack_next = False
else:
self.HandleVerack()
elif m.Command == 'version':
self.HandleVersion(m.Payload)
elif m.Command == 'getaddr':
self.SendPeerInfo()
elif m.Command == 'getdata':
self.HandleGetDataMessageReceived(m.Payload)
elif m.Command == 'getblocks':
self.HandleGetBlocksMessageReceived(m.Payload)
elif m.Command == 'inv':
self.HandleInvMessage(m.Payload)
elif m.Command == 'block':
self.HandleBlockReceived(m.Payload)
elif m.Command == 'getheaders':
self.HandleGetHeadersMessageReceived(m.Payload)
elif m.Command == 'headers':
self.HandleBlockHeadersReceived(m.Payload)
elif m.Command == 'addr':
self.HandlePeerInfoReceived(m.Payload)
else:
logger.debug(f"{self.prefix} Command not implemented: {m.Command}") | Process a message.
Args:
m (neo.Network.Message): | Below is the instruction that describes the task:
### Input:
Process a message.
Args:
m (neo.Network.Message):
### Response:
def MessageReceived(self, m):
"""
Process a message.
Args:
m (neo.Network.Message):
"""
if m.Command == 'verack':
# only respond with a verack when we connect to another client, not when a client connected to us or
# we might end up in a verack loop
if self.incoming_client:
if self.expect_verack_next:
self.expect_verack_next = False
else:
self.HandleVerack()
elif m.Command == 'version':
self.HandleVersion(m.Payload)
elif m.Command == 'getaddr':
self.SendPeerInfo()
elif m.Command == 'getdata':
self.HandleGetDataMessageReceived(m.Payload)
elif m.Command == 'getblocks':
self.HandleGetBlocksMessageReceived(m.Payload)
elif m.Command == 'inv':
self.HandleInvMessage(m.Payload)
elif m.Command == 'block':
self.HandleBlockReceived(m.Payload)
elif m.Command == 'getheaders':
self.HandleGetHeadersMessageReceived(m.Payload)
elif m.Command == 'headers':
self.HandleBlockHeadersReceived(m.Payload)
elif m.Command == 'addr':
self.HandlePeerInfoReceived(m.Payload)
else:
logger.debug(f"{self.prefix} Command not implemented: {m.Command}") |
def hostname(self):
"""Get the hostname that this connection is associated with"""
from six.moves.urllib.parse import urlparse
return urlparse(self._base_url).netloc.split(':', 1)[0] | Get the hostname that this connection is associated with | Below is the instruction that describes the task:
### Input:
Get the hostname that this connection is associated with
### Response:
def hostname(self):
"""Get the hostname that this connection is associated with"""
from six.moves.urllib.parse import urlparse
return urlparse(self._base_url).netloc.split(':', 1)[0] |
def getLiftOps(self, valu, cmpr='='):
'''
Get a set of lift operations for use with an Xact.
'''
if valu is None:
iops = (('pref', b''),)
return (
('indx', ('byprop', self.pref, iops)),
)
# TODO: In an ideal world, this would get smashed down into the self.type.getLiftOps
# but since doing so breaks existing types, and fixing those could cause a cascade
# of fun failures, we'll put this off until another flag day
if cmpr == '~=':
return (
('form:re', (self.name, valu, {})),
)
lops = self.type.getLiftOps('form', cmpr, (None, self.name, valu))
if lops is not None:
return lops
iops = self.type.getIndxOps(valu, cmpr)
return (
('indx', ('byprop', self.pref, iops)),
) | Get a set of lift operations for use with an Xact. | Below is the instruction that describes the task:
### Input:
Get a set of lift operations for use with an Xact.
### Response:
def getLiftOps(self, valu, cmpr='='):
'''
Get a set of lift operations for use with an Xact.
'''
if valu is None:
iops = (('pref', b''),)
return (
('indx', ('byprop', self.pref, iops)),
)
# TODO: In an ideal world, this would get smashed down into the self.type.getLiftOps
# but since doing so breaks existing types, and fixing those could cause a cascade
# of fun failures, we'll put this off until another flag day
if cmpr == '~=':
return (
('form:re', (self.name, valu, {})),
)
lops = self.type.getLiftOps('form', cmpr, (None, self.name, valu))
if lops is not None:
return lops
iops = self.type.getIndxOps(valu, cmpr)
return (
('indx', ('byprop', self.pref, iops)),
) |
def controlled(self, control_qubit):
"""
Add the CONTROLLED modifier to the gate with the given control qubit.
"""
control_qubit = unpack_qubit(control_qubit)
self.modifiers.insert(0, "CONTROLLED")
self.qubits.insert(0, control_qubit)
return self | Add the CONTROLLED modifier to the gate with the given control qubit. | Below is the instruction that describes the task:
### Input:
Add the CONTROLLED modifier to the gate with the given control qubit.
### Response:
def controlled(self, control_qubit):
"""
Add the CONTROLLED modifier to the gate with the given control qubit.
"""
control_qubit = unpack_qubit(control_qubit)
self.modifiers.insert(0, "CONTROLLED")
self.qubits.insert(0, control_qubit)
return self |
def del_action_role(ctx):
"""Deletes a role from an action on objects"""
objects = ctx.obj['objects']
action = ctx.obj['action']
role = ctx.obj['role']
if action is None or role is None:
log('You need to specify an action or role to the RBAC command group for this to work.', lvl=warn)
return
for item in objects:
if role in item.perms[action]:
item.perms[action].remove(role)
item.save()
log("Done") | Deletes a role from an action on objects | Below is the the instruction that describes the task:
### Input:
Deletes a role from an action on objects
### Response:
def del_action_role(ctx):
"""Deletes a role from an action on objects"""
objects = ctx.obj['objects']
action = ctx.obj['action']
role = ctx.obj['role']
if action is None or role is None:
log('You need to specify an action or role to the RBAC command group for this to work.', lvl=warn)
return
for item in objects:
if role in item.perms[action]:
item.perms[action].remove(role)
item.save()
log("Done") |
def limit_x(
self,
limit_lower = None, # float
limit_upper = None # float
):
"""
get or set x limits of the current axes
x_min, x_max = limit_x() # return the current limit_x
limit_x(x_min, x_max) # set the limit_x to x_min, x_max
"""
if limit_lower is None and limit_upper is None:
return self._limit_x
elif hasattr(limit_lower, "__iter__"):
self._limit_x = limit_lower[:2]
else:
self._limit_x = [limit_lower, limit_upper]
if self._limit_x[0] == self._limit_x[1]:
self._limit_x[1] += 1
self._limit_x[0] -= self.mod_x
self._limit_x[1] += self.mod_x | get or set x limits of the current axes
x_min, x_max = limit_x() # return the current limit_x
limit_x(x_min, x_max) # set the limit_x to x_min, x_max | Below is the the instruction that describes the task:
### Input:
get or set x limits of the current axes
x_min, x_max = limit_x() # return the current limit_x
limit_x(x_min, x_max) # set the limit_x to x_min, x_max
### Response:
def limit_x(
self,
limit_lower = None, # float
limit_upper = None # float
):
"""
get or set x limits of the current axes
x_min, x_max = limit_x() # return the current limit_x
limit_x(x_min, x_max) # set the limit_x to x_min, x_max
"""
if limit_lower is None and limit_upper is None:
return self._limit_x
elif hasattr(limit_lower, "__iter__"):
self._limit_x = limit_lower[:2]
else:
self._limit_x = [limit_lower, limit_upper]
if self._limit_x[0] == self._limit_x[1]:
self._limit_x[1] += 1
self._limit_x[0] -= self.mod_x
self._limit_x[1] += self.mod_x |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.