code | docstring | text |
|---|---|---|
def configure(ctx, integration, args, show_args, editable):
"""Configure an integration with default parameters.
You can still provide one-off integration arguments to :func:`honeycomb.commands.service.run` if required.
"""
home = ctx.obj["HOME"]
integration_path = plugin_utils.get_plugin_path(home, defs.INTEGRATIONS, integration, editable)
logger.debug("running command %s (%s)", ctx.command.name, ctx.params,
extra={"command": ctx.command.name, "params": ctx.params})
logger.debug("loading {} ({})".format(integration, integration_path))
integration = register_integration(integration_path)
if show_args:
return plugin_utils.print_plugin_args(integration_path)
# get our integration class instance
integration_args = plugin_utils.parse_plugin_args(args, config_utils.get_config_parameters(integration_path))
args_file = os.path.join(integration_path, defs.ARGS_JSON)
with open(args_file, "w") as f:
data = json.dumps(integration_args)
logger.debug("writing %s to %s", data, args_file)
f.write(json.dumps(integration_args))
click.secho("[*] {0} has been configured, make sure to test it with `honeycomb integration test {0}`"
.format(integration.name)) | Configure an integration with default parameters.
You can still provide one-off integration arguments to :func:`honeycomb.commands.service.run` if required. | Below is the instruction that describes the task:
### Input:
Configure an integration with default parameters.
You can still provide one-off integration arguments to :func:`honeycomb.commands.service.run` if required.
### Response:
def configure(ctx, integration, args, show_args, editable):
"""Configure an integration with default parameters.
You can still provide one-off integration arguments to :func:`honeycomb.commands.service.run` if required.
"""
home = ctx.obj["HOME"]
integration_path = plugin_utils.get_plugin_path(home, defs.INTEGRATIONS, integration, editable)
logger.debug("running command %s (%s)", ctx.command.name, ctx.params,
extra={"command": ctx.command.name, "params": ctx.params})
logger.debug("loading {} ({})".format(integration, integration_path))
integration = register_integration(integration_path)
if show_args:
return plugin_utils.print_plugin_args(integration_path)
# get our integration class instance
integration_args = plugin_utils.parse_plugin_args(args, config_utils.get_config_parameters(integration_path))
args_file = os.path.join(integration_path, defs.ARGS_JSON)
with open(args_file, "w") as f:
data = json.dumps(integration_args)
logger.debug("writing %s to %s", data, args_file)
f.write(json.dumps(integration_args))
click.secho("[*] {0} has been configured, make sure to test it with `honeycomb integration test {0}`"
.format(integration.name)) |
def handle_single_message(self, msg):
"""
Handle one message and modify the job storage appropriately.
:param msg: the message to handle
:return: None
"""
job_id = msg.message['job_id']
actual_msg = msg.message
if msg.type == MessageType.JOB_UPDATED:
progress = actual_msg['progress']
total_progress = actual_msg['total_progress']
self.storage_backend.update_job_progress(job_id, progress,
total_progress)
elif msg.type == MessageType.JOB_COMPLETED:
self.storage_backend.complete_job(job_id)
elif msg.type == MessageType.JOB_FAILED:
exc = actual_msg['exception']
trace = actual_msg['traceback']
self.storage_backend.mark_job_as_failed(job_id, exc, trace)
elif msg.type == MessageType.JOB_CANCELED:
self.storage_backend.mark_job_as_canceled(job_id)
else:
self.logger.error("Unknown message type: {}".format(msg.type)) | Handle one message and modify the job storage appropriately.
:param msg: the message to handle
:return: None | Below is the instruction that describes the task:
### Input:
Handle one message and modify the job storage appropriately.
:param msg: the message to handle
:return: None
### Response:
def handle_single_message(self, msg):
"""
Handle one message and modify the job storage appropriately.
:param msg: the message to handle
:return: None
"""
job_id = msg.message['job_id']
actual_msg = msg.message
if msg.type == MessageType.JOB_UPDATED:
progress = actual_msg['progress']
total_progress = actual_msg['total_progress']
self.storage_backend.update_job_progress(job_id, progress,
total_progress)
elif msg.type == MessageType.JOB_COMPLETED:
self.storage_backend.complete_job(job_id)
elif msg.type == MessageType.JOB_FAILED:
exc = actual_msg['exception']
trace = actual_msg['traceback']
self.storage_backend.mark_job_as_failed(job_id, exc, trace)
elif msg.type == MessageType.JOB_CANCELED:
self.storage_backend.mark_job_as_canceled(job_id)
else:
self.logger.error("Unknown message type: {}".format(msg.type)) |
def RunValidationOutputToConsole(feed, options):
"""Validate feed, print reports and return an exit code."""
accumulator = CountingConsoleProblemAccumulator(
options.error_types_ignore_list)
problems = transitfeed.ProblemReporter(accumulator)
_, exit_code = RunValidation(feed, options, problems)
  return exit_code | Validate feed, print reports and return an exit code. | Below is the instruction that describes the task:
### Input:
Validate feed, print reports and return an exit code.
### Response:
def RunValidationOutputToConsole(feed, options):
"""Validate feed, print reports and return an exit code."""
accumulator = CountingConsoleProblemAccumulator(
options.error_types_ignore_list)
problems = transitfeed.ProblemReporter(accumulator)
_, exit_code = RunValidation(feed, options, problems)
return exit_code |
def search_all(self, template: str) -> _Result:
"""Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
"""
return [r for r in findall(template, self.html)] | Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use. | Below is the instruction that describes the task:
### Input:
Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
### Response:
def search_all(self, template: str) -> _Result:
"""Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
"""
return [r for r in findall(template, self.html)] |
def _function_has_n_args(self, func, n):
"""
Returns true if func has n arguments. Arguments with default and self for
methods are not considered.
"""
if inspect.ismethod(func):
n += 1
argspec = inspect.getargspec(func)
defaults = argspec.defaults or ()
return len(argspec.args) - len(defaults) == n | Returns true if func has n arguments. Arguments with default and self for
methods are not considered. | Below is the instruction that describes the task:
### Input:
Returns true if func has n arguments. Arguments with default and self for
methods are not considered.
### Response:
def _function_has_n_args(self, func, n):
"""
Returns true if func has n arguments. Arguments with default and self for
methods are not considered.
"""
if inspect.ismethod(func):
n += 1
argspec = inspect.getargspec(func)
defaults = argspec.defaults or ()
return len(argspec.args) - len(defaults) == n |
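As a side note on the row above: `inspect.getargspec` is deprecated and removed in recent Python versions. A minimal standalone sketch of the same required-argument count using `inspect.getfullargspec`; the `sample` function below is a made-up illustration, not part of the original module:

```python
import inspect

def sample(a, b, c=1):
    pass

spec = inspect.getfullargspec(sample)
# Arguments with default values are not counted as required.
required = len(spec.args) - len(spec.defaults or ())
print(required)  # 2
```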
def _versioned_lib_suffix(env, suffix, version):
"""Generate versioned shared library suffix from a unversioned one.
If suffix='.dll', and version='0.1.2', then it returns '-0-1-2.dll'"""
Verbose = False
if Verbose:
print("_versioned_lib_suffix: suffix= ", suffix)
print("_versioned_lib_suffix: version= ", version)
cygversion = re.sub('\.', '-', version)
if not suffix.startswith('-' + cygversion):
suffix = '-' + cygversion + suffix
if Verbose:
print("_versioned_lib_suffix: return suffix= ", suffix)
    return suffix | Generate versioned shared library suffix from an unversioned one.
If suffix='.dll', and version='0.1.2', then it returns '-0-1-2.dll' | Below is the instruction that describes the task:
### Input:
Generate versioned shared library suffix from an unversioned one.
If suffix='.dll', and version='0.1.2', then it returns '-0-1-2.dll'
### Response:
def _versioned_lib_suffix(env, suffix, version):
"""Generate versioned shared library suffix from a unversioned one.
If suffix='.dll', and version='0.1.2', then it returns '-0-1-2.dll'"""
Verbose = False
if Verbose:
print("_versioned_lib_suffix: suffix= ", suffix)
print("_versioned_lib_suffix: version= ", version)
cygversion = re.sub('\.', '-', version)
if not suffix.startswith('-' + cygversion):
suffix = '-' + cygversion + suffix
if Verbose:
print("_versioned_lib_suffix: return suffix= ", suffix)
return suffix |
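For reference, the dot-to-dash rewrite described in the docstring above can be sketched standalone (without the SCons `env` parameter); the function name below is illustrative only:

```python
import re

def versioned_suffix(suffix, version):
    # Turn '0.1.2' into '0-1-2' and prepend it to the suffix,
    # unless the suffix already carries that version.
    cygversion = re.sub(r'\.', '-', version)
    if not suffix.startswith('-' + cygversion):
        suffix = '-' + cygversion + suffix
    return suffix

print(versioned_suffix('.dll', '0.1.2'))  # -0-1-2.dll
```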
def _create_out_partition(self, tenant_id, tenant_name):
"""Function to create a service partition. """
vrf_prof_str = self.serv_part_vrf_prof
self.dcnm_obj.create_partition(tenant_name, fw_const.SERV_PART_NAME,
None, vrf_prof_str,
desc="Service Partition") | Function to create a service partition. | Below is the the instruction that describes the task:
### Input:
Function to create a service partition.
### Response:
def _create_out_partition(self, tenant_id, tenant_name):
"""Function to create a service partition. """
vrf_prof_str = self.serv_part_vrf_prof
self.dcnm_obj.create_partition(tenant_name, fw_const.SERV_PART_NAME,
None, vrf_prof_str,
desc="Service Partition") |
def wrap_inference_results(inference_result_proto):
"""Returns packaged inference results from the provided proto.
Args:
inference_result_proto: The classification or regression response proto.
Returns:
An InferenceResult proto with the result from the response.
"""
inference_proto = inference_pb2.InferenceResult()
if isinstance(inference_result_proto,
classification_pb2.ClassificationResponse):
inference_proto.classification_result.CopyFrom(
inference_result_proto.result)
elif isinstance(inference_result_proto, regression_pb2.RegressionResponse):
inference_proto.regression_result.CopyFrom(inference_result_proto.result)
return inference_proto | Returns packaged inference results from the provided proto.
Args:
inference_result_proto: The classification or regression response proto.
Returns:
An InferenceResult proto with the result from the response. | Below is the instruction that describes the task:
### Input:
Returns packaged inference results from the provided proto.
Args:
inference_result_proto: The classification or regression response proto.
Returns:
An InferenceResult proto with the result from the response.
### Response:
def wrap_inference_results(inference_result_proto):
"""Returns packaged inference results from the provided proto.
Args:
inference_result_proto: The classification or regression response proto.
Returns:
An InferenceResult proto with the result from the response.
"""
inference_proto = inference_pb2.InferenceResult()
if isinstance(inference_result_proto,
classification_pb2.ClassificationResponse):
inference_proto.classification_result.CopyFrom(
inference_result_proto.result)
elif isinstance(inference_result_proto, regression_pb2.RegressionResponse):
inference_proto.regression_result.CopyFrom(inference_result_proto.result)
return inference_proto |
def getHostCertPath(self, name):
'''
Gets the path to a host certificate.
Args:
name (str): The name of the host keypair.
Examples:
Get the path to the host certificate for the host "myhost":
mypath = cdir.getHostCertPath('myhost')
Returns:
str: The path if exists.
'''
path = s_common.genpath(self.certdir, 'hosts', '%s.crt' % name)
if not os.path.isfile(path):
return None
return path | Gets the path to a host certificate.
Args:
name (str): The name of the host keypair.
Examples:
Get the path to the host certificate for the host "myhost":
mypath = cdir.getHostCertPath('myhost')
Returns:
str: The path if exists. | Below is the instruction that describes the task:
### Input:
Gets the path to a host certificate.
Args:
name (str): The name of the host keypair.
Examples:
Get the path to the host certificate for the host "myhost":
mypath = cdir.getHostCertPath('myhost')
Returns:
str: The path if exists.
### Response:
def getHostCertPath(self, name):
'''
Gets the path to a host certificate.
Args:
name (str): The name of the host keypair.
Examples:
Get the path to the host certificate for the host "myhost":
mypath = cdir.getHostCertPath('myhost')
Returns:
str: The path if exists.
'''
path = s_common.genpath(self.certdir, 'hosts', '%s.crt' % name)
if not os.path.isfile(path):
return None
return path |
def save_file(self, obj):
"""Save a file"""
try:
import StringIO as pystringIO #we can't use cStringIO as it lacks the name attribute
except ImportError:
import io as pystringIO
if not hasattr(obj, 'name') or not hasattr(obj, 'mode'):
raise pickle.PicklingError("Cannot pickle files that do not map to an actual file")
if obj is sys.stdout:
return self.save_reduce(getattr, (sys,'stdout'), obj=obj)
if obj is sys.stderr:
return self.save_reduce(getattr, (sys,'stderr'), obj=obj)
if obj is sys.stdin:
raise pickle.PicklingError("Cannot pickle standard input")
if obj.closed:
raise pickle.PicklingError("Cannot pickle closed files")
if hasattr(obj, 'isatty') and obj.isatty():
raise pickle.PicklingError("Cannot pickle files that map to tty objects")
if 'r' not in obj.mode and '+' not in obj.mode:
raise pickle.PicklingError("Cannot pickle files that are not opened for reading: %s" % obj.mode)
name = obj.name
retval = pystringIO.StringIO()
try:
# Read the whole file
curloc = obj.tell()
obj.seek(0)
contents = obj.read()
obj.seek(curloc)
except IOError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
retval.write(contents)
retval.seek(curloc)
retval.name = name
self.save(retval)
        self.memoize(obj) | Save a file | Below is the instruction that describes the task:
### Input:
Save a file
### Response:
def save_file(self, obj):
"""Save a file"""
try:
import StringIO as pystringIO #we can't use cStringIO as it lacks the name attribute
except ImportError:
import io as pystringIO
if not hasattr(obj, 'name') or not hasattr(obj, 'mode'):
raise pickle.PicklingError("Cannot pickle files that do not map to an actual file")
if obj is sys.stdout:
return self.save_reduce(getattr, (sys,'stdout'), obj=obj)
if obj is sys.stderr:
return self.save_reduce(getattr, (sys,'stderr'), obj=obj)
if obj is sys.stdin:
raise pickle.PicklingError("Cannot pickle standard input")
if obj.closed:
raise pickle.PicklingError("Cannot pickle closed files")
if hasattr(obj, 'isatty') and obj.isatty():
raise pickle.PicklingError("Cannot pickle files that map to tty objects")
if 'r' not in obj.mode and '+' not in obj.mode:
raise pickle.PicklingError("Cannot pickle files that are not opened for reading: %s" % obj.mode)
name = obj.name
retval = pystringIO.StringIO()
try:
# Read the whole file
curloc = obj.tell()
obj.seek(0)
contents = obj.read()
obj.seek(curloc)
except IOError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
retval.write(contents)
retval.seek(curloc)
retval.name = name
self.save(retval)
self.memoize(obj) |
def ensure_matplotlib_figure(obj):
"""Extract the current figure from a matplotlib object or return the object if it's a figure.
raises ValueError if the object can't be converted.
"""
import matplotlib
from matplotlib.figure import Figure
if obj == matplotlib.pyplot:
obj = obj.gcf()
elif not isinstance(obj, Figure):
if hasattr(obj, "figure"):
obj = obj.figure
# Some matplotlib objects have a figure function
if not isinstance(obj, Figure):
raise ValueError(
"Only matplotlib.pyplot or matplotlib.pyplot.Figure objects are accepted.")
if not obj.gca().has_data():
raise ValueError(
"You attempted to log an empty plot, pass a figure directly or ensure the global plot isn't closed.")
return obj | Extract the current figure from a matplotlib object or return the object if it's a figure.
raises ValueError if the object can't be converted. | Below is the instruction that describes the task:
### Input:
Extract the current figure from a matplotlib object or return the object if it's a figure.
raises ValueError if the object can't be converted.
### Response:
def ensure_matplotlib_figure(obj):
"""Extract the current figure from a matplotlib object or return the object if it's a figure.
raises ValueError if the object can't be converted.
"""
import matplotlib
from matplotlib.figure import Figure
if obj == matplotlib.pyplot:
obj = obj.gcf()
elif not isinstance(obj, Figure):
if hasattr(obj, "figure"):
obj = obj.figure
# Some matplotlib objects have a figure function
if not isinstance(obj, Figure):
raise ValueError(
"Only matplotlib.pyplot or matplotlib.pyplot.Figure objects are accepted.")
if not obj.gca().has_data():
raise ValueError(
"You attempted to log an empty plot, pass a figure directly or ensure the global plot isn't closed.")
return obj |
def info(self, auth, resource, options={}, defer=False):
""" Request creation and usage information of specified resource according to the specified
options.
Args:
auth: <cik>
resource: Alias or ID of resource
options: Options to define what info you would like returned.
"""
return self._call('info', auth, [resource, options], defer) | Request creation and usage information of specified resource according to the specified
options.
Args:
auth: <cik>
resource: Alias or ID of resource
options: Options to define what info you would like returned. | Below is the instruction that describes the task:
### Input:
Request creation and usage information of specified resource according to the specified
options.
Args:
auth: <cik>
resource: Alias or ID of resource
options: Options to define what info you would like returned.
### Response:
def info(self, auth, resource, options={}, defer=False):
""" Request creation and usage information of specified resource according to the specified
options.
Args:
auth: <cik>
resource: Alias or ID of resource
options: Options to define what info you would like returned.
"""
return self._call('info', auth, [resource, options], defer) |
def _containing_contigs(self, hits):
'''Given a list of hits, all with same query,
returns a set of the contigs containing that query'''
return {hit.ref_name for hit in hits if self._contains(hit)} | Given a list of hits, all with same query,
returns a set of the contigs containing that query | Below is the instruction that describes the task:
### Input:
Given a list of hits, all with same query,
returns a set of the contigs containing that query
### Response:
def _containing_contigs(self, hits):
'''Given a list of hits, all with same query,
returns a set of the contigs containing that query'''
return {hit.ref_name for hit in hits if self._contains(hit)} |
def resolve_rva(self, rva):
"""
RVAs are supposed to be used with the image of the file in memory.
There's no direct algorithm to calculate the offset of an RVA in
the file.
What we do here is to find the section that contains the RVA and
then we calculate the offset between the RVA of the section
and the offset of the section in the file. With this offset, we can
compute the position of the RVA in the file
"""
containing_section = self.get_section_of_rva(rva)
in_section_offset = containing_section.PointerToRawData -\
containing_section.VirtualAddress
return in_section_offset + rva | RVAs are supposed to be used with the image of the file in memory.
There's no direct algorithm to calculate the offset of an RVA in
the file.
What we do here is to find the section that contains the RVA and
then we calculate the offset between the RVA of the section
and the offset of the section in the file. With this offset, we can
compute the position of the RVA in the file | Below is the instruction that describes the task:
### Input:
RVAs are supposed to be used with the image of the file in memory.
There's no direct algorithm to calculate the offset of an RVA in
the file.
What we do here is to find the section that contains the RVA and
then we calculate the offset between the RVA of the section
and the offset of the section in the file. With this offset, we can
compute the position of the RVA in the file
### Response:
def resolve_rva(self, rva):
"""
RVAs are supposed to be used with the image of the file in memory.
There's no direct algorithm to calculate the offset of an RVA in
the file.
What we do here is to find the section that contains the RVA and
then we calculate the offset between the RVA of the section
and the offset of the section in the file. With this offset, we can
compute the position of the RVA in the file
"""
containing_section = self.get_section_of_rva(rva)
in_section_offset = containing_section.PointerToRawData -\
containing_section.VirtualAddress
return in_section_offset + rva |
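The `resolve_rva` docstring boils down to simple address arithmetic: subtract the section's virtual address and add its raw file offset. A hedged standalone illustration with made-up section values (not tied to any particular PE parser):

```python
from collections import namedtuple

Section = namedtuple("Section", "VirtualAddress PointerToRawData")

def rva_to_file_offset(rva, section):
    # Position of the RVA within its section, rebased onto the
    # section's location in the on-disk file.
    return rva - section.VirtualAddress + section.PointerToRawData

text = Section(VirtualAddress=0x1000, PointerToRawData=0x400)
print(hex(rva_to_file_offset(0x1234, text)))  # 0x634
```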
def load_general(self):
"""|coro|
Loads the players general stats"""
stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
"generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
"generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
"generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
"generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
"generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
"generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
"generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills")
statname = "generalpvp_"
self.deaths = stats.get(statname + "death", 0)
self.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.matches_won = stats.get(statname + "matchwon", 0)
self.bullets_hit = stats.get(statname + "bullethit", 0)
self.melee_kills = stats.get(statname + "meleekills", 0)
self.bullets_fired = stats.get(statname + "bulletfired", 0)
self.matches_played = stats.get(statname + "matchplayed", 0)
self.kill_assists = stats.get(statname + "killassists", 0)
self.time_played = stats.get(statname + "timeplayed", 0)
self.revives = stats.get(statname + "revive", 0)
self.kills = stats.get(statname + "kills", 0)
self.headshots = stats.get(statname + "headshot", 0)
self.matches_lost = stats.get(statname + "matchlost", 0)
self.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.suicides = stats.get(statname + "suicide", 0)
self.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.total_xp = stats.get(statname + "totalxp", 0)
self.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.revives_denied = stats.get(statname + "revivedenied", 0)
self.dbnos = stats.get(statname + "dbno", 0)
self.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.blind_kills = stats.get(statname + "blindkills") | |coro|
Loads the players general stats | Below is the instruction that describes the task:
### Input:
|coro|
Loads the players general stats
### Response:
def load_general(self):
"""|coro|
Loads the players general stats"""
stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
"generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
"generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
"generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
"generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
"generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
"generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
"generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills")
statname = "generalpvp_"
self.deaths = stats.get(statname + "death", 0)
self.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.matches_won = stats.get(statname + "matchwon", 0)
self.bullets_hit = stats.get(statname + "bullethit", 0)
self.melee_kills = stats.get(statname + "meleekills", 0)
self.bullets_fired = stats.get(statname + "bulletfired", 0)
self.matches_played = stats.get(statname + "matchplayed", 0)
self.kill_assists = stats.get(statname + "killassists", 0)
self.time_played = stats.get(statname + "timeplayed", 0)
self.revives = stats.get(statname + "revive", 0)
self.kills = stats.get(statname + "kills", 0)
self.headshots = stats.get(statname + "headshot", 0)
self.matches_lost = stats.get(statname + "matchlost", 0)
self.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.suicides = stats.get(statname + "suicide", 0)
self.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.total_xp = stats.get(statname + "totalxp", 0)
self.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.revives_denied = stats.get(statname + "revivedenied", 0)
self.dbnos = stats.get(statname + "dbno", 0)
self.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.blind_kills = stats.get(statname + "blindkills") |
def apply_stats(self, statsUpdates):
""" compute stats and update/apply the new stats to the running average
"""
def updateAccumStats():
if self._full_stats_init:
return tf.cond(tf.greater(self.sgd_step, self._cold_iter), lambda: tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)), tf.no_op)
else:
return tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter))
def updateRunningAvgStats(statsUpdates, fac_iter=1):
# return tf.cond(tf.greater_equal(self.factor_step,
# tf.convert_to_tensor(fac_iter)), lambda:
# tf.group(*self._apply_stats(stats_list, varlist)), tf.no_op)
return tf.group(*self._apply_stats(statsUpdates))
if self._async_stats:
# asynchronous stats update
update_stats = self._apply_stats(statsUpdates)
queue = tf.FIFOQueue(1, [item.dtype for item in update_stats], shapes=[
item.get_shape() for item in update_stats])
enqueue_op = queue.enqueue(update_stats)
def dequeue_stats_op():
return queue.dequeue()
self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op])
update_stats_op = tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(
0)), tf.no_op, lambda: tf.group(*[dequeue_stats_op(), ]))
else:
# synchronous stats update
update_stats_op = tf.cond(tf.greater_equal(
self.stats_step, self._stats_accum_iter), lambda: updateRunningAvgStats(statsUpdates), updateAccumStats)
self._update_stats_op = update_stats_op
        return update_stats_op | compute stats and update/apply the new stats to the running average | Below is the instruction that describes the task:
### Input:
compute stats and update/apply the new stats to the running average
### Response:
def apply_stats(self, statsUpdates):
""" compute stats and update/apply the new stats to the running average
"""
def updateAccumStats():
if self._full_stats_init:
return tf.cond(tf.greater(self.sgd_step, self._cold_iter), lambda: tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)), tf.no_op)
else:
return tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter))
def updateRunningAvgStats(statsUpdates, fac_iter=1):
# return tf.cond(tf.greater_equal(self.factor_step,
# tf.convert_to_tensor(fac_iter)), lambda:
# tf.group(*self._apply_stats(stats_list, varlist)), tf.no_op)
return tf.group(*self._apply_stats(statsUpdates))
if self._async_stats:
# asynchronous stats update
update_stats = self._apply_stats(statsUpdates)
queue = tf.FIFOQueue(1, [item.dtype for item in update_stats], shapes=[
item.get_shape() for item in update_stats])
enqueue_op = queue.enqueue(update_stats)
def dequeue_stats_op():
return queue.dequeue()
self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op])
update_stats_op = tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(
0)), tf.no_op, lambda: tf.group(*[dequeue_stats_op(), ]))
else:
# synchronous stats update
update_stats_op = tf.cond(tf.greater_equal(
self.stats_step, self._stats_accum_iter), lambda: updateRunningAvgStats(statsUpdates), updateAccumStats)
self._update_stats_op = update_stats_op
return update_stats_op |
def list(per_page=None, page=None):
"""
List of payments. You have to handle pagination manually
:param page: the page number
:type page: int|None
:param per_page: number of payment per page. It's a good practice to increase this number if you know that you
will need a lot of payments.
:type per_page: int|None
:return A collection of payment
:rtype resources.APIResourceCollection
"""
# Comprehension dict are not supported in Python 2.6-. You can use this commented line instead of the current
# line when you drop support for Python 2.6.
# pagination = {key: value for (key, value) in [('page', page), ('per_page', per_page)] if value}
pagination = dict((key, value) for (key, value) in [('page', page), ('per_page', per_page)] if value)
http_client = HttpClient()
response, _ = http_client.get(routes.url(routes.PAYMENT_RESOURCE, pagination=pagination))
return resources.APIResourceCollection(resources.Payment, **response) | List of payments. You have to handle pagination manually
:param page: the page number
:type page: int|None
:param per_page: number of payment per page. It's a good practice to increase this number if you know that you
will need a lot of payments.
:type per_page: int|None
:return A collection of payment
:rtype resources.APIResourceCollection | Below is the instruction that describes the task:
### Input:
List of payments. You have to handle pagination manually
:param page: the page number
:type page: int|None
:param per_page: number of payment per page. It's a good practice to increase this number if you know that you
will need a lot of payments.
:type per_page: int|None
:return A collection of payment
:rtype resources.APIResourceCollection
### Response:
def list(per_page=None, page=None):
"""
List of payments. You have to handle pagination manually
:param page: the page number
:type page: int|None
:param per_page: number of payment per page. It's a good practice to increase this number if you know that you
will need a lot of payments.
:type per_page: int|None
:return A collection of payment
:rtype resources.APIResourceCollection
"""
# Comprehension dict are not supported in Python 2.6-. You can use this commented line instead of the current
# line when you drop support for Python 2.6.
# pagination = {key: value for (key, value) in [('page', page), ('per_page', per_page)] if value}
pagination = dict((key, value) for (key, value) in [('page', page), ('per_page', per_page)] if value)
http_client = HttpClient()
response, _ = http_client.get(routes.url(routes.PAYMENT_RESOURCE, pagination=pagination))
return resources.APIResourceCollection(resources.Payment, **response) |
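The commented-out dict comprehension and the Python 2.6-compatible `dict()` call in the row above are equivalent: both drop falsy pagination values so they are never sent. A tiny standalone illustration:

```python
page, per_page = 2, None

# per_page is None (falsy), so it is omitted from the query parameters.
pagination = dict(
    (key, value)
    for (key, value) in [('page', page), ('per_page', per_page)]
    if value
)
print(pagination)  # {'page': 2}
```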
def update():
'''
Execute an svn update on all of the repos
'''
# data for the fileserver event
data = {'changed': False,
'backend': 'svnfs'}
# _clear_old_remotes runs init(), so use the value from there to avoid a
# second init()
data['changed'], repos = _clear_old_remotes()
for repo in repos:
if os.path.exists(repo['lockfile']):
log.warning(
'Update lockfile is present for svnfs remote %s, skipping. '
'If this warning persists, it is possible that the update '
'process was interrupted. Removing %s or running '
'\'salt-run fileserver.clear_lock svnfs\' will allow updates '
'to continue for this remote.', repo['url'], repo['lockfile']
)
continue
_, errors = lock(repo)
if errors:
log.error(
'Unable to set update lock for svnfs remote %s, skipping.',
repo['url']
)
continue
log.debug('svnfs is fetching from %s', repo['url'])
old_rev = _rev(repo)
try:
CLIENT.update(repo['repo'])
except pysvn._pysvn.ClientError as exc:
log.error(
'Error updating svnfs remote %s (cachedir: %s): %s',
repo['url'], repo['cachedir'], exc
)
new_rev = _rev(repo)
if any((x is None for x in (old_rev, new_rev))):
# There were problems getting the revision ID
continue
if new_rev != old_rev:
data['changed'] = True
clear_lock(repo)
env_cache = os.path.join(__opts__['cachedir'], 'svnfs/envs.p')
if data.get('changed', False) is True or not os.path.isfile(env_cache):
env_cachedir = os.path.dirname(env_cache)
if not os.path.exists(env_cachedir):
os.makedirs(env_cachedir)
new_envs = envs(ignore_cache=True)
serial = salt.payload.Serial(__opts__)
with salt.utils.files.fopen(env_cache, 'wb+') as fp_:
fp_.write(serial.dumps(new_envs))
log.trace('Wrote env cache data to %s', env_cache)
# if there is a change, fire an event
if __opts__.get('fileserver_events', False):
event = salt.utils.event.get_event(
'master',
__opts__['sock_dir'],
__opts__['transport'],
opts=__opts__,
listen=False)
event.fire_event(data, tagify(['svnfs', 'update'], prefix='fileserver'))
try:
salt.fileserver.reap_fileserver_cache_dir(
os.path.join(__opts__['cachedir'], 'svnfs/hash'),
find_file
)
except (IOError, OSError):
# Hash file won't exist if no files have yet been served up
        pass | Execute an svn update on all of the repos | Below is the instruction that describes the task:
### Input:
Execute an svn update on all of the repos
### Response:
def update():
'''
Execute an svn update on all of the repos
'''
# data for the fileserver event
data = {'changed': False,
'backend': 'svnfs'}
# _clear_old_remotes runs init(), so use the value from there to avoid a
# second init()
data['changed'], repos = _clear_old_remotes()
for repo in repos:
if os.path.exists(repo['lockfile']):
log.warning(
'Update lockfile is present for svnfs remote %s, skipping. '
'If this warning persists, it is possible that the update '
'process was interrupted. Removing %s or running '
'\'salt-run fileserver.clear_lock svnfs\' will allow updates '
'to continue for this remote.', repo['url'], repo['lockfile']
)
continue
_, errors = lock(repo)
if errors:
log.error(
'Unable to set update lock for svnfs remote %s, skipping.',
repo['url']
)
continue
log.debug('svnfs is fetching from %s', repo['url'])
old_rev = _rev(repo)
try:
CLIENT.update(repo['repo'])
except pysvn._pysvn.ClientError as exc:
log.error(
'Error updating svnfs remote %s (cachedir: %s): %s',
repo['url'], repo['cachedir'], exc
)
new_rev = _rev(repo)
if any((x is None for x in (old_rev, new_rev))):
# There were problems getting the revision ID
continue
if new_rev != old_rev:
data['changed'] = True
clear_lock(repo)
env_cache = os.path.join(__opts__['cachedir'], 'svnfs/envs.p')
if data.get('changed', False) is True or not os.path.isfile(env_cache):
env_cachedir = os.path.dirname(env_cache)
if not os.path.exists(env_cachedir):
os.makedirs(env_cachedir)
new_envs = envs(ignore_cache=True)
serial = salt.payload.Serial(__opts__)
with salt.utils.files.fopen(env_cache, 'wb+') as fp_:
fp_.write(serial.dumps(new_envs))
log.trace('Wrote env cache data to %s', env_cache)
# if there is a change, fire an event
if __opts__.get('fileserver_events', False):
event = salt.utils.event.get_event(
'master',
__opts__['sock_dir'],
__opts__['transport'],
opts=__opts__,
listen=False)
event.fire_event(data, tagify(['svnfs', 'update'], prefix='fileserver'))
try:
salt.fileserver.reap_fileserver_cache_dir(
os.path.join(__opts__['cachedir'], 'svnfs/hash'),
find_file
)
except (IOError, OSError):
# Hash file won't exist if no files have yet been served up
pass |
def next_request(self):
'''
Logic to handle getting a new url request, from a bunch of
different queues
'''
t = time.time()
# update the redis queues every so often
if t - self.update_time > self.update_interval:
self.update_time = t
self.create_queues()
self.expire_queues()
# update the ip address every so often
if t - self.update_ip_time > self.ip_update_interval:
self.update_ip_time = t
self.update_ipaddress()
self.report_self()
item = self.find_item()
if item:
self.logger.debug("Found url to crawl {url}" \
.format(url=item['url']))
try:
req = Request(item['url'])
except ValueError:
# need absolute url
# need better url validation here
req = Request('http://' + item['url'])
try:
if 'callback' in item and item['callback'] is not None:
req.callback = getattr(self.spider, item['callback'])
except AttributeError:
self.logger.warn("Unable to find callback method")
try:
if 'errback' in item and item['errback'] is not None:
req.errback = getattr(self.spider, item['errback'])
except AttributeError:
self.logger.warn("Unable to find errback method")
if 'meta' in item:
item = item['meta']
# defaults not in schema
if 'curdepth' not in item:
item['curdepth'] = 0
if "retry_times" not in item:
item['retry_times'] = 0
for key in list(item.keys()):
req.meta[key] = item[key]
# extra check to add items to request
if 'useragent' in item and item['useragent'] is not None:
req.headers['User-Agent'] = item['useragent']
if 'cookie' in item and item['cookie'] is not None:
if isinstance(item['cookie'], dict):
req.cookies = item['cookie']
elif isinstance(item['cookie'], basestring):
req.cookies = self.parse_cookie(item['cookie'])
return req
return None | Logic to handle getting a new url request, from a bunch of
different queues | Below is the the instruction that describes the task:
### Input:
Logic to handle getting a new url request, from a bunch of
different queues
### Response:
def next_request(self):
'''
Logic to handle getting a new url request, from a bunch of
different queues
'''
t = time.time()
# update the redis queues every so often
if t - self.update_time > self.update_interval:
self.update_time = t
self.create_queues()
self.expire_queues()
# update the ip address every so often
if t - self.update_ip_time > self.ip_update_interval:
self.update_ip_time = t
self.update_ipaddress()
self.report_self()
item = self.find_item()
if item:
self.logger.debug("Found url to crawl {url}" \
.format(url=item['url']))
try:
req = Request(item['url'])
except ValueError:
# need absolute url
# need better url validation here
req = Request('http://' + item['url'])
try:
if 'callback' in item and item['callback'] is not None:
req.callback = getattr(self.spider, item['callback'])
except AttributeError:
self.logger.warn("Unable to find callback method")
try:
if 'errback' in item and item['errback'] is not None:
req.errback = getattr(self.spider, item['errback'])
except AttributeError:
self.logger.warn("Unable to find errback method")
if 'meta' in item:
item = item['meta']
# defaults not in schema
if 'curdepth' not in item:
item['curdepth'] = 0
if "retry_times" not in item:
item['retry_times'] = 0
for key in list(item.keys()):
req.meta[key] = item[key]
# extra check to add items to request
if 'useragent' in item and item['useragent'] is not None:
req.headers['User-Agent'] = item['useragent']
if 'cookie' in item and item['cookie'] is not None:
if isinstance(item['cookie'], dict):
req.cookies = item['cookie']
elif isinstance(item['cookie'], basestring):
req.cookies = self.parse_cookie(item['cookie'])
return req
return None |
def main(*argv):
""" main driver of program """
try:
adminUsername = argv[0]
adminPassword = argv[1]
siteURL = argv[2]
deleteUser = argv[3]
# Logic
#
sh = arcrest.AGOLTokenSecurityHandler(adminUsername, adminPassword)
admin = arcrest.manageorg.Administration(securityHandler=sh)
community = admin.community
user = community.user
res = user.deleteUser(username=deleteUser)
if res.has_key('success'):
arcpy.SetParameterAsText(4, str(res['success']).lower() == "true")
else:
arcpy.SetParameterAsText(4, False)
del sh
del admin
del community
del user
del res
except arcpy.ExecuteError:
line, filename, synerror = trace()
arcpy.AddError("error on line: %s" % line)
arcpy.AddError("error in file name: %s" % filename)
arcpy.AddError("with error message: %s" % synerror)
arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2))
except FunctionError, f_e:
messages = f_e.args[0]
arcpy.AddError("error in function: %s" % messages["function"])
arcpy.AddError("error on line: %s" % messages["line"])
arcpy.AddError("error in file name: %s" % messages["filename"])
arcpy.AddError("with error message: %s" % messages["synerror"])
arcpy.AddError("ArcPy Error Message: %s" % messages["arc"])
except:
line, filename, synerror = trace()
arcpy.AddError("error on line: %s" % line)
arcpy.AddError("error in file name: %s" % filename)
arcpy.AddError("with error message: %s" % synerror) | main driver of program | Below is the the instruction that describes the task:
### Input:
main driver of program
### Response:
def main(*argv):
""" main driver of program """
try:
adminUsername = argv[0]
adminPassword = argv[1]
siteURL = argv[2]
deleteUser = argv[3]
# Logic
#
sh = arcrest.AGOLTokenSecurityHandler(adminUsername, adminPassword)
admin = arcrest.manageorg.Administration(securityHandler=sh)
community = admin.community
user = community.user
res = user.deleteUser(username=deleteUser)
if res.has_key('success'):
arcpy.SetParameterAsText(4, str(res['success']).lower() == "true")
else:
arcpy.SetParameterAsText(4, False)
del sh
del admin
del community
del user
del res
except arcpy.ExecuteError:
line, filename, synerror = trace()
arcpy.AddError("error on line: %s" % line)
arcpy.AddError("error in file name: %s" % filename)
arcpy.AddError("with error message: %s" % synerror)
arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2))
except FunctionError, f_e:
messages = f_e.args[0]
arcpy.AddError("error in function: %s" % messages["function"])
arcpy.AddError("error on line: %s" % messages["line"])
arcpy.AddError("error in file name: %s" % messages["filename"])
arcpy.AddError("with error message: %s" % messages["synerror"])
arcpy.AddError("ArcPy Error Message: %s" % messages["arc"])
except:
line, filename, synerror = trace()
arcpy.AddError("error on line: %s" % line)
arcpy.AddError("error in file name: %s" % filename)
arcpy.AddError("with error message: %s" % synerror) |
def _strip_consts(graph_def, max_const_size=32):
"""Strip large constant values from graph_def.
This is mostly a utility function for graph(), and also originates here:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb
"""
strip_def = tf.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)
if n.op == 'Const':
tensor = n.attr['value'].tensor
size = len(tensor.tensor_content)
if size > max_const_size:
tensor.tensor_content = tf.compat.as_bytes("<stripped %d bytes>"%size)
return strip_def | Strip large constant values from graph_def.
This is mostly a utility function for graph(), and also originates here:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb | Below is the instruction that describes the task:
### Input:
Strip large constant values from graph_def.
This is mostly a utility function for graph(), and also originates here:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb
### Response:
def _strip_consts(graph_def, max_const_size=32):
"""Strip large constant values from graph_def.
This is mostly a utility function for graph(), and also originates here:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb
"""
strip_def = tf.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)
if n.op == 'Const':
tensor = n.attr['value'].tensor
size = len(tensor.tensor_content)
if size > max_const_size:
tensor.tensor_content = tf.compat.as_bytes("<stripped %d bytes>"%size)
return strip_def |
def bind(self, callback):
"""Bind a ``callback`` to this event.
"""
handlers = self._handlers
if self._self is None:
raise RuntimeError('%s already fired, cannot add callbacks' % self)
if handlers is None:
handlers = []
self._handlers = handlers
        handlers.append(callback) | Bind a ``callback`` to this event. | Below is the instruction that describes the task:
### Input:
Bind a ``callback`` to this event.
### Response:
def bind(self, callback):
"""Bind a ``callback`` to this event.
"""
handlers = self._handlers
if self._self is None:
raise RuntimeError('%s already fired, cannot add callbacks' % self)
if handlers is None:
handlers = []
self._handlers = handlers
handlers.append(callback) |
def broadcast_tx(self, address, amount, secret, secondsecret=None, vendorfield=''):
"""broadcasts a transaction to the peerslist using ark-js library"""
peer = random.choice(self.PEERS)
park = Park(
peer,
4001,
constants.ARK_NETHASH,
'1.1.1'
)
        return park.transactions().create(address, str(amount), vendorfield, secret, secondsecret) | broadcasts a transaction to the peerslist using ark-js library | Below is the instruction that describes the task:
### Input:
broadcasts a transaction to the peerslist using ark-js library
### Response:
def broadcast_tx(self, address, amount, secret, secondsecret=None, vendorfield=''):
"""broadcasts a transaction to the peerslist using ark-js library"""
peer = random.choice(self.PEERS)
park = Park(
peer,
4001,
constants.ARK_NETHASH,
'1.1.1'
)
return park.transactions().create(address, str(amount), vendorfield, secret, secondsecret) |
def _nanmean_ddof_object(ddof, value, axis=None, **kwargs):
""" In house nanmean. ddof argument will be used in _nanvar method """
from .duck_array_ops import (count, fillna, _dask_or_eager_func,
where_method)
valid_count = count(value, axis=axis)
value = fillna(value, 0)
# As dtype inference is impossible for object dtype, we assume float
# https://github.com/dask/dask/issues/3162
dtype = kwargs.pop('dtype', None)
if dtype is None and value.dtype.kind == 'O':
dtype = value.dtype if value.dtype.kind in ['cf'] else float
data = _dask_or_eager_func('sum')(value, axis=axis, dtype=dtype, **kwargs)
data = data / (valid_count - ddof)
    return where_method(data, valid_count != 0) | In house nanmean. ddof argument will be used in _nanvar method | Below is the instruction that describes the task:
### Input:
In house nanmean. ddof argument will be used in _nanvar method
### Response:
def _nanmean_ddof_object(ddof, value, axis=None, **kwargs):
""" In house nanmean. ddof argument will be used in _nanvar method """
from .duck_array_ops import (count, fillna, _dask_or_eager_func,
where_method)
valid_count = count(value, axis=axis)
value = fillna(value, 0)
# As dtype inference is impossible for object dtype, we assume float
# https://github.com/dask/dask/issues/3162
dtype = kwargs.pop('dtype', None)
if dtype is None and value.dtype.kind == 'O':
dtype = value.dtype if value.dtype.kind in ['cf'] else float
data = _dask_or_eager_func('sum')(value, axis=axis, dtype=dtype, **kwargs)
data = data / (valid_count - ddof)
return where_method(data, valid_count != 0) |
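As a plain NumPy sketch of the same idea (count the valid entries, sum with NaNs treated as zero, then divide by the count minus `ddof`); the function name and sample data below are illustrative, not part of the original xarray code:

```python
import numpy as np

def nanmean_ddof(values, ddof=0):
    valid_count = np.count_nonzero(~np.isnan(values))
    total = np.nansum(values)  # NaNs contribute zero to the sum
    return total / (valid_count - ddof)

x = np.array([1.0, 2.0, np.nan, 3.0])
print(nanmean_ddof(x))          # 2.0
print(nanmean_ddof(x, ddof=1))  # 3.0  (sum 6 / (3 - 1))
```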
def PermissiveDict(fields=None):
'''A permissive dict will permit the user to partially specify the permitted fields. Any fields
that are specified and passed in will be type checked. Other fields will be allowed, but
will be ignored by the type checker.
'''
if fields:
check_user_facing_fields_dict(fields, 'PermissiveDict')
class _PermissiveDict(_ConfigComposite):
def __init__(self):
key = 'PermissiveDict.' + str(DictCounter.get_next_count())
super(_PermissiveDict, self).__init__(
name=None,
key=key,
fields=fields or dict(),
description='A configuration dictionary with typed fields',
type_attributes=ConfigTypeAttributes(is_builtin=True),
)
@property
def is_permissive_composite(self):
return True
return _PermissiveDict | A permissive dict will permit the user to partially specify the permitted fields. Any fields
that are specified and passed in will be type checked. Other fields will be allowed, but
will be ignored by the type checker. | Below is the instruction that describes the task:
### Input:
A permissive dict will permit the user to partially specify the permitted fields. Any fields
that are specified and passed in will be type checked. Other fields will be allowed, but
will be ignored by the type checker.
### Response:
def PermissiveDict(fields=None):
'''A permissive dict will permit the user to partially specify the permitted fields. Any fields
that are specified and passed in will be type checked. Other fields will be allowed, but
will be ignored by the type checker.
'''
if fields:
check_user_facing_fields_dict(fields, 'PermissiveDict')
class _PermissiveDict(_ConfigComposite):
def __init__(self):
key = 'PermissiveDict.' + str(DictCounter.get_next_count())
super(_PermissiveDict, self).__init__(
name=None,
key=key,
fields=fields or dict(),
description='A configuration dictionary with typed fields',
type_attributes=ConfigTypeAttributes(is_builtin=True),
)
@property
def is_permissive_composite(self):
return True
return _PermissiveDict |
def make_insert(cls, table, insert_tuple):
"""
[Deprecated] Make INSERT query.
:param str table: Table name of executing the query.
:param list/tuple insert_tuple: Insertion data.
:return: Query of SQLite.
:rtype: str
:raises ValueError: If ``insert_tuple`` is empty |list|/|tuple|.
:raises simplesqlite.NameValidationError:
|raises_validate_table_name|
"""
validate_table_name(table)
table = Table(table)
if typepy.is_empty_sequence(insert_tuple):
raise ValueError("empty insert list/tuple")
return "INSERT INTO {:s} VALUES ({:s})".format(
table, ",".join(["?" for _i in insert_tuple])
) | [Deprecated] Make INSERT query.
:param str table: Table name of executing the query.
:param list/tuple insert_tuple: Insertion data.
:return: Query of SQLite.
:rtype: str
:raises ValueError: If ``insert_tuple`` is empty |list|/|tuple|.
:raises simplesqlite.NameValidationError:
|raises_validate_table_name| | Below is the instruction that describes the task:
### Input:
[Deprecated] Make INSERT query.
:param str table: Table name of executing the query.
:param list/tuple insert_tuple: Insertion data.
:return: Query of SQLite.
:rtype: str
:raises ValueError: If ``insert_tuple`` is empty |list|/|tuple|.
:raises simplesqlite.NameValidationError:
|raises_validate_table_name|
### Response:
def make_insert(cls, table, insert_tuple):
"""
[Deprecated] Make INSERT query.
:param str table: Table name of executing the query.
:param list/tuple insert_tuple: Insertion data.
:return: Query of SQLite.
:rtype: str
:raises ValueError: If ``insert_tuple`` is empty |list|/|tuple|.
:raises simplesqlite.NameValidationError:
|raises_validate_table_name|
"""
validate_table_name(table)
table = Table(table)
if typepy.is_empty_sequence(insert_tuple):
raise ValueError("empty insert list/tuple")
return "INSERT INTO {:s} VALUES ({:s})".format(
table, ",".join(["?" for _i in insert_tuple])
) |
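The query built by `make_insert` is just a parameterized INSERT with one `?` placeholder per value. A minimal standalone sketch of the same string construction, with the table-name validation omitted:

```python
def make_insert_query(table, insert_tuple):
    if not insert_tuple:
        raise ValueError("empty insert list/tuple")
    placeholders = ",".join("?" for _ in insert_tuple)
    return "INSERT INTO {:s} VALUES ({:s})".format(table, placeholders)

print(make_insert_query("sampletable", (1, "abc", 2.5)))
# INSERT INTO sampletable VALUES (?,?,?)
```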
def p_Interface(p):
"""Interface : interface IDENTIFIER Inheritance "{" InterfaceMembers "}" ";"
"""
    p[0] = model.Interface(name=p[2], parent=p[3], members=p[5]) | Interface : interface IDENTIFIER Inheritance "{" InterfaceMembers "}" ";" | Below is the instruction that describes the task:
### Input:
Interface : interface IDENTIFIER Inheritance "{" InterfaceMembers "}" ";"
### Response:
def p_Interface(p):
"""Interface : interface IDENTIFIER Inheritance "{" InterfaceMembers "}" ";"
"""
p[0] = model.Interface(name=p[2], parent=p[3], members=p[5]) |
def format_numbers(data, headers, column_types=(), integer_format=None,
float_format=None, **_):
"""Format numbers according to a format specification.
This uses Python's format specification to format numbers of the following
types: :class:`int`, :class:`py2:long` (Python 2), :class:`float`, and
:class:`~decimal.Decimal`. See the :ref:`python:formatspec` for more
information about the format strings.
.. NOTE::
A column is only formatted if all of its values are the same type
(except for :data:`None`).
:param iterable data: An :term:`iterable` (e.g. list) of rows.
:param iterable headers: The column headers.
:param iterable column_types: The columns' type objects (e.g. int or float).
:param str integer_format: The format string to use for integer columns.
:param str float_format: The format string to use for float columns.
:return: The processed data and headers.
:rtype: tuple
"""
if (integer_format is None and float_format is None) or not column_types:
return iter(data), headers
def _format_number(field, column_type):
if integer_format and column_type is int and type(field) in int_types:
return format(field, integer_format)
elif float_format and column_type is float and type(field) in float_types:
return format(field, float_format)
return field
data = ([_format_number(v, column_types[i]) for i, v in enumerate(row)] for row in data)
return data, headers | Format numbers according to a format specification.
This uses Python's format specification to format numbers of the following
types: :class:`int`, :class:`py2:long` (Python 2), :class:`float`, and
:class:`~decimal.Decimal`. See the :ref:`python:formatspec` for more
information about the format strings.
.. NOTE::
A column is only formatted if all of its values are the same type
(except for :data:`None`).
:param iterable data: An :term:`iterable` (e.g. list) of rows.
:param iterable headers: The column headers.
:param iterable column_types: The columns' type objects (e.g. int or float).
:param str integer_format: The format string to use for integer columns.
:param str float_format: The format string to use for float columns.
:return: The processed data and headers.
:rtype: tuple | Below is the instruction that describes the task:
### Input:
Format numbers according to a format specification.
This uses Python's format specification to format numbers of the following
types: :class:`int`, :class:`py2:long` (Python 2), :class:`float`, and
:class:`~decimal.Decimal`. See the :ref:`python:formatspec` for more
information about the format strings.
.. NOTE::
A column is only formatted if all of its values are the same type
(except for :data:`None`).
:param iterable data: An :term:`iterable` (e.g. list) of rows.
:param iterable headers: The column headers.
:param iterable column_types: The columns' type objects (e.g. int or float).
:param str integer_format: The format string to use for integer columns.
:param str float_format: The format string to use for float columns.
:return: The processed data and headers.
:rtype: tuple
### Response:
def format_numbers(data, headers, column_types=(), integer_format=None,
float_format=None, **_):
"""Format numbers according to a format specification.
This uses Python's format specification to format numbers of the following
types: :class:`int`, :class:`py2:long` (Python 2), :class:`float`, and
:class:`~decimal.Decimal`. See the :ref:`python:formatspec` for more
information about the format strings.
.. NOTE::
A column is only formatted if all of its values are the same type
(except for :data:`None`).
:param iterable data: An :term:`iterable` (e.g. list) of rows.
:param iterable headers: The column headers.
:param iterable column_types: The columns' type objects (e.g. int or float).
:param str integer_format: The format string to use for integer columns.
:param str float_format: The format string to use for float columns.
:return: The processed data and headers.
:rtype: tuple
"""
if (integer_format is None and float_format is None) or not column_types:
return iter(data), headers
def _format_number(field, column_type):
if integer_format and column_type is int and type(field) in int_types:
return format(field, integer_format)
elif float_format and column_type is float and type(field) in float_types:
return format(field, float_format)
return field
data = ([_format_number(v, column_types[i]) for i, v in enumerate(row)] for row in data)
return data, headers |
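The `integer_format` and `float_format` strings accepted by `format_numbers` are ordinary Python format specifications, applied via the built-in `format`; for example:

```python
# Standard-library format specifications, as used for integer and float columns.
print(format(1234567, ',d'))   # 1,234,567
print(format(3.14159, '.2f'))  # 3.14
```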
def django_pre_2_0_table_context(
request,
table,
links=None,
paginate_by=None,
page=None,
extra_context=None,
paginator=None,
show_hits=False,
hit_label='Items'):
"""
:type table: Table
"""
if extra_context is None: # pragma: no cover
extra_context = {}
assert table.data is not None
links, grouped_links = evaluate_and_group_links(links, table=table)
base_context = {
'links': links,
'grouped_links': grouped_links,
'table': table,
}
if paginate_by:
try:
paginate_by = int(request.GET.get('page_size', paginate_by))
except ValueError: # pragma: no cover
pass
if paginator is None:
paginator = Paginator(table.data, paginate_by)
object_list = None
else: # pragma: no cover
object_list = table.data
if not page:
page = request.GET.get('page', 1)
try:
page = int(page)
if page < 1: # pragma: no cover
page = 1
if page > paginator.num_pages: # pragma: no cover
page = paginator.num_pages
if object_list is None:
table.data = paginator.page(page).object_list
except (InvalidPage, ValueError): # pragma: no cover
if page == 1:
table.data = []
else:
raise Http404
base_context.update({
'request': request,
'is_paginated': paginator.num_pages > 1,
'results_per_page': paginate_by,
'has_next': paginator.num_pages > page,
'has_previous': page > 1,
'page_size': paginate_by,
'page': page,
'next': page + 1,
'previous': page - 1,
'pages': paginator.num_pages,
'hits': paginator.count,
'show_hits': show_hits,
'hit_label': hit_label})
else: # pragma: no cover
base_context.update({
'is_paginated': False})
base_context.update(extra_context)
return base_context | :type table: Table | Below is the the instruction that describes the task:
### Input:
:type table: Table
### Response:
def django_pre_2_0_table_context(
request,
table,
links=None,
paginate_by=None,
page=None,
extra_context=None,
paginator=None,
show_hits=False,
hit_label='Items'):
"""
:type table: Table
"""
if extra_context is None: # pragma: no cover
extra_context = {}
assert table.data is not None
links, grouped_links = evaluate_and_group_links(links, table=table)
base_context = {
'links': links,
'grouped_links': grouped_links,
'table': table,
}
if paginate_by:
try:
paginate_by = int(request.GET.get('page_size', paginate_by))
except ValueError: # pragma: no cover
pass
if paginator is None:
paginator = Paginator(table.data, paginate_by)
object_list = None
else: # pragma: no cover
object_list = table.data
if not page:
page = request.GET.get('page', 1)
try:
page = int(page)
if page < 1: # pragma: no cover
page = 1
if page > paginator.num_pages: # pragma: no cover
page = paginator.num_pages
if object_list is None:
table.data = paginator.page(page).object_list
except (InvalidPage, ValueError): # pragma: no cover
if page == 1:
table.data = []
else:
raise Http404
base_context.update({
'request': request,
'is_paginated': paginator.num_pages > 1,
'results_per_page': paginate_by,
'has_next': paginator.num_pages > page,
'has_previous': page > 1,
'page_size': paginate_by,
'page': page,
'next': page + 1,
'previous': page - 1,
'pages': paginator.num_pages,
'hits': paginator.count,
'show_hits': show_hits,
'hit_label': hit_label})
else: # pragma: no cover
base_context.update({
'is_paginated': False})
base_context.update(extra_context)
return base_context |
def transform_y(self, tfms:TfmList=None, **kwargs):
"Set `tfms` to be applied to the targets only."
_check_kwargs(self.y, tfms, **kwargs)
self.tfm_y=True
if tfms is None:
self.tfms_y = list(filter(lambda t: t.use_on_y, listify(self.tfms)))
self.tfmargs_y = {**self.tfmargs, **kwargs}
else:
tfms = list(filter(lambda t: t.use_on_y, tfms))
self.tfms_y,self.tfmargs_y = tfms,kwargs
return self | Set `tfms` to be applied to the targets only. | Below is the the instruction that describes the task:
### Input:
Set `tfms` to be applied to the targets only.
### Response:
def transform_y(self, tfms:TfmList=None, **kwargs):
"Set `tfms` to be applied to the targets only."
_check_kwargs(self.y, tfms, **kwargs)
self.tfm_y=True
if tfms is None:
self.tfms_y = list(filter(lambda t: t.use_on_y, listify(self.tfms)))
self.tfmargs_y = {**self.tfmargs, **kwargs}
else:
tfms = list(filter(lambda t: t.use_on_y, tfms))
self.tfms_y,self.tfmargs_y = tfms,kwargs
return self |
def get_block_info(self, blocktype, db_number):
"""Returns the block information for the specified block."""
blocktype = snap7.snap7types.block_types.get(blocktype)
if not blocktype:
raise Snap7Exception("The blocktype parameter was invalid")
logger.debug("retrieving block info for block %s of type %s" %
(db_number, blocktype))
data = TS7BlockInfo()
result = self.library.Cli_GetAgBlockInfo(
self.pointer, blocktype,
db_number, byref(data))
check_error(result, context="client")
return data | Returns the block information for the specified block. | Below is the the instruction that describes the task:
### Input:
Returns the block information for the specified block.
### Response:
def get_block_info(self, blocktype, db_number):
"""Returns the block information for the specified block."""
blocktype = snap7.snap7types.block_types.get(blocktype)
if not blocktype:
raise Snap7Exception("The blocktype parameter was invalid")
logger.debug("retrieving block info for block %s of type %s" %
(db_number, blocktype))
data = TS7BlockInfo()
result = self.library.Cli_GetAgBlockInfo(
self.pointer, blocktype,
db_number, byref(data))
check_error(result, context="client")
return data |
def CheckBlobsExist(self, blob_ids):
"""Check if blobs for the given digests already exist."""
res = {blob_id: False for blob_id in blob_ids}
urns = {self._BlobUrn(blob_id): blob_id for blob_id in blob_ids}
existing = aff4.FACTORY.MultiOpen(
urns, aff4_type=aff4.AFF4MemoryStreamBase, mode="r")
for blob in existing:
res[urns[blob.urn]] = True
return res | Check if blobs for the given digests already exist. | Below is the the instruction that describes the task:
### Input:
Check if blobs for the given digests already exist.
### Response:
def CheckBlobsExist(self, blob_ids):
"""Check if blobs for the given digests already exist."""
res = {blob_id: False for blob_id in blob_ids}
urns = {self._BlobUrn(blob_id): blob_id for blob_id in blob_ids}
existing = aff4.FACTORY.MultiOpen(
urns, aff4_type=aff4.AFF4MemoryStreamBase, mode="r")
for blob in existing:
res[urns[blob.urn]] = True
return res |
def _extractCreations(self, dataSets):
"""
Find the elements of C{dataSets} which represent the creation of new
objects.
@param dataSets: C{list} of C{dict} mapping C{unicode} form submission
keys to form submission values.
@return: iterator of C{tuple}s with the first element giving the opaque
identifier of an object which is to be created and the second
element giving a C{dict} of all the other creation arguments.
"""
for dataSet in dataSets:
modelObject = self._objectFromID(dataSet[self._IDENTIFIER_KEY])
if modelObject is self._NO_OBJECT_MARKER:
dataCopy = dataSet.copy()
identifier = dataCopy.pop(self._IDENTIFIER_KEY)
yield identifier, dataCopy | Find the elements of C{dataSets} which represent the creation of new
objects.
@param dataSets: C{list} of C{dict} mapping C{unicode} form submission
keys to form submission values.
@return: iterator of C{tuple}s with the first element giving the opaque
identifier of an object which is to be created and the second
element giving a C{dict} of all the other creation arguments. | Below is the the instruction that describes the task:
### Input:
Find the elements of C{dataSets} which represent the creation of new
objects.
@param dataSets: C{list} of C{dict} mapping C{unicode} form submission
keys to form submission values.
@return: iterator of C{tuple}s with the first element giving the opaque
identifier of an object which is to be created and the second
element giving a C{dict} of all the other creation arguments.
### Response:
def _extractCreations(self, dataSets):
"""
Find the elements of C{dataSets} which represent the creation of new
objects.
@param dataSets: C{list} of C{dict} mapping C{unicode} form submission
keys to form submission values.
@return: iterator of C{tuple}s with the first element giving the opaque
identifier of an object which is to be created and the second
element giving a C{dict} of all the other creation arguments.
"""
for dataSet in dataSets:
modelObject = self._objectFromID(dataSet[self._IDENTIFIER_KEY])
if modelObject is self._NO_OBJECT_MARKER:
dataCopy = dataSet.copy()
identifier = dataCopy.pop(self._IDENTIFIER_KEY)
yield identifier, dataCopy |
def _mod_defpriv_opts(object_type, defprivileges):
'''
Format options
'''
object_type = object_type.lower()
defprivileges = '' if defprivileges is None else defprivileges
_defprivs = re.split(r'\s?,\s?', defprivileges.upper())
return object_type, defprivileges, _defprivs | Format options | Below is the the instruction that describes the task:
### Input:
Format options
### Response:
def _mod_defpriv_opts(object_type, defprivileges):
'''
Format options
'''
object_type = object_type.lower()
defprivileges = '' if defprivileges is None else defprivileges
_defprivs = re.split(r'\s?,\s?', defprivileges.upper())
return object_type, defprivileges, _defprivs |
def walkable(self, x, y):
"""
check, if the tile is inside grid and if it is set as walkable
"""
return self.inside(x, y) and self.nodes[y][x].walkable | check, if the tile is inside grid and if it is set as walkable | Below is the the instruction that describes the task:
### Input:
check, if the tile is inside grid and if it is set as walkable
### Response:
def walkable(self, x, y):
"""
check, if the tile is inside grid and if it is set as walkable
"""
return self.inside(x, y) and self.nodes[y][x].walkable |
def process_pem(self, data, name):
"""
PEM processing - splitting further by the type of the records
:param data:
:param name:
:return:
"""
try:
ret = []
data = to_string(data)
parts = re.split(r'-----BEGIN', data)
if len(parts) == 0:
return None
if len(parts[0]) == 0:
parts.pop(0)
crt_arr = ['-----BEGIN' + x for x in parts]
for idx, pem_rec in enumerate(crt_arr):
pem_rec = pem_rec.strip()
if len(pem_rec) == 0:
continue
if startswith(pem_rec, '-----BEGIN CERTIFICATE REQUEST'):
return self.process_pem_csr(pem_rec, name, idx)
elif startswith(pem_rec, '-----BEGIN CERTIF'):
return self.process_pem_cert(pem_rec, name, idx)
elif startswith(pem_rec, '-----BEGIN '): # fallback
return self.process_pem_rsakey(pem_rec, name, idx)
return ret
except Exception as e:
logger.debug('Exception processing PEM file %s : %s' % (name, e))
self.trace_logger.log(e)
return None | PEM processing - splitting further by the type of the records
:param data:
:param name:
:return: | Below is the the instruction that describes the task:
### Input:
PEM processing - splitting further by the type of the records
:param data:
:param name:
:return:
### Response:
def process_pem(self, data, name):
"""
PEM processing - splitting further by the type of the records
:param data:
:param name:
:return:
"""
try:
ret = []
data = to_string(data)
parts = re.split(r'-----BEGIN', data)
if len(parts) == 0:
return None
if len(parts[0]) == 0:
parts.pop(0)
crt_arr = ['-----BEGIN' + x for x in parts]
for idx, pem_rec in enumerate(crt_arr):
pem_rec = pem_rec.strip()
if len(pem_rec) == 0:
continue
if startswith(pem_rec, '-----BEGIN CERTIFICATE REQUEST'):
return self.process_pem_csr(pem_rec, name, idx)
elif startswith(pem_rec, '-----BEGIN CERTIF'):
return self.process_pem_cert(pem_rec, name, idx)
elif startswith(pem_rec, '-----BEGIN '): # fallback
return self.process_pem_rsakey(pem_rec, name, idx)
return ret
except Exception as e:
logger.debug('Exception processing PEM file %s : %s' % (name, e))
self.trace_logger.log(e)
return None |
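A standalone illustration of the splitting step described above, using made-up PEM bodies; only the header line of each recovered record is printed.
import re

bundle = ("-----BEGIN CERTIFICATE-----\nAAA\n-----END CERTIFICATE-----\n"
          "-----BEGIN RSA PRIVATE KEY-----\nBBB\n-----END RSA PRIVATE KEY-----\n")
parts = [p for p in re.split(r'-----BEGIN', bundle) if p.strip()]
records = ['-----BEGIN' + p for p in parts]
for rec in records:
    print(rec.splitlines()[0])
# -----BEGIN CERTIFICATE-----
# -----BEGIN RSA PRIVATE KEY-----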
def apply_upstring(upstring, component_list):
"""Update the dictionaries resulting from ``parse_array_start`` with
the "up" key based on the upstring returned from ``parse_upstring``.
The function assumes that the upstring and component_list parameters
passed in are from the same device array stanza of a
``/proc/mdstat`` file.
The function modifies component_list in place, adding or updating
the value of the "up" key to True if there is a corresponding ``U``
in the upstring string, or to False if there is a corresponding
``_``.
If the number of rows in component_list does not match the
number of characters in upstring, an ``AssertionError`` is raised.
Parameters
----------
upstring : str
String sequence of ``U``s and ``_``s as determined by the
``parse_upstring`` method
component_list : list
List of dictionaries output from the ``parse_array_start`` method.
"""
assert len(upstring) == len(component_list)
def add_up_key(comp_dict, up_indicator):
assert up_indicator == 'U' or up_indicator == "_"
comp_dict['up'] = up_indicator == 'U'
for comp_dict, up_indicator in zip(component_list, upstring):
add_up_key(comp_dict, up_indicator) | Update the dictionaries resulting from ``parse_array_start`` with
the "up" key based on the upstring returned from ``parse_upstring``.
The function assumes that the upstring and component_list parameters
passed in are from the same device array stanza of a
``/proc/mdstat`` file.
The function modifies component_list in place, adding or updating
the value of the "up" key to True if there is a corresponding ``U``
in the upstring string, or to False if there is a corresponding
``_``.
If the number of rows in component_list does not match the
number of characters in upstring, an ``AssertionError`` is raised.
Parameters
----------
upstring : str
String sequence of ``U``s and ``_``s as determined by the
``parse_upstring`` method
component_list : list
List of dictionaries output from the ``parse_array_start`` method. | Below is the the instruction that describes the task:
### Input:
Update the dictionaries resulting from ``parse_array_start`` with
the "up" key based on the upstring returned from ``parse_upstring``.
The function assumes that the upstring and component_list parameters
passed in are from the same device array stanza of a
``/proc/mdstat`` file.
The function modifies component_list in place, adding or updating
the value of the "up" key to True if there is a corresponding ``U``
in the upstring string, or to False if there is a corresponding
``_``.
If the number of rows in component_list does not match the
number of characters in upstring, an ``AssertionError`` is raised.
Parameters
----------
upstring : str
String sequence of ``U``s and ``_``s as determined by the
``parse_upstring`` method
component_list : list
List of dictionaries output from the ``parse_array_start`` method.
### Response:
def apply_upstring(upstring, component_list):
"""Update the dictionaries resulting from ``parse_array_start`` with
the "up" key based on the upstring returned from ``parse_upstring``.
The function assumes that the upstring and component_list parameters
passed in are from the same device array stanza of a
``/proc/mdstat`` file.
The function modifies component_list in place, adding or updating
the value of the "up" key to True if there is a corresponding ``U``
in the upstring string, or to False if there is a corresponding
``_``.
If the number of rows in component_list does not match the
number of characters in upstring, an ``AssertionError`` is raised.
Parameters
----------
upstring : str
String sequence of ``U``s and ``_``s as determined by the
``parse_upstring`` method
component_list : list
List of dictionaries output from the ``parse_array_start`` method.
"""
assert len(upstring) == len(component_list)
def add_up_key(comp_dict, up_indicator):
assert up_indicator == 'U' or up_indicator == "_"
comp_dict['up'] = up_indicator == 'U'
for comp_dict, up_indicator in zip(component_list, upstring):
add_up_key(comp_dict, up_indicator) |
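A hypothetical worked example of the ``U``/``_`` convention the function applies; the device names are invented.
component_list = [{'device': 'sdb1'}, {'device': 'sdc1'}, {'device': 'sdd1'}]
upstring = 'UU_'                      # third component has failed
for comp, flag in zip(component_list, upstring):
    comp['up'] = (flag == 'U')
print(component_list)
# [{'device': 'sdb1', 'up': True}, {'device': 'sdc1', 'up': True}, {'device': 'sdd1', 'up': False}]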
def load_bytecode_definitions(*, path=None) -> dict:
"""Load bytecode definitions from JSON file.
If no path is provided the default bytecode.json will be loaded.
:param path: Either None or a path to a JSON file to load containing
bytecode definitions.
"""
if path is not None:
with open(path, 'rb') as file_in:
j = json.load(file_in)
else:
try:
j = json.loads(pkgutil.get_data('jawa.util', 'bytecode.json'))
except json.JSONDecodeError:
# Unfortunately our best way to handle missing/malformed/empty
# bytecode.json files since it may not actually be backed by a
# "real" file.
return {}
for definition in j.values():
# If the entry has any operands take the text labels and convert
# them into pre-cached struct objects and operand types.
operands = definition['operands']
if operands:
definition['operands'] = [
[getattr(OperandFmts, oo[0]), OperandTypes[oo[1]]]
for oo in operands
]
# Return one dict that contains both mnemonic keys and opcode keys.
return {**j, **{v['op']: v for v in j.values()}} | Load bytecode definitions from JSON file.
If no path is provided the default bytecode.json will be loaded.
:param path: Either None or a path to a JSON file to load containing
bytecode definitions. | Below is the the instruction that describes the task:
### Input:
Load bytecode definitions from JSON file.
If no path is provided the default bytecode.json will be loaded.
:param path: Either None or a path to a JSON file to load containing
bytecode definitions.
### Response:
def load_bytecode_definitions(*, path=None) -> dict:
"""Load bytecode definitions from JSON file.
If no path is provided the default bytecode.json will be loaded.
:param path: Either None or a path to a JSON file to load containing
bytecode definitions.
"""
if path is not None:
with open(path, 'rb') as file_in:
j = json.load(file_in)
else:
try:
j = json.loads(pkgutil.get_data('jawa.util', 'bytecode.json'))
except json.JSONDecodeError:
# Unfortunately our best way to handle missing/malformed/empty
# bytecode.json files since it may not actually be backed by a
# "real" file.
return {}
for definition in j.values():
# If the entry has any operands take the text labels and convert
# them into pre-cached struct objects and operand types.
operands = definition['operands']
if operands:
definition['operands'] = [
[getattr(OperandFmts, oo[0]), OperandTypes[oo[1]]]
for oo in operands
]
# Return one dict that contains both mnemonic keys and opcode keys.
return {**j, **{v['op']: v for v in j.values()}} |
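The return expression above builds one table addressable by both mnemonic and numeric opcode. A tiny sketch with illustrative entries shows the effect.
defs = {'nop': {'op': 0, 'operands': []}, 'aload_0': {'op': 42, 'operands': []}}
table = {**defs, **{v['op']: v for v in defs.values()}}
assert table['nop'] is table[0]        # same definition reachable by mnemonic ...
assert table['aload_0'] is table[42]   # ... or by numeric opcode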
def sort_by_z(self):
"""Return a new :class:`PseudoTable` with pseudos sorted by Z"""
return self.__class__(sorted(self, key=lambda p: p.Z)) | Return a new :class:`PseudoTable` with pseudos sorted by Z | Below is the the instruction that describes the task:
### Input:
Return a new :class:`PseudoTable` with pseudos sorted by Z
### Response:
def sort_by_z(self):
"""Return a new :class:`PseudoTable` with pseudos sorted by Z"""
return self.__class__(sorted(self, key=lambda p: p.Z)) |
def scroll_to_beginning_vertically(self, steps=10, *args,**selectors):
"""
Scroll the object which has *selectors* attributes to *beginning* vertically.
See `Scroll Forward Vertically` for more details.
"""
return self.device(**selectors).scroll.vert.toBeginning(steps=steps) | Scroll the object which has *selectors* attributes to *beginning* vertically.
See `Scroll Forward Vertically` for more details. | Below is the the instruction that describes the task:
### Input:
Scroll the object which has *selectors* attributes to *beginning* vertically.
See `Scroll Forward Vertically` for more details.
### Response:
def scroll_to_beginning_vertically(self, steps=10, *args,**selectors):
"""
Scroll the object which has *selectors* attributes to *beginning* vertically.
See `Scroll Forward Vertically` for more details.
"""
return self.device(**selectors).scroll.vert.toBeginning(steps=steps) |
def listPrimaryDatasets(self, primary_ds_name="", primary_ds_type=""):
"""
Returns all primary datasets if primary_ds_name or primary_ds_type are not passed.
"""
conn = self.dbi.connection()
try:
result = self.primdslist.execute(conn, primary_ds_name, primary_ds_type)
if conn: conn.close()
return result
finally:
if conn:
conn.close() | Returns all primary datasets if primary_ds_name or primary_ds_type are not passed. | Below is the the instruction that describes the task:
### Input:
Returns all primary datasets if primary_ds_name or primary_ds_type are not passed.
### Response:
def listPrimaryDatasets(self, primary_ds_name="", primary_ds_type=""):
"""
Returns all primary datasets if primary_ds_name or primary_ds_type are not passed.
"""
conn = self.dbi.connection()
try:
result = self.primdslist.execute(conn, primary_ds_name, primary_ds_type)
if conn: conn.close()
return result
finally:
if conn:
conn.close() |
def binom_est_error(p, N, hedge = float(0)):
r"""
"""
# asymptotic np.sqrt(p * (1 - p) / N)
return np.sqrt(p*(1-p)/(N+2*hedge+1)) | r""" | Below is the the instruction that describes the task:
### Input:
r"""
### Response:
def binom_est_error(p, N, hedge = float(0)):
r"""
"""
# asymptotic np.sqrt(p * (1 - p) / N)
return np.sqrt(p*(1-p)/(N+2*hedge+1)) |
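A quick numeric check of the expression returned above, using example values for p, N and hedge.
import numpy as np

p, N = 0.3, 100
print(np.sqrt(p * (1 - p) / (N + 1)))          # hedge = 0  -> ~0.0456
print(np.sqrt(p * (1 - p) / (N + 2 * 2 + 1)))  # hedge = 2  -> ~0.0447, slightly smaller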
def requestPositionUpdates(self, subscribe=True):
""" Request/cancel request real-time position data for all accounts. """
if self.subscribePositions != subscribe:
self.subscribePositions = subscribe
if subscribe == True:
self.ibConn.reqPositions()
else:
self.ibConn.cancelPositions() | Request/cancel request real-time position data for all accounts. | Below is the the instruction that describes the task:
### Input:
Request/cancel request real-time position data for all accounts.
### Response:
def requestPositionUpdates(self, subscribe=True):
""" Request/cancel request real-time position data for all accounts. """
if self.subscribePositions != subscribe:
self.subscribePositions = subscribe
if subscribe == True:
self.ibConn.reqPositions()
else:
self.ibConn.cancelPositions() |
def transform(fields, function, *tables):
"Return a new table based on other tables and a transformation function"
new_table = Table(fields=fields)
for table in tables:
for row in filter(bool, map(lambda row: function(row, table), table)):
new_table.append(row)
return new_table | Return a new table based on other tables and a transformation function | Below is the the instruction that describes the task:
### Input:
Return a new table based on other tables and a transformation function
### Response:
def transform(fields, function, *tables):
"Return a new table based on other tables and a transformation function"
new_table = Table(fields=fields)
for table in tables:
for row in filter(bool, map(lambda row: function(row, table), table)):
new_table.append(row)
return new_table |
def hv_mv_station_load(network):
"""
Checks for over-loading of HV/MV station.
Parameters
----------
network : :class:`~.grid.network.Network`
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded HV/MV stations, their apparent power
at maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations of type
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occurred in as :pandas:`pandas.Timestamp<timestamp>`.
Notes
-----
Over-load is determined based on allowed load factors for feed-in and
load cases that are defined in the config file 'config_grid_expansion' in
section 'grid_expansion_load_factors'.
"""
crit_stations = pd.DataFrame()
crit_stations = _station_load(network, network.mv_grid.station,
crit_stations)
if not crit_stations.empty:
logger.debug('==> HV/MV station has load issues.')
else:
logger.debug('==> No HV/MV station load issues.')
return crit_stations | Checks for over-loading of HV/MV station.
Parameters
----------
network : :class:`~.grid.network.Network`
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded HV/MV stations, their apparent power
at maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations of type
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occurred in as :pandas:`pandas.Timestamp<timestamp>`.
Notes
-----
Over-load is determined based on allowed load factors for feed-in and
load cases that are defined in the config file 'config_grid_expansion' in
section 'grid_expansion_load_factors'. | Below is the the instruction that describes the task:
### Input:
Checks for over-loading of HV/MV station.
Parameters
----------
network : :class:`~.grid.network.Network`
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded HV/MV stations, their apparent power
at maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations of type
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occurred in as :pandas:`pandas.Timestamp<timestamp>`.
Notes
-----
Over-load is determined based on allowed load factors for feed-in and
load cases that are defined in the config file 'config_grid_expansion' in
section 'grid_expansion_load_factors'.
### Response:
def hv_mv_station_load(network):
"""
Checks for over-loading of HV/MV station.
Parameters
----------
network : :class:`~.grid.network.Network`
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded HV/MV stations, their apparent power
at maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations of type
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occurred in as :pandas:`pandas.Timestamp<timestamp>`.
Notes
-----
Over-load is determined based on allowed load factors for feed-in and
load cases that are defined in the config file 'config_grid_expansion' in
section 'grid_expansion_load_factors'.
"""
crit_stations = pd.DataFrame()
crit_stations = _station_load(network, network.mv_grid.station,
crit_stations)
if not crit_stations.empty:
logger.debug('==> HV/MV station has load issues.')
else:
logger.debug('==> No HV/MV station load issues.')
return crit_stations |
def to_utc_datetime(self, value):
"""
from value to datetime with tzinfo format (datetime.datetime instance)
"""
value = self.to_naive_datetime(value)
if timezone.is_naive(value):
value = timezone.make_aware(value, timezone.utc)
else:
value = timezone.localtime(value, timezone.utc)
return value | from value to datetime with tzinfo format (datetime.datetime instance) | Below is the the instruction that describes the task:
### Input:
from value to datetime with tzinfo format (datetime.datetime instance)
### Response:
def to_utc_datetime(self, value):
"""
from value to datetime with tzinfo format (datetime.datetime instance)
"""
value = self.to_naive_datetime(value)
if timezone.is_naive(value):
value = timezone.make_aware(value, timezone.utc)
else:
value = timezone.localtime(value, timezone.utc)
return value |
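A standalone sketch of the naive-versus-aware branch using only the standard library; the method above relies on django.utils.timezone instead, and the helper below is illustrative.
from datetime import datetime, timedelta, timezone

def to_utc(value):
    if value.tzinfo is None:                   # naive: interpret as UTC
        return value.replace(tzinfo=timezone.utc)
    return value.astimezone(timezone.utc)      # aware: convert to UTC

print(to_utc(datetime(2020, 1, 1, 12, 0)))
print(to_utc(datetime(2020, 1, 1, 13, 0, tzinfo=timezone(timedelta(hours=1)))))  # -> 12:00 UTC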
def is_clockwise(vertices):
""" Evaluate whether vertices are in clockwise order.
Args:
vertices: list of vertices (x, y) in polygon.
Returns:
True: clockwise, False: counter-clockwise
Raises:
ValueError: the polygon is complex or overlapped.
"""
it = iterator.consecutive(cycle(vertices), 3)
clockwise = 0
counter = 0
for _ in range(len(vertices)):
p0, p1, p2 = next(it)
cross = cross_product(p1, p2, p0)
int_angle = interior_angle(p0, p2, p1) # raises ValueError
if cross < 0:
clockwise += int_angle
counter += 2 * pi - int_angle
else:
clockwise += 2 * pi - int_angle
counter += int_angle
if round(clockwise / pi) == len(vertices) - 2:
return True
elif round(counter / pi) == len(vertices) - 2:
return False
else:
raise ValueError("the polygon is complex or overlapped") | Evaluate whether vertices are in clockwise order.
Args:
vertices: list of vertices (x, y) in polygon.
Returns:
True: clockwise, False: counter-clockwise
Raises:
ValueError: the polygon is complex or overlapped. | Below is the the instruction that describes the task:
### Input:
Evaluate whether vertices are in clockwise order.
Args:
vertices: list of vertices (x, y) in polygon.
Returns:
True: clockwise, False: counter-clockwise
Raises:
ValueError: the polygon is complex or overlapped.
### Response:
def is_clockwise(vertices):
""" Evaluate whether vertices are in clockwise order.
Args:
vertices: list of vertices (x, y) in polygon.
Returns:
True: clockwise, False: counter-clockwise
Raises:
ValueError: the polygon is complex or overlapped.
"""
it = iterator.consecutive(cycle(vertices), 3)
clockwise = 0
counter = 0
for _ in range(len(vertices)):
p0, p1, p2 = next(it)
cross = cross_product(p1, p2, p0)
int_angle = interior_angle(p0, p2, p1) # raises ValueError
if cross < 0:
clockwise += int_angle
counter += 2 * pi - int_angle
else:
clockwise += 2 * pi - int_angle
counter += int_angle
if round(clockwise / pi) == len(vertices) - 2:
return True
elif round(counter / pi) == len(vertices) - 2:
return False
else:
raise ValueError("the polygon is complex or overlapped") |
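For comparison, a self-contained orientation test based on the signed shoelace area rather than the interior-angle accumulation used above; the square coordinates are examples.
def signed_area(vertices):
    return sum((x2 - x1) * (y2 + y1)
               for (x1, y1), (x2, y2) in zip(vertices, vertices[1:] + vertices[:1])) / 2

cw = [(0, 0), (0, 1), (1, 1), (1, 0)]          # clockwise square (y axis pointing up)
print(signed_area(cw) > 0)                     # True: positive value means clockwise here
print(signed_area(list(reversed(cw))) > 0)     # False: counter-clockwise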
def copy(self, deep=False):
"""Returns a copy of the list
Parameters
----------
deep: bool
If False (default), only the list is copied and not the contained
arrays, otherwise the contained arrays are deep copied"""
if not deep:
return self.__class__(self[:], attrs=self.attrs.copy(),
auto_update=not bool(self.no_auto_update))
else:
return self.__class__(
[arr.psy.copy(deep) for arr in self], attrs=self.attrs.copy(),
auto_update=not bool(self.auto_update)) | Returns a copy of the list
Parameters
----------
deep: bool
If False (default), only the list is copied and not the contained
arrays, otherwise the contained arrays are deep copied | Below is the the instruction that describes the task:
### Input:
Returns a copy of the list
Parameters
----------
deep: bool
If False (default), only the list is copied and not the contained
arrays, otherwise the contained arrays are deep copied
### Response:
def copy(self, deep=False):
"""Returns a copy of the list
Parameters
----------
deep: bool
If False (default), only the list is copied and not the contained
arrays, otherwise the contained arrays are deep copied"""
if not deep:
return self.__class__(self[:], attrs=self.attrs.copy(),
auto_update=not bool(self.no_auto_update))
else:
return self.__class__(
[arr.psy.copy(deep) for arr in self], attrs=self.attrs.copy(),
auto_update=not bool(self.auto_update)) |
def request(self, cmds):
"""
Single Execution element is permitted.
cmds can be a list or single command
"""
if isinstance(cmds, list):
cmd = '\n'.join(cmds)
elif isinstance(cmds, str) or isinstance(cmds, unicode):
cmd = cmds
node = etree.Element(qualify('CLI', BASE_NS_1_0))
etree.SubElement(node, qualify('Execution',
BASE_NS_1_0)).text = cmd
return self._request(node) | Single Execution element is permitted.
cmds can be a list or single command | Below is the the instruction that describes the task:
### Input:
Single Execution element is permitted.
cmds can be a list or single command
### Response:
def request(self, cmds):
"""
Single Execution element is permitted.
cmds can be a list or single command
"""
if isinstance(cmds, list):
cmd = '\n'.join(cmds)
elif isinstance(cmds, str) or isinstance(cmds, unicode):
cmd = cmds
node = etree.Element(qualify('CLI', BASE_NS_1_0))
etree.SubElement(node, qualify('Execution',
BASE_NS_1_0)).text = cmd
return self._request(node) |
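A standalone sketch of the element tree being built; the namespace URI and CLI commands below are assumptions for illustration only, not values taken from the library.
from lxml import etree

BASE_NS = "http://www.example.com/netconf/base:1.0"   # assumed namespace, illustrative only
node = etree.Element("{%s}CLI" % BASE_NS)
etree.SubElement(node, "{%s}Execution" % BASE_NS).text = "display version\ndisplay clock"
print(etree.tostring(node, pretty_print=True).decode())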
def interpolate(self, factor, minGlyph, maxGlyph,
round=True, suppressError=True):
"""
Interpolate the contents of this glyph at location ``factor``
in a linear interpolation between ``minGlyph`` and ``maxGlyph``.
>>> glyph.interpolate(0.5, otherGlyph1, otherGlyph2)
``factor`` may be a :ref:`type-int-float` or a tuple containing
two :ref:`type-int-float` values representing x and y factors.
>>> glyph.interpolate((0.5, 1.0), otherGlyph1, otherGlyph2)
``minGlyph`` must be a :class:`BaseGlyph` and will be located at 0.0
in the interpolation range. ``maxGlyph`` must be a :class:`BaseGlyph`
and will be located at 1.0 in the interpolation range. If ``round``
is ``True``, the contents of the glyph will be rounded to integers
after the interpolation is performed.
>>> glyph.interpolate(0.5, otherGlyph1, otherGlyph2, round=True)
This method assumes that ``minGlyph`` and ``maxGlyph`` are completely
compatible with each other for interpolation. If not, any errors
encountered will raise a :class:`FontPartsError`. If ``suppressError``
is ``True``, no exception will be raised and errors will be silently
ignored.
"""
factor = normalizers.normalizeInterpolationFactor(factor)
if not isinstance(minGlyph, BaseGlyph):
raise TypeError(("Interpolation to an instance of %r can not be "
"performed from an instance of %r.")
% (self.__class__.__name__,
minGlyph.__class__.__name__))
if not isinstance(maxGlyph, BaseGlyph):
raise TypeError(("Interpolation to an instance of %r can not be "
"performed from an instance of %r.")
% (self.__class__.__name__,
maxGlyph.__class__.__name__))
round = normalizers.normalizeBoolean(round)
suppressError = normalizers.normalizeBoolean(suppressError)
self._interpolate(factor, minGlyph, maxGlyph,
round=round, suppressError=suppressError) | Interpolate the contents of this glyph at location ``factor``
in a linear interpolation between ``minGlyph`` and ``maxGlyph``.
>>> glyph.interpolate(0.5, otherGlyph1, otherGlyph2)
``factor`` may be a :ref:`type-int-float` or a tuple containing
two :ref:`type-int-float` values representing x and y factors.
>>> glyph.interpolate((0.5, 1.0), otherGlyph1, otherGlyph2)
``minGlyph`` must be a :class:`BaseGlyph` and will be located at 0.0
in the interpolation range. ``maxGlyph`` must be a :class:`BaseGlyph`
and will be located at 1.0 in the interpolation range. If ``round``
is ``True``, the contents of the glyph will be rounded to integers
after the interpolation is performed.
>>> glyph.interpolate(0.5, otherGlyph1, otherGlyph2, round=True)
This method assumes that ``minGlyph`` and ``maxGlyph`` are completely
compatible with each other for interpolation. If not, any errors
encountered will raise a :class:`FontPartsError`. If ``suppressError``
is ``True``, no exception will be raised and errors will be silently
ignored. | Below is the the instruction that describes the task:
### Input:
Interpolate the contents of this glyph at location ``factor``
in a linear interpolation between ``minGlyph`` and ``maxGlyph``.
>>> glyph.interpolate(0.5, otherGlyph1, otherGlyph2)
``factor`` may be a :ref:`type-int-float` or a tuple containing
two :ref:`type-int-float` values representing x and y factors.
>>> glyph.interpolate((0.5, 1.0), otherGlyph1, otherGlyph2)
``minGlyph`` must be a :class:`BaseGlyph` and will be located at 0.0
in the interpolation range. ``maxGlyph`` must be a :class:`BaseGlyph`
and will be located at 1.0 in the interpolation range. If ``round``
is ``True``, the contents of the glyph will be rounded to integers
after the interpolation is performed.
>>> glyph.interpolate(0.5, otherGlyph1, otherGlyph2, round=True)
This method assumes that ``minGlyph`` and ``maxGlyph`` are completely
compatible with each other for interpolation. If not, any errors
encountered will raise a :class:`FontPartsError`. If ``suppressError``
is ``True``, no exception will be raised and errors will be silently
ignored.
### Response:
def interpolate(self, factor, minGlyph, maxGlyph,
round=True, suppressError=True):
"""
Interpolate the contents of this glyph at location ``factor``
in a linear interpolation between ``minGlyph`` and ``maxGlyph``.
>>> glyph.interpolate(0.5, otherGlyph1, otherGlyph2)
``factor`` may be a :ref:`type-int-float` or a tuple containing
two :ref:`type-int-float` values representing x and y factors.
>>> glyph.interpolate((0.5, 1.0), otherGlyph1, otherGlyph2)
``minGlyph`` must be a :class:`BaseGlyph` and will be located at 0.0
in the interpolation range. ``maxGlyph`` must be a :class:`BaseGlyph`
and will be located at 1.0 in the interpolation range. If ``round``
is ``True``, the contents of the glyph will be rounded to integers
after the interpolation is performed.
>>> glyph.interpolate(0.5, otherGlyph1, otherGlyph2, round=True)
This method assumes that ``minGlyph`` and ``maxGlyph`` are completely
compatible with each other for interpolation. If not, any errors
encountered will raise a :class:`FontPartsError`. If ``suppressError``
is ``True``, no exception will be raised and errors will be silently
ignored.
"""
factor = normalizers.normalizeInterpolationFactor(factor)
if not isinstance(minGlyph, BaseGlyph):
raise TypeError(("Interpolation to an instance of %r can not be "
"performed from an instance of %r.")
% (self.__class__.__name__,
minGlyph.__class__.__name__))
if not isinstance(maxGlyph, BaseGlyph):
raise TypeError(("Interpolation to an instance of %r can not be "
"performed from an instance of %r.")
% (self.__class__.__name__,
maxGlyph.__class__.__name__))
round = normalizers.normalizeBoolean(round)
suppressError = normalizers.normalizeBoolean(suppressError)
self._interpolate(factor, minGlyph, maxGlyph,
round=round, suppressError=suppressError) |
async def on_shutdown(self, app):
"""
Graceful shutdown handler
See https://docs.aiohttp.org/en/stable/web.html#graceful-shutdown
"""
for ws in self.clients.copy():
await ws.close(code=WSCloseCode.GOING_AWAY,
message='Server shutdown')
self.shutdown() | Graceful shutdown handler
See https://docs.aiohttp.org/en/stable/web.html#graceful-shutdown | Below is the the instruction that describes the task:
### Input:
Graceful shutdown handler
See https://docs.aiohttp.org/en/stable/web.html#graceful-shutdown
### Response:
async def on_shutdown(self, app):
"""
Graceful shutdown handler
See https://docs.aiohttp.org/en/stable/web.html#graceful-shutdown
"""
for ws in self.clients.copy():
await ws.close(code=WSCloseCode.GOING_AWAY,
message='Server shutdown')
self.shutdown() |
def get_vid_from_url(url):
"""Extracts video ID from URL.
"""
return match1(url, r'youtu\.be/([^?/]+)') or \
match1(url, r'youtube\.com/embed/([^/?]+)') or \
match1(url, r'youtube\.com/v/([^/?]+)') or \
match1(url, r'youtube\.com/watch/([^/?]+)') or \
parse_query_param(url, 'v') or \
parse_query_param(parse_query_param(url, 'u'), 'v') | Extracts video ID from URL. | Below is the the instruction that describes the task:
### Input:
Extracts video ID from URL.
### Response:
def get_vid_from_url(url):
"""Extracts video ID from URL.
"""
return match1(url, r'youtu\.be/([^?/]+)') or \
match1(url, r'youtube\.com/embed/([^/?]+)') or \
match1(url, r'youtube\.com/v/([^/?]+)') or \
match1(url, r'youtube\.com/watch/([^/?]+)') or \
parse_query_param(url, 'v') or \
parse_query_param(parse_query_param(url, 'u'), 'v') |
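A standalone approximation of the same extraction using only urllib.parse; the helper name and sample URLs are illustrative, and the original regex helpers (match1, parse_query_param) are not reproduced.
from urllib.parse import urlparse, parse_qs

def vid_from_url(url):
    parsed = urlparse(url)
    if parsed.netloc.endswith('youtu.be'):
        return parsed.path.lstrip('/')
    qs = parse_qs(parsed.query)
    if 'v' in qs:
        return qs['v'][0]
    return parsed.path.rstrip('/').rsplit('/', 1)[-1]

print(vid_from_url('https://youtu.be/abc123'))                  # abc123
print(vid_from_url('https://www.youtube.com/watch?v=abc123'))   # abc123
print(vid_from_url('https://www.youtube.com/embed/abc123'))     # abc123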
def sys_names(self):
"""
Return a list of unique systematic names from OverallSys and HistoSys
"""
names = {}
for osys in self.overall_sys:
names[osys.name] = None
for hsys in self.histo_sys:
names[hsys.name] = None
return names.keys() | Return a list of unique systematic names from OverallSys and HistoSys | Below is the the instruction that describes the task:
### Input:
Return a list of unique systematic names from OverallSys and HistoSys
### Response:
def sys_names(self):
"""
Return a list of unique systematic names from OverallSys and HistoSys
"""
names = {}
for osys in self.overall_sys:
names[osys.name] = None
for hsys in self.histo_sys:
names[hsys.name] = None
return names.keys() |
def get_cluster(self, name, project_id=None, retry=DEFAULT, timeout=DEFAULT):
"""
Gets details of specified cluster
:param name: The name of the cluster to retrieve
:type name: str
:param project_id: Google Cloud Platform project ID
:type project_id: str
:param retry: A retry object used to retry requests. If None is specified,
requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:return: google.cloud.container_v1.types.Cluster
"""
self.log.info(
"Fetching cluster (project_id=%s, zone=%s, cluster_name=%s)",
project_id or self.project_id, self.location, name
)
return self.get_client().get_cluster(project_id=project_id or self.project_id,
zone=self.location,
cluster_id=name,
retry=retry,
timeout=timeout).self_link | Gets details of specified cluster
:param name: The name of the cluster to retrieve
:type name: str
:param project_id: Google Cloud Platform project ID
:type project_id: str
:param retry: A retry object used to retry requests. If None is specified,
requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:return: google.cloud.container_v1.types.Cluster | Below is the the instruction that describes the task:
### Input:
Gets details of specified cluster
:param name: The name of the cluster to retrieve
:type name: str
:param project_id: Google Cloud Platform project ID
:type project_id: str
:param retry: A retry object used to retry requests. If None is specified,
requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:return: google.cloud.container_v1.types.Cluster
### Response:
def get_cluster(self, name, project_id=None, retry=DEFAULT, timeout=DEFAULT):
"""
Gets details of specified cluster
:param name: The name of the cluster to retrieve
:type name: str
:param project_id: Google Cloud Platform project ID
:type project_id: str
:param retry: A retry object used to retry requests. If None is specified,
requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:return: google.cloud.container_v1.types.Cluster
"""
self.log.info(
"Fetching cluster (project_id=%s, zone=%s, cluster_name=%s)",
project_id or self.project_id, self.location, name
)
return self.get_client().get_cluster(project_id=project_id or self.project_id,
zone=self.location,
cluster_id=name,
retry=retry,
timeout=timeout).self_link |
def _shallow_clone(self, ref, git_cmd): # pragma: windows no cover
"""Perform a shallow clone of a repository and its submodules """
git_config = 'protocol.version=2'
git_cmd('-c', git_config, 'fetch', 'origin', ref, '--depth=1')
git_cmd('checkout', ref)
git_cmd(
'-c', git_config, 'submodule', 'update', '--init',
'--recursive', '--depth=1',
) | Perform a shallow clone of a repository and its submodules | Below is the the instruction that describes the task:
### Input:
Perform a shallow clone of a repository and its submodules
### Response:
def _shallow_clone(self, ref, git_cmd): # pragma: windows no cover
"""Perform a shallow clone of a repository and its submodules """
git_config = 'protocol.version=2'
git_cmd('-c', git_config, 'fetch', 'origin', ref, '--depth=1')
git_cmd('checkout', ref)
git_cmd(
'-c', git_config, 'submodule', 'update', '--init',
'--recursive', '--depth=1',
) |
def inject_params(self, params):
"""Inject params into sys.argv from secureParams API, AOT, or user provided.
Args:
params (dict): A dictionary containing all parameters that need to be injected as args.
"""
for arg, value in params.items():
cli_arg = '--{}'.format(arg)
if cli_arg in sys.argv:
# arg already passed on the command line
self.tcex.log.debug('skipping existing arg: {}'.format(cli_arg))
continue
# ThreatConnect secure/AOT params should be updated in the future to proper JSON format.
# MultiChoice data should be represented as JSON array and Boolean values should be a
# JSON boolean and not a string.
param_data = self.tcex.install_json_params.get(arg) or {}
if param_data.get('type', '').lower() == 'multichoice':
# update "|" delimited value to a proper array for params that have type of
# MultiChoice.
value = value.split('|')
elif param_data.get('type', '').lower() == 'boolean':
# update value to be a boolean instead of string "true"/"false".
value = self.tcex.utils.to_bool(value)
elif arg in self.tc_bool_args:
value = self.tcex.utils.to_bool(value)
if isinstance(value, (bool)):
# handle bool values as flags (e.g., --flag) with no value
if value is True:
sys.argv.append(cli_arg)
elif isinstance(value, (list)):
for mcv in value:
sys.argv.append('{}={}'.format(cli_arg, mcv))
else:
sys.argv.append('{}={}'.format(cli_arg, value))
# reset default_args now that values have been injected into sys.argv
self._default_args, unknown = self.parser.parse_known_args() # pylint: disable=W0612
# reinitialize logger with new log level and api settings
self.tcex._logger() | Inject params into sys.argv from secureParams API, AOT, or user provided.
Args:
params (dict): A dictionary containing all parameters that need to be injected as args. | Below is the the instruction that describes the task:
### Input:
Inject params into sys.argv from secureParams API, AOT, or user provided.
Args:
params (dict): A dictionary containing all parameters that need to be injected as args.
### Response:
def inject_params(self, params):
"""Inject params into sys.argv from secureParams API, AOT, or user provided.
Args:
params (dict): A dictionary containing all parameters that need to be injected as args.
"""
for arg, value in params.items():
cli_arg = '--{}'.format(arg)
if cli_arg in sys.argv:
# arg already passed on the command line
self.tcex.log.debug('skipping existing arg: {}'.format(cli_arg))
continue
# ThreatConnect secure/AOT params should be updated in the future to proper JSON format.
# MultiChoice data should be represented as JSON array and Boolean values should be a
# JSON boolean and not a string.
param_data = self.tcex.install_json_params.get(arg) or {}
if param_data.get('type', '').lower() == 'multichoice':
# update "|" delimited value to a proper array for params that have type of
# MultiChoice.
value = value.split('|')
elif param_data.get('type', '').lower() == 'boolean':
# update value to be a boolean instead of string "true"/"false".
value = self.tcex.utils.to_bool(value)
elif arg in self.tc_bool_args:
value = self.tcex.utils.to_bool(value)
if isinstance(value, (bool)):
# handle bool values as flags (e.g., --flag) with no value
if value is True:
sys.argv.append(cli_arg)
elif isinstance(value, (list)):
for mcv in value:
sys.argv.append('{}={}'.format(cli_arg, mcv))
else:
sys.argv.append('{}={}'.format(cli_arg, value))
# reset default_args now that values have been injected into sys.argv
self._default_args, unknown = self.parser.parse_known_args() # pylint: disable=W0612
# reinitialize logger with new log level and api settings
self.tcex._logger() |
def send(self, stream=False):
"""Send the HTTP request via Python Requests modules.
This method will send the request to the remote endpoint. It will try to handle
temporary communications issues by retrying the request automatically.
Args:
stream (bool): Boolean to enable stream download.
Returns:
Requests.Response: The Request response
"""
#
# api request (gracefully handle temporary communications issues with the API)
#
try:
response = self.session.request(
self._http_method,
self._url,
auth=self._basic_auth,
data=self._body,
files=self._files,
headers=self._headers,
params=self._payload,
stream=stream,
timeout=self._timeout,
)
except Exception as e:
err = 'Failed making HTTP request ({}).'.format(e)
raise RuntimeError(err)
# self.tcex.log.info(u'URL ({}): {}'.format(self._http_method, response.url))
self.tcex.log.info(u'Status Code: {}'.format(response.status_code))
return response | Send the HTTP request via Python Requests modules.
This method will send the request to the remote endpoint. It will try to handle
temporary communications issues by retrying the request automatically.
Args:
stream (bool): Boolean to enable stream download.
Returns:
Requests.Response: The Request response | Below is the the instruction that describes the task:
### Input:
Send the HTTP request via Python Requests modules.
This method will send the request to the remote endpoint. It will try to handle
temporary communications issues by retrying the request automatically.
Args:
stream (bool): Boolean to enable stream download.
Returns:
Requests.Response: The Request response
### Response:
def send(self, stream=False):
"""Send the HTTP request via Python Requests modules.
This method will send the request to the remote endpoint. It will try to handle
temporary communications issues by retrying the request automatically.
Args:
stream (bool): Boolean to enable stream download.
Returns:
Requests.Response: The Request response
"""
#
# api request (gracefully handle temporary communications issues with the API)
#
try:
response = self.session.request(
self._http_method,
self._url,
auth=self._basic_auth,
data=self._body,
files=self._files,
headers=self._headers,
params=self._payload,
stream=stream,
timeout=self._timeout,
)
except Exception as e:
err = 'Failed making HTTP request ({}).'.format(e)
raise RuntimeError(err)
# self.tcex.log.info(u'URL ({}): {}'.format(self._http_method, response.url))
self.tcex.log.info(u'Status Code: {}'.format(response.status_code))
return response |
def distro_check():
"""Return a string containing the distro package manager."""
distro_data = platform.linux_distribution()
distro = [d.lower() for d in distro_data if d.isalpha()]
if any(['ubuntu' in distro, 'debian' in distro]) is True:
return 'apt'
elif any(['centos' in distro, 'redhat' in distro]) is True:
return 'yum'
elif any(['suse' in distro]) is True:
return 'zypper'
else:
raise AssertionError(
'Distro [ %s ] is unsupported.' % distro
) | Return a string containing the distro package manager. | Below is the the instruction that describes the task:
### Input:
Return a string containing the distro package manager.
### Response:
def distro_check():
"""Return a string containing the distro package manager."""
distro_data = platform.linux_distribution()
distro = [d.lower() for d in distro_data if d.isalpha()]
if any(['ubuntu' in distro, 'debian' in distro]) is True:
return 'apt'
elif any(['centos' in distro, 'redhat' in distro]) is True:
return 'yum'
elif any(['suse' in distro]) is True:
return 'zypper'
else:
raise AssertionError(
'Distro [ %s ] is unsupported.' % distro
) |
def add_link_to_self(self, source, weight):
"""
Create and add a ``Link`` from a source node to ``self``.
Args:
source (Node): The node that will own the new ``Link``
pointing to ``self``
weight (int or float): The weight of the newly created ``Link``
Returns: None
Example:
>>> node_1 = Node('One')
>>> node_2 = Node('Two')
>>> node_1.add_link_to_self(node_2, 5)
>>> new_link = node_2.link_list[0]
>>> print('{} {}'.format(new_link.target.value, new_link.weight))
One 5
>>> print(new_link)
node.Link instance pointing to node with value "One" with weight 5
"""
# Generalize source to a list to simplify code
if not isinstance(source, list):
source = [source]
for source_node in source:
source_node.add_link(self, weight=weight) | Create and add a ``Link`` from a source node to ``self``.
Args:
source (Node): The node that will own the new ``Link``
pointing to ``self``
weight (int or float): The weight of the newly created ``Link``
Returns: None
Example:
>>> node_1 = Node('One')
>>> node_2 = Node('Two')
>>> node_1.add_link_to_self(node_2, 5)
>>> new_link = node_2.link_list[0]
>>> print('{} {}'.format(new_link.target.value, new_link.weight))
One 5
>>> print(new_link)
node.Link instance pointing to node with value "One" with weight 5 | Below is the the instruction that describes the task:
### Input:
Create and add a ``Link`` from a source node to ``self``.
Args:
source (Node): The node that will own the new ``Link``
pointing to ``self``
weight (int or float): The weight of the newly created ``Link``
Returns: None
Example:
>>> node_1 = Node('One')
>>> node_2 = Node('Two')
>>> node_1.add_link_to_self(node_2, 5)
>>> new_link = node_2.link_list[0]
>>> print('{} {}'.format(new_link.target.value, new_link.weight))
One 5
>>> print(new_link)
node.Link instance pointing to node with value "One" with weight 5
### Response:
def add_link_to_self(self, source, weight):
"""
Create and add a ``Link`` from a source node to ``self``.
Args:
source (Node): The node that will own the new ``Link``
pointing to ``self``
weight (int or float): The weight of the newly created ``Link``
Returns: None
Example:
>>> node_1 = Node('One')
>>> node_2 = Node('Two')
>>> node_1.add_link_to_self(node_2, 5)
>>> new_link = node_2.link_list[0]
>>> print('{} {}'.format(new_link.target.value, new_link.weight))
One 5
>>> print(new_link)
node.Link instance pointing to node with value "One" with weight 5
"""
# Generalize source to a list to simplify code
if not isinstance(source, list):
source = [source]
for source_node in source:
source_node.add_link(self, weight=weight) |
def _author_uid_get(post):
"""Get the UID of the post author.
:param Post post: The post object to determine authorship of
:return: Author UID
:rtype: str
"""
u = post.meta('author.uid')
return u if u else str(current_user.uid) | Get the UID of the post author.
:param Post post: The post object to determine authorship of
:return: Author UID
:rtype: str | Below is the the instruction that describes the task:
### Input:
Get the UID of the post author.
:param Post post: The post object to determine authorship of
:return: Author UID
:rtype: str
### Response:
def _author_uid_get(post):
"""Get the UID of the post author.
:param Post post: The post object to determine authorship of
:return: Author UID
:rtype: str
"""
u = post.meta('author.uid')
return u if u else str(current_user.uid) |
def compress_css(self, paths, output_filename, variant=None, **kwargs):
"""Concatenate and compress CSS files"""
css = self.concatenate_and_rewrite(paths, output_filename, variant)
compressor = self.css_compressor
if compressor:
css = getattr(compressor(verbose=self.verbose), 'compress_css')(css)
if not variant:
return css
elif variant == "datauri":
return self.with_data_uri(css)
else:
raise CompressorError("\"%s\" is not a valid variant" % variant) | Concatenate and compress CSS files | Below is the the instruction that describes the task:
### Input:
Concatenate and compress CSS files
### Response:
def compress_css(self, paths, output_filename, variant=None, **kwargs):
"""Concatenate and compress CSS files"""
css = self.concatenate_and_rewrite(paths, output_filename, variant)
compressor = self.css_compressor
if compressor:
css = getattr(compressor(verbose=self.verbose), 'compress_css')(css)
if not variant:
return css
elif variant == "datauri":
return self.with_data_uri(css)
else:
raise CompressorError("\"%s\" is not a valid variant" % variant) |
def memoize(f):
"""Cache value returned by the function."""
@wraps(f)
def w(*args, **kw):
memoize.mem[f] = v = f(*args, **kw)
return v
return w | Cache value returned by the function. | Below is the the instruction that describes the task:
### Input:
Cache value returned by the function.
### Response:
def memoize(f):
"""Cache value returned by the function."""
@wraps(f)
def w(*args, **kw):
memoize.mem[f] = v = f(*args, **kw)
return v
return w |
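A hypothetical usage sketch, assuming the memoize decorator above is in scope; note that memoize.mem must be assigned (for example an empty dict) before the wrapper runs, since the decorator only writes into it.
memoize.mem = {}            # the wrapper expects this mapping to exist

@memoize
def answer():
    return 42

print(answer())             # 42
print(memoize.mem)          # {<function answer at 0x...>: 42} -- latest value per function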
def _series_fluoview(self):
"""Return image series in FluoView file."""
pages = self.pages._getlist(validate=False)
mm = self.fluoview_metadata
mmhd = list(reversed(mm['Dimensions']))
axes = ''.join(TIFF.MM_DIMENSIONS.get(i[0].upper(), 'Q')
for i in mmhd if i[1] > 1)
shape = tuple(int(i[1]) for i in mmhd if i[1] > 1)
self.is_uniform = True
return [TiffPageSeries(pages, shape, pages[0].dtype, axes,
name=mm['ImageName'], kind='FluoView')] | Return image series in FluoView file. | Below is the the instruction that describes the task:
### Input:
Return image series in FluoView file.
### Response:
def _series_fluoview(self):
"""Return image series in FluoView file."""
pages = self.pages._getlist(validate=False)
mm = self.fluoview_metadata
mmhd = list(reversed(mm['Dimensions']))
axes = ''.join(TIFF.MM_DIMENSIONS.get(i[0].upper(), 'Q')
for i in mmhd if i[1] > 1)
shape = tuple(int(i[1]) for i in mmhd if i[1] > 1)
self.is_uniform = True
return [TiffPageSeries(pages, shape, pages[0].dtype, axes,
name=mm['ImageName'], kind='FluoView')] |
def arguments(function, extra_arguments=0):
"""Returns the name of all arguments a function takes"""
if not hasattr(function, '__code__'):
return ()
return function.__code__.co_varnames[:function.__code__.co_argcount + extra_arguments] | Returns the name of all arguments a function takes | Below is the the instruction that describes the task:
### Input:
Returns the name of all arguments a function takes
### Response:
def arguments(function, extra_arguments=0):
"""Returns the name of all arguments a function takes"""
if not hasattr(function, '__code__'):
return ()
return function.__code__.co_varnames[:function.__code__.co_argcount + extra_arguments] |
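An illustrative check of the helper above (assumed to be in scope); the sample function is made up.
def greet(name, punctuation='!'):
    return name + punctuation

print(arguments(greet))   # ('name', 'punctuation')
print(arguments(len))     # () -- built-ins have no __code__ attribute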
def changelist_view(self, request, extra_context=None):
"""
Inject extra links into template context.
"""
links = []
for action in self.get_extra_actions():
links.append({
'label': self._get_action_label(action),
'href': self._get_action_href(action)
})
extra_context = extra_context or {}
extra_context['extra_links'] = links
return super(ExtraActionsMixin, self).changelist_view(
request, extra_context=extra_context,
) | Inject extra links into template context. | Below is the the instruction that describes the task:
### Input:
Inject extra links into template context.
### Response:
def changelist_view(self, request, extra_context=None):
"""
Inject extra links into template context.
"""
links = []
for action in self.get_extra_actions():
links.append({
'label': self._get_action_label(action),
'href': self._get_action_href(action)
})
extra_context = extra_context or {}
extra_context['extra_links'] = links
return super(ExtraActionsMixin, self).changelist_view(
request, extra_context=extra_context,
) |
def get_followers(self, first_user_id=None):
"""
Get the list of followers
For details, see http://mp.weixin.qq.com/wiki/index.php?title=获取关注者列表
:param first_user_id: Optional. The first OPENID to pull; if omitted, pulling starts from the beginning by default.
:return: The returned JSON data packet
"""
params = {"access_token": self.token}
if first_user_id:
params["next_openid"] = first_user_id
return self.get(
"https://api.weixin.qq.com/cgi-bin/user/get", params=params
) | Get the list of followers
For details, please refer to http://mp.weixin.qq.com/wiki/index.php?title=获取关注者列表
:param first_user_id: Optional. The first OPENID to pull; if omitted, pulling starts from the beginning
:return: the returned JSON data | Below is the instruction that describes the task:
### Input:
Get the list of followers
For details, please refer to http://mp.weixin.qq.com/wiki/index.php?title=获取关注者列表
:param first_user_id: Optional. The first OPENID to pull; if omitted, pulling starts from the beginning
:return: the returned JSON data
### Response:
def get_followers(self, first_user_id=None):
"""
Get the list of followers
For details, please refer to http://mp.weixin.qq.com/wiki/index.php?title=获取关注者列表
:param first_user_id: Optional. The first OPENID to pull; if omitted, pulling starts from the beginning
:return: the returned JSON data
"""
params = {"access_token": self.token}
if first_user_id:
params["next_openid"] = first_user_id
return self.get(
"https://api.weixin.qq.com/cgi-bin/user/get", params=params
) |
def refresh_rooms(self):
"""Calls GET /joined_rooms to refresh rooms list."""
for room_id in self.user_api.get_joined_rooms()["joined_rooms"]:
self._rooms[room_id] = MatrixRoom(room_id, self.user_api) | Calls GET /joined_rooms to refresh rooms list. | Below is the instruction that describes the task:
### Input:
Calls GET /joined_rooms to refresh rooms list.
### Response:
def refresh_rooms(self):
"""Calls GET /joined_rooms to refresh rooms list."""
for room_id in self.user_api.get_joined_rooms()["joined_rooms"]:
self._rooms[room_id] = MatrixRoom(room_id, self.user_api) |
def infer(msg, mrar=False):
"""Estimate the most likely BDS code of an message.
Args:
msg (String): 28 bytes hexadecimal message string
mrar (bool): Also infer MRAR (BDS 44) and MHR (BDS 45). Defaults to False.
Returns:
String or None: BDS version, or possible versions, or None if nothing matches.
"""
df = common.df(msg)
if common.allzeros(msg):
return 'EMPTY'
# For ADS-B / Mode-S extended squitter
if df == 17:
tc = common.typecode(msg)
if 1 <= tc <= 4:
return 'BDS08' # identification and category
if 5 <= tc <= 8:
return 'BDS06' # surface movement
if 9 <= tc <= 18:
return 'BDS05' # airborne position, baro-alt
if tc == 19:
return 'BDS09' # airborne velocity
if 20 <= tc <= 22:
return 'BDS05' # airborne position, gnss-alt
if tc == 28:
return 'BDS61' # aircraft status
if tc == 29:
return 'BDS62' # target state and status
if tc == 31:
return 'BDS65' # operational status
# For Comm-B replies
IS10 = bds10.is10(msg)
IS17 = bds17.is17(msg)
IS20 = bds20.is20(msg)
IS30 = bds30.is30(msg)
IS40 = bds40.is40(msg)
IS50 = bds50.is50(msg)
IS60 = bds60.is60(msg)
IS44 = bds44.is44(msg)
IS45 = bds45.is45(msg)
if mrar:
allbds = np.array(["BDS10", "BDS17", "BDS20", "BDS30", "BDS40",
"BDS44", "BDS45", "BDS50", "BDS60"])
mask = [IS10, IS17, IS20, IS30, IS40, IS44, IS45, IS50, IS60]
else:
allbds = np.array(["BDS10", "BDS17", "BDS20", "BDS30", "BDS40",
"BDS50", "BDS60"])
mask = [IS10, IS17, IS20, IS30, IS40, IS50, IS60]
bds = ','.join(sorted(allbds[mask]))
if len(bds) == 0:
return None
else:
return bds | Estimate the most likely BDS code of a message.
Args:
msg (String): 28 bytes hexadecimal message string
mrar (bool): Also infer MRAR (BDS 44) and MHR (BDS 45). Defaults to False.
Returns:
String or None: BDS version, or possible versions, or None if nothing matches. | Below is the instruction that describes the task:
### Input:
Estimate the most likely BDS code of a message.
Args:
msg (String): 28 bytes hexadecimal message string
mrar (bool): Also infer MRAR (BDS 44) and MHR (BDS 45). Defaults to False.
Returns:
String or None: BDS version, or possible versions, or None if nothing matches.
### Response:
def infer(msg, mrar=False):
"""Estimate the most likely BDS code of an message.
Args:
msg (String): 28 bytes hexadecimal message string
mrar (bool): Also infer MRAR (BDS 44) and MHR (BDS 45). Defaults to False.
Returns:
String or None: BDS version, or possible versions, or None if nothing matches.
"""
df = common.df(msg)
if common.allzeros(msg):
return 'EMPTY'
# For ADS-B / Mode-S extended squitter
if df == 17:
tc = common.typecode(msg)
if 1 <= tc <= 4:
return 'BDS08' # identification and category
if 5 <= tc <= 8:
return 'BDS06' # surface movement
if 9 <= tc <= 18:
return 'BDS05' # airborne position, baro-alt
if tc == 19:
return 'BDS09' # airborne velocity
if 20 <= tc <= 22:
return 'BDS05' # airborne position, gnss-alt
if tc == 28:
return 'BDS61' # aircraft status
if tc == 29:
return 'BDS62' # target state and status
if tc == 31:
return 'BDS65' # operational status
# For Comm-B replies
IS10 = bds10.is10(msg)
IS17 = bds17.is17(msg)
IS20 = bds20.is20(msg)
IS30 = bds30.is30(msg)
IS40 = bds40.is40(msg)
IS50 = bds50.is50(msg)
IS60 = bds60.is60(msg)
IS44 = bds44.is44(msg)
IS45 = bds45.is45(msg)
if mrar:
allbds = np.array(["BDS10", "BDS17", "BDS20", "BDS30", "BDS40",
"BDS44", "BDS45", "BDS50", "BDS60"])
mask = [IS10, IS17, IS20, IS30, IS40, IS44, IS45, IS50, IS60]
else:
allbds = np.array(["BDS10", "BDS17", "BDS20", "BDS30", "BDS40",
"BDS50", "BDS60"])
mask = [IS10, IS17, IS20, IS30, IS40, IS50, IS60]
bds = ','.join(sorted(allbds[mask]))
if len(bds) == 0:
return None
else:
return bds |
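The DF17 branch of infer is essentially a typecode-to-register lookup; restated on its own (hypothetical helper name, no pyModeS dependency) the ranges are easier to scan:

```python
def bds_from_typecode(tc):
    """Map an ADS-B (DF17) typecode to its BDS register, mirroring the branch above."""
    if 1 <= tc <= 4:
        return 'BDS08'                    # identification and category
    if 5 <= tc <= 8:
        return 'BDS06'                    # surface movement
    if 9 <= tc <= 18 or 20 <= tc <= 22:
        return 'BDS05'                    # airborne position (baro / GNSS altitude)
    if tc == 19:
        return 'BDS09'                    # airborne velocity
    if tc == 28:
        return 'BDS61'                    # aircraft status
    if tc == 29:
        return 'BDS62'                    # target state and status
    if tc == 31:
        return 'BDS65'                    # operational status
    return None

print(bds_from_typecode(11))              # BDS05
```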
def update(vm, from_file=None, key='uuid', **kwargs):
'''
Update a new vm
vm : string
vm to be updated
from_file : string
json file to update the vm with -- if present, all other options will be ignored
key : string [uuid|alias|hostname]
value type of 'vm' parameter
kwargs : string|int|...
options to update for the vm
CLI Example:
.. code-block:: bash
salt '*' vmadm.update vm=186da9ab-7392-4f55-91a5-b8f1fe770543 from_file=/tmp/new_vm.json
salt '*' vmadm.update vm=nacl key=alias from_file=/tmp/new_vm.json
salt '*' vmadm.update vm=186da9ab-7392-4f55-91a5-b8f1fe770543 max_physical_memory=1024
'''
ret = {}
# prepare vmcfg
vmcfg = {}
kwargs = salt.utils.args.clean_kwargs(**kwargs)
for k, v in six.iteritems(kwargs):
vmcfg[k] = v
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
uuid = lookup('{0}={1}'.format(key, vm), one=True)
if 'Error' in uuid:
return uuid
if from_file:
return _create_update_from_file('update', uuid, path=from_file)
else:
return _create_update_from_cfg('update', uuid, vmcfg=vmcfg) | Update a new vm
vm : string
vm to be updated
from_file : string
json file to update the vm with -- if present, all other options will be ignored
key : string [uuid|alias|hostname]
value type of 'vm' parameter
kwargs : string|int|...
options to update for the vm
CLI Example:
.. code-block:: bash
salt '*' vmadm.update vm=186da9ab-7392-4f55-91a5-b8f1fe770543 from_file=/tmp/new_vm.json
salt '*' vmadm.update vm=nacl key=alias from_file=/tmp/new_vm.json
salt '*' vmadm.update vm=186da9ab-7392-4f55-91a5-b8f1fe770543 max_physical_memory=1024 | Below is the instruction that describes the task:
### Input:
Update a new vm
vm : string
vm to be updated
from_file : string
json file to update the vm with -- if present, all other options will be ignored
key : string [uuid|alias|hostname]
value type of 'vm' parameter
kwargs : string|int|...
options to update for the vm
CLI Example:
.. code-block:: bash
salt '*' vmadm.update vm=186da9ab-7392-4f55-91a5-b8f1fe770543 from_file=/tmp/new_vm.json
salt '*' vmadm.update vm=nacl key=alias from_file=/tmp/new_vm.json
salt '*' vmadm.update vm=186da9ab-7392-4f55-91a5-b8f1fe770543 max_physical_memory=1024
### Response:
def update(vm, from_file=None, key='uuid', **kwargs):
'''
Update a new vm
vm : string
vm to be updated
from_file : string
json file to update the vm with -- if present, all other options will be ignored
key : string [uuid|alias|hostname]
value type of 'vm' parameter
kwargs : string|int|...
options to update for the vm
CLI Example:
.. code-block:: bash
salt '*' vmadm.update vm=186da9ab-7392-4f55-91a5-b8f1fe770543 from_file=/tmp/new_vm.json
salt '*' vmadm.update vm=nacl key=alias from_file=/tmp/new_vm.json
salt '*' vmadm.update vm=186da9ab-7392-4f55-91a5-b8f1fe770543 max_physical_memory=1024
'''
ret = {}
# prepare vmcfg
vmcfg = {}
kwargs = salt.utils.args.clean_kwargs(**kwargs)
for k, v in six.iteritems(kwargs):
vmcfg[k] = v
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
uuid = lookup('{0}={1}'.format(key, vm), one=True)
if 'Error' in uuid:
return uuid
if from_file:
return _create_update_from_file('update', uuid, path=from_file)
else:
return _create_update_from_cfg('update', uuid, vmcfg=vmcfg) |
def build_single(mode):
"""Build, in the single-user mode."""
if mode == 'force':
amode = ['-a']
else:
amode = []
if executable.endswith('uwsgi'):
# hack, might fail in some environments!
_executable = executable[:-5] + 'python'
else:
_executable = executable
p = subprocess.Popen([_executable, '-m', 'nikola', 'build'] + amode,
stderr=subprocess.PIPE)
p.wait()
rl = p.stderr.readlines()
try:
out = ''.join(rl)
except TypeError:
out = ''.join(l.decode('utf-8') for l in rl)
return (p.returncode == 0), out | Build, in the single-user mode. | Below is the instruction that describes the task:
### Input:
Build, in the single-user mode.
### Response:
def build_single(mode):
"""Build, in the single-user mode."""
if mode == 'force':
amode = ['-a']
else:
amode = []
if executable.endswith('uwsgi'):
# hack, might fail in some environments!
_executable = executable[:-5] + 'python'
else:
_executable = executable
p = subprocess.Popen([_executable, '-m', 'nikola', 'build'] + amode,
stderr=subprocess.PIPE)
p.wait()
rl = p.stderr.readlines()
try:
out = ''.join(rl)
except TypeError:
out = ''.join(l.decode('utf-8') for l in rl)
return (p.returncode == 0), out |
def extract_scopes(self, request):
"""
Extract scopes from a request object.
"""
payload = self.extract_payload(request)
if not payload:
return None
scopes_attribute = self.config.scopes_name()
return payload.get(scopes_attribute, None) | Extract scopes from a request object. | Below is the instruction that describes the task:
### Input:
Extract scopes from a request object.
### Response:
def extract_scopes(self, request):
"""
Extract scopes from a request object.
"""
payload = self.extract_payload(request)
if not payload:
return None
scopes_attribute = self.config.scopes_name()
return payload.get(scopes_attribute, None) |
def register_auth_method(self, auth_method):
"""
Register a new authentication method
name
the name of the authentication method, typically displayed by the webapp
input_to_display
Only available after the Plugin Manager is loaded
"""
if not self._loaded:
raise PluginManagerNotLoadedException()
self._user_manager.register_auth_method(auth_method) | Register a new authentication method
name
the name of the authentication method, typically displayed by the webapp
input_to_display
Only available after the Plugin Manager is loaded | Below is the instruction that describes the task:
### Input:
Register a new authentication method
name
the name of the authentication method, typically displayed by the webapp
input_to_display
Only available after the Plugin Manager is loaded
### Response:
def register_auth_method(self, auth_method):
"""
Register a new authentication method
name
the name of the authentication method, typically displayed by the webapp
input_to_display
Only available after the Plugin Manager is loaded
"""
if not self._loaded:
raise PluginManagerNotLoadedException()
self._user_manager.register_auth_method(auth_method) |
def open_dataarray(filename_or_obj, group=None, decode_cf=True,
mask_and_scale=None, decode_times=True, autoclose=None,
concat_characters=True, decode_coords=True, engine=None,
chunks=None, lock=None, cache=None, drop_variables=None,
backend_kwargs=None, use_cftime=None):
"""Open an DataArray from a netCDF file containing a single data variable.
This is designed to read netCDF files with only one data variable. If
multiple variables are present then a ValueError is raised.
Parameters
----------
filename_or_obj : str, Path, file or xarray.backends.*DataStore
Strings and Paths are interpreted as a path to a netCDF file or an
OpenDAP URL and opened with python-netCDF4, unless the filename ends
with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
netCDF4 files).
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
pseudonetcdf backend.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \
optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
chunks : int or dict, optional
If chunks is provided, it is used to load the new dataset into dask
arrays.
lock : False or duck threading.Lock, optional
Resource lock to use when reading data from disk. Only relevant when
using dask or another form of parallelism. By default, appropriate
locks are chosen to safely read and write files with the currently
active dask scheduler.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
backend_kwargs: dictionary, optional
A dictionary of keyword arguments to pass on to the backend. This
may be useful when backend options would improve performance or
allow user control of dataset processing.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Notes
-----
This is designed to be fully compatible with `DataArray.to_netcdf`. Saving
using `DataArray.to_netcdf` and then loading with this function will
produce an identical result.
All parameters are passed directly to `xarray.open_dataset`. See that
documentation for further details.
See also
--------
open_dataset
"""
dataset = open_dataset(filename_or_obj, group=group, decode_cf=decode_cf,
mask_and_scale=mask_and_scale,
decode_times=decode_times, autoclose=autoclose,
concat_characters=concat_characters,
decode_coords=decode_coords, engine=engine,
chunks=chunks, lock=lock, cache=cache,
drop_variables=drop_variables,
backend_kwargs=backend_kwargs,
use_cftime=use_cftime)
if len(dataset.data_vars) != 1:
raise ValueError('Given file dataset contains more than one data '
'variable. Please read with xarray.open_dataset and '
'then select the variable you want.')
else:
data_array, = dataset.data_vars.values()
data_array._file_obj = dataset._file_obj
# Reset names if they were changed during saving
# to ensure that we can 'roundtrip' perfectly
if DATAARRAY_NAME in dataset.attrs:
data_array.name = dataset.attrs[DATAARRAY_NAME]
del dataset.attrs[DATAARRAY_NAME]
if data_array.name == DATAARRAY_VARIABLE:
data_array.name = None
return data_array | Open a DataArray from a netCDF file containing a single data variable.
This is designed to read netCDF files with only one data variable. If
multiple variables are present then a ValueError is raised.
Parameters
----------
filename_or_obj : str, Path, file or xarray.backends.*DataStore
Strings and Paths are interpreted as a path to a netCDF file or an
OpenDAP URL and opened with python-netCDF4, unless the filename ends
with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
netCDF4 files).
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
pseudonetcdf backend.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \
optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
chunks : int or dict, optional
If chunks is provided, it is used to load the new dataset into dask
arrays.
lock : False or duck threading.Lock, optional
Resource lock to use when reading data from disk. Only relevant when
using dask or another form of parallelism. By default, appropriate
locks are chosen to safely read and write files with the currently
active dask scheduler.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
backend_kwargs: dictionary, optional
A dictionary of keyword arguments to pass on to the backend. This
may be useful when backend options would improve performance or
allow user control of dataset processing.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Notes
-----
This is designed to be fully compatible with `DataArray.to_netcdf`. Saving
using `DataArray.to_netcdf` and then loading with this function will
produce an identical result.
All parameters are passed directly to `xarray.open_dataset`. See that
documentation for further details.
See also
--------
open_dataset | Below is the instruction that describes the task:
### Input:
Open a DataArray from a netCDF file containing a single data variable.
This is designed to read netCDF files with only one data variable. If
multiple variables are present then a ValueError is raised.
Parameters
----------
filename_or_obj : str, Path, file or xarray.backends.*DataStore
Strings and Paths are interpreted as a path to a netCDF file or an
OpenDAP URL and opened with python-netCDF4, unless the filename ends
with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
netCDF4 files).
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
pseudonetcdf backend.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \
optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
chunks : int or dict, optional
If chunks is provided, it is used to load the new dataset into dask
arrays.
lock : False or duck threading.Lock, optional
Resource lock to use when reading data from disk. Only relevant when
using dask or another form of parallelism. By default, appropriate
locks are chosen to safely read and write files with the currently
active dask scheduler.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
backend_kwargs: dictionary, optional
A dictionary of keyword arguments to pass on to the backend. This
may be useful when backend options would improve performance or
allow user control of dataset processing.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Notes
-----
This is designed to be fully compatible with `DataArray.to_netcdf`. Saving
using `DataArray.to_netcdf` and then loading with this function will
produce an identical result.
All parameters are passed directly to `xarray.open_dataset`. See that
documentation for further details.
See also
--------
open_dataset
### Response:
def open_dataarray(filename_or_obj, group=None, decode_cf=True,
mask_and_scale=None, decode_times=True, autoclose=None,
concat_characters=True, decode_coords=True, engine=None,
chunks=None, lock=None, cache=None, drop_variables=None,
backend_kwargs=None, use_cftime=None):
"""Open an DataArray from a netCDF file containing a single data variable.
This is designed to read netCDF files with only one data variable. If
multiple variables are present then a ValueError is raised.
Parameters
----------
filename_or_obj : str, Path, file or xarray.backends.*DataStore
Strings and Paths are interpreted as a path to a netCDF file or an
OpenDAP URL and opened with python-netCDF4, unless the filename ends
with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
netCDF4 files).
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
pseudonetcdf backend.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \
optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
chunks : int or dict, optional
If chunks is provided, it is used to load the new dataset into dask
arrays.
lock : False or duck threading.Lock, optional
Resource lock to use when reading data from disk. Only relevant when
using dask or another form of parallelism. By default, appropriate
locks are chosen to safely read and write files with the currently
active dask scheduler.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
backend_kwargs: dictionary, optional
A dictionary of keyword arguments to pass on to the backend. This
may be useful when backend options would improve performance or
allow user control of dataset processing.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Notes
-----
This is designed to be fully compatible with `DataArray.to_netcdf`. Saving
using `DataArray.to_netcdf` and then loading with this function will
produce an identical result.
All parameters are passed directly to `xarray.open_dataset`. See that
documentation for further details.
See also
--------
open_dataset
"""
dataset = open_dataset(filename_or_obj, group=group, decode_cf=decode_cf,
mask_and_scale=mask_and_scale,
decode_times=decode_times, autoclose=autoclose,
concat_characters=concat_characters,
decode_coords=decode_coords, engine=engine,
chunks=chunks, lock=lock, cache=cache,
drop_variables=drop_variables,
backend_kwargs=backend_kwargs,
use_cftime=use_cftime)
if len(dataset.data_vars) != 1:
raise ValueError('Given file dataset contains more than one data '
'variable. Please read with xarray.open_dataset and '
'then select the variable you want.')
else:
data_array, = dataset.data_vars.values()
data_array._file_obj = dataset._file_obj
# Reset names if they were changed during saving
# to ensure that we can 'roundtrip' perfectly
if DATAARRAY_NAME in dataset.attrs:
data_array.name = dataset.attrs[DATAARRAY_NAME]
del dataset.attrs[DATAARRAY_NAME]
if data_array.name == DATAARRAY_VARIABLE:
data_array.name = None
return data_array |
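A short round trip illustrating the single-variable contract described above (file name is illustrative):

```python
import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(6).reshape(2, 3), dims=("x", "y"), name="temperature")
da.to_netcdf("single_var.nc")                # exactly one data variable on disk

loaded = xr.open_dataarray("single_var.nc")  # succeeds for a single-variable file
# A file with several data variables raises ValueError here; use xr.open_dataset instead.
```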
def _bend_cos_low(a, b, deriv):
"""Similar to bend_cos, but with relative vectors"""
a = Vector3(6, deriv, a, (0, 1, 2))
b = Vector3(6, deriv, b, (3, 4, 5))
a /= a.norm()
b /= b.norm()
return dot(a, b).results() | Similar to bend_cos, but with relative vectors | Below is the instruction that describes the task:
### Input:
Similar to bend_cos, but with relative vectors
### Response:
def _bend_cos_low(a, b, deriv):
"""Similar to bend_cos, but with relative vectors"""
a = Vector3(6, deriv, a, (0, 1, 2))
b = Vector3(6, deriv, b, (3, 4, 5))
a /= a.norm()
b /= b.norm()
return dot(a, b).results() |
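The value being assembled above is the cosine of the angle between the two normalized relative vectors; a plain NumPy version, without the derivative bookkeeping, for intuition:

```python
import numpy as np

a = np.array([1.0, 0.0, 0.0])
b = np.array([1.0, 1.0, 0.0])
cos_angle = np.dot(a / np.linalg.norm(a), b / np.linalg.norm(b))
print(cos_angle)          # ~0.7071, i.e. a 45-degree bend
```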
def get_path_to_repo(self, repo: str) -> Path:
""" Returns a :class:`Path <pathlib.Path>` to the location where all the branches from this repo are stored.
:param repo: Repo URL
:return: Path to where branches from this repository are cloned.
"""
return Path(self.base_dir) / "repos" / self.repo_id(repo) | Returns a :class:`Path <pathlib.Path>` to the location where all the branches from this repo are stored.
:param repo: Repo URL
:return: Path to where branches from this repository are cloned. | Below is the instruction that describes the task:
### Input:
Returns a :class:`Path <pathlib.Path>` to the location where all the branches from this repo are stored.
:param repo: Repo URL
:return: Path to where branches from this repository are cloned.
### Response:
def get_path_to_repo(self, repo: str) -> Path:
""" Returns a :class:`Path <pathlib.Path>` to the location where all the branches from this repo are stored.
:param repo: Repo URL
:return: Path to where branches from this repository are cloned.
"""
return Path(self.base_dir) / "repos" / self.repo_id(repo) |
def read_hdf5_dict(h5f, names=None, group=None, **kwargs):
"""Read a `TimeSeriesDict` from HDF5
"""
# find group from which to read
if group:
h5g = h5f[group]
else:
h5g = h5f
# find list of names to read
if names is None:
names = [key for key in h5g if _is_timeseries_dataset(h5g[key])]
# read names
out = kwargs.pop('dict_type', TimeSeriesDict)()
kwargs.setdefault('array_type', out.EntryClass)
for name in names:
out[name] = read_hdf5_timeseries(h5g[name], **kwargs)
return out | Read a `TimeSeriesDict` from HDF5 | Below is the instruction that describes the task:
### Input:
Read a `TimeSeriesDict` from HDF5
### Response:
def read_hdf5_dict(h5f, names=None, group=None, **kwargs):
"""Read a `TimeSeriesDict` from HDF5
"""
# find group from which to read
if group:
h5g = h5f[group]
else:
h5g = h5f
# find list of names to read
if names is None:
names = [key for key in h5g if _is_timeseries_dataset(h5g[key])]
# read names
out = kwargs.pop('dict_type', TimeSeriesDict)()
kwargs.setdefault('array_type', out.EntryClass)
for name in names:
out[name] = read_hdf5_timeseries(h5g[name], **kwargs)
return out |
def _connect(dbfile: 'PathLike') -> apsw.Connection:
"""Connect to SQLite database file."""
conn = apsw.Connection(os.fspath(dbfile))
_set_foreign_keys(conn, 1)
assert _get_foreign_keys(conn) == 1
return conn | Connect to SQLite database file. | Below is the instruction that describes the task:
### Input:
Connect to SQLite database file.
### Response:
def _connect(dbfile: 'PathLike') -> apsw.Connection:
"""Connect to SQLite database file."""
conn = apsw.Connection(os.fspath(dbfile))
_set_foreign_keys(conn, 1)
assert _get_foreign_keys(conn) == 1
return conn |
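The explicit pragma round trip matters because SQLite leaves foreign-key enforcement off by default on every new connection; the same idea with the standard-library sqlite3 module, as a sketch:

```python
import sqlite3

conn = sqlite3.connect("example.db")
conn.execute("PRAGMA foreign_keys = ON")    # per-connection setting, off by default
assert conn.execute("PRAGMA foreign_keys").fetchone()[0] == 1
```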
def agm(x, y, context=None):
"""
Return the arithmetic geometric mean of x and y.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_agm,
(
BigFloat._implicit_convert(x),
BigFloat._implicit_convert(y),
),
context,
) | Return the arithmetic geometric mean of x and y. | Below is the instruction that describes the task:
### Input:
Return the arithmetic geometric mean of x and y.
### Response:
def agm(x, y, context=None):
"""
Return the arithmetic geometric mean of x and y.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_agm,
(
BigFloat._implicit_convert(x),
BigFloat._implicit_convert(y),
),
context,
) |
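For intuition, the arithmetic-geometric mean that the wrapper above delegates to MPFR can be sketched with plain floats (positive inputs assumed, no arbitrary precision):

```python
import math

def agm_float(x, y, tol=1e-15):
    a, g = float(x), float(y)
    while abs(a - g) > tol * a:
        a, g = (a + g) / 2.0, math.sqrt(a * g)
    return a

print(agm_float(1.0, 2.0))   # ~1.4567910310469068
```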
def HWProcess(cls, proc: HWProcess, ctx: ResourceContext) -> None:
"""
Guess resource usage by HWProcess
"""
seen = ctx.seen
for stm in proc.statements:
encl = stm._enclosed_for
full_ev_dep = stm._is_completly_event_dependent
now_ev_dep = stm._now_is_event_dependent
ev_dep = full_ev_dep or now_ev_dep
out_mux_dim = count_mux_inputs_for_outputs(stm)
for o in stm._outputs:
if o in seen:
continue
i = out_mux_dim[o]
if isinstance(o._dtype, HArray):
assert i == 1, (o, i, " only one ram port per HWProcess")
for a in walk_assignments(stm, o):
assert len(a.indexes) == 1, "one address per RAM port"
addr = a.indexes[0]
ctx.registerRAM_write_port(o, addr, ev_dep)
elif ev_dep:
ctx.registerFF(o)
if i > 1:
ctx.registerMUX(stm, o, i)
elif o not in encl:
ctx.registerLatch(o)
if i > 1:
ctx.registerMUX(stm, o, i)
elif i > 1:
ctx.registerMUX(stm, o, i)
else:
# just a connection
continue
if isinstance(stm, SwitchContainer):
caseEqs = set([stm.switchOn._eq(c[0]) for c in stm.cases])
inputs = chain(
[sig for sig in stm._inputs if sig not in caseEqs], [stm.switchOn])
else:
inputs = stm._inputs
for i in inputs:
# discover only internal signals in this statements for
# operators
if not i.hidden or i in seen:
continue
cls.HWProcess_operators(i, ctx, ev_dep) | Guess resource usage by HWProcess | Below is the instruction that describes the task:
### Input:
Guess resource usage by HWProcess
### Response:
def HWProcess(cls, proc: HWProcess, ctx: ResourceContext) -> None:
"""
Guess resource usage by HWProcess
"""
seen = ctx.seen
for stm in proc.statements:
encl = stm._enclosed_for
full_ev_dep = stm._is_completly_event_dependent
now_ev_dep = stm._now_is_event_dependent
ev_dep = full_ev_dep or now_ev_dep
out_mux_dim = count_mux_inputs_for_outputs(stm)
for o in stm._outputs:
if o in seen:
continue
i = out_mux_dim[o]
if isinstance(o._dtype, HArray):
assert i == 1, (o, i, " only one ram port per HWProcess")
for a in walk_assignments(stm, o):
assert len(a.indexes) == 1, "one address per RAM port"
addr = a.indexes[0]
ctx.registerRAM_write_port(o, addr, ev_dep)
elif ev_dep:
ctx.registerFF(o)
if i > 1:
ctx.registerMUX(stm, o, i)
elif o not in encl:
ctx.registerLatch(o)
if i > 1:
ctx.registerMUX(stm, o, i)
elif i > 1:
ctx.registerMUX(stm, o, i)
else:
# just a connection
continue
if isinstance(stm, SwitchContainer):
caseEqs = set([stm.switchOn._eq(c[0]) for c in stm.cases])
inputs = chain(
[sig for sig in stm._inputs if sig not in caseEqs], [stm.switchOn])
else:
inputs = stm._inputs
for i in inputs:
# discover only internal signals in this statements for
# operators
if not i.hidden or i in seen:
continue
cls.HWProcess_operators(i, ctx, ev_dep) |
def save_frames(obj, filename, fmt=None, backend=None, options=None):
"""
Utility to export object to files frame by frame, numbered individually.
Will use default backend and figure format by default.
"""
backend = Store.current_backend if backend is None else backend
renderer = Store.renderers[backend]
fmt = renderer.params('fig').objects[0] if fmt is None else fmt
plot = renderer.get_plot(obj)
for i in range(len(plot)):
plot.update(i)
renderer.save(plot, '%s_%s' % (filename, i), fmt=fmt, options=options) | Utility to export object to files frame by frame, numbered individually.
Will use default backend and figure format by default. | Below is the instruction that describes the task:
### Input:
Utility to export object to files frame by frame, numbered individually.
Will use default backend and figure format by default.
### Response:
def save_frames(obj, filename, fmt=None, backend=None, options=None):
"""
Utility to export object to files frame by frame, numbered individually.
Will use default backend and figure format by default.
"""
backend = Store.current_backend if backend is None else backend
renderer = Store.renderers[backend]
fmt = renderer.params('fig').objects[0] if fmt is None else fmt
plot = renderer.get_plot(obj)
for i in range(len(plot)):
plot.update(i)
renderer.save(plot, '%s_%s' % (filename, i), fmt=fmt, options=options) |
def create(self, **kwargs):
'''Create the device service cluster group and add devices to it.'''
self._set_attributes(**kwargs)
self._check_type()
pollster(self._check_all_devices_in_sync)()
dg = self.devices[0].tm.cm.device_groups.device_group
dg.create(name=self.name, partition=self.partition, type=self.type)
for device in self.devices:
self._add_device_to_device_group(device)
device.tm.sys.config.exec_cmd('save')
self.ensure_all_devices_in_sync() | Create the device service cluster group and add devices to it. | Below is the instruction that describes the task:
### Input:
Create the device service cluster group and add devices to it.
### Response:
def create(self, **kwargs):
'''Create the device service cluster group and add devices to it.'''
self._set_attributes(**kwargs)
self._check_type()
pollster(self._check_all_devices_in_sync)()
dg = self.devices[0].tm.cm.device_groups.device_group
dg.create(name=self.name, partition=self.partition, type=self.type)
for device in self.devices:
self._add_device_to_device_group(device)
device.tm.sys.config.exec_cmd('save')
self.ensure_all_devices_in_sync() |
def use_music_service(self, service_name, api_key):
"""
Sets the current music service to service_name.
:param str service_name: Name of the music service
:param str api_key: Optional API key if necessary
"""
try:
self.current_music = self.music_services[service_name]
except KeyError:
if service_name == 'youtube':
self.music_services['youtube'] = Youtube()
self.current_music = self.music_services['youtube']
elif service_name == 'soundcloud':
self.music_services['soundcloud'] = Soundcloud(api_key=api_key)
self.current_music = self.music_services['soundcloud']
else:
log.error('Music service name is not recognized.') | Sets the current music service to service_name.
:param str service_name: Name of the music service
:param str api_key: Optional API key if necessary | Below is the instruction that describes the task:
### Input:
Sets the current music service to service_name.
:param str service_name: Name of the music service
:param str api_key: Optional API key if necessary
### Response:
def use_music_service(self, service_name, api_key):
"""
Sets the current music service to service_name.
:param str service_name: Name of the music service
:param str api_key: Optional API key if necessary
"""
try:
self.current_music = self.music_services[service_name]
except KeyError:
if service_name == 'youtube':
self.music_services['youtube'] = Youtube()
self.current_music = self.music_services['youtube']
elif service_name == 'soundcloud':
self.music_services['soundcloud'] = Soundcloud(api_key=api_key)
self.current_music = self.music_services['soundcloud']
else:
log.error('Music service name is not recognized.') |
def get_environ(self, sock):
"""Create WSGI environ entries to be merged into each request."""
cipher = sock.cipher()
ssl_environ = {
"wsgi.url_scheme": "https",
"HTTPS": "on",
'SSL_PROTOCOL': cipher[1],
'SSL_CIPHER': cipher[0]
## SSL_VERSION_INTERFACE string The mod_ssl program version
## SSL_VERSION_LIBRARY string The OpenSSL program version
}
return ssl_environ | Create WSGI environ entries to be merged into each request. | Below is the instruction that describes the task:
### Input:
Create WSGI environ entries to be merged into each request.
### Response:
def get_environ(self, sock):
"""Create WSGI environ entries to be merged into each request."""
cipher = sock.cipher()
ssl_environ = {
"wsgi.url_scheme": "https",
"HTTPS": "on",
'SSL_PROTOCOL': cipher[1],
'SSL_CIPHER': cipher[0]
## SSL_VERSION_INTERFACE string The mod_ssl program version
## SSL_VERSION_LIBRARY string The OpenSSL program version
}
return ssl_environ |
def print_user(self, user):
'''print a filesystem database user. A "database" folder that might end with
the participant status (e.g. _finished) is extracted to print in format
[folder] [identifier][studyid]
/scif/data/expfactory/xxxx-xxxx xxxx-xxxx[studyid]
'''
status = "active"
if user.endswith('_finished'):
status = "finished"
elif user.endswith('_revoked'):
status = "revoked"
subid = os.path.basename(user)
for ext in ['_revoked','_finished']:
subid = subid.replace(ext, '')
subid = '%s\t%s[%s]' %(user, subid, status)
print(subid)
return subid | print a filesystem database user. A "database" folder that might end with
the participant status (e.g. _finished) is extracted to print in format
[folder] [identifier][studyid]
/scif/data/expfactory/xxxx-xxxx xxxx-xxxx[studyid] | Below is the instruction that describes the task:
### Input:
print a filesystem database user. A "database" folder that might end with
the participant status (e.g. _finished) is extracted to print in format
[folder] [identifier][studyid]
/scif/data/expfactory/xxxx-xxxx xxxx-xxxx[studyid]
### Response:
def print_user(self, user):
'''print a filesystem database user. A "database" folder that might end with
the participant status (e.g. _finished) is extracted to print in format
[folder] [identifier][studyid]
/scif/data/expfactory/xxxx-xxxx xxxx-xxxx[studyid]
'''
status = "active"
if user.endswith('_finished'):
status = "finished"
elif user.endswith('_revoked'):
status = "revoked"
subid = os.path.basename(user)
for ext in ['_revoked','_finished']:
subid = subid.replace(ext, '')
subid = '%s\t%s[%s]' %(user, subid, status)
print(subid)
return subid |
def disconnect(self, callback: Callable) -> None:
"""
Disconnects the given callback.
The callback will no longer receive events from this signal.
No action is taken if the callback is not on the list of listener callbacks.
:param callback: the callable to remove
"""
assert check_argument_types()
try:
if self.listeners is not None:
self.listeners.remove(callback)
except ValueError:
pass | Disconnects the given callback.
The callback will no longer receive events from this signal.
No action is taken if the callback is not on the list of listener callbacks.
:param callback: the callable to remove | Below is the instruction that describes the task:
### Input:
Disconnects the given callback.
The callback will no longer receive events from this signal.
No action is taken if the callback is not on the list of listener callbacks.
:param callback: the callable to remove
### Response:
def disconnect(self, callback: Callable) -> None:
"""
Disconnects the given callback.
The callback will no longer receive events from this signal.
No action is taken if the callback is not on the list of listener callbacks.
:param callback: the callable to remove
"""
assert check_argument_types()
try:
if self.listeners is not None:
self.listeners.remove(callback)
except ValueError:
pass |
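The swallowed ValueError above makes disconnecting an unknown or already-removed callback a no-op; a generic sketch of that pattern (not the library's actual class):

```python
class Signal:
    def __init__(self):
        self.listeners = []

    def connect(self, callback):
        self.listeners.append(callback)

    def disconnect(self, callback):
        try:
            self.listeners.remove(callback)
        except ValueError:
            pass                          # unknown callback: silently ignored

sig = Signal()
handler = print
sig.connect(handler)
sig.disconnect(handler)
sig.disconnect(handler)                   # second call is harmless
```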
def python_zip(file_list, gallery_path, extension='.py'):
"""Stores all files in file_list into an zip file
Parameters
----------
file_list : list
Holds all the file names to be included in zip file
gallery_path : str
path to where the zipfile is stored
extension : str
'.py' or '.ipynb' In order to deal with downloads of python
sources and jupyter notebooks the file extension from files in
file_list will be removed and replaced with the value of this
variable while generating the zip file
Returns
-------
zipname : str
zip file name, written as `target_dir_{python,jupyter}.zip`
depending on the extension
"""
zipname = os.path.basename(os.path.normpath(gallery_path))
zipname += '_python' if extension == '.py' else '_jupyter'
zipname = os.path.join(gallery_path, zipname + '.zip')
zipname_new = zipname + '.new'
with zipfile.ZipFile(zipname_new, mode='w') as zipf:
for fname in file_list:
file_src = os.path.splitext(fname)[0] + extension
zipf.write(file_src, os.path.relpath(file_src, gallery_path))
_replace_md5(zipname_new)
return zipname | Stores all files in file_list into a zip file
Parameters
----------
file_list : list
Holds all the file names to be included in zip file
gallery_path : str
path to where the zipfile is stored
extension : str
'.py' or '.ipynb' In order to deal with downloads of python
sources and jupyter notebooks the file extension from files in
file_list will be removed and replaced with the value of this
variable while generating the zip file
Returns
-------
zipname : str
zip file name, written as `target_dir_{python,jupyter}.zip`
depending on the extension | Below is the instruction that describes the task:
### Input:
Stores all files in file_list into a zip file
Parameters
----------
file_list : list
Holds all the file names to be included in zip file
gallery_path : str
path to where the zipfile is stored
extension : str
'.py' or '.ipynb' In order to deal with downloads of python
sources and jupyter notebooks the file extension from files in
file_list will be removed and replaced with the value of this
variable while generating the zip file
Returns
-------
zipname : str
zip file name, written as `target_dir_{python,jupyter}.zip`
depending on the extension
### Response:
def python_zip(file_list, gallery_path, extension='.py'):
"""Stores all files in file_list into an zip file
Parameters
----------
file_list : list
Holds all the file names to be included in zip file
gallery_path : str
path to where the zipfile is stored
extension : str
'.py' or '.ipynb' In order to deal with downloads of python
sources and jupyter notebooks the file extension from files in
file_list will be removed and replaced with the value of this
variable while generating the zip file
Returns
-------
zipname : str
zip file name, written as `target_dir_{python,jupyter}.zip`
depending on the extension
"""
zipname = os.path.basename(os.path.normpath(gallery_path))
zipname += '_python' if extension == '.py' else '_jupyter'
zipname = os.path.join(gallery_path, zipname + '.zip')
zipname_new = zipname + '.new'
with zipfile.ZipFile(zipname_new, mode='w') as zipf:
for fname in file_list:
file_src = os.path.splitext(fname)[0] + extension
zipf.write(file_src, os.path.relpath(file_src, gallery_path))
_replace_md5(zipname_new)
return zipname |
def perform_command(self):
"""
Perform command and return the appropriate exit code.
:rtype: int
"""
if len(self.actual_arguments) < 2:
return self.print_help()
text_format = gf.safe_unicode(self.actual_arguments[0])
if text_format == u"list":
text = gf.safe_unicode(self.actual_arguments[1])
elif text_format in TextFileFormat.ALLOWED_VALUES:
text = self.actual_arguments[1]
if not self.check_input_file(text):
return self.ERROR_EXIT_CODE
else:
return self.print_help()
l1_id_regex = self.has_option_with_value(u"--l1-id-regex")
l2_id_regex = self.has_option_with_value(u"--l2-id-regex")
l3_id_regex = self.has_option_with_value(u"--l3-id-regex")
id_regex = self.has_option_with_value(u"--id-regex")
id_format = self.has_option_with_value(u"--id-format")
class_regex = self.has_option_with_value(u"--class-regex")
sort = self.has_option_with_value(u"--sort")
parameters = {
gc.PPN_TASK_IS_TEXT_MUNPARSED_L1_ID_REGEX: l1_id_regex,
gc.PPN_TASK_IS_TEXT_MUNPARSED_L2_ID_REGEX: l2_id_regex,
gc.PPN_TASK_IS_TEXT_MUNPARSED_L3_ID_REGEX: l3_id_regex,
gc.PPN_TASK_IS_TEXT_UNPARSED_ID_REGEX: id_regex,
gc.PPN_TASK_IS_TEXT_UNPARSED_CLASS_REGEX: class_regex,
gc.PPN_TASK_IS_TEXT_UNPARSED_ID_SORT: sort,
gc.PPN_TASK_OS_FILE_ID_REGEX: id_format
}
if (text_format == TextFileFormat.MUNPARSED) and ((l1_id_regex is None) or (l2_id_regex is None) or (l3_id_regex is None)):
self.print_error(u"You must specify --l1-id-regex and --l2-id-regex and --l3-id-regex for munparsed format")
return self.ERROR_EXIT_CODE
if (text_format == TextFileFormat.UNPARSED) and (id_regex is None) and (class_regex is None):
self.print_error(u"You must specify --id-regex and/or --class-regex for unparsed format")
return self.ERROR_EXIT_CODE
if (text_format in [TextFileFormat.PLAIN, TextFileFormat.SUBTITLES]) and (id_format is not None):
try:
identifier = id_format % 1
except (TypeError, ValueError):
self.print_error(u"The given string '%s' is not a valid id format" % id_format)
return self.ERROR_EXIT_CODE
text_file = self.get_text_file(text_format, text, parameters)
if text_file is None:
self.print_error(u"Unable to build a TextFile from the given parameters")
elif len(text_file) == 0:
self.print_error(u"No text fragments found")
else:
self.print_generic(text_file.__unicode__())
return self.NO_ERROR_EXIT_CODE
return self.ERROR_EXIT_CODE | Perform command and return the appropriate exit code.
:rtype: int | Below is the instruction that describes the task:
### Input:
Perform command and return the appropriate exit code.
:rtype: int
### Response:
def perform_command(self):
"""
Perform command and return the appropriate exit code.
:rtype: int
"""
if len(self.actual_arguments) < 2:
return self.print_help()
text_format = gf.safe_unicode(self.actual_arguments[0])
if text_format == u"list":
text = gf.safe_unicode(self.actual_arguments[1])
elif text_format in TextFileFormat.ALLOWED_VALUES:
text = self.actual_arguments[1]
if not self.check_input_file(text):
return self.ERROR_EXIT_CODE
else:
return self.print_help()
l1_id_regex = self.has_option_with_value(u"--l1-id-regex")
l2_id_regex = self.has_option_with_value(u"--l2-id-regex")
l3_id_regex = self.has_option_with_value(u"--l3-id-regex")
id_regex = self.has_option_with_value(u"--id-regex")
id_format = self.has_option_with_value(u"--id-format")
class_regex = self.has_option_with_value(u"--class-regex")
sort = self.has_option_with_value(u"--sort")
parameters = {
gc.PPN_TASK_IS_TEXT_MUNPARSED_L1_ID_REGEX: l1_id_regex,
gc.PPN_TASK_IS_TEXT_MUNPARSED_L2_ID_REGEX: l2_id_regex,
gc.PPN_TASK_IS_TEXT_MUNPARSED_L3_ID_REGEX: l3_id_regex,
gc.PPN_TASK_IS_TEXT_UNPARSED_ID_REGEX: id_regex,
gc.PPN_TASK_IS_TEXT_UNPARSED_CLASS_REGEX: class_regex,
gc.PPN_TASK_IS_TEXT_UNPARSED_ID_SORT: sort,
gc.PPN_TASK_OS_FILE_ID_REGEX: id_format
}
if (text_format == TextFileFormat.MUNPARSED) and ((l1_id_regex is None) or (l2_id_regex is None) or (l3_id_regex is None)):
self.print_error(u"You must specify --l1-id-regex and --l2-id-regex and --l3-id-regex for munparsed format")
return self.ERROR_EXIT_CODE
if (text_format == TextFileFormat.UNPARSED) and (id_regex is None) and (class_regex is None):
self.print_error(u"You must specify --id-regex and/or --class-regex for unparsed format")
return self.ERROR_EXIT_CODE
if (text_format in [TextFileFormat.PLAIN, TextFileFormat.SUBTITLES]) and (id_format is not None):
try:
identifier = id_format % 1
except (TypeError, ValueError):
self.print_error(u"The given string '%s' is not a valid id format" % id_format)
return self.ERROR_EXIT_CODE
text_file = self.get_text_file(text_format, text, parameters)
if text_file is None:
self.print_error(u"Unable to build a TextFile from the given parameters")
elif len(text_file) == 0:
self.print_error(u"No text fragments found")
else:
self.print_generic(text_file.__unicode__())
return self.NO_ERROR_EXIT_CODE
return self.ERROR_EXIT_CODE |
def get_unix_tput_terminal_size():
"""
Get the terminal size of a UNIX terminal using the tput UNIX command.
Ref: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
"""
import subprocess
try:
proc = subprocess.Popen(["tput", "cols"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
output = proc.communicate(input=None)
cols = int(output[0])
proc = subprocess.Popen(["tput", "lines"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
output = proc.communicate(input=None)
rows = int(output[0])
return cols, rows
except (IOError, OSError):
return None | Get the terminal size of a UNIX terminal using the tput UNIX command.
Ref: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window | Below is the instruction that describes the task:
### Input:
Get the terminal size of a UNIX terminal using the tput UNIX command.
Ref: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
### Response:
def get_unix_tput_terminal_size():
"""
Get the terminal size of a UNIX terminal using the tput UNIX command.
Ref: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
"""
import subprocess
try:
proc = subprocess.Popen(["tput", "cols"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
output = proc.communicate(input=None)
cols = int(output[0])
proc = subprocess.Popen(["tput", "lines"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
output = proc.communicate(input=None)
rows = int(output[0])
return cols, rows
except (IOError, OSError):
return None |
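On Python 3.3+ the same information is available without spawning tput; for comparison:

```python
import shutil

cols, rows = shutil.get_terminal_size(fallback=(80, 24))
print(cols, rows)
```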
def fetch_messages(self):
"""Sends FetchRequests for all topic/partitions set for consumption
Returns:
Generator that yields KafkaMessage structs
after deserializing with the configured `deserializer_class`
Note:
Refreshes metadata on errors, and resets fetch offset on
OffsetOutOfRange, per the configured `auto_offset_reset` policy
See Also:
Key KafkaConsumer configuration parameters:
* `fetch_message_max_bytes`
* `fetch_max_wait_ms`
* `fetch_min_bytes`
* `deserializer_class`
* `auto_offset_reset`
"""
max_bytes = self._config['fetch_message_max_bytes']
max_wait_time = self._config['fetch_wait_max_ms']
min_bytes = self._config['fetch_min_bytes']
if not self._topics:
raise KafkaConfigurationError('No topics or partitions configured')
if not self._offsets.fetch:
raise KafkaConfigurationError(
'No fetch offsets found when calling fetch_messages'
)
fetches = [FetchRequest(topic, partition,
self._offsets.fetch[(topic, partition)],
max_bytes)
for (topic, partition) in self._topics]
# send_fetch_request will batch topic/partition requests by leader
responses = self._client.send_fetch_request(
fetches,
max_wait_time=max_wait_time,
min_bytes=min_bytes,
fail_on_error=False
)
for resp in responses:
if isinstance(resp, FailedPayloadsError):
logger.warning('FailedPayloadsError attempting to fetch data')
self._refresh_metadata_on_error()
continue
topic = kafka_bytestring(resp.topic)
partition = resp.partition
try:
check_error(resp)
except OffsetOutOfRangeError:
logger.warning('OffsetOutOfRange: topic %s, partition %d, '
'offset %d (Highwatermark: %d)',
topic, partition,
self._offsets.fetch[(topic, partition)],
resp.highwaterMark)
# Reset offset
self._offsets.fetch[(topic, partition)] = (
self._reset_partition_offset((topic, partition))
)
continue
except NotLeaderForPartitionError:
logger.warning("NotLeaderForPartitionError for %s - %d. "
"Metadata may be out of date",
topic, partition)
self._refresh_metadata_on_error()
continue
except RequestTimedOutError:
logger.warning("RequestTimedOutError for %s - %d",
topic, partition)
continue
# Track server highwater mark
self._offsets.highwater[(topic, partition)] = resp.highwaterMark
# Yield each message
# Kafka-python could raise an exception during iteration
# we are not catching -- user will need to address
for (offset, message) in resp.messages:
# deserializer_class could raise an exception here
val = self._config['deserializer_class'](message.value)
msg = KafkaMessage(topic, partition, offset, message.key, val)
# in some cases the server will return earlier messages
# than we requested. skip them per kafka spec
if offset < self._offsets.fetch[(topic, partition)]:
logger.debug('message offset less than fetched offset '
'skipping: %s', msg)
continue
# Only increment fetch offset
# if we safely got the message and deserialized
self._offsets.fetch[(topic, partition)] = offset + 1
# Then yield to user
yield msg | Sends FetchRequests for all topic/partitions set for consumption
Returns:
Generator that yields KafkaMessage structs
after deserializing with the configured `deserializer_class`
Note:
Refreshes metadata on errors, and resets fetch offset on
OffsetOutOfRange, per the configured `auto_offset_reset` policy
See Also:
Key KafkaConsumer configuration parameters:
* `fetch_message_max_bytes`
* `fetch_max_wait_ms`
* `fetch_min_bytes`
* `deserializer_class`
* `auto_offset_reset` | Below is the the instruction that describes the task:
### Input:
Sends FetchRequests for all topic/partitions set for consumption
Returns:
Generator that yields KafkaMessage structs
after deserializing with the configured `deserializer_class`
Note:
Refreshes metadata on errors, and resets fetch offset on
OffsetOutOfRange, per the configured `auto_offset_reset` policy
See Also:
Key KafkaConsumer configuration parameters:
* `fetch_message_max_bytes`
* `fetch_max_wait_ms`
* `fetch_min_bytes`
* `deserializer_class`
* `auto_offset_reset`
### Response:
def fetch_messages(self):
"""Sends FetchRequests for all topic/partitions set for consumption
Returns:
Generator that yields KafkaMessage structs
after deserializing with the configured `deserializer_class`
Note:
Refreshes metadata on errors, and resets fetch offset on
OffsetOutOfRange, per the configured `auto_offset_reset` policy
See Also:
Key KafkaConsumer configuration parameters:
* `fetch_message_max_bytes`
* `fetch_max_wait_ms`
* `fetch_min_bytes`
* `deserializer_class`
* `auto_offset_reset`
"""
max_bytes = self._config['fetch_message_max_bytes']
max_wait_time = self._config['fetch_wait_max_ms']
min_bytes = self._config['fetch_min_bytes']
if not self._topics:
raise KafkaConfigurationError('No topics or partitions configured')
if not self._offsets.fetch:
raise KafkaConfigurationError(
'No fetch offsets found when calling fetch_messages'
)
fetches = [FetchRequest(topic, partition,
self._offsets.fetch[(topic, partition)],
max_bytes)
for (topic, partition) in self._topics]
# send_fetch_request will batch topic/partition requests by leader
responses = self._client.send_fetch_request(
fetches,
max_wait_time=max_wait_time,
min_bytes=min_bytes,
fail_on_error=False
)
for resp in responses:
if isinstance(resp, FailedPayloadsError):
logger.warning('FailedPayloadsError attempting to fetch data')
self._refresh_metadata_on_error()
continue
topic = kafka_bytestring(resp.topic)
partition = resp.partition
try:
check_error(resp)
except OffsetOutOfRangeError:
logger.warning('OffsetOutOfRange: topic %s, partition %d, '
'offset %d (Highwatermark: %d)',
topic, partition,
self._offsets.fetch[(topic, partition)],
resp.highwaterMark)
# Reset offset
self._offsets.fetch[(topic, partition)] = (
self._reset_partition_offset((topic, partition))
)
continue
except NotLeaderForPartitionError:
logger.warning("NotLeaderForPartitionError for %s - %d. "
"Metadata may be out of date",
topic, partition)
self._refresh_metadata_on_error()
continue
except RequestTimedOutError:
logger.warning("RequestTimedOutError for %s - %d",
topic, partition)
continue
# Track server highwater mark
self._offsets.highwater[(topic, partition)] = resp.highwaterMark
# Yield each message
# Kafka-python could raise an exception during iteration
# we are not catching -- user will need to address
for (offset, message) in resp.messages:
# deserializer_class could raise an exception here
val = self._config['deserializer_class'](message.value)
msg = KafkaMessage(topic, partition, offset, message.key, val)
# in some cases the server will return earlier messages
# than we requested. skip them per kafka spec
if offset < self._offsets.fetch[(topic, partition)]:
logger.debug('message offset less than fetched offset '
'skipping: %s', msg)
continue
# Only increment fetch offset
# if we safely got the message and deserialized
self._offsets.fetch[(topic, partition)] = offset + 1
# Then yield to user
yield msg |
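As a hedged illustration of how the generator above is typically consumed; `consumer` is assumed to be an already-configured instance of the consumer class this method belongs to, with topics and fetch offsets already set.

# Iterate fetched messages; the fields mirror the KafkaMessage struct yielded above.
for msg in consumer.fetch_messages():
    print(msg.topic, msg.partition, msg.offset, msg.key, msg.value)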
def create_iam_resources(env='dev', app='', **_):
"""Create the IAM Resources for the application.
Args:
env (str): Deployment environment/account, i.e. dev, stage, prod.
app (str): Spinnaker Application name.
Returns:
True upon successful completion.
"""
session = boto3.session.Session(profile_name=env)
client = session.client('iam')
app_properties = get_properties(env='pipeline')
generated = get_details(env=env, app=app)
generated_iam = generated.iam()
app_details = collections.namedtuple('AppDetails', generated_iam.keys())
details = app_details(**generated_iam)
LOG.debug('Application details: %s', details)
deployment_type = app_properties['type']
role_trust_template = get_template(
'infrastructure/iam/trust/{0}_role.json.j2'.format(deployment_type), formats=generated)
resource_action(
client,
action='create_role',
log_format='Created Role: %(RoleName)s',
RoleName=details.role,
AssumeRolePolicyDocument=role_trust_template)
resource_action(
client,
action='create_instance_profile',
log_format='Created Instance Profile: %(InstanceProfileName)s',
InstanceProfileName=details.profile)
attach_profile_to_role(client, role_name=details.role, profile_name=details.profile)
iam_policy = construct_policy(app=app, group=details.group, env=env, pipeline_settings=app_properties)
if iam_policy:
resource_action(
client,
action='put_role_policy',
log_format='Added IAM Policy: %(PolicyName)s',
RoleName=details.role,
PolicyName=details.policy,
PolicyDocument=iam_policy)
resource_action(client, action='create_user', log_format='Created User: %(UserName)s', UserName=details.user)
resource_action(client, action='create_group', log_format='Created Group: %(GroupName)s', GroupName=details.group)
resource_action(
client,
action='add_user_to_group',
log_format='Added User to Group: %(UserName)s -> %(GroupName)s',
GroupName=details.group,
UserName=details.user)
return True | Create the IAM Resources for the application.
Args:
env (str): Deployment environment/account, i.e. dev, stage, prod.
app (str): Spinnaker Application name.
Returns:
True upon successful completion. | Below is the the instruction that describes the task:
### Input:
Create the IAM Resources for the application.
Args:
env (str): Deployment environment/account, i.e. dev, stage, prod.
app (str): Spinnaker Application name.
Returns:
True upon successful completion.
### Response:
def create_iam_resources(env='dev', app='', **_):
"""Create the IAM Resources for the application.
Args:
env (str): Deployment environment/account, i.e. dev, stage, prod.
app (str): Spinnaker Application name.
Returns:
True upon successful completion.
"""
session = boto3.session.Session(profile_name=env)
client = session.client('iam')
app_properties = get_properties(env='pipeline')
generated = get_details(env=env, app=app)
generated_iam = generated.iam()
app_details = collections.namedtuple('AppDetails', generated_iam.keys())
details = app_details(**generated_iam)
LOG.debug('Application details: %s', details)
deployment_type = app_properties['type']
role_trust_template = get_template(
'infrastructure/iam/trust/{0}_role.json.j2'.format(deployment_type), formats=generated)
resource_action(
client,
action='create_role',
log_format='Created Role: %(RoleName)s',
RoleName=details.role,
AssumeRolePolicyDocument=role_trust_template)
resource_action(
client,
action='create_instance_profile',
log_format='Created Instance Profile: %(InstanceProfileName)s',
InstanceProfileName=details.profile)
attach_profile_to_role(client, role_name=details.role, profile_name=details.profile)
iam_policy = construct_policy(app=app, group=details.group, env=env, pipeline_settings=app_properties)
if iam_policy:
resource_action(
client,
action='put_role_policy',
log_format='Added IAM Policy: %(PolicyName)s',
RoleName=details.role,
PolicyName=details.policy,
PolicyDocument=iam_policy)
resource_action(client, action='create_user', log_format='Created User: %(UserName)s', UserName=details.user)
resource_action(client, action='create_group', log_format='Created Group: %(GroupName)s', GroupName=details.group)
resource_action(
client,
action='add_user_to_group',
log_format='Added User to Group: %(UserName)s -> %(GroupName)s',
GroupName=details.group,
UserName=details.user)
return True |
def add_campaign(self, name, device_filter, **kwargs):
"""Add new update campaign.
Add an update campaign with a name and device filtering. Example:
.. code-block:: python
device_api, update_api = DeviceDirectoryAPI(), UpdateAPI()
# Get a filter to use for update campaign
query_obj = device_api.get_query(query_id="MYID")
# Create the campaign
new_campaign = update_api.add_campaign(
name="foo",
device_filter=query_obj.filter
)
:param str name: Name of the update campaign (Required)
:param str device_filter: The device filter to use (Required)
:param str manifest_id: ID of the manifest with description of the update
:param str description: Description of the campaign
:param int scheduled_at: The timestamp at which update campaign is scheduled to start
:param str state: The state of the campaign. Values:
"draft", "scheduled", "devicefetch", "devicecopy", "publishing",
"deploying", "deployed", "manifestremoved", "expired"
:return: newly created campaign object
:rtype: Campaign
"""
device_filter = filters.legacy_filter_formatter(
dict(filter=device_filter),
Device._get_attributes_map()
)
campaign = Campaign._create_request_map(kwargs)
if 'when' in campaign:
# FIXME: randomly validating an input here is a sure route to nasty surprises elsewhere
campaign['when'] = force_utc(campaign['when'])
body = UpdateCampaignPostRequest(
name=name,
device_filter=device_filter['filter'],
**campaign)
api = self._get_api(update_service.DefaultApi)
return Campaign(api.update_campaign_create(body)) | Add new update campaign.
Add an update campaign with a name and device filtering. Example:
.. code-block:: python
device_api, update_api = DeviceDirectoryAPI(), UpdateAPI()
# Get a filter to use for update campaign
query_obj = device_api.get_query(query_id="MYID")
# Create the campaign
new_campaign = update_api.add_campaign(
name="foo",
device_filter=query_obj.filter
)
:param str name: Name of the update campaign (Required)
:param str device_filter: The device filter to use (Required)
:param str manifest_id: ID of the manifest with description of the update
:param str description: Description of the campaign
:param int scheduled_at: The timestamp at which update campaign is scheduled to start
:param str state: The state of the campaign. Values:
"draft", "scheduled", "devicefetch", "devicecopy", "publishing",
"deploying", "deployed", "manifestremoved", "expired"
:return: newly created campaign object
:rtype: Campaign | Below is the the instruction that describes the task:
### Input:
Add new update campaign.
Add an update campaign with a name and device filtering. Example:
.. code-block:: python
device_api, update_api = DeviceDirectoryAPI(), UpdateAPI()
# Get a filter to use for update campaign
query_obj = device_api.get_query(query_id="MYID")
# Create the campaign
new_campaign = update_api.add_campaign(
name="foo",
device_filter=query_obj.filter
)
:param str name: Name of the update campaign (Required)
:param str device_filter: The device filter to use (Required)
:param str manifest_id: ID of the manifest with description of the update
:param str description: Description of the campaign
:param int scheduled_at: The timestamp at which update campaign is scheduled to start
:param str state: The state of the campaign. Values:
"draft", "scheduled", "devicefetch", "devicecopy", "publishing",
"deploying", "deployed", "manifestremoved", "expired"
:return: newly created campaign object
:rtype: Campaign
### Response:
def add_campaign(self, name, device_filter, **kwargs):
"""Add new update campaign.
Add an update campaign with a name and device filtering. Example:
.. code-block:: python
device_api, update_api = DeviceDirectoryAPI(), UpdateAPI()
# Get a filter to use for update campaign
query_obj = device_api.get_query(query_id="MYID")
# Create the campaign
new_campaign = update_api.add_campaign(
name="foo",
device_filter=query_obj.filter
)
:param str name: Name of the update campaign (Required)
:param str device_filter: The device filter to use (Required)
:param str manifest_id: ID of the manifest with description of the update
:param str description: Description of the campaign
:param int scheduled_at: The timestamp at which update campaign is scheduled to start
:param str state: The state of the campaign. Values:
"draft", "scheduled", "devicefetch", "devicecopy", "publishing",
"deploying", "deployed", "manifestremoved", "expired"
:return: newly created campaign object
:rtype: Campaign
"""
device_filter = filters.legacy_filter_formatter(
dict(filter=device_filter),
Device._get_attributes_map()
)
campaign = Campaign._create_request_map(kwargs)
if 'when' in campaign:
# FIXME: randomly validating an input here is a sure route to nasty surprises elsewhere
campaign['when'] = force_utc(campaign['when'])
body = UpdateCampaignPostRequest(
name=name,
device_filter=device_filter['filter'],
**campaign)
api = self._get_api(update_service.DefaultApi)
return Campaign(api.update_campaign_create(body)) |
def _filter_kwargs(names, dict_):
"""Filter out kwargs from a dictionary.
Parameters
----------
names : set[str]
The names to select from ``dict_``.
dict_ : dict[str, any]
The dictionary to select from.
Returns
-------
kwargs : dict[str, any]
``dict_`` where the keys intersect with ``names`` and the values are
not None.
"""
return {k: v for k, v in dict_.items() if k in names and v is not None} | Filter out kwargs from a dictionary.
Parameters
----------
names : set[str]
The names to select from ``dict_``.
dict_ : dict[str, any]
The dictionary to select from.
Returns
-------
kwargs : dict[str, any]
``dict_`` where the keys intersect with ``names`` and the values are
not None. | Below is the the instruction that describes the task:
### Input:
Filter out kwargs from a dictionary.
Parameters
----------
names : set[str]
The names to select from ``dict_``.
dict_ : dict[str, any]
The dictionary to select from.
Returns
-------
kwargs : dict[str, any]
``dict_`` where the keys intersect with ``names`` and the values are
not None.
### Response:
def _filter_kwargs(names, dict_):
"""Filter out kwargs from a dictionary.
Parameters
----------
names : set[str]
The names to select from ``dict_``.
dict_ : dict[str, any]
The dictionary to select from.
Returns
-------
kwargs : dict[str, any]
``dict_`` where the keys intersect with ``names`` and the values are
not None.
"""
return {k: v for k, v in dict_.items() if k in names and v is not None} |
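A small worked example of the helper above; the input values are chosen purely for illustration. 'b' is dropped because its value is None, and 'c' because it is not in ``names``.

>>> _filter_kwargs({'a', 'b'}, {'a': 1, 'b': None, 'c': 3})
{'a': 1}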
def generate(env):
"""Add Builders and construction variables for lib to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
# Set-up ms tools paths
msvc_setup_env_once(env)
env['AR'] = 'lib'
env['ARFLAGS'] = SCons.Util.CLVar('/nologo')
env['ARCOM'] = "${TEMPFILE('$AR $ARFLAGS /OUT:$TARGET $SOURCES','$ARCOMSTR')}"
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib' | Add Builders and construction variables for lib to an Environment. | Below is the the instruction that describes the task:
### Input:
Add Builders and construction variables for lib to an Environment.
### Response:
def generate(env):
"""Add Builders and construction variables for lib to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
# Set-up ms tools paths
msvc_setup_env_once(env)
env['AR'] = 'lib'
env['ARFLAGS'] = SCons.Util.CLVar('/nologo')
env['ARCOM'] = "${TEMPFILE('$AR $ARFLAGS /OUT:$TARGET $SOURCES','$ARCOMSTR')}"
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib' |
def get_version(self, as_tuple=False):
"""Returns uWSGI version string or tuple.
:param bool as_tuple:
:rtype: str|tuple
"""
if as_tuple:
return uwsgi.version_info
return decode(uwsgi.version) | Returns uWSGI version string or tuple.
:param bool as_tuple:
:rtype: str|tuple | Below is the the instruction that describes the task:
### Input:
Returns uWSGI version string or tuple.
:param bool as_tuple:
:rtype: str|tuple
### Response:
def get_version(self, as_tuple=False):
"""Returns uWSGI version string or tuple.
:param bool as_tuple:
:rtype: str|tuple
"""
if as_tuple:
return uwsgi.version_info
return decode(uwsgi.version) |
def names_container(self):
"""Returns True if this URI is not representing input/output stream
and names a directory.
"""
if not self.stream:
return os.path.isdir(self.object_name)
else:
return False | Returns True if this URI is not representing input/output stream
and names a directory. | Below is the the instruction that describes the task:
### Input:
Returns True if this URI is not representing input/output stream
and names a directory.
### Response:
def names_container(self):
"""Returns True if this URI is not representing input/output stream
and names a directory.
"""
if not self.stream:
return os.path.isdir(self.object_name)
else:
return False |
def localize_date(date, city):
""" Localize date into city
Date: datetime
City: timezone city definitio. Example: 'Asia/Qatar', 'America/New York'..
"""
local = pytz.timezone(city)
local_dt = local.localize(date, is_dst=None)
return local_dt | Localize date into city
Date: datetime
City: timezone city definitio. Example: 'Asia/Qatar', 'America/New York'.. | Below is the the instruction that describes the task:
### Input:
Localize date into city
Date: datetime
City: timezone city definitio. Example: 'Asia/Qatar', 'America/New York'..
### Response:
def localize_date(date, city):
""" Localize date into city
Date: datetime
City: timezone city definitio. Example: 'Asia/Qatar', 'America/New York'..
"""
local = pytz.timezone(city)
local_dt = local.localize(date, is_dst=None)
return local_dt |
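A usage sketch for the helper above; it requires pytz, and the specific datetime is an illustrative assumption. Note that pytz zone names use underscores, e.g. 'America/New_York' rather than 'America/New York'.

from datetime import datetime

naive = datetime(2021, 6, 1, 12, 30)
aware = localize_date(naive, 'Asia/Qatar')
print(aware)         # 2021-06-01 12:30:00+03:00
print(aware.tzinfo)  # Asia/Qatar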
def get_stp_mst_detail_output_cist_port_port_hello_time(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
cist = ET.SubElement(output, "cist")
port = ET.SubElement(cist, "port")
port_hello_time = ET.SubElement(port, "port-hello-time")
port_hello_time.text = kwargs.pop('port_hello_time')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_stp_mst_detail_output_cist_port_port_hello_time(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
cist = ET.SubElement(output, "cist")
port = ET.SubElement(cist, "port")
port_hello_time = ET.SubElement(port, "port-hello-time")
port_hello_time.text = kwargs.pop('port_hello_time')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def colfieldnames(self, columnname, keyword=''):
"""Get the names of the fields in a column keyword value.
The value of a keyword can be a struct (python dict). This method
returns the names of the fields in that struct.
Each field in a struct can be a struct in itself. Names of fields in a
sub-struct can be obtained by giving a keyword name consisting of
multiple parts separated by dots (e.g. 'key1.sub1.sub2').
If an empty keyword name is given (which is the default), all keyword
names of the column are shown and its behaviour is the same as
:func:`colkeywordnames`.
Instead of a keyword name an index can be given which returns the names
of the struct value of the i-th keyword.
"""
if isinstance(keyword, str):
return self._getfieldnames(columnname, keyword, -1)
else:
return self._getfieldnames(columnname, '', keyword) | Get the names of the fields in a column keyword value.
The value of a keyword can be a struct (python dict). This method
returns the names of the fields in that struct.
Each field in a struct can be a struct in itself. Names of fields in a
sub-struct can be obtained by giving a keyword name consisting of
multiple parts separated by dots (e.g. 'key1.sub1.sub2').
If an empty keyword name is given (which is the default), all keyword
names of the column are shown and its behaviour is the same as
:func:`colkeywordnames`.
Instead of a keyword name an index can be given which returns the names
of the struct value of the i-th keyword. | Below is the the instruction that describes the task:
### Input:
Get the names of the fields in a column keyword value.
The value of a keyword can be a struct (python dict). This method
returns the names of the fields in that struct.
Each field in a struct can be a struct in itself. Names of fields in a
sub-struct can be obtained by giving a keyword name consisting of
multiple parts separated by dots (e.g. 'key1.sub1.sub2').
If an empty keyword name is given (which is the default), all keyword
names of the column are shown and its behaviour is the same as
:func:`colkeywordnames`.
Instead of a keyword name an index can be given which returns the names
of the struct value of the i-th keyword.
### Response:
def colfieldnames(self, columnname, keyword=''):
"""Get the names of the fields in a column keyword value.
The value of a keyword can be a struct (python dict). This method
returns the names of the fields in that struct.
Each field in a struct can be a struct in itself. Names of fields in a
sub-struct can be obtained by giving a keyword name consisting of
multiple parts separated by dots (e.g. 'key1.sub1.sub2').
If an empty keyword name is given (which is the default), all keyword
names of the column are shown and its behaviour is the same as
:func:`colkeywordnames`.
Instead of a keyword name an index can be given which returns the names
of the struct value of the i-th keyword.
"""
if isinstance(keyword, str):
return self._getfieldnames(columnname, keyword, -1)
else:
return self._getfieldnames(columnname, '', keyword) |
def _initalize_tree(self, position, momentum, slice_var, stepsize):
"""
Initalizes root node of the tree, i.e depth = 0
"""
position_bar, momentum_bar, _ = self.simulate_dynamics(self.model, position, momentum, stepsize,
self.grad_log_pdf).get_proposed_values()
_, logp_bar = self.grad_log_pdf(position_bar, self.model).get_gradient_log_pdf()
hamiltonian = logp_bar - 0.5 * np.dot(momentum_bar, momentum_bar)
candidate_set_size = slice_var < np.exp(hamiltonian)
accept_set_bool = hamiltonian > np.log(slice_var) - 10000 # delta_max = 10000
return position_bar, momentum_bar, candidate_set_size, accept_set_bool | Initalizes root node of the tree, i.e depth = 0 | Below is the the instruction that describes the task:
### Input:
Initalizes root node of the tree, i.e depth = 0
### Response:
def _initalize_tree(self, position, momentum, slice_var, stepsize):
"""
Initalizes root node of the tree, i.e depth = 0
"""
position_bar, momentum_bar, _ = self.simulate_dynamics(self.model, position, momentum, stepsize,
self.grad_log_pdf).get_proposed_values()
_, logp_bar = self.grad_log_pdf(position_bar, self.model).get_gradient_log_pdf()
hamiltonian = logp_bar - 0.5 * np.dot(momentum_bar, momentum_bar)
candidate_set_size = slice_var < np.exp(hamiltonian)
accept_set_bool = hamiltonian > np.log(slice_var) - 10000 # delta_max = 10000
return position_bar, momentum_bar, candidate_set_size, accept_set_bool |
def add_subcommands(parser, namespace, functions, **namespace_kwargs):
"""
A wrapper for :func:`add_commands`.
These examples are equivalent::
add_commands(parser, [get, put], namespace='db',
namespace_kwargs={
'title': 'database commands',
'help': 'CRUD for our silly database'
})
add_subcommands(parser, 'db', [get, put],
title='database commands',
help='CRUD for our silly database')
"""
add_commands(parser, functions, namespace=namespace,
namespace_kwargs=namespace_kwargs) | A wrapper for :func:`add_commands`.
These examples are equivalent::
add_commands(parser, [get, put], namespace='db',
namespace_kwargs={
'title': 'database commands',
'help': 'CRUD for our silly database'
})
add_subcommands(parser, 'db', [get, put],
title='database commands',
help='CRUD for our silly database') | Below is the the instruction that describes the task:
### Input:
A wrapper for :func:`add_commands`.
These examples are equivalent::
add_commands(parser, [get, put], namespace='db',
namespace_kwargs={
'title': 'database commands',
'help': 'CRUD for our silly database'
})
add_subcommands(parser, 'db', [get, put],
title='database commands',
help='CRUD for our silly database')
### Response:
def add_subcommands(parser, namespace, functions, **namespace_kwargs):
"""
A wrapper for :func:`add_commands`.
These examples are equivalent::
add_commands(parser, [get, put], namespace='db',
namespace_kwargs={
'title': 'database commands',
'help': 'CRUD for our silly database'
})
add_subcommands(parser, 'db', [get, put],
title='database commands',
help='CRUD for our silly database')
"""
add_commands(parser, functions, namespace=namespace,
namespace_kwargs=namespace_kwargs) |
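A self-contained usage sketch mirroring the docstring example above; the `get`/`put` functions and the parser setup are illustrative assumptions, and `add_commands` is assumed to be the argh-style helper the wrapper delegates to.

import argparse

def get(key):
    return 'value for %s' % key

def put(key, value):
    return 'stored %s under %s' % (value, key)

parser = argparse.ArgumentParser()
add_subcommands(parser, 'db', [get, put],
                title='database commands',
                help='CRUD for our silly database')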