code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def get_job_class(job_class=None):
"""
Return job class from RQ settings, otherwise return Job.
If `job_class` is not None, it is used as an override (can be
python import path as string).
"""
RQ = getattr(settings, 'RQ', {})
if job_class is None:
job_class = RQ.get('JOB_CLASS', Job)
if isinstance(job_class, six.string_types):
job_class = import_attribute(job_class)
return job_class | Return job class from RQ settings, otherwise return Job.
If `job_class` is not None, it is used as an override (can be
python import path as string). | Below is the the instruction that describes the task:
### Input:
Return job class from RQ settings, otherwise return Job.
If `job_class` is not None, it is used as an override (can be
python import path as string).
### Response:
def get_job_class(job_class=None):
"""
Return job class from RQ settings, otherwise return Job.
If `job_class` is not None, it is used as an override (can be
python import path as string).
"""
RQ = getattr(settings, 'RQ', {})
if job_class is None:
job_class = RQ.get('JOB_CLASS', Job)
if isinstance(job_class, six.string_types):
job_class = import_attribute(job_class)
return job_class |
def mk_mopheader(expnum, ccd, version, dry_run=False, prefix=""):
"""Run the OSSOS mopheader script.
"""
## confirm destination directory exists.
destdir = os.path.dirname(
storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version, ext='fits'))
if not dry_run:
storage.mkdir(destdir)
## get image from the vospace storage area
filename = storage.get_image(expnum, ccd, version=version, prefix=prefix)
logging.info("Running mopheader on %s %d" % (expnum, ccd))
## launch the mopheader script
## launch the makepsf script
expname = os.path.basename(filename).strip('.fits')
logging.info(util.exec_prog(['stepZjmp',
'-f',
expname]))
mopheader_filename = expname+".mopheader"
# mopheader_filename = mopheader.main(filename)
if dry_run:
return
destination = storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version, ext='mopheader')
source = mopheader_filename
storage.copy(source, destination)
return | Run the OSSOS mopheader script. | Below is the the instruction that describes the task:
### Input:
Run the OSSOS mopheader script.
### Response:
def mk_mopheader(expnum, ccd, version, dry_run=False, prefix=""):
"""Run the OSSOS mopheader script.
"""
## confirm destination directory exists.
destdir = os.path.dirname(
storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version, ext='fits'))
if not dry_run:
storage.mkdir(destdir)
## get image from the vospace storage area
filename = storage.get_image(expnum, ccd, version=version, prefix=prefix)
logging.info("Running mopheader on %s %d" % (expnum, ccd))
## launch the mopheader script
## launch the makepsf script
expname = os.path.basename(filename).strip('.fits')
logging.info(util.exec_prog(['stepZjmp',
'-f',
expname]))
mopheader_filename = expname+".mopheader"
# mopheader_filename = mopheader.main(filename)
if dry_run:
return
destination = storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version, ext='mopheader')
source = mopheader_filename
storage.copy(source, destination)
return |
def get_categories(self):
""" Return a list of categories that a safe deposit box can belong to"""
sdb_resp = get_with_retry(self.cerberus_url + '/v1/category',
headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json() | Return a list of categories that a safe deposit box can belong to | Below is the the instruction that describes the task:
### Input:
Return a list of categories that a safe deposit box can belong to
### Response:
def get_categories(self):
""" Return a list of categories that a safe deposit box can belong to"""
sdb_resp = get_with_retry(self.cerberus_url + '/v1/category',
headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json() |
def sample_cleanup(data, sample):
""" stats, cleanup, and link to samples """
## get maxlen and depths array from clusters
maxlens, depths = get_quick_depths(data, sample)
try:
depths.max()
except ValueError:
## If depths is an empty array max() will raise
print(" no clusters found for {}".format(sample.name))
return
## Test if depths is non-empty, but just full of zeros.
if depths.max():
## store which min was used to calculate hidepth here
sample.stats_dfs.s3["hidepth_min"] = data.paramsdict["mindepth_majrule"]
## If our longest sequence is longer than the current max_fragment_length
## then update max_fragment_length. For assurance we require that
## max len is 4 greater than maxlen, to allow for pair separators.
hidepths = depths >= data.paramsdict["mindepth_majrule"]
maxlens = maxlens[hidepths]
## Handle the case where there are no hidepth clusters
if maxlens.any():
maxlen = int(maxlens.mean() + (2.*maxlens.std()))
else:
maxlen = 0
if maxlen > data._hackersonly["max_fragment_length"]:
data._hackersonly["max_fragment_length"] = maxlen + 4
## make sense of stats
keepmj = depths[depths >= data.paramsdict["mindepth_majrule"]]
keepstat = depths[depths >= data.paramsdict["mindepth_statistical"]]
## sample summary stat assignments
sample.stats["state"] = 3
sample.stats["clusters_total"] = depths.shape[0]
sample.stats["clusters_hidepth"] = keepmj.shape[0]
## store depths histogram as a dict. Limit to first 25 bins
bars, bins = np.histogram(depths, bins=range(1, 26))
sample.depths = {int(i):v for i, v in zip(bins, bars) if v}
## sample stat assignments
## Trap numpy warnings ("mean of empty slice") printed by samples
## with few reads.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
sample.stats_dfs.s3["merged_pairs"] = sample.stats.reads_merged
sample.stats_dfs.s3["clusters_total"] = depths.shape[0]
try:
sample.stats_dfs.s3["clusters_hidepth"] = int(sample.stats["clusters_hidepth"])
except ValueError:
## Handle clusters_hidepth == NaN
sample.stats_dfs.s3["clusters_hidepth"] = 0
sample.stats_dfs.s3["avg_depth_total"] = depths.mean()
LOGGER.debug("total depth {}".format(sample.stats_dfs.s3["avg_depth_total"]))
sample.stats_dfs.s3["avg_depth_mj"] = keepmj.mean()
LOGGER.debug("mj depth {}".format(sample.stats_dfs.s3["avg_depth_mj"]))
sample.stats_dfs.s3["avg_depth_stat"] = keepstat.mean()
sample.stats_dfs.s3["sd_depth_total"] = depths.std()
sample.stats_dfs.s3["sd_depth_mj"] = keepmj.std()
sample.stats_dfs.s3["sd_depth_stat"] = keepstat.std()
else:
print(" no clusters found for {}".format(sample.name))
## Get some stats from the bam files
## This is moderately hackish. samtools flagstat returns
## the number of reads in the bam file as the first element
## of the first line, this call makes this assumption.
if not data.paramsdict["assembly_method"] == "denovo":
refmap_stats(data, sample)
log_level = logging.getLevelName(LOGGER.getEffectiveLevel())
if not log_level == "DEBUG":
## Clean up loose files only if not in DEBUG
##- edits/*derep, utemp, *utemp.sort, *htemp, *clust.gz
derepfile = os.path.join(data.dirs.edits, sample.name+"_derep.fastq")
mergefile = os.path.join(data.dirs.edits, sample.name+"_merged_.fastq")
uhandle = os.path.join(data.dirs.clusts, sample.name+".utemp")
usort = os.path.join(data.dirs.clusts, sample.name+".utemp.sort")
hhandle = os.path.join(data.dirs.clusts, sample.name+".htemp")
clusters = os.path.join(data.dirs.clusts, sample.name+".clust.gz")
for f in [derepfile, mergefile, uhandle, usort, hhandle, clusters]:
try:
os.remove(f)
except:
pass | stats, cleanup, and link to samples | Below is the the instruction that describes the task:
### Input:
stats, cleanup, and link to samples
### Response:
def sample_cleanup(data, sample):
""" stats, cleanup, and link to samples """
## get maxlen and depths array from clusters
maxlens, depths = get_quick_depths(data, sample)
try:
depths.max()
except ValueError:
## If depths is an empty array max() will raise
print(" no clusters found for {}".format(sample.name))
return
## Test if depths is non-empty, but just full of zeros.
if depths.max():
## store which min was used to calculate hidepth here
sample.stats_dfs.s3["hidepth_min"] = data.paramsdict["mindepth_majrule"]
## If our longest sequence is longer than the current max_fragment_length
## then update max_fragment_length. For assurance we require that
## max len is 4 greater than maxlen, to allow for pair separators.
hidepths = depths >= data.paramsdict["mindepth_majrule"]
maxlens = maxlens[hidepths]
## Handle the case where there are no hidepth clusters
if maxlens.any():
maxlen = int(maxlens.mean() + (2.*maxlens.std()))
else:
maxlen = 0
if maxlen > data._hackersonly["max_fragment_length"]:
data._hackersonly["max_fragment_length"] = maxlen + 4
## make sense of stats
keepmj = depths[depths >= data.paramsdict["mindepth_majrule"]]
keepstat = depths[depths >= data.paramsdict["mindepth_statistical"]]
## sample summary stat assignments
sample.stats["state"] = 3
sample.stats["clusters_total"] = depths.shape[0]
sample.stats["clusters_hidepth"] = keepmj.shape[0]
## store depths histogram as a dict. Limit to first 25 bins
bars, bins = np.histogram(depths, bins=range(1, 26))
sample.depths = {int(i):v for i, v in zip(bins, bars) if v}
## sample stat assignments
## Trap numpy warnings ("mean of empty slice") printed by samples
## with few reads.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
sample.stats_dfs.s3["merged_pairs"] = sample.stats.reads_merged
sample.stats_dfs.s3["clusters_total"] = depths.shape[0]
try:
sample.stats_dfs.s3["clusters_hidepth"] = int(sample.stats["clusters_hidepth"])
except ValueError:
## Handle clusters_hidepth == NaN
sample.stats_dfs.s3["clusters_hidepth"] = 0
sample.stats_dfs.s3["avg_depth_total"] = depths.mean()
LOGGER.debug("total depth {}".format(sample.stats_dfs.s3["avg_depth_total"]))
sample.stats_dfs.s3["avg_depth_mj"] = keepmj.mean()
LOGGER.debug("mj depth {}".format(sample.stats_dfs.s3["avg_depth_mj"]))
sample.stats_dfs.s3["avg_depth_stat"] = keepstat.mean()
sample.stats_dfs.s3["sd_depth_total"] = depths.std()
sample.stats_dfs.s3["sd_depth_mj"] = keepmj.std()
sample.stats_dfs.s3["sd_depth_stat"] = keepstat.std()
else:
print(" no clusters found for {}".format(sample.name))
## Get some stats from the bam files
## This is moderately hackish. samtools flagstat returns
## the number of reads in the bam file as the first element
## of the first line, this call makes this assumption.
if not data.paramsdict["assembly_method"] == "denovo":
refmap_stats(data, sample)
log_level = logging.getLevelName(LOGGER.getEffectiveLevel())
if not log_level == "DEBUG":
## Clean up loose files only if not in DEBUG
##- edits/*derep, utemp, *utemp.sort, *htemp, *clust.gz
derepfile = os.path.join(data.dirs.edits, sample.name+"_derep.fastq")
mergefile = os.path.join(data.dirs.edits, sample.name+"_merged_.fastq")
uhandle = os.path.join(data.dirs.clusts, sample.name+".utemp")
usort = os.path.join(data.dirs.clusts, sample.name+".utemp.sort")
hhandle = os.path.join(data.dirs.clusts, sample.name+".htemp")
clusters = os.path.join(data.dirs.clusts, sample.name+".clust.gz")
for f in [derepfile, mergefile, uhandle, usort, hhandle, clusters]:
try:
os.remove(f)
except:
pass |
def _register_renderers(self, attrs):
"""
Go through the passed in list of attributes and register those renderers
in the render map.
"""
for method in attrs:
func = getattr(self, method)
mimetypes = getattr(func, 'mimetypes', [])
for mimetype in mimetypes:
if not '/' in mimetype:
self.reject_map[mimetype] = func
if mimetype not in self.render_map:
self.render_map[mimetype] = func
else:
# about to redefine an already defined renderer.
# make sure this new render method is not on a base class.
base_classes = self.__class__.mro()[1:]
from_baseclass = any([x for x in base_classes if func.__name__ in dir(x)])
if not from_baseclass:
self.render_map[mimetype] = func | Go through the passed in list of attributes and register those renderers
in the render map. | Below is the the instruction that describes the task:
### Input:
Go through the passed in list of attributes and register those renderers
in the render map.
### Response:
def _register_renderers(self, attrs):
"""
Go through the passed in list of attributes and register those renderers
in the render map.
"""
for method in attrs:
func = getattr(self, method)
mimetypes = getattr(func, 'mimetypes', [])
for mimetype in mimetypes:
if not '/' in mimetype:
self.reject_map[mimetype] = func
if mimetype not in self.render_map:
self.render_map[mimetype] = func
else:
# about to redefine an already defined renderer.
# make sure this new render method is not on a base class.
base_classes = self.__class__.mro()[1:]
from_baseclass = any([x for x in base_classes if func.__name__ in dir(x)])
if not from_baseclass:
self.render_map[mimetype] = func |
def image_cache(article_cache, img_dir):
"""
The method to be used by get_images() for copying images out of the cache.
"""
log.debug('Looking for image directory in the cache')
if os.path.isdir(article_cache):
log.info('Cached image directory found: {0}'.format(article_cache))
shutil.copytree(article_cache, img_dir)
return True
return False | The method to be used by get_images() for copying images out of the cache. | Below is the the instruction that describes the task:
### Input:
The method to be used by get_images() for copying images out of the cache.
### Response:
def image_cache(article_cache, img_dir):
"""
The method to be used by get_images() for copying images out of the cache.
"""
log.debug('Looking for image directory in the cache')
if os.path.isdir(article_cache):
log.info('Cached image directory found: {0}'.format(article_cache))
shutil.copytree(article_cache, img_dir)
return True
return False |
def _notin(expr, values):
"""
Return a boolean sequence or scalar showing whether
each element is not contained in the passed `values`.
:param expr: sequence or scalar
:param values: `list` object or sequence
:return: boolean sequence or scalar
"""
if isinstance(expr, SequenceExpr):
return NotIn(_input=expr, _values=values, _data_type=types.boolean)
elif isinstance(expr, Scalar):
return NotIn(_input=expr, _values=values, _value_type=types.boolean) | Return a boolean sequence or scalar showing whether
each element is not contained in the passed `values`.
:param expr: sequence or scalar
:param values: `list` object or sequence
:return: boolean sequence or scalar | Below is the the instruction that describes the task:
### Input:
Return a boolean sequence or scalar showing whether
each element is not contained in the passed `values`.
:param expr: sequence or scalar
:param values: `list` object or sequence
:return: boolean sequence or scalar
### Response:
def _notin(expr, values):
"""
Return a boolean sequence or scalar showing whether
each element is not contained in the passed `values`.
:param expr: sequence or scalar
:param values: `list` object or sequence
:return: boolean sequence or scalar
"""
if isinstance(expr, SequenceExpr):
return NotIn(_input=expr, _values=values, _data_type=types.boolean)
elif isinstance(expr, Scalar):
return NotIn(_input=expr, _values=values, _value_type=types.boolean) |
def modify(name,
bin_path=None,
exe_args=None,
display_name=None,
description=None,
service_type=None,
start_type=None,
start_delayed=None,
error_control=None,
load_order_group=None,
dependencies=None,
account_name=None,
account_password=None,
run_interactive=None):
# pylint: disable=anomalous-backslash-in-string
'''
Modify a service's parameters. Changes will not be made for parameters that
are not passed.
.. versionadded:: 2016.11.0
Args:
name (str):
The name of the service. Can be found using the
``service.get_service_name`` function
bin_path (str):
The path to the service executable. Backslashes must be escaped, eg:
``C:\\path\\to\\binary.exe``
exe_args (str):
Any arguments required by the service executable
display_name (str):
The name to display in the service manager
description (str):
The description to display for the service
service_type (str):
Specifies the service type. Default is ``own``. Valid options are as
follows:
- kernel: Driver service
- filesystem: File system driver service
- adapter: Adapter driver service (reserved)
- recognizer: Recognizer driver service (reserved)
- own (default): Service runs in its own process
- share: Service shares a process with one or more other services
start_type (str):
Specifies the service start type. Valid options are as follows:
- boot: Device driver that is loaded by the boot loader
- system: Device driver that is started during kernel initialization
- auto: Service that automatically starts
- manual: Service must be started manually
- disabled: Service cannot be started
start_delayed (bool):
Set the service to Auto(Delayed Start). Only valid if the start_type
is set to ``Auto``. If service_type is not passed, but the service
is already set to ``Auto``, then the flag will be set.
error_control (str):
The severity of the error, and action taken, if this service fails
to start. Valid options are as follows:
- normal: Error is logged and a message box is displayed
- severe: Error is logged and computer attempts a restart with the
last known good configuration
- critical: Error is logged, computer attempts to restart with the
last known good configuration, system halts on failure
- ignore: Error is logged and startup continues, no notification is
given to the user
load_order_group (str):
The name of the load order group to which this service belongs
dependencies (list):
A list of services or load ordering groups that must start before
this service
account_name (str):
The name of the account under which the service should run. For
``own`` type services this should be in the ``domain\\username``
format. The following are examples of valid built-in service
accounts:
- NT Authority\\LocalService
- NT Authority\\NetworkService
- NT Authority\\LocalSystem
- .\LocalSystem
account_password (str):
The password for the account name specified in ``account_name``. For
the above built-in accounts, this can be None. Otherwise a password
must be specified.
run_interactive (bool):
If this setting is True, the service will be allowed to interact
with the user. Not recommended for services that run with elevated
privileges.
Returns:
dict: a dictionary of changes made
CLI Example:
.. code-block:: bash
salt '*' service.modify spooler start_type=disabled
'''
# pylint: enable=anomalous-backslash-in-string
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms681987(v=vs.85).aspx
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms681988(v-vs.85).aspx
handle_scm = win32service.OpenSCManager(
None, None, win32service.SC_MANAGER_CONNECT)
try:
handle_svc = win32service.OpenService(
handle_scm,
name,
win32service.SERVICE_CHANGE_CONFIG |
win32service.SERVICE_QUERY_CONFIG)
except pywintypes.error as exc:
raise CommandExecutionError(
'Failed To Open {0}: {1}'.format(name, exc.strerror))
config_info = win32service.QueryServiceConfig(handle_svc)
changes = dict()
# Input Validation
if bin_path is not None:
# shlex.quote the path to the binary
bin_path = _cmd_quote(bin_path)
if exe_args is not None:
bin_path = '{0} {1}'.format(bin_path, exe_args)
changes['BinaryPath'] = bin_path
if service_type is not None:
if service_type.lower() in SERVICE_TYPE:
service_type = SERVICE_TYPE[service_type.lower()]
if run_interactive:
service_type = service_type | \
win32service.SERVICE_INTERACTIVE_PROCESS
else:
raise CommandExecutionError(
'Invalid Service Type: {0}'.format(service_type))
else:
if run_interactive is True:
service_type = config_info[0] | \
win32service.SERVICE_INTERACTIVE_PROCESS
elif run_interactive is False:
service_type = config_info[0] ^ \
win32service.SERVICE_INTERACTIVE_PROCESS
else:
service_type = win32service.SERVICE_NO_CHANGE
if service_type is not win32service.SERVICE_NO_CHANGE:
flags = list()
for bit in SERVICE_TYPE:
if isinstance(bit, int) and service_type & bit:
flags.append(SERVICE_TYPE[bit])
changes['ServiceType'] = flags if flags else service_type
if start_type is not None:
if start_type.lower() in SERVICE_START_TYPE:
start_type = SERVICE_START_TYPE[start_type.lower()]
else:
raise CommandExecutionError(
'Invalid Start Type: {0}'.format(start_type))
changes['StartType'] = SERVICE_START_TYPE[start_type]
else:
start_type = win32service.SERVICE_NO_CHANGE
if error_control is not None:
if error_control.lower() in SERVICE_ERROR_CONTROL:
error_control = SERVICE_ERROR_CONTROL[error_control.lower()]
else:
raise CommandExecutionError(
'Invalid Error Control: {0}'.format(error_control))
changes['ErrorControl'] = SERVICE_ERROR_CONTROL[error_control]
else:
error_control = win32service.SERVICE_NO_CHANGE
if account_name is not None:
changes['ServiceAccount'] = account_name
if account_name in ['LocalSystem', 'LocalService', 'NetworkService']:
account_password = ''
if account_password is not None:
changes['ServiceAccountPassword'] = 'XXX-REDACTED-XXX'
if load_order_group is not None:
changes['LoadOrderGroup'] = load_order_group
if dependencies is not None:
changes['Dependencies'] = dependencies
if display_name is not None:
changes['DisplayName'] = display_name
win32service.ChangeServiceConfig(handle_svc,
service_type,
start_type,
error_control,
bin_path,
load_order_group,
0,
dependencies,
account_name,
account_password,
display_name)
if description is not None:
win32service.ChangeServiceConfig2(
handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION, description)
changes['Description'] = description
if start_delayed is not None:
# You can only set delayed start for services that are set to auto start
# Start type 2 is Auto
# Start type -1 is no change
if (start_type == -1 and config_info[1] == 2) or start_type == 2:
win32service.ChangeServiceConfig2(
handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO,
start_delayed)
changes['StartTypeDelayed'] = start_delayed
else:
changes['Warning'] = 'start_delayed: Requires start_type "auto"'
win32service.CloseServiceHandle(handle_scm)
win32service.CloseServiceHandle(handle_svc)
return changes | Modify a service's parameters. Changes will not be made for parameters that
are not passed.
.. versionadded:: 2016.11.0
Args:
name (str):
The name of the service. Can be found using the
``service.get_service_name`` function
bin_path (str):
The path to the service executable. Backslashes must be escaped, eg:
``C:\\path\\to\\binary.exe``
exe_args (str):
Any arguments required by the service executable
display_name (str):
The name to display in the service manager
description (str):
The description to display for the service
service_type (str):
Specifies the service type. Default is ``own``. Valid options are as
follows:
- kernel: Driver service
- filesystem: File system driver service
- adapter: Adapter driver service (reserved)
- recognizer: Recognizer driver service (reserved)
- own (default): Service runs in its own process
- share: Service shares a process with one or more other services
start_type (str):
Specifies the service start type. Valid options are as follows:
- boot: Device driver that is loaded by the boot loader
- system: Device driver that is started during kernel initialization
- auto: Service that automatically starts
- manual: Service must be started manually
- disabled: Service cannot be started
start_delayed (bool):
Set the service to Auto(Delayed Start). Only valid if the start_type
is set to ``Auto``. If service_type is not passed, but the service
is already set to ``Auto``, then the flag will be set.
error_control (str):
The severity of the error, and action taken, if this service fails
to start. Valid options are as follows:
- normal: Error is logged and a message box is displayed
- severe: Error is logged and computer attempts a restart with the
last known good configuration
- critical: Error is logged, computer attempts to restart with the
last known good configuration, system halts on failure
- ignore: Error is logged and startup continues, no notification is
given to the user
load_order_group (str):
The name of the load order group to which this service belongs
dependencies (list):
A list of services or load ordering groups that must start before
this service
account_name (str):
The name of the account under which the service should run. For
``own`` type services this should be in the ``domain\\username``
format. The following are examples of valid built-in service
accounts:
- NT Authority\\LocalService
- NT Authority\\NetworkService
- NT Authority\\LocalSystem
- .\LocalSystem
account_password (str):
The password for the account name specified in ``account_name``. For
the above built-in accounts, this can be None. Otherwise a password
must be specified.
run_interactive (bool):
If this setting is True, the service will be allowed to interact
with the user. Not recommended for services that run with elevated
privileges.
Returns:
dict: a dictionary of changes made
CLI Example:
.. code-block:: bash
salt '*' service.modify spooler start_type=disabled | Below is the the instruction that describes the task:
### Input:
Modify a service's parameters. Changes will not be made for parameters that
are not passed.
.. versionadded:: 2016.11.0
Args:
name (str):
The name of the service. Can be found using the
``service.get_service_name`` function
bin_path (str):
The path to the service executable. Backslashes must be escaped, eg:
``C:\\path\\to\\binary.exe``
exe_args (str):
Any arguments required by the service executable
display_name (str):
The name to display in the service manager
description (str):
The description to display for the service
service_type (str):
Specifies the service type. Default is ``own``. Valid options are as
follows:
- kernel: Driver service
- filesystem: File system driver service
- adapter: Adapter driver service (reserved)
- recognizer: Recognizer driver service (reserved)
- own (default): Service runs in its own process
- share: Service shares a process with one or more other services
start_type (str):
Specifies the service start type. Valid options are as follows:
- boot: Device driver that is loaded by the boot loader
- system: Device driver that is started during kernel initialization
- auto: Service that automatically starts
- manual: Service must be started manually
- disabled: Service cannot be started
start_delayed (bool):
Set the service to Auto(Delayed Start). Only valid if the start_type
is set to ``Auto``. If service_type is not passed, but the service
is already set to ``Auto``, then the flag will be set.
error_control (str):
The severity of the error, and action taken, if this service fails
to start. Valid options are as follows:
- normal: Error is logged and a message box is displayed
- severe: Error is logged and computer attempts a restart with the
last known good configuration
- critical: Error is logged, computer attempts to restart with the
last known good configuration, system halts on failure
- ignore: Error is logged and startup continues, no notification is
given to the user
load_order_group (str):
The name of the load order group to which this service belongs
dependencies (list):
A list of services or load ordering groups that must start before
this service
account_name (str):
The name of the account under which the service should run. For
``own`` type services this should be in the ``domain\\username``
format. The following are examples of valid built-in service
accounts:
- NT Authority\\LocalService
- NT Authority\\NetworkService
- NT Authority\\LocalSystem
- .\LocalSystem
account_password (str):
The password for the account name specified in ``account_name``. For
the above built-in accounts, this can be None. Otherwise a password
must be specified.
run_interactive (bool):
If this setting is True, the service will be allowed to interact
with the user. Not recommended for services that run with elevated
privileges.
Returns:
dict: a dictionary of changes made
CLI Example:
.. code-block:: bash
salt '*' service.modify spooler start_type=disabled
### Response:
def modify(name,
bin_path=None,
exe_args=None,
display_name=None,
description=None,
service_type=None,
start_type=None,
start_delayed=None,
error_control=None,
load_order_group=None,
dependencies=None,
account_name=None,
account_password=None,
run_interactive=None):
# pylint: disable=anomalous-backslash-in-string
'''
Modify a service's parameters. Changes will not be made for parameters that
are not passed.
.. versionadded:: 2016.11.0
Args:
name (str):
The name of the service. Can be found using the
``service.get_service_name`` function
bin_path (str):
The path to the service executable. Backslashes must be escaped, eg:
``C:\\path\\to\\binary.exe``
exe_args (str):
Any arguments required by the service executable
display_name (str):
The name to display in the service manager
description (str):
The description to display for the service
service_type (str):
Specifies the service type. Default is ``own``. Valid options are as
follows:
- kernel: Driver service
- filesystem: File system driver service
- adapter: Adapter driver service (reserved)
- recognizer: Recognizer driver service (reserved)
- own (default): Service runs in its own process
- share: Service shares a process with one or more other services
start_type (str):
Specifies the service start type. Valid options are as follows:
- boot: Device driver that is loaded by the boot loader
- system: Device driver that is started during kernel initialization
- auto: Service that automatically starts
- manual: Service must be started manually
- disabled: Service cannot be started
start_delayed (bool):
Set the service to Auto(Delayed Start). Only valid if the start_type
is set to ``Auto``. If service_type is not passed, but the service
is already set to ``Auto``, then the flag will be set.
error_control (str):
The severity of the error, and action taken, if this service fails
to start. Valid options are as follows:
- normal: Error is logged and a message box is displayed
- severe: Error is logged and computer attempts a restart with the
last known good configuration
- critical: Error is logged, computer attempts to restart with the
last known good configuration, system halts on failure
- ignore: Error is logged and startup continues, no notification is
given to the user
load_order_group (str):
The name of the load order group to which this service belongs
dependencies (list):
A list of services or load ordering groups that must start before
this service
account_name (str):
The name of the account under which the service should run. For
``own`` type services this should be in the ``domain\\username``
format. The following are examples of valid built-in service
accounts:
- NT Authority\\LocalService
- NT Authority\\NetworkService
- NT Authority\\LocalSystem
- .\LocalSystem
account_password (str):
The password for the account name specified in ``account_name``. For
the above built-in accounts, this can be None. Otherwise a password
must be specified.
run_interactive (bool):
If this setting is True, the service will be allowed to interact
with the user. Not recommended for services that run with elevated
privileges.
Returns:
dict: a dictionary of changes made
CLI Example:
.. code-block:: bash
salt '*' service.modify spooler start_type=disabled
'''
# pylint: enable=anomalous-backslash-in-string
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms681987(v=vs.85).aspx
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms681988(v-vs.85).aspx
handle_scm = win32service.OpenSCManager(
None, None, win32service.SC_MANAGER_CONNECT)
try:
handle_svc = win32service.OpenService(
handle_scm,
name,
win32service.SERVICE_CHANGE_CONFIG |
win32service.SERVICE_QUERY_CONFIG)
except pywintypes.error as exc:
raise CommandExecutionError(
'Failed To Open {0}: {1}'.format(name, exc.strerror))
config_info = win32service.QueryServiceConfig(handle_svc)
changes = dict()
# Input Validation
if bin_path is not None:
# shlex.quote the path to the binary
bin_path = _cmd_quote(bin_path)
if exe_args is not None:
bin_path = '{0} {1}'.format(bin_path, exe_args)
changes['BinaryPath'] = bin_path
if service_type is not None:
if service_type.lower() in SERVICE_TYPE:
service_type = SERVICE_TYPE[service_type.lower()]
if run_interactive:
service_type = service_type | \
win32service.SERVICE_INTERACTIVE_PROCESS
else:
raise CommandExecutionError(
'Invalid Service Type: {0}'.format(service_type))
else:
if run_interactive is True:
service_type = config_info[0] | \
win32service.SERVICE_INTERACTIVE_PROCESS
elif run_interactive is False:
service_type = config_info[0] ^ \
win32service.SERVICE_INTERACTIVE_PROCESS
else:
service_type = win32service.SERVICE_NO_CHANGE
if service_type is not win32service.SERVICE_NO_CHANGE:
flags = list()
for bit in SERVICE_TYPE:
if isinstance(bit, int) and service_type & bit:
flags.append(SERVICE_TYPE[bit])
changes['ServiceType'] = flags if flags else service_type
if start_type is not None:
if start_type.lower() in SERVICE_START_TYPE:
start_type = SERVICE_START_TYPE[start_type.lower()]
else:
raise CommandExecutionError(
'Invalid Start Type: {0}'.format(start_type))
changes['StartType'] = SERVICE_START_TYPE[start_type]
else:
start_type = win32service.SERVICE_NO_CHANGE
if error_control is not None:
if error_control.lower() in SERVICE_ERROR_CONTROL:
error_control = SERVICE_ERROR_CONTROL[error_control.lower()]
else:
raise CommandExecutionError(
'Invalid Error Control: {0}'.format(error_control))
changes['ErrorControl'] = SERVICE_ERROR_CONTROL[error_control]
else:
error_control = win32service.SERVICE_NO_CHANGE
if account_name is not None:
changes['ServiceAccount'] = account_name
if account_name in ['LocalSystem', 'LocalService', 'NetworkService']:
account_password = ''
if account_password is not None:
changes['ServiceAccountPassword'] = 'XXX-REDACTED-XXX'
if load_order_group is not None:
changes['LoadOrderGroup'] = load_order_group
if dependencies is not None:
changes['Dependencies'] = dependencies
if display_name is not None:
changes['DisplayName'] = display_name
win32service.ChangeServiceConfig(handle_svc,
service_type,
start_type,
error_control,
bin_path,
load_order_group,
0,
dependencies,
account_name,
account_password,
display_name)
if description is not None:
win32service.ChangeServiceConfig2(
handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION, description)
changes['Description'] = description
if start_delayed is not None:
# You can only set delayed start for services that are set to auto start
# Start type 2 is Auto
# Start type -1 is no change
if (start_type == -1 and config_info[1] == 2) or start_type == 2:
win32service.ChangeServiceConfig2(
handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO,
start_delayed)
changes['StartTypeDelayed'] = start_delayed
else:
changes['Warning'] = 'start_delayed: Requires start_type "auto"'
win32service.CloseServiceHandle(handle_scm)
win32service.CloseServiceHandle(handle_svc)
return changes |
def run_mapper(self, stdin=sys.stdin, stdout=sys.stdout):
"""
Run the mapper on the hadoop node.
"""
self.init_hadoop()
self.init_mapper()
outputs = self._map_input((line[:-1] for line in stdin))
if self.reducer == NotImplemented:
self.writer(outputs, stdout)
else:
            self.internal_writer(outputs, stdout) | Run the mapper on the hadoop node. | Below is the instruction that describes the task:
### Input:
Run the mapper on the hadoop node.
### Response:
def run_mapper(self, stdin=sys.stdin, stdout=sys.stdout):
"""
Run the mapper on the hadoop node.
"""
self.init_hadoop()
self.init_mapper()
outputs = self._map_input((line[:-1] for line in stdin))
if self.reducer == NotImplemented:
self.writer(outputs, stdout)
else:
self.internal_writer(outputs, stdout) |
def read(self, size=-1):
"""Read bytes from the buffer and advance the read position. Returns
the bytes in a bytestring.
Parameters
----------
size: int, optional
Maximum number of bytes to read. If negative or not supplied, read
all unread bytes in the buffer.
Returns
-------
bytes
"""
part = self.peek(size)
self._pos += len(part)
return part | Read bytes from the buffer and advance the read position. Returns
the bytes in a bytestring.
Parameters
----------
size: int, optional
Maximum number of bytes to read. If negative or not supplied, read
all unread bytes in the buffer.
Returns
-------
    bytes | Below is the instruction that describes the task:
### Input:
Read bytes from the buffer and advance the read position. Returns
the bytes in a bytestring.
Parameters
----------
size: int, optional
Maximum number of bytes to read. If negative or not supplied, read
all unread bytes in the buffer.
Returns
-------
bytes
### Response:
def read(self, size=-1):
"""Read bytes from the buffer and advance the read position. Returns
the bytes in a bytestring.
Parameters
----------
size: int, optional
Maximum number of bytes to read. If negative or not supplied, read
all unread bytes in the buffer.
Returns
-------
bytes
"""
part = self.peek(size)
self._pos += len(part)
return part |
def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
"""Check that ssl is enabled on rmq juju sentry units.
:param sentry_units: list of all rmq sentry units
:param port: optional ssl port override to validate
:returns: None if successful, otherwise return error message
"""
for sentry_unit in sentry_units:
if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
return ('Unexpected condition: ssl is disabled on unit '
'({})'.format(sentry_unit.info['unit_name']))
return None | Check that ssl is enabled on rmq juju sentry units.
:param sentry_units: list of all rmq sentry units
:param port: optional ssl port override to validate
    :returns: None if successful, otherwise return error message | Below is the instruction that describes the task:
### Input:
Check that ssl is enabled on rmq juju sentry units.
:param sentry_units: list of all rmq sentry units
:param port: optional ssl port override to validate
:returns: None if successful, otherwise return error message
### Response:
def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
"""Check that ssl is enabled on rmq juju sentry units.
:param sentry_units: list of all rmq sentry units
:param port: optional ssl port override to validate
:returns: None if successful, otherwise return error message
"""
for sentry_unit in sentry_units:
if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
return ('Unexpected condition: ssl is disabled on unit '
'({})'.format(sentry_unit.info['unit_name']))
return None |
def load(name,
split=None,
data_dir=None,
batch_size=1,
download=True,
as_supervised=False,
with_info=False,
builder_kwargs=None,
download_and_prepare_kwargs=None,
as_dataset_kwargs=None,
try_gcs=False):
"""Loads the named dataset into a `tf.data.Dataset`.
If `split=None` (the default), returns all splits for the dataset. Otherwise,
returns the specified split.
`load` is a convenience method that fetches the `tfds.core.DatasetBuilder` by
string name, optionally calls `DatasetBuilder.download_and_prepare`
(if `download=True`), and then calls `DatasetBuilder.as_dataset`.
This is roughly equivalent to:
```
builder = tfds.builder(name, data_dir=data_dir, **builder_kwargs)
if download:
builder.download_and_prepare(**download_and_prepare_kwargs)
ds = builder.as_dataset(
split=split, as_supervised=as_supervised, **as_dataset_kwargs)
if with_info:
return ds, builder.info
return ds
```
If you'd like NumPy arrays instead of `tf.data.Dataset`s or `tf.Tensor`s,
you can pass the return value to `tfds.as_numpy`.
Callers must pass arguments as keyword arguments.
**Warning**: calling this function might potentially trigger the download
of hundreds of GiB to disk. Refer to the `download` argument.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `"foo_bar/a=True,b=3"` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to
use the `"zoo"` config and pass to the builder keyword arguments `a=True`
and `b=3`).
split: `tfds.Split` or `str`, which split of the data to load. If None,
will return a `dict` with all splits (typically `tfds.Split.TRAIN` and
`tfds.Split.TEST`).
data_dir: `str` (optional), directory to read/write data.
Defaults to "~/tensorflow_datasets".
batch_size: `int`, set to > 1 to get batches of examples. Note that
variable length features will be 0-padded. If
`batch_size=-1`, will return the full dataset as `tf.Tensor`s.
download: `bool` (optional), whether to call
`tfds.core.DatasetBuilder.download_and_prepare`
before calling `tf.DatasetBuilder.as_dataset`. If `False`, data is
expected to be in `data_dir`. If `True` and the data is already in
`data_dir`, `download_and_prepare` is a no-op.
as_supervised: `bool`, if `True`, the returned `tf.data.Dataset`
will have a 2-tuple structure `(input, label)` according to
`builder.info.supervised_keys`. If `False`, the default,
the returned `tf.data.Dataset` will have a dictionary with all the
features.
with_info: `bool`, if True, tfds.load will return the tuple
(tf.data.Dataset, tfds.core.DatasetInfo) containing the info associated
with the builder.
builder_kwargs: `dict` (optional), keyword arguments to be passed to the
`tfds.core.DatasetBuilder` constructor. `data_dir` will be passed
through by default.
download_and_prepare_kwargs: `dict` (optional) keyword arguments passed to
`tfds.core.DatasetBuilder.download_and_prepare` if `download=True`. Allow
to control where to download and extract the cached data. If not set,
cache_dir and manual_dir will automatically be deduced from data_dir.
as_dataset_kwargs: `dict` (optional), keyword arguments passed to
`tfds.core.DatasetBuilder.as_dataset`. `split` will be passed through by
default. Example: `{'shuffle_files': True}`.
Note that shuffle_files is False by default unless
`split == tfds.Split.TRAIN`.
try_gcs: `bool`, if True, tfds.load will see if the dataset exists on
the public GCS bucket before building it locally.
Returns:
ds: `tf.data.Dataset`, the dataset requested, or if `split` is None, a
`dict<key: tfds.Split, value: tfds.data.Dataset>`. If `batch_size=-1`,
these will be full datasets as `tf.Tensor`s.
ds_info: `tfds.core.DatasetInfo`, if `with_info` is True, then `tfds.load`
will return a tuple `(ds, ds_info)` containing dataset information
(version, features, splits, num_examples,...). Note that the `ds_info`
object documents the entire dataset, regardless of the `split` requested.
Split-specific information is available in `ds_info.splits`.
"""
name, name_builder_kwargs = _dataset_name_and_kwargs_from_name_str(name)
name_builder_kwargs.update(builder_kwargs or {})
builder_kwargs = name_builder_kwargs
# Set data_dir
if try_gcs and gcs_utils.is_dataset_on_gcs(name):
data_dir = constants.GCS_DATA_DIR
elif data_dir is None:
data_dir = constants.DATA_DIR
dbuilder = builder(name, data_dir=data_dir, **builder_kwargs)
if download:
download_and_prepare_kwargs = download_and_prepare_kwargs or {}
dbuilder.download_and_prepare(**download_and_prepare_kwargs)
if as_dataset_kwargs is None:
as_dataset_kwargs = {}
as_dataset_kwargs = dict(as_dataset_kwargs)
as_dataset_kwargs["split"] = split
as_dataset_kwargs["as_supervised"] = as_supervised
as_dataset_kwargs["batch_size"] = batch_size
ds = dbuilder.as_dataset(**as_dataset_kwargs)
if with_info:
return ds, dbuilder.info
return ds | Loads the named dataset into a `tf.data.Dataset`.
If `split=None` (the default), returns all splits for the dataset. Otherwise,
returns the specified split.
`load` is a convenience method that fetches the `tfds.core.DatasetBuilder` by
string name, optionally calls `DatasetBuilder.download_and_prepare`
(if `download=True`), and then calls `DatasetBuilder.as_dataset`.
This is roughly equivalent to:
```
builder = tfds.builder(name, data_dir=data_dir, **builder_kwargs)
if download:
builder.download_and_prepare(**download_and_prepare_kwargs)
ds = builder.as_dataset(
split=split, as_supervised=as_supervised, **as_dataset_kwargs)
if with_info:
return ds, builder.info
return ds
```
If you'd like NumPy arrays instead of `tf.data.Dataset`s or `tf.Tensor`s,
you can pass the return value to `tfds.as_numpy`.
Callers must pass arguments as keyword arguments.
**Warning**: calling this function might potentially trigger the download
of hundreds of GiB to disk. Refer to the `download` argument.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `"foo_bar/a=True,b=3"` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to
use the `"zoo"` config and pass to the builder keyword arguments `a=True`
and `b=3`).
split: `tfds.Split` or `str`, which split of the data to load. If None,
will return a `dict` with all splits (typically `tfds.Split.TRAIN` and
`tfds.Split.TEST`).
data_dir: `str` (optional), directory to read/write data.
Defaults to "~/tensorflow_datasets".
batch_size: `int`, set to > 1 to get batches of examples. Note that
variable length features will be 0-padded. If
`batch_size=-1`, will return the full dataset as `tf.Tensor`s.
download: `bool` (optional), whether to call
`tfds.core.DatasetBuilder.download_and_prepare`
before calling `tf.DatasetBuilder.as_dataset`. If `False`, data is
expected to be in `data_dir`. If `True` and the data is already in
`data_dir`, `download_and_prepare` is a no-op.
as_supervised: `bool`, if `True`, the returned `tf.data.Dataset`
will have a 2-tuple structure `(input, label)` according to
`builder.info.supervised_keys`. If `False`, the default,
the returned `tf.data.Dataset` will have a dictionary with all the
features.
with_info: `bool`, if True, tfds.load will return the tuple
(tf.data.Dataset, tfds.core.DatasetInfo) containing the info associated
with the builder.
builder_kwargs: `dict` (optional), keyword arguments to be passed to the
`tfds.core.DatasetBuilder` constructor. `data_dir` will be passed
through by default.
download_and_prepare_kwargs: `dict` (optional) keyword arguments passed to
`tfds.core.DatasetBuilder.download_and_prepare` if `download=True`. Allow
to control where to download and extract the cached data. If not set,
cache_dir and manual_dir will automatically be deduced from data_dir.
as_dataset_kwargs: `dict` (optional), keyword arguments passed to
`tfds.core.DatasetBuilder.as_dataset`. `split` will be passed through by
default. Example: `{'shuffle_files': True}`.
Note that shuffle_files is False by default unless
`split == tfds.Split.TRAIN`.
try_gcs: `bool`, if True, tfds.load will see if the dataset exists on
the public GCS bucket before building it locally.
Returns:
ds: `tf.data.Dataset`, the dataset requested, or if `split` is None, a
`dict<key: tfds.Split, value: tfds.data.Dataset>`. If `batch_size=-1`,
these will be full datasets as `tf.Tensor`s.
ds_info: `tfds.core.DatasetInfo`, if `with_info` is True, then `tfds.load`
will return a tuple `(ds, ds_info)` containing dataset information
(version, features, splits, num_examples,...). Note that the `ds_info`
object documents the entire dataset, regardless of the `split` requested.
    Split-specific information is available in `ds_info.splits`. | Below is the instruction that describes the task:
### Input:
Loads the named dataset into a `tf.data.Dataset`.
If `split=None` (the default), returns all splits for the dataset. Otherwise,
returns the specified split.
`load` is a convenience method that fetches the `tfds.core.DatasetBuilder` by
string name, optionally calls `DatasetBuilder.download_and_prepare`
(if `download=True`), and then calls `DatasetBuilder.as_dataset`.
This is roughly equivalent to:
```
builder = tfds.builder(name, data_dir=data_dir, **builder_kwargs)
if download:
builder.download_and_prepare(**download_and_prepare_kwargs)
ds = builder.as_dataset(
split=split, as_supervised=as_supervised, **as_dataset_kwargs)
if with_info:
return ds, builder.info
return ds
```
If you'd like NumPy arrays instead of `tf.data.Dataset`s or `tf.Tensor`s,
you can pass the return value to `tfds.as_numpy`.
Callers must pass arguments as keyword arguments.
**Warning**: calling this function might potentially trigger the download
of hundreds of GiB to disk. Refer to the `download` argument.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `"foo_bar/a=True,b=3"` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to
use the `"zoo"` config and pass to the builder keyword arguments `a=True`
and `b=3`).
split: `tfds.Split` or `str`, which split of the data to load. If None,
will return a `dict` with all splits (typically `tfds.Split.TRAIN` and
`tfds.Split.TEST`).
data_dir: `str` (optional), directory to read/write data.
Defaults to "~/tensorflow_datasets".
batch_size: `int`, set to > 1 to get batches of examples. Note that
variable length features will be 0-padded. If
`batch_size=-1`, will return the full dataset as `tf.Tensor`s.
download: `bool` (optional), whether to call
`tfds.core.DatasetBuilder.download_and_prepare`
before calling `tf.DatasetBuilder.as_dataset`. If `False`, data is
expected to be in `data_dir`. If `True` and the data is already in
`data_dir`, `download_and_prepare` is a no-op.
as_supervised: `bool`, if `True`, the returned `tf.data.Dataset`
will have a 2-tuple structure `(input, label)` according to
`builder.info.supervised_keys`. If `False`, the default,
the returned `tf.data.Dataset` will have a dictionary with all the
features.
with_info: `bool`, if True, tfds.load will return the tuple
(tf.data.Dataset, tfds.core.DatasetInfo) containing the info associated
with the builder.
builder_kwargs: `dict` (optional), keyword arguments to be passed to the
`tfds.core.DatasetBuilder` constructor. `data_dir` will be passed
through by default.
download_and_prepare_kwargs: `dict` (optional) keyword arguments passed to
`tfds.core.DatasetBuilder.download_and_prepare` if `download=True`. Allow
to control where to download and extract the cached data. If not set,
cache_dir and manual_dir will automatically be deduced from data_dir.
as_dataset_kwargs: `dict` (optional), keyword arguments passed to
`tfds.core.DatasetBuilder.as_dataset`. `split` will be passed through by
default. Example: `{'shuffle_files': True}`.
Note that shuffle_files is False by default unless
`split == tfds.Split.TRAIN`.
try_gcs: `bool`, if True, tfds.load will see if the dataset exists on
the public GCS bucket before building it locally.
Returns:
ds: `tf.data.Dataset`, the dataset requested, or if `split` is None, a
`dict<key: tfds.Split, value: tfds.data.Dataset>`. If `batch_size=-1`,
these will be full datasets as `tf.Tensor`s.
ds_info: `tfds.core.DatasetInfo`, if `with_info` is True, then `tfds.load`
will return a tuple `(ds, ds_info)` containing dataset information
(version, features, splits, num_examples,...). Note that the `ds_info`
object documents the entire dataset, regardless of the `split` requested.
Split-specific information is available in `ds_info.splits`.
### Response:
def load(name,
split=None,
data_dir=None,
batch_size=1,
download=True,
as_supervised=False,
with_info=False,
builder_kwargs=None,
download_and_prepare_kwargs=None,
as_dataset_kwargs=None,
try_gcs=False):
"""Loads the named dataset into a `tf.data.Dataset`.
If `split=None` (the default), returns all splits for the dataset. Otherwise,
returns the specified split.
`load` is a convenience method that fetches the `tfds.core.DatasetBuilder` by
string name, optionally calls `DatasetBuilder.download_and_prepare`
(if `download=True`), and then calls `DatasetBuilder.as_dataset`.
This is roughly equivalent to:
```
builder = tfds.builder(name, data_dir=data_dir, **builder_kwargs)
if download:
builder.download_and_prepare(**download_and_prepare_kwargs)
ds = builder.as_dataset(
split=split, as_supervised=as_supervised, **as_dataset_kwargs)
if with_info:
return ds, builder.info
return ds
```
If you'd like NumPy arrays instead of `tf.data.Dataset`s or `tf.Tensor`s,
you can pass the return value to `tfds.as_numpy`.
Callers must pass arguments as keyword arguments.
**Warning**: calling this function might potentially trigger the download
of hundreds of GiB to disk. Refer to the `download` argument.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `"foo_bar/a=True,b=3"` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to
use the `"zoo"` config and pass to the builder keyword arguments `a=True`
and `b=3`).
split: `tfds.Split` or `str`, which split of the data to load. If None,
will return a `dict` with all splits (typically `tfds.Split.TRAIN` and
`tfds.Split.TEST`).
data_dir: `str` (optional), directory to read/write data.
Defaults to "~/tensorflow_datasets".
batch_size: `int`, set to > 1 to get batches of examples. Note that
variable length features will be 0-padded. If
`batch_size=-1`, will return the full dataset as `tf.Tensor`s.
download: `bool` (optional), whether to call
`tfds.core.DatasetBuilder.download_and_prepare`
before calling `tf.DatasetBuilder.as_dataset`. If `False`, data is
expected to be in `data_dir`. If `True` and the data is already in
`data_dir`, `download_and_prepare` is a no-op.
as_supervised: `bool`, if `True`, the returned `tf.data.Dataset`
will have a 2-tuple structure `(input, label)` according to
`builder.info.supervised_keys`. If `False`, the default,
the returned `tf.data.Dataset` will have a dictionary with all the
features.
with_info: `bool`, if True, tfds.load will return the tuple
(tf.data.Dataset, tfds.core.DatasetInfo) containing the info associated
with the builder.
builder_kwargs: `dict` (optional), keyword arguments to be passed to the
`tfds.core.DatasetBuilder` constructor. `data_dir` will be passed
through by default.
download_and_prepare_kwargs: `dict` (optional) keyword arguments passed to
`tfds.core.DatasetBuilder.download_and_prepare` if `download=True`. Allow
to control where to download and extract the cached data. If not set,
cache_dir and manual_dir will automatically be deduced from data_dir.
as_dataset_kwargs: `dict` (optional), keyword arguments passed to
`tfds.core.DatasetBuilder.as_dataset`. `split` will be passed through by
default. Example: `{'shuffle_files': True}`.
Note that shuffle_files is False by default unless
`split == tfds.Split.TRAIN`.
try_gcs: `bool`, if True, tfds.load will see if the dataset exists on
the public GCS bucket before building it locally.
Returns:
ds: `tf.data.Dataset`, the dataset requested, or if `split` is None, a
`dict<key: tfds.Split, value: tfds.data.Dataset>`. If `batch_size=-1`,
these will be full datasets as `tf.Tensor`s.
ds_info: `tfds.core.DatasetInfo`, if `with_info` is True, then `tfds.load`
will return a tuple `(ds, ds_info)` containing dataset information
(version, features, splits, num_examples,...). Note that the `ds_info`
object documents the entire dataset, regardless of the `split` requested.
Split-specific information is available in `ds_info.splits`.
"""
name, name_builder_kwargs = _dataset_name_and_kwargs_from_name_str(name)
name_builder_kwargs.update(builder_kwargs or {})
builder_kwargs = name_builder_kwargs
# Set data_dir
if try_gcs and gcs_utils.is_dataset_on_gcs(name):
data_dir = constants.GCS_DATA_DIR
elif data_dir is None:
data_dir = constants.DATA_DIR
dbuilder = builder(name, data_dir=data_dir, **builder_kwargs)
if download:
download_and_prepare_kwargs = download_and_prepare_kwargs or {}
dbuilder.download_and_prepare(**download_and_prepare_kwargs)
if as_dataset_kwargs is None:
as_dataset_kwargs = {}
as_dataset_kwargs = dict(as_dataset_kwargs)
as_dataset_kwargs["split"] = split
as_dataset_kwargs["as_supervised"] = as_supervised
as_dataset_kwargs["batch_size"] = batch_size
ds = dbuilder.as_dataset(**as_dataset_kwargs)
if with_info:
return ds, dbuilder.info
return ds |
def parse_args(argv=()):
"""Parse command line arguments, and return a dict.
"""
_p, _args, argv = base_argparser(argv)
config_files = []
if not _args.no_config: # process config files
# extra config file specified with --config <fname> has highest precedence
if _args.config:
config_files.append(_args.config)
# .pydeps file specified in current directory is next
local_pydeps = os.path.join(os.getcwd(), '.pydeps')
if os.path.exists(local_pydeps):
config_files.append(local_pydeps)
        # finally the .pydeps file in the user's homedir
home = os.environ['USERPROFILE' if sys.platform == 'win32' else 'HOME']
home_pydeps = os.path.join(home, '.pydeps')
if os.path.exists(home_pydeps):
config_files.append(home_pydeps)
args = Arguments(config_files, debug=True, parents=[_p])
args.add('fname', kind="FNAME:input", help='filename')
args.add('-v', '--verbose', default=0, action='count', help="be more verbose (-vv, -vvv for more verbosity)")
args.add('-o', default=None, kind="FNAME:output", dest='output', metavar="file", help="write output to 'file'")
args.add('-T', default='svg', dest='format', help="output format (svg|png)")
args.add('--display', kind="FNAME:exe", default=None, help="program to use to display the graph (png or svg file depending on the T parameter)", metavar="PROGRAM")
args.add('--noshow', action='store_true', help="don't call external program to display graph")
args.add('--show-deps', action='store_true', help="show output of dependency analysis")
args.add('--show-raw-deps', action='store_true', help="show output of dependency analysis before removing skips")
args.add('--show-dot', action='store_true', help="show output of dot conversion")
args.add('--nodot', action='store_true', help="skip dot conversion")
args.add('--no-output', action='store_true', help="don't create .svg/.png file, implies --no-show (-t/-o will be ignored)")
args.add('--show-cycles', action='store_true', help="show only import cycles")
args.add('--debug-mf', default=0, type=int, metavar="INT", help="set the ModuleFinder.debug flag to this value")
args.add('--noise-level', default=200, type=int, metavar="INT", help="exclude sources or sinks with degree greater than noise-level")
args.add('--max-bacon', default=2, type=int, metavar="INT", help="exclude nodes that are more than n hops away (default=2, 0 -> infinite)")
args.add('--pylib', action='store_true', help="include python std lib modules")
args.add('--pylib-all', action='store_true', help="include python all std lib modules (incl. C modules)")
args.add('--include-missing', action='store_true', help="include modules that are not installed (or can't be found on sys.path)")
args.add('-x', '--exclude', default=[], nargs="+", metavar="FNAME", help="input files to skip")
args.add('--externals', action='store_true', help='create list of direct external dependencies')
args.add('--reverse', action='store_true', help="draw arrows to (instead of from) imported modules")
_args = args.parse_args(argv)
if _args.externals:
return dict(
T='svg', config=None, debug=False, display=None, exclude=[], externals=True,
fname=_args.fname, format='svg', max_bacon=10, no_config=False, nodot=False,
noise_level=200, noshow=True, output=None, pylib=False, pylib_all=False,
show=False, show_cycles=False, show_deps=False, show_dot=False,
show_raw_deps=False, verbose=0, include_missing=True, reverse=False,
)
_args.show = True
if _args.no_output:
_args.noshow = True
if _args.noshow:
_args.show = False
if _args.nodot and _args.show_cycles:
error("Can't use --nodot and --show-cycles together") # pragma: nocover
if _args.nodot:
_args.show_dot = False
if _args.max_bacon == 0:
_args.max_bacon = sys.maxsize
_args.format = getattr(_args, 'T', getattr(_args, 'format', None))
verbose = _mkverbose(max(_args.verbose, int(_args.debug)))
verbose(2, _args, '\n')
if _args.debug: # pragma: nocover
_args.verbose = 1
_args.show = True
_args.show_deps = True
_args.show_dot = True
    return vars(_args) | Parse command line arguments, and return a dict. | Below is the instruction that describes the task:
### Input:
Parse command line arguments, and return a dict.
### Response:
def parse_args(argv=()):
"""Parse command line arguments, and return a dict.
"""
_p, _args, argv = base_argparser(argv)
config_files = []
if not _args.no_config: # process config files
# extra config file specified with --config <fname> has highest precedence
if _args.config:
config_files.append(_args.config)
# .pydeps file specified in current directory is next
local_pydeps = os.path.join(os.getcwd(), '.pydeps')
if os.path.exists(local_pydeps):
config_files.append(local_pydeps)
        # finally the .pydeps file in the user's homedir
home = os.environ['USERPROFILE' if sys.platform == 'win32' else 'HOME']
home_pydeps = os.path.join(home, '.pydeps')
if os.path.exists(home_pydeps):
config_files.append(home_pydeps)
args = Arguments(config_files, debug=True, parents=[_p])
args.add('fname', kind="FNAME:input", help='filename')
args.add('-v', '--verbose', default=0, action='count', help="be more verbose (-vv, -vvv for more verbosity)")
args.add('-o', default=None, kind="FNAME:output", dest='output', metavar="file", help="write output to 'file'")
args.add('-T', default='svg', dest='format', help="output format (svg|png)")
args.add('--display', kind="FNAME:exe", default=None, help="program to use to display the graph (png or svg file depending on the T parameter)", metavar="PROGRAM")
args.add('--noshow', action='store_true', help="don't call external program to display graph")
args.add('--show-deps', action='store_true', help="show output of dependency analysis")
args.add('--show-raw-deps', action='store_true', help="show output of dependency analysis before removing skips")
args.add('--show-dot', action='store_true', help="show output of dot conversion")
args.add('--nodot', action='store_true', help="skip dot conversion")
args.add('--no-output', action='store_true', help="don't create .svg/.png file, implies --no-show (-t/-o will be ignored)")
args.add('--show-cycles', action='store_true', help="show only import cycles")
args.add('--debug-mf', default=0, type=int, metavar="INT", help="set the ModuleFinder.debug flag to this value")
args.add('--noise-level', default=200, type=int, metavar="INT", help="exclude sources or sinks with degree greater than noise-level")
args.add('--max-bacon', default=2, type=int, metavar="INT", help="exclude nodes that are more than n hops away (default=2, 0 -> infinite)")
args.add('--pylib', action='store_true', help="include python std lib modules")
args.add('--pylib-all', action='store_true', help="include python all std lib modules (incl. C modules)")
args.add('--include-missing', action='store_true', help="include modules that are not installed (or can't be found on sys.path)")
args.add('-x', '--exclude', default=[], nargs="+", metavar="FNAME", help="input files to skip")
args.add('--externals', action='store_true', help='create list of direct external dependencies')
args.add('--reverse', action='store_true', help="draw arrows to (instead of from) imported modules")
_args = args.parse_args(argv)
if _args.externals:
return dict(
T='svg', config=None, debug=False, display=None, exclude=[], externals=True,
fname=_args.fname, format='svg', max_bacon=10, no_config=False, nodot=False,
noise_level=200, noshow=True, output=None, pylib=False, pylib_all=False,
show=False, show_cycles=False, show_deps=False, show_dot=False,
show_raw_deps=False, verbose=0, include_missing=True, reverse=False,
)
_args.show = True
if _args.no_output:
_args.noshow = True
if _args.noshow:
_args.show = False
if _args.nodot and _args.show_cycles:
error("Can't use --nodot and --show-cycles together") # pragma: nocover
if _args.nodot:
_args.show_dot = False
if _args.max_bacon == 0:
_args.max_bacon = sys.maxsize
_args.format = getattr(_args, 'T', getattr(_args, 'format', None))
verbose = _mkverbose(max(_args.verbose, int(_args.debug)))
verbose(2, _args, '\n')
if _args.debug: # pragma: nocover
_args.verbose = 1
_args.show = True
_args.show_deps = True
_args.show_dot = True
return vars(_args) |
def _set_get_vnetwork_portgroups(self, v, load=False):
"""
Setter method for get_vnetwork_portgroups, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_portgroups (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_portgroups is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_portgroups() directly.
YANG Description: Shows discovered PortGroups
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_vnetwork_portgroups.get_vnetwork_portgroups, is_leaf=True, yang_name="get-vnetwork-portgroups", rest_name="get-vnetwork-portgroups", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'pg-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """get_vnetwork_portgroups must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=get_vnetwork_portgroups.get_vnetwork_portgroups, is_leaf=True, yang_name="get-vnetwork-portgroups", rest_name="get-vnetwork-portgroups", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'pg-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)""",
})
self.__get_vnetwork_portgroups = t
if hasattr(self, '_set'):
self._set() | Setter method for get_vnetwork_portgroups, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_portgroups (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_portgroups is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_portgroups() directly.
    YANG Description: Shows discovered PortGroups | Below is the instruction that describes the task:
### Input:
Setter method for get_vnetwork_portgroups, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_portgroups (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_portgroups is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_portgroups() directly.
YANG Description: Shows discovered PortGroups
### Response:
def _set_get_vnetwork_portgroups(self, v, load=False):
"""
Setter method for get_vnetwork_portgroups, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_portgroups (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_portgroups is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_portgroups() directly.
YANG Description: Shows discovered PortGroups
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_vnetwork_portgroups.get_vnetwork_portgroups, is_leaf=True, yang_name="get-vnetwork-portgroups", rest_name="get-vnetwork-portgroups", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'pg-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """get_vnetwork_portgroups must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=get_vnetwork_portgroups.get_vnetwork_portgroups, is_leaf=True, yang_name="get-vnetwork-portgroups", rest_name="get-vnetwork-portgroups", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'pg-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)""",
})
self.__get_vnetwork_portgroups = t
if hasattr(self, '_set'):
self._set() |
def get_random_value(field):
"""
Calls the dispatch method (``get_factory_func``) and passes the field
obj argument to the callable returned.
Returns:
random value depending on field type and constraints in the field
object
"""
func = get_factory_func(field)
if field.default is not None:
if callable(field.default):
return field.default()
return field.default
if field.choices:
return random.choice(field.choices)
return func(field) | Calls the dispatch method (``get_factory_func``) and passes the field
obj argument to the callable returned.
Returns:
random value depending on field type and constraints in the field
object | Below is the instruction that describes the task:
### Input:
Calls the dispatch method (``get_factory_func``) and passes the field
obj argument to the callable returned.
Returns:
random value depending on field type and constraints in the field
object
### Response:
def get_random_value(field):
"""
Calls the dispatch method (``get_factory_func``) and passes the field
obj argument to the callable returned.
Returns:
random value depending on field type and constraints in the field
object
"""
func = get_factory_func(field)
if field.default is not None:
if callable(field.default):
return field.default()
return field.default
if field.choices:
return random.choice(field.choices)
return func(field) |
def get_shard_by_num(self, shard_num):
"""
get_shard_by_num returns the shard at index shard_num.
Keyword arguments:
shard_num -- The shard index
Returns a redis.StrictRedis connection or raises a ValueError.
"""
if shard_num < 0 or shard_num >= self.num_shards():
raise ValueError("requested invalid shard# {0}".format(shard_num))
return self._shards[shard_num] | get_shard_by_num returns the shard at index shard_num.
Keyword arguments:
shard_num -- The shard index
Returns a redis.StrictRedis connection or raises a ValueError. | Below is the instruction that describes the task:
### Input:
get_shard_by_num returns the shard at index shard_num.
Keyword arguments:
shard_num -- The shard index
Returns a redis.StrictRedis connection or raises a ValueError.
### Response:
def get_shard_by_num(self, shard_num):
"""
get_shard_by_num returns the shard at index shard_num.
Keyword arguments:
shard_num -- The shard index
Returns a redis.StrictRedis connection or raises a ValueError.
"""
if shard_num < 0 or shard_num >= self.num_shards():
raise ValueError("requested invalid shard# {0}".format(shard_num))
return self._shards[shard_num] |
def return_item(self, item, priority):
"""Complete work on an item from ``check_out_item()``.
If this instance no longer owns ``item``, raise ``LostLease``.
If ``priority`` is None, the item is removed from the queue;
otherwise it is re-added with the specified priority. Any
locked items associated with this item are unlocked.
"""
conn = redis.StrictRedis(connection_pool=self.pool)
self._run_expiration(conn)
script = conn.register_script("""
-- expired?
if redis.call("hget", KEYS[4], "i" .. ARGV[1]) ~= "w" .. ARGV[3]
then return -1 end
-- will no longer expire
redis.call("zrem", KEYS[2], ARGV[1])
-- update priority, readd to available list
if ARGV[2] == "None"
then
redis.call("hdel", KEYS[3], ARGV[1])
else
redis.call("hset", KEYS[3], ARGV[1], ARGV[2])
redis.call("zadd", KEYS[1], ARGV[2], ARGV[1])
end
-- release all reservations
local reservations = redis.call("smembers", KEYS[5])
for i = 1, #reservations do
local item = reservations[i]
local pri = redis.call("hget", KEYS[3], item)
redis.call("zadd", KEYS[1], pri, item)
end
-- clear out workers
redis.call("hdel", KEYS[4], "i" .. ARGV[1])
redis.call("hdel", KEYS[4], "w" .. ARGV[3])
return 0
""")
# work around python -> redis -> lua marshaling
if priority is None: priority = "None"
result = script(keys=[self._key_available(), self._key_expiration(),
self._key_priorities(), self._key_workers(),
self._key_reservations(item)],
args=[item, priority, self._get_worker_id(conn)])
if result == -1: raise LostLease(item)
return | Complete work on an item from ``check_out_item()``.
If this instance no longer owns ``item``, raise ``LostLease``.
If ``priority`` is None, the item is removed from the queue;
otherwise it is re-added with the specified priority. Any
locked items associated with this item are unlocked. | Below is the instruction that describes the task:
### Input:
Complete work on an item from ``check_out_item()``.
If this instance no longer owns ``item``, raise ``LostLease``.
If ``priority`` is None, the item is removed from the queue;
otherwise it is re-added with the specified priority. Any
locked items associated with this item are unlocked.
### Response:
def return_item(self, item, priority):
"""Complete work on an item from ``check_out_item()``.
If this instance no longer owns ``item``, raise ``LostLease``.
If ``priority`` is None, the item is removed from the queue;
otherwise it is re-added with the specified priority. Any
locked items associated with this item are unlocked.
"""
conn = redis.StrictRedis(connection_pool=self.pool)
self._run_expiration(conn)
script = conn.register_script("""
-- expired?
if redis.call("hget", KEYS[4], "i" .. ARGV[1]) ~= "w" .. ARGV[3]
then return -1 end
-- will no longer expire
redis.call("zrem", KEYS[2], ARGV[1])
-- update priority, readd to available list
if ARGV[2] == "None"
then
redis.call("hdel", KEYS[3], ARGV[1])
else
redis.call("hset", KEYS[3], ARGV[1], ARGV[2])
redis.call("zadd", KEYS[1], ARGV[2], ARGV[1])
end
-- release all reservations
local reservations = redis.call("smembers", KEYS[5])
for i = 1, #reservations do
local item = reservations[i]
local pri = redis.call("hget", KEYS[3], item)
redis.call("zadd", KEYS[1], pri, item)
end
-- clear out workers
redis.call("hdel", KEYS[4], "i" .. ARGV[1])
redis.call("hdel", KEYS[4], "w" .. ARGV[3])
return 0
""")
# work around python -> redis -> lua marshaling
if priority is None: priority = "None"
result = script(keys=[self._key_available(), self._key_expiration(),
self._key_priorities(), self._key_workers(),
self._key_reservations(item)],
args=[item, priority, self._get_worker_id(conn)])
if result == -1: raise LostLease(item)
return |
def view_from_url(named_url): # noqa
"""
Finds and returns the view class from a named url
"""
# code below is `stolen` from django's reverse method.
resolver = get_resolver(get_urlconf())
if type(named_url) in (list, tuple):
named_url = named_url[0]
parts = named_url.split(":")
parts.reverse()
view = parts[0]
path = parts[1:]
current_path = None
resolved_path = []
ns_pattern = ""
ns_converters = {}
# if it's a local url permission already given, so we just return true
if named_url.startswith("#"):
class LocalUrlDummyView:
@staticmethod
def has_permission(user):
return True
return LocalUrlDummyView
while path:
ns = path.pop()
current_ns = current_path.pop() if current_path else None
# Lookup the name to see if it could be an app identifier
try:
app_list = resolver.app_dict[ns]
# Yes! Path part matches an app in the current Resolver
if current_ns and current_ns in app_list:
# If we are reversing for a particular app,
# use that namespace
ns = current_ns
elif ns not in app_list:
# The name isn't shared by one of the instances
# (i.e., the default) so just pick the first instance
# as the default.
ns = app_list[0]
except KeyError:
pass
if ns != current_ns:
current_path = None
try:
extra, resolver = resolver.namespace_dict[ns]
resolved_path.append(ns)
ns_pattern = ns_pattern + extra
try:
ns_converters.update(resolver.pattern.converters)
except Exception:
pass
except KeyError as key:
if resolved_path:
raise NoReverseMatch(
"%s is not a registered namespace inside '%s'"
% (key, ":".join(resolved_path))
)
else:
raise NoReverseMatch("%s is not a registered namespace" % key)
if ns_pattern:
try:
resolver = get_ns_resolver(
ns_pattern, resolver, tuple(ns_converters.items())
)
except Exception:
resolver = get_ns_resolver(ns_pattern, resolver)
# custom code, get view from reverse_dict
reverse_dict = resolver.reverse_dict.dict()
for key, url_obj in reverse_dict.items():
if url_obj == reverse_dict[view] and key != view:
module = importlib.import_module(key.__module__)
            return getattr(module, key.__name__) | Finds and returns the view class from a named url | Below is the instruction that describes the task:
### Input:
Finds and returns the view class from a named url
### Response:
def view_from_url(named_url): # noqa
"""
Finds and returns the view class from a named url
"""
# code below is `stolen` from django's reverse method.
resolver = get_resolver(get_urlconf())
if type(named_url) in (list, tuple):
named_url = named_url[0]
parts = named_url.split(":")
parts.reverse()
view = parts[0]
path = parts[1:]
current_path = None
resolved_path = []
ns_pattern = ""
ns_converters = {}
# if it's a local url permission already given, so we just return true
if named_url.startswith("#"):
class LocalUrlDummyView:
@staticmethod
def has_permission(user):
return True
return LocalUrlDummyView
while path:
ns = path.pop()
current_ns = current_path.pop() if current_path else None
# Lookup the name to see if it could be an app identifier
try:
app_list = resolver.app_dict[ns]
# Yes! Path part matches an app in the current Resolver
if current_ns and current_ns in app_list:
# If we are reversing for a particular app,
# use that namespace
ns = current_ns
elif ns not in app_list:
# The name isn't shared by one of the instances
# (i.e., the default) so just pick the first instance
# as the default.
ns = app_list[0]
except KeyError:
pass
if ns != current_ns:
current_path = None
try:
extra, resolver = resolver.namespace_dict[ns]
resolved_path.append(ns)
ns_pattern = ns_pattern + extra
try:
ns_converters.update(resolver.pattern.converters)
except Exception:
pass
except KeyError as key:
if resolved_path:
raise NoReverseMatch(
"%s is not a registered namespace inside '%s'"
% (key, ":".join(resolved_path))
)
else:
raise NoReverseMatch("%s is not a registered namespace" % key)
if ns_pattern:
try:
resolver = get_ns_resolver(
ns_pattern, resolver, tuple(ns_converters.items())
)
except Exception:
resolver = get_ns_resolver(ns_pattern, resolver)
# custom code, get view from reverse_dict
reverse_dict = resolver.reverse_dict.dict()
for key, url_obj in reverse_dict.items():
if url_obj == reverse_dict[view] and key != view:
module = importlib.import_module(key.__module__)
return getattr(module, key.__name__) |
def _decode_response(response):
""" Strip off Gerrit's magic prefix and decode a response.
:returns:
Decoded JSON content as a dict, or raw text if content could not be
decoded as JSON.
:raises:
requests.HTTPError if the response contains an HTTP error status code.
"""
content = response.content.strip()
logging.debug(content[:512])
response.raise_for_status()
if content.startswith(GERRIT_MAGIC_JSON_PREFIX):
content = content[len(GERRIT_MAGIC_JSON_PREFIX):]
try:
return json.loads(content)
except ValueError:
logging.error('Invalid json content: %s' % content)
raise | Strip off Gerrit's magic prefix and decode a response.
:returns:
Decoded JSON content as a dict, or raw text if content could not be
decoded as JSON.
:raises:
    requests.HTTPError if the response contains an HTTP error status code. | Below is the instruction that describes the task:
### Input:
Strip off Gerrit's magic prefix and decode a response.
:returns:
Decoded JSON content as a dict, or raw text if content could not be
decoded as JSON.
:raises:
requests.HTTPError if the response contains an HTTP error status code.
### Response:
def _decode_response(response):
""" Strip off Gerrit's magic prefix and decode a response.
:returns:
Decoded JSON content as a dict, or raw text if content could not be
decoded as JSON.
:raises:
requests.HTTPError if the response contains an HTTP error status code.
"""
content = response.content.strip()
logging.debug(content[:512])
response.raise_for_status()
if content.startswith(GERRIT_MAGIC_JSON_PREFIX):
content = content[len(GERRIT_MAGIC_JSON_PREFIX):]
try:
return json.loads(content)
except ValueError:
logging.error('Invalid json content: %s' % content)
raise |
def _put_resource(self, url, body):
"""
When I Work PUT method.
"""
headers = {"Content-Type": "application/json",
"Accept": "application/json"}
if self.token:
headers["W-Token"] = "%s" % self.token
response = WhenIWork_DAO().putURL(url, headers, json.dumps(body))
if not (response.status == 200 or response.status == 201 or
response.status == 204):
raise DataFailureException(url, response.status, response.data)
        return json.loads(response.data) | When I Work PUT method. | Below is the instruction that describes the task:
### Input:
When I Work PUT method.
### Response:
def _put_resource(self, url, body):
"""
When I Work PUT method.
"""
headers = {"Content-Type": "application/json",
"Accept": "application/json"}
if self.token:
headers["W-Token"] = "%s" % self.token
response = WhenIWork_DAO().putURL(url, headers, json.dumps(body))
if not (response.status == 200 or response.status == 201 or
response.status == 204):
raise DataFailureException(url, response.status, response.data)
return json.loads(response.data) |
def GE(classical_reg1, classical_reg2, classical_reg3):
"""
Produce an GE instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalGreaterEqual instance.
"""
classical_reg1, classical_reg2, classical_reg3 = prepare_ternary_operands(classical_reg1,
classical_reg2,
classical_reg3)
return ClassicalGreaterEqual(classical_reg1, classical_reg2, classical_reg3) | Produce an GE instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalGreaterEqual instance. | Below is the instruction that describes the task:
### Input:
Produce an GE instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalGreaterEqual instance.
### Response:
def GE(classical_reg1, classical_reg2, classical_reg3):
"""
Produce an GE instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalGreaterEqual instance.
"""
classical_reg1, classical_reg2, classical_reg3 = prepare_ternary_operands(classical_reg1,
classical_reg2,
classical_reg3)
return ClassicalGreaterEqual(classical_reg1, classical_reg2, classical_reg3) |
def init_with_instance(self, instance):
"""Initialize with an instance object
"""
self._uid = api.get_uid(instance)
self._brain = None
self._catalog = self.get_catalog_for(instance)
        self._instance = instance | Initialize with an instance object | Below is the instruction that describes the task:
### Input:
Initialize with an instance object
### Response:
def init_with_instance(self, instance):
"""Initialize with an instance object
"""
self._uid = api.get_uid(instance)
self._brain = None
self._catalog = self.get_catalog_for(instance)
self._instance = instance |
def extract_concepts(self, sentences=None, ids=None,
composite_phrase=4, filename=None,
file_format='sldi', allow_acronym_variants=False,
word_sense_disambiguation=False, allow_large_n=False,
strict_model=False, relaxed_model=False,
allow_overmatches=False, allow_concept_gaps=False,
term_processing=False, no_derivational_variants=False,
derivational_variants=False, ignore_word_order=False,
unique_acronym_variants=False,
prefer_multiple_concepts=False,
ignore_stop_phrases=False, compute_all_mappings=False,
mm_data_version=False, exclude_sources=[],
restrict_to_sources=[], restrict_to_sts=[], exclude_sts=[]):
""" extract_concepts takes a list of sentences and ids(optional)
then returns a list of Concept objects extracted via
MetaMap.
Supported Options:
Composite Phrase -Q
Word Sense Disambiguation -y
use strict model -A
use relaxed model -C
allow large N -l
allow overmatches -o
allow concept gaps -g
term processing -z
No Derivational Variants -d
All Derivational Variants -D
Ignore Word Order -i
Allow Acronym Variants -a
Unique Acronym Variants -u
Prefer Multiple Concepts -Y
Ignore Stop Phrases -K
Compute All Mappings -b
MM Data Version -V
Exclude Sources -e
Restrict to Sources -R
Restrict to Semantic Types -J
Exclude Semantic Types -k
For information about the available options visit
http://metamap.nlm.nih.gov/.
Note: If an error is encountered the process will be closed
and whatever was processed, if anything, will be
returned along with the error found.
"""
if allow_acronym_variants and unique_acronym_variants:
raise ValueError("You can't use both allow_acronym_variants and "
"unique_acronym_variants.")
if (sentences is not None and filename is not None) or \
(sentences is None and filename is None):
raise ValueError("You must either pass a list of sentences "
"OR a filename.")
if file_format not in ['sldi','sldiID']:
raise ValueError("file_format must be either sldi or sldiID")
input_file = None
if sentences is not None:
input_file = tempfile.NamedTemporaryFile(mode="wb", delete=False)
else:
input_file = open(filename, 'r')
output_file = tempfile.NamedTemporaryFile(mode="r", delete=False)
error = None
try:
if sentences is not None:
if ids is not None:
for identifier, sentence in zip(ids, sentences):
input_file.write('{0!r}|{1!r}\n'.format(identifier, sentence).encode('utf8'))
else:
for sentence in sentences:
input_file.write('{0!r}\n'.format(sentence).encode('utf8'))
input_file.flush()
command = [self.metamap_filename, '-N']
command.append('-Q')
command.append(str(composite_phrase))
if mm_data_version is not False:
if mm_data_version not in ['Base', 'USAbase', 'NLM']:
raise ValueError("mm_data_version must be Base, USAbase, or NLM.")
command.append('-V')
command.append(str(mm_data_version))
if word_sense_disambiguation:
command.append('-y')
if strict_model:
command.append('-A')
if relaxed_model:
command.append('-C')
if allow_large_n:
command.append('-l')
if allow_overmatches:
command.append('-o')
if allow_concept_gaps:
command.append('-g')
if term_processing:
command.append('-z')
if no_derivational_variants:
command.append('-d')
if derivational_variants:
command.append('-D')
if ignore_word_order:
command.append('-i')
if allow_acronym_variants:
command.append('-a')
if unique_acronym_variants:
command.append('-u')
if prefer_multiple_concepts:
command.append('-Y')
if ignore_stop_phrases:
command.append('-K')
if compute_all_mappings:
command.append('-b')
if len(exclude_sources) > 0:
command.append('-e')
command.append(str(','.join(exclude_sources)))
if len(restrict_to_sources) > 0:
command.append('-R')
command.append(str(','.join(restrict_to_sources)))
if len(restrict_to_sts) > 0:
command.append('-J')
command.append(str(','.join(restrict_to_sts)))
if len(exclude_sts) > 0:
command.append('-k')
command.append(str(','.join(exclude_sts)))
if ids is not None or (file_format == 'sldiID' and
sentences is None):
command.append('--sldiID')
else:
command.append('--sldi')
command.append(input_file.name)
command.append(output_file.name)
metamap_process = subprocess.Popen(command, stdout=subprocess.PIPE)
while metamap_process.poll() is None:
stdout = str(metamap_process.stdout.readline())
if 'ERROR' in stdout:
metamap_process.terminate()
error = stdout.rstrip()
output = str(output_file.read())
finally:
if sentences is not None:
os.remove(input_file.name)
else:
input_file.close()
os.remove(output_file.name)
concepts = Corpus.load(output.splitlines())
return (concepts, error) | extract_concepts takes a list of sentences and ids(optional)
then returns a list of Concept objects extracted via
MetaMap.
Supported Options:
Composite Phrase -Q
Word Sense Disambiguation -y
use strict model -A
use relaxed model -C
allow large N -l
allow overmatches -o
allow concept gaps -g
term processing -z
No Derivational Variants -d
All Derivational Variants -D
Ignore Word Order -i
Allow Acronym Variants -a
Unique Acronym Variants -u
Prefer Multiple Concepts -Y
Ignore Stop Phrases -K
Compute All Mappings -b
MM Data Version -V
Exclude Sources -e
Restrict to Sources -R
Restrict to Semantic Types -J
Exclude Semantic Types -k
For information about the available options visit
http://metamap.nlm.nih.gov/.
Note: If an error is encountered the process will be closed
and whatever was processed, if anything, will be
       returned along with the error found. | Below is the instruction that describes the task:
### Input:
extract_concepts takes a list of sentences and ids(optional)
then returns a list of Concept objects extracted via
MetaMap.
Supported Options:
Composite Phrase -Q
Word Sense Disambiguation -y
use strict model -A
use relaxed model -C
allow large N -l
allow overmatches -o
allow concept gaps -g
term processing -z
No Derivational Variants -d
All Derivational Variants -D
Ignore Word Order -i
Allow Acronym Variants -a
Unique Acronym Variants -u
Prefer Multiple Concepts -Y
Ignore Stop Phrases -K
Compute All Mappings -b
MM Data Version -V
Exclude Sources -e
Restrict to Sources -R
Restrict to Semantic Types -J
Exclude Semantic Types -k
For information about the available options visit
http://metamap.nlm.nih.gov/.
Note: If an error is encountered the process will be closed
and whatever was processed, if anything, will be
returned along with the error found.
### Response:
def extract_concepts(self, sentences=None, ids=None,
composite_phrase=4, filename=None,
file_format='sldi', allow_acronym_variants=False,
word_sense_disambiguation=False, allow_large_n=False,
strict_model=False, relaxed_model=False,
allow_overmatches=False, allow_concept_gaps=False,
term_processing=False, no_derivational_variants=False,
derivational_variants=False, ignore_word_order=False,
unique_acronym_variants=False,
prefer_multiple_concepts=False,
ignore_stop_phrases=False, compute_all_mappings=False,
mm_data_version=False, exclude_sources=[],
restrict_to_sources=[], restrict_to_sts=[], exclude_sts=[]):
""" extract_concepts takes a list of sentences and ids(optional)
then returns a list of Concept objects extracted via
MetaMap.
Supported Options:
Composite Phrase -Q
Word Sense Disambiguation -y
use strict model -A
use relaxed model -C
allow large N -l
allow overmatches -o
allow concept gaps -g
term processing -z
No Derivational Variants -d
All Derivational Variants -D
Ignore Word Order -i
Allow Acronym Variants -a
Unique Acronym Variants -u
Prefer Multiple Concepts -Y
Ignore Stop Phrases -K
Compute All Mappings -b
MM Data Version -V
Exclude Sources -e
Restrict to Sources -R
Restrict to Semantic Types -J
Exclude Semantic Types -k
For information about the available options visit
http://metamap.nlm.nih.gov/.
Note: If an error is encountered the process will be closed
and whatever was processed, if anything, will be
returned along with the error found.
"""
if allow_acronym_variants and unique_acronym_variants:
raise ValueError("You can't use both allow_acronym_variants and "
"unique_acronym_variants.")
if (sentences is not None and filename is not None) or \
(sentences is None and filename is None):
raise ValueError("You must either pass a list of sentences "
"OR a filename.")
if file_format not in ['sldi','sldiID']:
raise ValueError("file_format must be either sldi or sldiID")
input_file = None
if sentences is not None:
input_file = tempfile.NamedTemporaryFile(mode="wb", delete=False)
else:
input_file = open(filename, 'r')
output_file = tempfile.NamedTemporaryFile(mode="r", delete=False)
error = None
try:
if sentences is not None:
if ids is not None:
for identifier, sentence in zip(ids, sentences):
input_file.write('{0!r}|{1!r}\n'.format(identifier, sentence).encode('utf8'))
else:
for sentence in sentences:
input_file.write('{0!r}\n'.format(sentence).encode('utf8'))
input_file.flush()
command = [self.metamap_filename, '-N']
command.append('-Q')
command.append(str(composite_phrase))
if mm_data_version is not False:
if mm_data_version not in ['Base', 'USAbase', 'NLM']:
raise ValueError("mm_data_version must be Base, USAbase, or NLM.")
command.append('-V')
command.append(str(mm_data_version))
if word_sense_disambiguation:
command.append('-y')
if strict_model:
command.append('-A')
if relaxed_model:
command.append('-C')
if allow_large_n:
command.append('-l')
if allow_overmatches:
command.append('-o')
if allow_concept_gaps:
command.append('-g')
if term_processing:
command.append('-z')
if no_derivational_variants:
command.append('-d')
if derivational_variants:
command.append('-D')
if ignore_word_order:
command.append('-i')
if allow_acronym_variants:
command.append('-a')
if unique_acronym_variants:
command.append('-u')
if prefer_multiple_concepts:
command.append('-Y')
if ignore_stop_phrases:
command.append('-K')
if compute_all_mappings:
command.append('-b')
if len(exclude_sources) > 0:
command.append('-e')
command.append(str(','.join(exclude_sources)))
if len(restrict_to_sources) > 0:
command.append('-R')
command.append(str(','.join(restrict_to_sources)))
if len(restrict_to_sts) > 0:
command.append('-J')
command.append(str(','.join(restrict_to_sts)))
if len(exclude_sts) > 0:
command.append('-k')
command.append(str(','.join(exclude_sts)))
if ids is not None or (file_format == 'sldiID' and
sentences is None):
command.append('--sldiID')
else:
command.append('--sldi')
command.append(input_file.name)
command.append(output_file.name)
metamap_process = subprocess.Popen(command, stdout=subprocess.PIPE)
while metamap_process.poll() is None:
stdout = str(metamap_process.stdout.readline())
if 'ERROR' in stdout:
metamap_process.terminate()
error = stdout.rstrip()
output = str(output_file.read())
finally:
if sentences is not None:
os.remove(input_file.name)
else:
input_file.close()
os.remove(output_file.name)
concepts = Corpus.load(output.splitlines())
return (concepts, error) |
def create_blueprint(state, import_name):
"""Creates the security extension blueprint"""
bp = Blueprint(state.blueprint_name, import_name,
url_prefix=state.url_prefix,
subdomain=state.subdomain,
template_folder='templates')
bp.route(state.logout_url, endpoint='logout')(logout)
if state.passwordless:
bp.route(state.login_url,
methods=['GET', 'POST'],
endpoint='login')(send_login)
bp.route(state.login_url + slash_url_suffix(state.login_url,
'<token>'),
endpoint='token_login')(token_login)
else:
bp.route(state.login_url,
methods=['GET', 'POST'],
endpoint='login')(login)
if state.registerable:
bp.route(state.register_url,
methods=['GET', 'POST'],
endpoint='register')(register)
if state.recoverable:
bp.route(state.reset_url,
methods=['GET', 'POST'],
endpoint='forgot_password')(forgot_password)
bp.route(state.reset_url + slash_url_suffix(state.reset_url,
'<token>'),
methods=['GET', 'POST'],
endpoint='reset_password')(reset_password)
if state.changeable:
bp.route(state.change_url,
methods=['GET', 'POST'],
endpoint='change_password')(change_password)
if state.confirmable:
bp.route(state.confirm_url,
methods=['GET', 'POST'],
endpoint='send_confirmation')(send_confirmation)
bp.route(state.confirm_url + slash_url_suffix(state.confirm_url,
'<token>'),
methods=['GET', 'POST'],
endpoint='confirm_email')(confirm_email)
    return bp | Creates the security extension blueprint | Below is the instruction that describes the task:
### Input:
Creates the security extension blueprint
### Response:
def create_blueprint(state, import_name):
"""Creates the security extension blueprint"""
bp = Blueprint(state.blueprint_name, import_name,
url_prefix=state.url_prefix,
subdomain=state.subdomain,
template_folder='templates')
bp.route(state.logout_url, endpoint='logout')(logout)
if state.passwordless:
bp.route(state.login_url,
methods=['GET', 'POST'],
endpoint='login')(send_login)
bp.route(state.login_url + slash_url_suffix(state.login_url,
'<token>'),
endpoint='token_login')(token_login)
else:
bp.route(state.login_url,
methods=['GET', 'POST'],
endpoint='login')(login)
if state.registerable:
bp.route(state.register_url,
methods=['GET', 'POST'],
endpoint='register')(register)
if state.recoverable:
bp.route(state.reset_url,
methods=['GET', 'POST'],
endpoint='forgot_password')(forgot_password)
bp.route(state.reset_url + slash_url_suffix(state.reset_url,
'<token>'),
methods=['GET', 'POST'],
endpoint='reset_password')(reset_password)
if state.changeable:
bp.route(state.change_url,
methods=['GET', 'POST'],
endpoint='change_password')(change_password)
if state.confirmable:
bp.route(state.confirm_url,
methods=['GET', 'POST'],
endpoint='send_confirmation')(send_confirmation)
bp.route(state.confirm_url + slash_url_suffix(state.confirm_url,
'<token>'),
methods=['GET', 'POST'],
endpoint='confirm_email')(confirm_email)
return bp |
def is_byte_range_valid(start, stop, length):
"""Checks if a given byte content range is valid for the given length.
.. versionadded:: 0.7
"""
if (start is None) != (stop is None):
return False
elif start is None:
return length is None or length >= 0
elif length is None:
return 0 <= start < stop
elif start >= stop:
return False
return 0 <= start < length | Checks if a given byte content range is valid for the given length.
.. versionadded:: 0.7 | Below is the instruction that describes the task:
### Input:
Checks if a given byte content range is valid for the given length.
.. versionadded:: 0.7
### Response:
def is_byte_range_valid(start, stop, length):
"""Checks if a given byte content range is valid for the given length.
.. versionadded:: 0.7
"""
if (start is None) != (stop is None):
return False
elif start is None:
return length is None or length >= 0
elif length is None:
return 0 <= start < stop
elif start >= stop:
return False
return 0 <= start < length |
def MOVS(cpu, dest, src):
"""
Moves data from string to string.
Moves the byte, word, or doubleword specified with the second operand (source operand) to the location specified
with the first operand (destination operand). Both the source and destination operands are located in memory. The
address of the source operand is read from the DS:ESI or the DS:SI registers (depending on the address-size
attribute of the instruction, 32 or 16, respectively). The address of the destination operand is read from the ES:EDI
or the ES:DI registers (again depending on the address-size attribute of the instruction). The DS segment may be
overridden with a segment override prefix, but the ES segment cannot be overridden.
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand.
"""
base, size, ty = cpu.get_descriptor(cpu.DS)
src_addr = src.address() + base
dest_addr = dest.address() + base
src_reg = src.mem.base
dest_reg = dest.mem.base
size = dest.size
# Copy the data
dest.write(src.read())
#Advance EDI/ESI pointers
increment = Operators.ITEBV(cpu.address_bit_size, cpu.DF, -size // 8, size // 8)
cpu.write_register(src_reg, cpu.read_register(src_reg) + increment)
cpu.write_register(dest_reg, cpu.read_register(dest_reg) + increment) | Moves data from string to string.
Moves the byte, word, or doubleword specified with the second operand (source operand) to the location specified
with the first operand (destination operand). Both the source and destination operands are located in memory. The
address of the source operand is read from the DS:ESI or the DS:SI registers (depending on the address-size
attribute of the instruction, 32 or 16, respectively). The address of the destination operand is read from the ES:EDI
or the ES:DI registers (again depending on the address-size attribute of the instruction). The DS segment may be
overridden with a segment override prefix, but the ES segment cannot be overridden.
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand. | Below is the the instruction that describes the task:
### Input:
Moves data from string to string.
Moves the byte, word, or doubleword specified with the second operand (source operand) to the location specified
with the first operand (destination operand). Both the source and destination operands are located in memory. The
address of the source operand is read from the DS:ESI or the DS:SI registers (depending on the address-size
attribute of the instruction, 32 or 16, respectively). The address of the destination operand is read from the ES:EDI
or the ES:DI registers (again depending on the address-size attribute of the instruction). The DS segment may be
overridden with a segment override prefix, but the ES segment cannot be overridden.
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand.
### Response:
def MOVS(cpu, dest, src):
"""
Moves data from string to string.
Moves the byte, word, or doubleword specified with the second operand (source operand) to the location specified
with the first operand (destination operand). Both the source and destination operands are located in memory. The
address of the source operand is read from the DS:ESI or the DS:SI registers (depending on the address-size
attribute of the instruction, 32 or 16, respectively). The address of the destination operand is read from the ES:EDI
or the ES:DI registers (again depending on the address-size attribute of the instruction). The DS segment may be
overridden with a segment override prefix, but the ES segment cannot be overridden.
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand.
"""
base, size, ty = cpu.get_descriptor(cpu.DS)
src_addr = src.address() + base
dest_addr = dest.address() + base
src_reg = src.mem.base
dest_reg = dest.mem.base
size = dest.size
# Copy the data
dest.write(src.read())
#Advance EDI/ESI pointers
increment = Operators.ITEBV(cpu.address_bit_size, cpu.DF, -size // 8, size // 8)
cpu.write_register(src_reg, cpu.read_register(src_reg) + increment)
cpu.write_register(dest_reg, cpu.read_register(dest_reg) + increment) |
def pop(self, index=None):
"""
This method only works for extensible fields. It allows to remove a value and shift all other values to fill
the gap.
Parameters
----------
index: int, default None
index of field to remove.
Returns
-------
serialize value of popped field
"""
# prepare index (will check for extensible)
index = self._prepare_pop_insert_index(index=index)
# get extensible info
cycle_start, cycle_len, patterns = self.get_extensible_info()
# remove extensible fields
fields = self.clear_extensible_fields()
# pop
serialized_value = fields.pop(index-cycle_start)
# add remaining
self.add_fields(*fields)
return serialized_value | This method only works for extensible fields. It allows to remove a value and shift all other values to fill
the gap.
Parameters
----------
index: int, default None
index of field to remove.
Returns
-------
serialize value of popped field | Below is the the instruction that describes the task:
### Input:
This method only works for extensible fields. It allows to remove a value and shift all other values to fill
the gap.
Parameters
----------
index: int, default None
index of field to remove.
Returns
-------
serialize value of popped field
### Response:
def pop(self, index=None):
"""
This method only works for extensible fields. It allows to remove a value and shift all other values to fill
the gap.
Parameters
----------
index: int, default None
index of field to remove.
Returns
-------
serialize value of popped field
"""
# prepare index (will check for extensible)
index = self._prepare_pop_insert_index(index=index)
# get extensible info
cycle_start, cycle_len, patterns = self.get_extensible_info()
# remove extensible fields
fields = self.clear_extensible_fields()
# pop
serialized_value = fields.pop(index-cycle_start)
# add remaining
self.add_fields(*fields)
return serialized_value |
def to_dict(self):
"""
Returns a Python dictionary of the TDigest and internal Centroid values.
Or use centroids_to_list() for a list of only the Centroid values.
"""
return {'n':self.n, 'delta':self.delta, 'K':self.K, 'centroids':self.centroids_to_list()} | Returns a Python dictionary of the TDigest and internal Centroid values.
Or use centroids_to_list() for a list of only the Centroid values. | Below is the the instruction that describes the task:
### Input:
Returns a Python dictionary of the TDigest and internal Centroid values.
Or use centroids_to_list() for a list of only the Centroid values.
### Response:
def to_dict(self):
"""
Returns a Python dictionary of the TDigest and internal Centroid values.
Or use centroids_to_list() for a list of only the Centroid values.
"""
return {'n':self.n, 'delta':self.delta, 'K':self.K, 'centroids':self.centroids_to_list()} |
def _convert_types(self):
"""
convert lists and dictionaries that are useful for users to
np vectors that are usable by machines
"""
self.fars = np.array(self.fars)
self.parking_rates = np.array([self.parking_rates[use] for use in self.uses])
self.res_ratios = {}
assert len(self.uses) == len(self.residential_uses)
for k, v in self.forms.items():
self.forms[k] = np.array([self.forms[k].get(use, 0.0) for use in self.uses])
# normalize if not already
self.forms[k] /= self.forms[k].sum()
self.res_ratios[k] = pd.Series(self.forms[k])[self.residential_uses].sum()
self.costs = np.transpose(np.array([self.costs[use] for use in self.uses])) | convert lists and dictionaries that are useful for users to
np vectors that are usable by machines | Below is the the instruction that describes the task:
### Input:
convert lists and dictionaries that are useful for users to
np vectors that are usable by machines
### Response:
def _convert_types(self):
"""
convert lists and dictionaries that are useful for users to
np vectors that are usable by machines
"""
self.fars = np.array(self.fars)
self.parking_rates = np.array([self.parking_rates[use] for use in self.uses])
self.res_ratios = {}
assert len(self.uses) == len(self.residential_uses)
for k, v in self.forms.items():
self.forms[k] = np.array([self.forms[k].get(use, 0.0) for use in self.uses])
# normalize if not already
self.forms[k] /= self.forms[k].sum()
self.res_ratios[k] = pd.Series(self.forms[k])[self.residential_uses].sum()
self.costs = np.transpose(np.array([self.costs[use] for use in self.uses])) |
def may_be_emitted(self):
"""return True if message may be emitted using the current interpreter"""
if self.minversion is not None and self.minversion > sys.version_info:
return False
if self.maxversion is not None and self.maxversion <= sys.version_info:
return False
return True | return True if message may be emitted using the current interpreter | Below is the the instruction that describes the task:
### Input:
return True if message may be emitted using the current interpreter
### Response:
def may_be_emitted(self):
"""return True if message may be emitted using the current interpreter"""
if self.minversion is not None and self.minversion > sys.version_info:
return False
if self.maxversion is not None and self.maxversion <= sys.version_info:
return False
return True |
def get_workflow_id_and_project(path):
'''
:param path: a path or ID to a workflow object
:type path: string
:returns: tuple of (workflow ID, project ID)
Returns the workflow and project IDs from the given path if
available; otherwise, exits with an appropriate error message.
'''
project, _folderpath, entity_result = try_call(resolve_existing_path, path, expected='entity')
try:
if entity_result is None or not entity_result['id'].startswith('workflow-'):
raise DXCLIError('Could not resolve "' + path + '" to a workflow object')
except:
err_exit()
return entity_result['id'], project | :param path: a path or ID to a workflow object
:type path: string
:returns: tuple of (workflow ID, project ID)
Returns the workflow and project IDs from the given path if
available; otherwise, exits with an appropriate error message. | Below is the the instruction that describes the task:
### Input:
:param path: a path or ID to a workflow object
:type path: string
:returns: tuple of (workflow ID, project ID)
Returns the workflow and project IDs from the given path if
available; otherwise, exits with an appropriate error message.
### Response:
def get_workflow_id_and_project(path):
'''
:param path: a path or ID to a workflow object
:type path: string
:returns: tuple of (workflow ID, project ID)
Returns the workflow and project IDs from the given path if
available; otherwise, exits with an appropriate error message.
'''
project, _folderpath, entity_result = try_call(resolve_existing_path, path, expected='entity')
try:
if entity_result is None or not entity_result['id'].startswith('workflow-'):
raise DXCLIError('Could not resolve "' + path + '" to a workflow object')
except:
err_exit()
return entity_result['id'], project |
def set_cache(
self,
instance=None,
translation=None,
language=None,
field_name=None,
field_value=None,
):
"""
Add a new translation into the cache.
"""
if instance is not None and translation is not None:
cached_obj = CachedTranslation.from_object(translation)
instance._linguist_translations[translation.field_name][
translation.language
] = cached_obj
return cached_obj
if instance is None:
instance = self.instance
cached_obj = self.get_cache(
instance,
translation=translation,
field_value=field_value,
language=language,
field_name=field_name,
)
if field_value is None and cached_obj.field_value:
cached_obj.deleted = True
if field_value != cached_obj.field_value:
cached_obj.has_changed = True
cached_obj.field_value = field_value
return cached_obj | Add a new translation into the cache. | Below is the the instruction that describes the task:
### Input:
Add a new translation into the cache.
### Response:
def set_cache(
self,
instance=None,
translation=None,
language=None,
field_name=None,
field_value=None,
):
"""
Add a new translation into the cache.
"""
if instance is not None and translation is not None:
cached_obj = CachedTranslation.from_object(translation)
instance._linguist_translations[translation.field_name][
translation.language
] = cached_obj
return cached_obj
if instance is None:
instance = self.instance
cached_obj = self.get_cache(
instance,
translation=translation,
field_value=field_value,
language=language,
field_name=field_name,
)
if field_value is None and cached_obj.field_value:
cached_obj.deleted = True
if field_value != cached_obj.field_value:
cached_obj.has_changed = True
cached_obj.field_value = field_value
return cached_obj |
def __setUserMinimumSize( self, section, oldSize, newSize ):
"""
Records the user minimum size for a column.
:param section | <int>
oldSize | <int>
newSize | <int>
"""
if self.isVisible():
self._columnMinimums[section] = newSize | Records the user minimum size for a column.
:param section | <int>
oldSize | <int>
newSize | <int> | Below is the the instruction that describes the task:
### Input:
Records the user minimum size for a column.
:param section | <int>
oldSize | <int>
newSize | <int>
### Response:
def __setUserMinimumSize( self, section, oldSize, newSize ):
"""
Records the user minimum size for a column.
:param section | <int>
oldSize | <int>
newSize | <int>
"""
if self.isVisible():
self._columnMinimums[section] = newSize |
def get_parameter(self, name):
"""
Gets a single parameter by its name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Parameter
"""
name = adapt_name_for_rest(name)
url = '/mdb/{}/parameters{}'.format(self._instance, name)
response = self._client.get_proto(url)
message = mdb_pb2.ParameterInfo()
message.ParseFromString(response.content)
return Parameter(message) | Gets a single parameter by its name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Parameter | Below is the the instruction that describes the task:
### Input:
Gets a single parameter by its name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Parameter
### Response:
def get_parameter(self, name):
"""
Gets a single parameter by its name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Parameter
"""
name = adapt_name_for_rest(name)
url = '/mdb/{}/parameters{}'.format(self._instance, name)
response = self._client.get_proto(url)
message = mdb_pb2.ParameterInfo()
message.ParseFromString(response.content)
return Parameter(message) |
def gettree(self, key, create = False):
"""
Get a subtree node from the key (path relative to this node)
"""
tree, _ = self._getsubitem(key + '.tmp', create)
return tree | Get a subtree node from the key (path relative to this node) | Below is the the instruction that describes the task:
### Input:
Get a subtree node from the key (path relative to this node)
### Response:
def gettree(self, key, create = False):
"""
Get a subtree node from the key (path relative to this node)
"""
tree, _ = self._getsubitem(key + '.tmp', create)
return tree |
def insertData(self, tablename, list_value_pairs, list_SQLCMD_pairs=None):
"""insert data into table
- ID: identifier of the updated value
- list_value_pairs: contains the table field ID and the according value
- list_SQLCMD_pairs: contains the table field ID and a SQL command
"""
fields = self._getFieldsInDB(tablename)
lst_field = []
lst_value = []
# normal field-value-pairs
for pair in list_value_pairs:
if pair[0] in fields:
lst_field.append( pair[0] )
lst_value.append( '"%s"' % pair[1] )
else:
print "err: field %s can't be found in the table" % pair[0]
return False
# field-SQL-command-pairs: the only difference is the missing double quotes in the SQL command
if list_SQLCMD_pairs != None:
for pair in list_SQLCMD_pairs:
if pair[0] in fields:
lst_field.append( pair[0] )
lst_value.append( pair[1] )
else:
print "err: field %s can't be found in the table" % pair[0]
return False
# build the command
SQL = 'INSERT INTO %s (%s) VALUES (%s)' % ( tablename, join(lst_field, ','), join(lst_value, ',') )
self.execQuery( SQL )
return True | insert data into table
- ID: identifier of the updated value
- list_value_pairs: contains the table field ID and the according value
- list_SQLCMD_pairs: contains the table field ID and a SQL command | Below is the the instruction that describes the task:
### Input:
insert data into table
- ID: identifier of the updated value
- list_value_pairs: contains the table field ID and the according value
- list_SQLCMD_pairs: contains the table field ID and a SQL command
### Response:
def insertData(self, tablename, list_value_pairs, list_SQLCMD_pairs=None):
"""insert data into table
- ID: identifier of the updated value
- list_value_pairs: contains the table field ID and the according value
- list_SQLCMD_pairs: contains the table field ID and a SQL command
"""
fields = self._getFieldsInDB(tablename)
lst_field = []
lst_value = []
# normal field-value-pairs
for pair in list_value_pairs:
if pair[0] in fields:
lst_field.append( pair[0] )
lst_value.append( '"%s"' % pair[1] )
else:
print "err: field %s can't be found in the table" % pair[0]
return False
# field-SQL-command-pairs: the only difference is the missing double quotes in the SQL command
if list_SQLCMD_pairs != None:
for pair in list_SQLCMD_pairs:
if pair[0] in fields:
lst_field.append( pair[0] )
lst_value.append( pair[1] )
else:
print "err: field %s can't be found in the table" % pair[0]
return False
# build the command
SQL = 'INSERT INTO %s (%s) VALUES (%s)' % ( tablename, join(lst_field, ','), join(lst_value, ',') )
self.execQuery( SQL )
return True |
def write(self, msg):
""" This method the debugger uses to write. In contrast to
writeline, no newline is added to the end to `str'.
"""
if self.state == 'active':
if not self.output:
self.output = open(self.out_name, 'w')
pass
pass
else:
raise EOFError
self.output.write(msg)
if self.flush_after_write: self.flush()
return | This method the debugger uses to write. In contrast to
writeline, no newline is added to the end to `str'. | Below is the the instruction that describes the task:
### Input:
This method the debugger uses to write. In contrast to
writeline, no newline is added to the end to `str'.
### Response:
def write(self, msg):
""" This method the debugger uses to write. In contrast to
writeline, no newline is added to the end to `str'.
"""
if self.state == 'active':
if not self.output:
self.output = open(self.out_name, 'w')
pass
pass
else:
raise EOFError
self.output.write(msg)
if self.flush_after_write: self.flush()
return |
def allPolygons(self):
"""
Return a list of all polygons in this BSP tree.
"""
polygons = self.polygons[:]
if self.front:
polygons.extend(self.front.allPolygons())
if self.back:
polygons.extend(self.back.allPolygons())
return polygons | Return a list of all polygons in this BSP tree. | Below is the the instruction that describes the task:
### Input:
Return a list of all polygons in this BSP tree.
### Response:
def allPolygons(self):
"""
Return a list of all polygons in this BSP tree.
"""
polygons = self.polygons[:]
if self.front:
polygons.extend(self.front.allPolygons())
if self.back:
polygons.extend(self.back.allPolygons())
return polygons |
def create_channels_for_related_fields_in_model(model_class):
"""
Create channel for the fields of the model, the channel name can be got by calling get_ajax_config_for_relation_fields
:param model_class:
:return:
"""
need_to_create_channel = []
for field in enum_model_fields(model_class):
if type(field) in get_relation_field_types():
if field.related_model == 'self':
need_to_create_channel.append(model_class)
elif field.related_field.model not in need_to_create_channel:
need_to_create_channel.append(field.related_field.model)
for field in enum_model_many_to_many(model_class):
if type(field) in get_relation_field_types():
if field.related_model not in need_to_create_channel:
need_to_create_channel.append(field.related_model)
for field_model_class in need_to_create_channel:
if class_name_to_low_case(field_model_class.__name__) not in registry._registry:
register_channel(field_model_class) | Create channel for the fields of the model, the channel name can be got by calling get_ajax_config_for_relation_fields
:param model_class:
:return: | Below is the the instruction that describes the task:
### Input:
Create channel for the fields of the model, the channel name can be got by calling get_ajax_config_for_relation_fields
:param model_class:
:return:
### Response:
def create_channels_for_related_fields_in_model(model_class):
"""
Create channel for the fields of the model, the channel name can be got by calling get_ajax_config_for_relation_fields
:param model_class:
:return:
"""
need_to_create_channel = []
for field in enum_model_fields(model_class):
if type(field) in get_relation_field_types():
if field.related_model == 'self':
need_to_create_channel.append(model_class)
elif field.related_field.model not in need_to_create_channel:
need_to_create_channel.append(field.related_field.model)
for field in enum_model_many_to_many(model_class):
if type(field) in get_relation_field_types():
if field.related_model not in need_to_create_channel:
need_to_create_channel.append(field.related_model)
for field_model_class in need_to_create_channel:
if class_name_to_low_case(field_model_class.__name__) not in registry._registry:
register_channel(field_model_class) |
def log(self, level, msg, *args, **kwargs):
"""Log a message and inform all clients.
Parameters
----------
level : logging level constant
The level to log the message at.
msg : str
The text format for the log message.
args : list of objects
Arguments to pass to log format string. Final message text is
created using: msg % args.
kwargs : additional keyword parameters
Allowed keywords are 'name' and 'timestamp'. The name is the name
of the logger to log the message to. If not given the name defaults
to the root logger. The timestamp is a float in seconds. If not
given the timestamp defaults to the current time.
"""
timestamp = kwargs.get("timestamp")
python_msg = msg
if self._python_logger is not None:
if timestamp is not None:
python_msg = ' '.join((
'katcp timestamp: %r' % timestamp,
python_msg))
self._python_logger.log(self.PYTHON_LEVEL[level], python_msg, *args)
if level >= self._log_level:
name = kwargs.get("name")
if name is None:
name = self._root_logger_name
try:
inform_msg = msg % args
except TypeError:
# Catch the "not enough arguments for format string" exception.
inform_msg = "{} {}".format(
msg,
args if args else '').strip()
self._device_server.mass_inform(
self._device_server.create_log_inform(
self.level_name(level),
inform_msg,
name,
timestamp=timestamp)) | Log a message and inform all clients.
Parameters
----------
level : logging level constant
The level to log the message at.
msg : str
The text format for the log message.
args : list of objects
Arguments to pass to log format string. Final message text is
created using: msg % args.
kwargs : additional keyword parameters
Allowed keywords are 'name' and 'timestamp'. The name is the name
of the logger to log the message to. If not given the name defaults
to the root logger. The timestamp is a float in seconds. If not
given the timestamp defaults to the current time. | Below is the the instruction that describes the task:
### Input:
Log a message and inform all clients.
Parameters
----------
level : logging level constant
The level to log the message at.
msg : str
The text format for the log message.
args : list of objects
Arguments to pass to log format string. Final message text is
created using: msg % args.
kwargs : additional keyword parameters
Allowed keywords are 'name' and 'timestamp'. The name is the name
of the logger to log the message to. If not given the name defaults
to the root logger. The timestamp is a float in seconds. If not
given the timestamp defaults to the current time.
### Response:
def log(self, level, msg, *args, **kwargs):
"""Log a message and inform all clients.
Parameters
----------
level : logging level constant
The level to log the message at.
msg : str
The text format for the log message.
args : list of objects
Arguments to pass to log format string. Final message text is
created using: msg % args.
kwargs : additional keyword parameters
Allowed keywords are 'name' and 'timestamp'. The name is the name
of the logger to log the message to. If not given the name defaults
to the root logger. The timestamp is a float in seconds. If not
given the timestamp defaults to the current time.
"""
timestamp = kwargs.get("timestamp")
python_msg = msg
if self._python_logger is not None:
if timestamp is not None:
python_msg = ' '.join((
'katcp timestamp: %r' % timestamp,
python_msg))
self._python_logger.log(self.PYTHON_LEVEL[level], python_msg, *args)
if level >= self._log_level:
name = kwargs.get("name")
if name is None:
name = self._root_logger_name
try:
inform_msg = msg % args
except TypeError:
# Catch the "not enough arguments for format string" exception.
inform_msg = "{} {}".format(
msg,
args if args else '').strip()
self._device_server.mass_inform(
self._device_server.create_log_inform(
self.level_name(level),
inform_msg,
name,
timestamp=timestamp)) |
def mw(self):
"""Molecular weight"""
m = P_TAB[self.symbol].get('std_weight')
mh = P_TAB['H'].get('std_weight')
return m + mh * self.H_count | Molecular weight | Below is the the instruction that describes the task:
### Input:
Molecular weight
### Response:
def mw(self):
"""Molecular weight"""
m = P_TAB[self.symbol].get('std_weight')
mh = P_TAB['H'].get('std_weight')
return m + mh * self.H_count |
def ReadLine(self, file_object):
"""Reads a line.
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
str: line read from the lines buffer.
"""
line, _, self.lines = self.lines.partition('\n')
if not line:
self.ReadLines(file_object)
line, _, self.lines = self.lines.partition('\n')
return line | Reads a line.
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
str: line read from the lines buffer. | Below is the the instruction that describes the task:
### Input:
Reads a line.
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
str: line read from the lines buffer.
### Response:
def ReadLine(self, file_object):
"""Reads a line.
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
str: line read from the lines buffer.
"""
line, _, self.lines = self.lines.partition('\n')
if not line:
self.ReadLines(file_object)
line, _, self.lines = self.lines.partition('\n')
return line |
def ranked_attributes(self):
"""
Returns the matrix of ranked attributes from the last run.
:return: the Numpy matrix
:rtype: ndarray
"""
matrix = javabridge.call(self.jobject, "rankedAttributes", "()[[D")
if matrix is None:
return None
else:
return arrays.double_matrix_to_ndarray(matrix) | Returns the matrix of ranked attributes from the last run.
:return: the Numpy matrix
:rtype: ndarray | Below is the the instruction that describes the task:
### Input:
Returns the matrix of ranked attributes from the last run.
:return: the Numpy matrix
:rtype: ndarray
### Response:
def ranked_attributes(self):
"""
Returns the matrix of ranked attributes from the last run.
:return: the Numpy matrix
:rtype: ndarray
"""
matrix = javabridge.call(self.jobject, "rankedAttributes", "()[[D")
if matrix is None:
return None
else:
return arrays.double_matrix_to_ndarray(matrix) |
def load_pointings(self, filename=None):
"""Load some pointings"""
filename = ( filename is None and tkFileDialog.askopenfilename() or filename)
if filename is None:
return
f = storage.open_vos_or_local(filename)
lines = f.readlines()
f.close()
points = []
if lines[0][0:5] == "<?xml":
# ## assume astrores format
# ## with <DATA at start of 'data' segment
for i in range(len(lines)):
if lines[i][0:5] == '<DATA':
break
for j in range(i + 5, len(lines)):
if lines[j][0:2] == "]]":
break
vs = lines[j].split('|')
points.append(vs)
elif lines[0][0:5] == 'index':
# ## Palomar Format
# ## OK.. ID/NAME/RA /DEC format
v = lines[0].split()
if len(v) == 2 :
date = v[1]
self.date.set(v[1])
self.reset()
for line in lines:
if line[0] == '!' or line[0:5] == 'index':
# index is a header line for Palomar
continue
d = line.split()
if len(d) < 9:
sys.stderr.write("Don't understand pointing format\n%s\n" % line)
continue
ras = "%s:%s:%s" % (d[2], d[3], d[4])
decs = "%s:%s:%s" % (d[5], d[6], d[7])
points.append((d[1].strip(), ras, decs))
elif lines[0][0:5] == "#SSIM":
# ## Survey Simulator format
for line in lines[1:]:
d = line.split()
points.append((d[8], d[2], d[3]))
else:
# ## try name/ ra /dec / epoch
for line in lines:
d = line.split()
if len(d) == 5: # brave assumption time!
# self.pointing_format = 'Subaru' # unfortunately this doesn't seem to do anything, & breaks save
pointing_name = d[0].split('=')[0]
# oh grief these are sexagecimal with no separators. WHY
ra = d[1].split('=')[1]
dec = d[2].split('=')[1]
if len(ra.split('.')[0]) == 5: # LACK OF SEPARATORS ARGH
ra = '0' + ra
if len(dec.split('.')[0]) == 5:
dec = '0' + dec
ra = "{}:{}:{}".format(ra[0:2], ra[2:4], ra[4:])
dec = "{}:{}:{}".format(dec[0:2], dec[2:4], dec[4:])
points.append((pointing_name, ra, dec))
elif len(d) == 4:
f = d[1].count(":")
if ( f > 0 ):
points.append((d[0], d[1], d[2]))
else:
points.append(('', math.radians(float(d[1])), math.radians(float(d[2]))))
elif len(d) == 8:
line = "%s %s:%s:%s %s:%s:%s %s" % (d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7] )
d = line.split()
# this one seems unfinished...no append
else:
sys.stderr.write("Don't understand pointing format\n%s\n" % ( line))
continue
self.plot_points_list(points)
return | Load some pointings | Below is the the instruction that describes the task:
### Input:
Load some pointings
### Response:
def load_pointings(self, filename=None):
"""Load some pointings"""
filename = ( filename is None and tkFileDialog.askopenfilename() or filename)
if filename is None:
return
f = storage.open_vos_or_local(filename)
lines = f.readlines()
f.close()
points = []
if lines[0][0:5] == "<?xml":
# ## assume astrores format
# ## with <DATA at start of 'data' segment
for i in range(len(lines)):
if lines[i][0:5] == '<DATA':
break
for j in range(i + 5, len(lines)):
if lines[j][0:2] == "]]":
break
vs = lines[j].split('|')
points.append(vs)
elif lines[0][0:5] == 'index':
# ## Palomar Format
# ## OK.. ID/NAME/RA /DEC format
v = lines[0].split()
if len(v) == 2 :
date = v[1]
self.date.set(v[1])
self.reset()
for line in lines:
if line[0] == '!' or line[0:5] == 'index':
# index is a header line for Palomar
continue
d = line.split()
if len(d) < 9:
sys.stderr.write("Don't understand pointing format\n%s\n" % line)
continue
ras = "%s:%s:%s" % (d[2], d[3], d[4])
decs = "%s:%s:%s" % (d[5], d[6], d[7])
points.append((d[1].strip(), ras, decs))
elif lines[0][0:5] == "#SSIM":
# ## Survey Simulator format
for line in lines[1:]:
d = line.split()
points.append((d[8], d[2], d[3]))
else:
# ## try name/ ra /dec / epoch
for line in lines:
d = line.split()
if len(d) == 5: # brave assumption time!
# self.pointing_format = 'Subaru' # unfortunately this doesn't seem to do anything, & breaks save
pointing_name = d[0].split('=')[0]
# oh grief these are sexagecimal with no separators. WHY
ra = d[1].split('=')[1]
dec = d[2].split('=')[1]
if len(ra.split('.')[0]) == 5: # LACK OF SEPARATORS ARGH
ra = '0' + ra
if len(dec.split('.')[0]) == 5:
dec = '0' + dec
ra = "{}:{}:{}".format(ra[0:2], ra[2:4], ra[4:])
dec = "{}:{}:{}".format(dec[0:2], dec[2:4], dec[4:])
points.append((pointing_name, ra, dec))
elif len(d) == 4:
f = d[1].count(":")
if ( f > 0 ):
points.append((d[0], d[1], d[2]))
else:
points.append(('', math.radians(float(d[1])), math.radians(float(d[2]))))
elif len(d) == 8:
line = "%s %s:%s:%s %s:%s:%s %s" % (d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7] )
d = line.split()
# this one seems unfinished...no append
else:
sys.stderr.write("Don't understand pointing format\n%s\n" % ( line))
continue
self.plot_points_list(points)
return |
def temp_shell_task(cls, inp, ddb_node, mpi_procs=1,
                    gkk_node=None, md_node=None, ddk_node=None, workdir=None, manager=None):
    """
    Build a :class:`AnaddbTask` inside a (by default temporary) working
    directory, configured to run via the shell. Mainly used for
    post-processing DDB files.

    Args:
        inp: string with the anaddb variables.
        ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath.
        mpi_procs: Number of MPI processes to use.
        See `AnaddbInit` for the meaning of the other arguments.
    """
    import tempfile
    # Fall back to a fresh temporary directory when no workdir is given.
    if workdir is None:
        workdir = tempfile.mkdtemp()
    # Default manager comes from the user configuration file.
    if manager is None:
        manager = TaskManager.from_user_config()
    # Run through the shell with the requested number of MPI processes.
    shell_manager = manager.to_shell_manager(mpi_procs=mpi_procs)
    return cls(inp, ddb_node,
               gkk_node=gkk_node, md_node=md_node, ddk_node=ddk_node,
               workdir=workdir, manager=shell_manager)
the shell with 1 MPI proc. Mainly used for post-processing the DDB files.
Args:
mpi_procs: Number of MPI processes to use.
anaddb_input: string with the anaddb variables.
ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath.
See `AnaddbInit` for the meaning of the other arguments. | Below is the the instruction that describes the task:
### Input:
Build a :class:`AnaddbTask` with a temporary workdir. The task is executed via
the shell with 1 MPI proc. Mainly used for post-processing the DDB files.
Args:
mpi_procs: Number of MPI processes to use.
anaddb_input: string with the anaddb variables.
ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath.
See `AnaddbInit` for the meaning of the other arguments.
### Response:
def temp_shell_task(cls, inp, ddb_node, mpi_procs=1,
gkk_node=None, md_node=None, ddk_node=None, workdir=None, manager=None):
"""
Build a :class:`AnaddbTask` with a temporary workdir. The task is executed via
the shell with 1 MPI proc. Mainly used for post-processing the DDB files.
Args:
mpi_procs: Number of MPI processes to use.
anaddb_input: string with the anaddb variables.
ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath.
See `AnaddbInit` for the meaning of the other arguments.
"""
# Build a simple manager to run the job in a shell subprocess
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
if manager is None: manager = TaskManager.from_user_config()
# Construct the task and run it
return cls(inp, ddb_node,
gkk_node=gkk_node, md_node=md_node, ddk_node=ddk_node,
workdir=workdir, manager=manager.to_shell_manager(mpi_procs=mpi_procs)) |
def execute(self):
    """Deploys and executes the physical dataflow.

    Operators are instantiated as Ray actors in topological order
    (sources first). At each step, producers wait for an acknowledge
    from their consumers before they start generating data.

    Returns the list of actor handles created for the dataflow.
    """
    self._collect_garbage()  # Make sure everything is clean first
    # TODO (john): Check the dataflow for 'logical inconsistencies', e.g.
    # a forward partitioning strategy where the number of downstream
    # instances exceeds the number of upstream instances leaves some of
    # the downstream instances permanently unused.
    channels_so_far = {}  # channels generated for already-visited operators
    for node in nx.topological_sort(self.logical_topo):
        operator = self.operators[node]
        # Channels carrying this operator's output downstream
        new_channels = self._generate_channels(operator)
        # Back each operator instance with a Ray actor
        handles = self.__generate_actors(operator, channels_so_far,
                                         new_channels)
        if handles:
            self.actor_handles.extend(handles)
        channels_so_far.update(new_channels)
    logger.debug("Running...")
    return self.actor_handles
### Input:
Deploys and executes the physical dataflow.
### Response:
def execute(self):
"""Deploys and executes the physical dataflow."""
self._collect_garbage() # Make sure everything is clean
# TODO (john): Check if dataflow has any 'logical inconsistencies'
# For example, if there is a forward partitioning strategy but
# the number of downstream instances is larger than the number of
# upstream instances, some of the downstream instances will not be
# used at all
# Each operator instance is implemented as a Ray actor
# Actors are deployed in topological order, as we traverse the
# logical dataflow from sources to sinks. At each step, data
# producers wait for acknowledge from consumers before starting
# generating data.
upstream_channels = {}
for node in nx.topological_sort(self.logical_topo):
operator = self.operators[node]
# Generate downstream data channels
downstream_channels = self._generate_channels(operator)
# Instantiate Ray actors
handles = self.__generate_actors(operator, upstream_channels,
downstream_channels)
if handles:
self.actor_handles.extend(handles)
upstream_channels.update(downstream_channels)
logger.debug("Running...")
return self.actor_handles |
def _examine_key(self, key_name, key_val, p, i, option_parsing):
    """ Examine the current matching key

        Extracts information, such as function to execute and command
        options, from the current key (passed to function as 'key_name' and
        'key_val').

        :param key_name: name of the matched node in the command tree
        :param key_val: spec dict for that node ('type', 'exec',
            'argument', 'children', 'multiple', ...)
        :param p: the input token currently being matched
        :param i: index of the current token in self.inp_cmd
        :param option_parsing: True once an executable command has been
            seen and its options are being parsed
        :returns: tuple (i, option_parsing); i is advanced past any
            argument token consumed here

        Side effects: updates self.exe, self.exe_options, self.arg,
        self.key, self.key_complete, self.children and
        self._scoop_rest_arguments.
    """
    # if the element we reached has an executable registered, save it!
    if 'exec' in key_val:
        self.exe = key_val['exec']
    # simple bool options, save value
    if 'type' in key_val and key_val['type'] == 'bool':
        self.exe_options[key_name] = True
    # Elements which take arguments need special attention
    if 'argument' in key_val:
        # is there an argument (the next element)?
        if len(self.inp_cmd) > i+1:
            self.key = { 'argument': key_val['argument'] }
            # there is - save it
            if key_val['type'] == 'option':
                # if argument is of type multiple, store result in a list
                if 'multiple' in key_val and key_val['multiple'] == True:
                    if key_name not in self.exe_options:
                        self.exe_options[key_name] = []
                    self.exe_options[key_name].append(self.inp_cmd[i+1])
                else:
                    self.exe_options[key_name] = self.inp_cmd[i+1]
            else:
                # non-option (positional command argument)
                self.arg = self.inp_cmd[i+1]
            # Validate the argument if possible
            if 'validator' in key_val['argument']:
                self.key_complete = key_val['argument']['validator'](self.inp_cmd[i+1])
            else:
                self.key_complete = True
            # if there are sub parameters, add them
            if 'children' in key_val:
                self.children = key_val['children']
            # If we reached a command without parameters (which
            # should be the end of the command), unset the children
            # dict.
            elif key_val['type'] == 'command':
                self.children = None
            # if the command is finished (there is an element after the argument) and
            # there is an exec_immediately-function, execute it now
            if 'exec_immediately' in key_val and len(self.inp_cmd) > i+2:
                key_val['exec_immediately'](self.inp_cmd[i+1], self.exe_options)
                # clear exe_options as these were options for exec_immediately
                self.exe_options = {}
            # the argument token was consumed: advance the cursor
            i += 1
        else:
            # if there is no next element, let key_complete be true
            # and set children to the option argument
            self.children = { 'argument': key_val['argument'] }
        # remove option from further tab completion as it has been filled in,
        # unless it has the 'multiple' key set, which means it can be filled
        # in multiple types and will return a list of all values
        if option_parsing and p == key_name and key_name in self.children:
            # if multiple, then pass
            if 'multiple' in self.children[key_name] and self.children[key_name]['multiple'] == True:
                pass
            else:
                del self.children[key_name]
    # otherwise we are handling a command without arguments
    else:
        # Rest arguments?
        if 'rest_argument' in key_val:
            # all remaining tokens will be scooped into self.arg
            self._scoop_rest_arguments = True
            self.arg = []
        self.children = key_val.get('children')
    # once an executable is known, everything that follows is option parsing
    if self.exe is not None:
        option_parsing = True
    return i, option_parsing
Extracts information, such as function to execute and command
options, from the current key (passed to function as 'key_name' and
'key_val'). | Below is the the instruction that describes the task:
### Input:
Examine the current matching key
Extracts information, such as function to execute and command
options, from the current key (passed to function as 'key_name' and
'key_val').
### Response:
def _examine_key(self, key_name, key_val, p, i, option_parsing):
""" Examine the current matching key
Extracts information, such as function to execute and command
options, from the current key (passed to function as 'key_name' and
'key_val').
"""
# if the element we reached has an executable registered, save it!
if 'exec' in key_val:
self.exe = key_val['exec']
# simple bool options, save value
if 'type' in key_val and key_val['type'] == 'bool':
self.exe_options[key_name] = True
# Elements wich takes arguments need special attention
if 'argument' in key_val:
# is there an argument (the next element)?
if len(self.inp_cmd) > i+1:
self.key = { 'argument': key_val['argument'] }
# there is - save it
if key_val['type'] == 'option':
# if argument is of type multiple, store result in a list
if 'multiple' in key_val and key_val['multiple'] == True:
if key_name not in self.exe_options:
self.exe_options[key_name] = []
self.exe_options[key_name].append(self.inp_cmd[i+1])
else:
self.exe_options[key_name] = self.inp_cmd[i+1]
else:
self.arg = self.inp_cmd[i+1]
# Validate the argument if possible
if 'validator' in key_val['argument']:
self.key_complete = key_val['argument']['validator'](self.inp_cmd[i+1])
else:
self.key_complete = True
# if there are sub parameters, add them
if 'children' in key_val:
self.children = key_val['children']
# If we reached a command without parameters (which
# should be the end of the command), unset the children
# dict.
elif key_val['type'] == 'command':
self.children = None
# if the command is finished (there is an element after the argument) and
# there is an exec_immediately-function, execute it now
if 'exec_immediately' in key_val and len(self.inp_cmd) > i+2:
key_val['exec_immediately'](self.inp_cmd[i+1], self.exe_options)
# clear exe_options as these were options for exec_immediately
self.exe_options = {}
i += 1
else:
# if there is no next element, let key_complete be true
# and set children to the option argument
self.children = { 'argument': key_val['argument'] }
# remove option from further tab completion as it has been filled in,
# unless it has the 'multiple' key set, which means it can be filled
# in multiple types and will return a list of all values
if option_parsing and p == key_name and key_name in self.children:
# if multiple, then pass
if 'multiple' in self.children[key_name] and self.children[key_name]['multiple'] == True:
pass
else:
del self.children[key_name]
# otherwise we are handling a command without arguments
else:
# Rest arguments?
if 'rest_argument' in key_val:
self._scoop_rest_arguments = True
self.arg = []
self.children = key_val.get('children')
if self.exe is not None:
option_parsing = True
return i, option_parsing |
def mappedPolygon(self, polygon, path=None, percent=0.5):
    """
    Maps the inputed polygon to the inputed path \
    used when drawing items along the path. If no \
    specific path is supplied, then this object's own \
    path will be used. It will rotate and move the \
    polygon according to the inputed percentage.

    Note: the mapped polygon is also appended to self._polygons
    as a side effect.

    :param polygon <QPolygonF>
    :param path <QPainterPath>
    :param percent <float>

    :return <QPolygonF> mapped_poly
    """
    translatePerc = percent
    anglePerc = percent
    # clamp the angle percentage into [0.05, 0.98]: sampling the path's
    # tangent too close to either endpoint gives a poor rotation angle
    if 0.95 <= anglePerc:
        anglePerc = 0.98
    elif anglePerc <= 0.05:
        anglePerc = 0.05
    if not path:
        path = self.path()
    # nothing to map onto: empty or zero-length path
    if not (path and path.length()):
        return QPolygonF()
    # transform the polygon to the path
    point = path.pointAtPercent(translatePerc)
    angle = path.angleAtPercent(anglePerc)
    # rotate about the 0 axis (negated: Qt path angles are CCW-positive)
    transform = QTransform().rotate(-angle)
    polygon = transform.map(polygon)
    # move to the translation point
    transform = QTransform().translate(point.x(), point.y())
    # create the rotated polygon
    mapped_poly = transform.map(polygon)
    self._polygons.append(mapped_poly)
    return mapped_poly
used when drawing items along the path. If no \
specific path is supplied, then this object's own \
path will be used. It will rotate and move the \
polygon according to the inputed percentage.
:param polygon <QPolygonF>
:param path <QPainterPath>
:param percent <float>
:return <QPolygonF> mapped_poly | Below is the the instruction that describes the task:
### Input:
Maps the inputed polygon to the inputed path \
used when drawing items along the path. If no \
specific path is supplied, then this object's own \
path will be used. It will rotate and move the \
polygon according to the inputed percentage.
:param polygon <QPolygonF>
:param path <QPainterPath>
:param percent <float>
:return <QPolygonF> mapped_poly
### Response:
def mappedPolygon(self, polygon, path=None, percent=0.5):
"""
Maps the inputed polygon to the inputed path \
used when drawing items along the path. If no \
specific path is supplied, then this object's own \
path will be used. It will rotate and move the \
polygon according to the inputed percentage.
:param polygon <QPolygonF>
:param path <QPainterPath>
:param percent <float>
:return <QPolygonF> mapped_poly
"""
translatePerc = percent
anglePerc = percent
# we don't want to allow the angle percentage greater than 0.85
# or less than 0.05 or we won't get a good rotation angle
if 0.95 <= anglePerc:
anglePerc = 0.98
elif anglePerc <= 0.05:
anglePerc = 0.05
if not path:
path = self.path()
if not (path and path.length()):
return QPolygonF()
# transform the polygon to the path
point = path.pointAtPercent(translatePerc)
angle = path.angleAtPercent(anglePerc)
# rotate about the 0 axis
transform = QTransform().rotate(-angle)
polygon = transform.map(polygon)
# move to the translation point
transform = QTransform().translate(point.x(), point.y())
# create the rotated polygon
mapped_poly = transform.map(polygon)
self._polygons.append(mapped_poly)
return mapped_poly |
def create_import_marking(self, args, options):
    """
    Parse the `--marking_json` and `--marking_pfill` arguments and create
    a marking object if these parameters are specified; otherwise return
    ``None``.

    Look into 'dingos/management/commands/dingos_generic_xml_import.py' to
    see how this is used to specify the Django command line argument
    'dingos_generic_xml_import' that can be called with Django's 'manage.py'.

    Side effect: the consumed keys ('marking_json', 'placeholder_fillers')
    are removed from ``options``.
    """
    marking_json = None
    placeholder_dict = {}
    if options.get('marking_json'):
        # Read the raw JSON marking template from disk.
        with open(options['marking_json'], 'r') as content_file:
            marking_json = content_file.read()
        del options['marking_json']
        # Find all placeholders
        placeholders = self.RE_SEARCH_PLACEHOLDERS.findall(marking_json)
        # Pre-fill every placeholder so the %-formatting below never
        # fails on an undefined key.
        for placeholder in placeholders:
            placeholder_dict[placeholder] = 'EMPTY'
    # Read in command-line specified placeholder values.
    if options.get('placeholder_fillers'):
        for (placeholder, value) in options['placeholder_fillers']:
            placeholder_dict[placeholder] = value
        del options['placeholder_fillers']
    # Add standard values (this list may grow in future)
    placeholder_dict['_username'] = getpass.getuser()
    placeholder_dict['_command'] = os.path.basename(__file__)
    placeholder_dict['_kargs'] = "%s" % options
    placeholder_dict['_args'] = args
    if marking_json:
        # Turn the JSON template into a %-format string:
        # 1) escape literal '%' as '%%' so the formatting operator leaves
        #    them untouched.  (The previous code replaced '%' with '\%',
        #    which does NOT escape anything for %-formatting -- a literal
        #    '%s' would still have been substituted -- and '\%' is also an
        #    invalid string escape sequence.)
        marking_json = marking_json.replace('%', '%%')
        # 2) rewrite placeholder markers as python mapping-format fields
        marking_json = self.RE_SEARCH_PLACEHOLDERS.sub("%(\\1)s", marking_json)
        # 3) substitute the collected placeholder values
        marking_json = marking_json % placeholder_dict
        # Finally, parse the JSON and build the marking info object.
        marking_dict = dict2DingoObjDict(json.loads(marking_json))
        marking = DingoImporter.create_marking_iobject(metadata_dict=marking_dict)
        return marking
    return None
and creates a marking object if these parameters are specified.
Look into 'dingos/management/commands/dingos_generic_xml_import.py to see how
this is used to specify a Django command line argument 'dingos_generic_xml_import'
that can be called with Django's 'manage.py' | Below is the the instruction that describes the task:
### Input:
This function parses the `--marking_json` and `--marking_pfill` arguments
and creates a marking object if these parameters are specified.
Look into 'dingos/management/commands/dingos_generic_xml_import.py to see how
this is used to specify a Django command line argument 'dingos_generic_xml_import'
that can be called with Django's 'manage.py'
### Response:
def create_import_marking(self, args, options):
"""
This function parses the `--marking_json` and `--marking_pfill` arguments
and creates a marking object if these parameters are specified.
Look into 'dingos/management/commands/dingos_generic_xml_import.py to see how
this is used to specify a Django command line argument 'dingos_generic_xml_import'
that can be called with Django's 'manage.py'
"""
marking_json = None
placeholder_dict = {}
if options.get('marking_json'):
# Open json
with open(options['marking_json'], 'r') as content_file:
marking_json = content_file.read()
del(options['marking_json'])
# Find all placeholders
placeholders = self.RE_SEARCH_PLACEHOLDERS.findall(marking_json)
# Create prefilled dictionary so that later when we use
# the json text as format string we do not encounter problems
# for undefined placeholders.
if placeholders:
for placeholder in placeholders:
placeholder_dict[placeholder] = 'EMPTY'
# Read in command-line specified placeholder values.
if options.get('placeholder_fillers'):
for (placeholder, value) in options['placeholder_fillers']:
placeholder_dict[placeholder] = value
del(options['placeholder_fillers'])
# Add standard values (this list may grow in future)
placeholder_dict['_username'] = getpass.getuser()
placeholder_dict['_command'] = os.path.basename(__file__)
placeholder_dict['_kargs'] = "%s" % options
placeholder_dict['_args'] = args
if marking_json:
# Massage the json text such that we can use it as format string
# to fill in the placeholders
# Escape possible '%'
marking_json = marking_json.replace('%','\%')
# Replace placeholder definitions with python string formatting
marking_json = self.RE_SEARCH_PLACEHOLDERS.sub("%(\\1)s", marking_json)
# Use string formatting to fill in placeholders
marking_json = marking_json % placeholder_dict
# Finally, parse json
marking_dict = dict2DingoObjDict(json.loads(marking_json))
# Create info object for marking
marking = DingoImporter.create_marking_iobject(metadata_dict=marking_dict)
return marking
return None |
def add_task(self, func, *args, **kargs):
    """Enqueue one task: a callable together with its call arguments."""
    task = (func, args, kargs)
    self.tasks.put(task)
### Input:
Add a task to the queue.
### Response:
def add_task(self, func, *args, **kargs):
"""
Add a task to the queue.
"""
self.tasks.put((func, args, kargs)) |
def trends_closest(self, lat, lon):
    """
    Returns the closest regions for the supplied lat/lon.

    :param lat: latitude in degrees
    :param lon: longitude in degrees
    :returns: decoded JSON response from the Twitter trends/closest API

    Any ``requests.exceptions.HTTPError`` raised by the request simply
    propagates to the caller.  (The previous ``except HTTPError as e:
    raise e`` wrapper re-raised the identical exception and added
    nothing, so it was removed.)
    """
    url = 'https://api.twitter.com/1.1/trends/closest.json'
    params = {'lat': lat, 'long': lon}
    resp = self.get(url, params=params)
    return resp.json()
return resp.json() | Returns the closest regions for the supplied lat/lon. | Below is the the instruction that describes the task:
### Input:
Returns the closest regions for the supplied lat/lon.
### Response:
def trends_closest(self, lat, lon):
"""
Returns the closest regions for the supplied lat/lon.
"""
url = 'https://api.twitter.com/1.1/trends/closest.json'
params = {'lat': lat, 'long': lon}
try:
resp = self.get(url, params=params)
except requests.exceptions.HTTPError as e:
raise e
return resp.json() |
def current(self):
    # type: () -> Hub
    """Returns the current instance of the hub."""
    # Lazily create and cache a thread/context-local hub on first access.
    hub = _local.get(None)
    if hub is None:
        hub = Hub(GLOBAL_HUB)
        _local.set(hub)
    return hub
### Input:
Returns the current instance of the hub.
### Response:
def current(self):
# type: () -> Hub
"""Returns the current instance of the hub."""
rv = _local.get(None)
if rv is None:
rv = Hub(GLOBAL_HUB)
_local.set(rv)
return rv |
def _setup_storage_dir(self):
    """Setup the storage directory path value and ensure the path exists.

    Uses self._settings[config.DIRECTORY] when configured (must already
    exist), otherwise falls back to self._default_path (created on
    demand).

    :rtype: str
    :raises: tinman.exceptions.ConfigurationException
    """
    dir_path = self._settings.get(config.DIRECTORY)
    if dir_path is None:
        # No configured directory: use the default, creating it if needed
        # so first use never fails on a missing path.
        dir_path = self._default_path
        if not os.path.exists(dir_path):
            self._make_path(dir_path)
    else:
        # NOTE(review): mixes ``os.path`` with a bare ``path`` name --
        # presumably ``from os import path`` at module level; confirm.
        dir_path = path.abspath(dir_path)
        # An explicitly configured path is never created implicitly;
        # it must already exist and be a directory.
        if not os.path.exists(dir_path) or not os.path.isdir(dir_path):
            raise exceptions.ConfigurationException(self.__class__.__name__,
                                                    config.DIRECTORY)
    # Normalize away any trailing slash for consistent joins later.
    return dir_path.rstrip('/')
:rtype: str
:raises: tinman.exceptions.ConfigurationException | Below is the the instruction that describes the task:
### Input:
Setup the storage directory path value and ensure the path exists.
:rtype: str
:raises: tinman.exceptions.ConfigurationException
### Response:
def _setup_storage_dir(self):
"""Setup the storage directory path value and ensure the path exists.
:rtype: str
:raises: tinman.exceptions.ConfigurationException
"""
dir_path = self._settings.get(config.DIRECTORY)
if dir_path is None:
dir_path = self._default_path
if not os.path.exists(dir_path):
self._make_path(dir_path)
else:
dir_path = path.abspath(dir_path)
if not os.path.exists(dir_path) or not os.path.isdir(dir_path):
raise exceptions.ConfigurationException(self.__class__.__name__,
config.DIRECTORY)
return dir_path.rstrip('/') |
def _delete_msg(self, conn, queue_url, receipt_handle):
    """
    Delete one message from an SQS queue, logging the outcome.

    :param conn: SQS API connection
    :type conn: :py:class:`botocore:SQS.Client`
    :param queue_url: URL of the queue holding the message
    :type queue_url: str
    :param receipt_handle: receipt handle identifying the message
    :type receipt_handle: str
    """
    response = conn.delete_message(QueueUrl=queue_url,
                                   ReceiptHandle=receipt_handle)
    status = response['ResponseMetadata']['HTTPStatusCode']
    if status != 200:
        # Deletion failed; record the HTTP status and bail out.
        logger.error('Error: message with receipt handle %s in queue %s '
                     'was not successfully deleted (HTTP %s)',
                     receipt_handle, queue_url, status)
        return
    logger.info('Message with receipt handle %s deleted from queue %s',
                receipt_handle, queue_url)
specified by ``queue_url``.
:param conn: SQS API connection
:type conn: :py:class:`botocore:SQS.Client`
:param queue_url: queue URL to delete the message from
:type queue_url: str
:param receipt_handle: message receipt handle
:type receipt_handle: str | Below is the the instruction that describes the task:
### Input:
Delete the message specified by ``receipt_handle`` in the queue
specified by ``queue_url``.
:param conn: SQS API connection
:type conn: :py:class:`botocore:SQS.Client`
:param queue_url: queue URL to delete the message from
:type queue_url: str
:param receipt_handle: message receipt handle
:type receipt_handle: str
### Response:
def _delete_msg(self, conn, queue_url, receipt_handle):
"""
Delete the message specified by ``receipt_handle`` in the queue
specified by ``queue_url``.
:param conn: SQS API connection
:type conn: :py:class:`botocore:SQS.Client`
:param queue_url: queue URL to delete the message from
:type queue_url: str
:param receipt_handle: message receipt handle
:type receipt_handle: str
"""
resp = conn.delete_message(QueueUrl=queue_url,
ReceiptHandle=receipt_handle)
if resp['ResponseMetadata']['HTTPStatusCode'] != 200:
logger.error('Error: message with receipt handle %s in queue %s '
'was not successfully deleted (HTTP %s)',
receipt_handle, queue_url,
resp['ResponseMetadata']['HTTPStatusCode'])
return
logger.info('Message with receipt handle %s deleted from queue %s',
receipt_handle, queue_url) |
def add_changes_markup(dom, ins_nodes, del_nodes):
    """
    Add <ins> and <del> tags to the dom to show changes.
    """
    # The diff algorithm removed nodes in reverse document order, so
    # walking the list backwards restores original order on re-insert.
    for deleted in reversed(del_nodes):
        insert_or_append(deleted.orig_parent, deleted,
                         deleted.orig_next_sibling)
        wrap(deleted, 'del')
    for inserted in ins_nodes:
        wrap(inserted, 'ins')
    # Post-processing and cleanup.
    for tag in ('del', 'ins'):
        remove_nesting(dom, tag)
    sort_del_before_ins(dom)
    merge_adjacent(dom, 'del')
    merge_adjacent(dom, 'ins')
### Input:
Add <ins> and <del> tags to the dom to show changes.
### Response:
def add_changes_markup(dom, ins_nodes, del_nodes):
"""
Add <ins> and <del> tags to the dom to show changes.
"""
# add markup for inserted and deleted sections
for node in reversed(del_nodes):
# diff algorithm deletes nodes in reverse order, so un-reverse the
# order for this iteration
insert_or_append(node.orig_parent, node, node.orig_next_sibling)
wrap(node, 'del')
for node in ins_nodes:
wrap(node, 'ins')
# Perform post-processing and cleanup.
remove_nesting(dom, 'del')
remove_nesting(dom, 'ins')
sort_del_before_ins(dom)
merge_adjacent(dom, 'del')
merge_adjacent(dom, 'ins') |
def set_int(bytearray_, byte_index, _int):
    """
    Write ``_int`` into ``bytearray_`` as a big-endian signed 16-bit value.

    Args:
        bytearray_: target buffer; modified in place.
        byte_index: offset of the first (most significant) byte.
        _int: value to store; must fit a signed 16-bit range
            (-32768..32767) or ``struct.error`` is raised.

    Returns:
        The same (mutated) bytearray, for call chaining.
    """
    # make sure we're dealing with an int (e.g. a float gets truncated)
    value = int(_int)
    # struct.pack already yields the two bytes in network order; the old
    # unpack/re-pack round-trip was redundant.
    bytearray_[byte_index:byte_index + 2] = struct.pack('>h', value)
    return bytearray_
### Input:
Set value in bytearray to int
### Response:
def set_int(bytearray_, byte_index, _int):
"""
Set value in bytearray to int
"""
# make sure were dealing with an int
_int = int(_int)
_bytes = struct.unpack('2B', struct.pack('>h', _int))
bytearray_[byte_index:byte_index + 2] = _bytes
return bytearray_ |
def check_stripe_api_host(app_configs=None, **kwargs):
    """
    Check that STRIPE_API_HOST is not being used in production.
    """
    from django.conf import settings

    findings = []
    # Outside DEBUG, a custom API host is almost certainly a mistake
    # left over from testing against stripe-mock.
    misconfigured = not settings.DEBUG and hasattr(settings, "STRIPE_API_HOST")
    if misconfigured:
        findings.append(
            checks.Warning(
                "STRIPE_API_HOST should not be set in production! This is most likely unintended.",
                hint="Remove STRIPE_API_HOST from your Django settings.",
                id="djstripe.W002",
            )
        )
    return findings
### Input:
Check that STRIPE_API_HOST is not being used in production.
### Response:
def check_stripe_api_host(app_configs=None, **kwargs):
"""
Check that STRIPE_API_HOST is not being used in production.
"""
from django.conf import settings
messages = []
if not settings.DEBUG and hasattr(settings, "STRIPE_API_HOST"):
messages.append(
checks.Warning(
"STRIPE_API_HOST should not be set in production! This is most likely unintended.",
hint="Remove STRIPE_API_HOST from your Django settings.",
id="djstripe.W002",
)
)
return messages |
def authorization_code(self, client_id, client_secret, code,
                       redirect_uri, grant_type='authorization_code'):
    """Exchange an Authorization Code for a token (OAuth 2.0 code grant).

    This is the OAuth 2.0 grant that regular web apps utilize in order
    to access an API.

    Args:
        client_id (str): your application's client Id
        client_secret (str): your application's client Secret
        code (str): the Authorization Code received from the /authorize call
        redirect_uri (str, optional): required only if it was set at the
            GET /authorize endpoint; the values must match
        grant_type (str): denotes the flow you're using; for authorization
            code use 'authorization_code'

    Returns:
        access_token, id_token
    """
    token_url = 'https://{}/oauth/token'.format(self.domain)
    payload = {
        'client_id': client_id,
        'client_secret': client_secret,
        'code': code,
        'grant_type': grant_type,
        'redirect_uri': redirect_uri,
    }
    return self.post(token_url,
                     data=payload,
                     headers={'Content-Type': 'application/json'})
This is the OAuth 2.0 grant that regular web apps utilize in order
to access an API. Use this endpoint to exchange an Authorization Code
for a Token.
Args:
grant_type (str): Denotes the flow you're using. For authorization code
use authorization_code
client_id (str): your application's client Id
client_secret (str): your application's client Secret
code (str): The Authorization Code received from the /authorize Calls
redirect_uri (str, optional): This is required only if it was set at
the GET /authorize endpoint. The values must match
Returns:
access_token, id_token | Below is the the instruction that describes the task:
### Input:
Authorization code grant
This is the OAuth 2.0 grant that regular web apps utilize in order
to access an API. Use this endpoint to exchange an Authorization Code
for a Token.
Args:
grant_type (str): Denotes the flow you're using. For authorization code
use authorization_code
client_id (str): your application's client Id
client_secret (str): your application's client Secret
code (str): The Authorization Code received from the /authorize Calls
redirect_uri (str, optional): This is required only if it was set at
the GET /authorize endpoint. The values must match
Returns:
access_token, id_token
### Response:
def authorization_code(self, client_id, client_secret, code,
redirect_uri, grant_type='authorization_code'):
"""Authorization code grant
This is the OAuth 2.0 grant that regular web apps utilize in order
to access an API. Use this endpoint to exchange an Authorization Code
for a Token.
Args:
grant_type (str): Denotes the flow you're using. For authorization code
use authorization_code
client_id (str): your application's client Id
client_secret (str): your application's client Secret
code (str): The Authorization Code received from the /authorize Calls
redirect_uri (str, optional): This is required only if it was set at
the GET /authorize endpoint. The values must match
Returns:
access_token, id_token
"""
return self.post(
'https://{}/oauth/token'.format(self.domain),
data={
'client_id': client_id,
'client_secret': client_secret,
'code': code,
'grant_type': grant_type,
'redirect_uri': redirect_uri,
},
headers={'Content-Type': 'application/json'}
) |
def run(self,
        ipyclient=None,
        ):
    """
    Run a batch of dstat tests on a list of tests, where each test is
    a dictionary mapping sample names to {p1 - p4} (and sometimes p5).
    Parameters modifying the behavior of the run, such as the number
    of bootstrap replicates (nboots) or the minimum coverage for
    loci (mincov) can be set in {object}.params.

    Results are stored on self.results_table and self.results_boots.

    Parameters:
    -----------
    ipyclient (ipyparallel.Client object):
        An ipyparallel client object to distribute jobs to a cluster.
    """
    # batch() distributes the tests (optionally over the ipyparallel
    # client) and returns the aggregate table plus bootstrap results.
    self.results_table, self.results_boots = batch(self, ipyclient)
    ## skip this for 5-part test results
    if not isinstance(self.results_table, list):
        # nloci may contain NaN; coerce to int for clean reporting
        self.results_table.nloci = np.nan_to_num(self.results_table.nloci)\
            .astype(int)
a dictionary mapping sample names to {p1 - p4} (and sometimes p5).
Parameters modifying the behavior of the run, such as the number
of bootstrap replicates (nboots) or the minimum coverage for
loci (mincov) can be set in {object}.params.
Parameters:
-----------
ipyclient (ipyparallel.Client object):
An ipyparallel client object to distribute jobs to a cluster. | Below is the the instruction that describes the task:
### Input:
Run a batch of dstat tests on a list of tests, where each test is
a dictionary mapping sample names to {p1 - p4} (and sometimes p5).
Parameters modifying the behavior of the run, such as the number
of bootstrap replicates (nboots) or the minimum coverage for
loci (mincov) can be set in {object}.params.
Parameters:
-----------
ipyclient (ipyparallel.Client object):
An ipyparallel client object to distribute jobs to a cluster.
### Response:
def run(self,
ipyclient=None,
):
"""
Run a batch of dstat tests on a list of tests, where each test is
a dictionary mapping sample names to {p1 - p4} (and sometimes p5).
Parameters modifying the behavior of the run, such as the number
of bootstrap replicates (nboots) or the minimum coverage for
loci (mincov) can be set in {object}.params.
Parameters:
-----------
ipyclient (ipyparallel.Client object):
An ipyparallel client object to distribute jobs to a cluster.
"""
self.results_table, self.results_boots = batch(self, ipyclient)
## skip this for 5-part test results
if not isinstance(self.results_table, list):
self.results_table.nloci = np.nan_to_num(self.results_table.nloci)\
.astype(int) |
def create_buffer(self, ignore_unsupported=False):
"""
Create this tree's TreeBuffer
"""
bufferdict = OrderedDict()
for branch in self.iterbranches():
# only include activated branches
if not self.GetBranchStatus(branch.GetName()):
continue
if not BaseTree.branch_is_supported(branch):
log.warning(
"ignore unsupported branch `{0}`".format(branch.GetName()))
continue
bufferdict[branch.GetName()] = Tree.branch_type(branch)
self.set_buffer(TreeBuffer(
bufferdict,
ignore_unsupported=ignore_unsupported)) | Create this tree's TreeBuffer | Below is the the instruction that describes the task:
### Input:
Create this tree's TreeBuffer
### Response:
def create_buffer(self, ignore_unsupported=False):
"""
Create this tree's TreeBuffer
"""
bufferdict = OrderedDict()
for branch in self.iterbranches():
# only include activated branches
if not self.GetBranchStatus(branch.GetName()):
continue
if not BaseTree.branch_is_supported(branch):
log.warning(
"ignore unsupported branch `{0}`".format(branch.GetName()))
continue
bufferdict[branch.GetName()] = Tree.branch_type(branch)
self.set_buffer(TreeBuffer(
bufferdict,
ignore_unsupported=ignore_unsupported)) |
def recarraydifference(X,Y):
"""
Records of a numpy recarray (or ndarray with structured dtype)
that do not appear in another.
Fast routine for determining which records in numpy array `X`
do not appear in numpy recarray `Y`.
Record array version of func:`tabular.fast.arraydifference`.
**Parameters**
**X** : numpy recarray
Numpy recarray to comapare to numpy recarray `Y`.
Return subset of `X` corresponding to elements not in `Y`.
**Y** : numpy recarray
Numpy recarray to which numpy recarray `X` is compared.
Return subset of `X` corresponding to elements not in `Y`.
**Returns**
**Z** : numpy recarray
Subset of `X` corresponding to elements not in `Y`.
**See Also:**
:func:`tabular.fast.arraydifference`, :func:`tabular.fast.recarrayisin`
"""
if len(Y) > 0:
Z = recarrayisin(X,Y)
return X[np.invert(Z)]
else:
return X | Records of a numpy recarray (or ndarray with structured dtype)
that do not appear in another.
Fast routine for determining which records in numpy array `X`
do not appear in numpy recarray `Y`.
Record array version of func:`tabular.fast.arraydifference`.
**Parameters**
**X** : numpy recarray
Numpy recarray to comapare to numpy recarray `Y`.
Return subset of `X` corresponding to elements not in `Y`.
**Y** : numpy recarray
Numpy recarray to which numpy recarray `X` is compared.
Return subset of `X` corresponding to elements not in `Y`.
**Returns**
**Z** : numpy recarray
Subset of `X` corresponding to elements not in `Y`.
**See Also:**
:func:`tabular.fast.arraydifference`, :func:`tabular.fast.recarrayisin` | Below is the the instruction that describes the task:
### Input:
Records of a numpy recarray (or ndarray with structured dtype)
that do not appear in another.
Fast routine for determining which records in numpy array `X`
do not appear in numpy recarray `Y`.
Record array version of func:`tabular.fast.arraydifference`.
**Parameters**
**X** : numpy recarray
Numpy recarray to comapare to numpy recarray `Y`.
Return subset of `X` corresponding to elements not in `Y`.
**Y** : numpy recarray
Numpy recarray to which numpy recarray `X` is compared.
Return subset of `X` corresponding to elements not in `Y`.
**Returns**
**Z** : numpy recarray
Subset of `X` corresponding to elements not in `Y`.
**See Also:**
:func:`tabular.fast.arraydifference`, :func:`tabular.fast.recarrayisin`
### Response:
def recarraydifference(X,Y):
"""
Records of a numpy recarray (or ndarray with structured dtype)
that do not appear in another.
Fast routine for determining which records in numpy array `X`
do not appear in numpy recarray `Y`.
Record array version of func:`tabular.fast.arraydifference`.
**Parameters**
**X** : numpy recarray
Numpy recarray to comapare to numpy recarray `Y`.
Return subset of `X` corresponding to elements not in `Y`.
**Y** : numpy recarray
Numpy recarray to which numpy recarray `X` is compared.
Return subset of `X` corresponding to elements not in `Y`.
**Returns**
**Z** : numpy recarray
Subset of `X` corresponding to elements not in `Y`.
**See Also:**
:func:`tabular.fast.arraydifference`, :func:`tabular.fast.recarrayisin`
"""
if len(Y) > 0:
Z = recarrayisin(X,Y)
return X[np.invert(Z)]
else:
return X |
def compute_full_connections(self, config, direct):
"""
Compute connections for a fully-connected feed-forward genome--each
input connected to all hidden nodes
(and output nodes if ``direct`` is set or there are no hidden nodes),
each hidden node connected to all output nodes.
(Recurrent genomes will also include node self-connections.)
"""
hidden = [i for i in iterkeys(self.nodes) if i not in config.output_keys]
output = [i for i in iterkeys(self.nodes) if i in config.output_keys]
connections = []
if hidden:
for input_id in config.input_keys:
for h in hidden:
connections.append((input_id, h))
for h in hidden:
for output_id in output:
connections.append((h, output_id))
if direct or (not hidden):
for input_id in config.input_keys:
for output_id in output:
connections.append((input_id, output_id))
# For recurrent genomes, include node self-connections.
if not config.feed_forward:
for i in iterkeys(self.nodes):
connections.append((i, i))
return connections | Compute connections for a fully-connected feed-forward genome--each
input connected to all hidden nodes
(and output nodes if ``direct`` is set or there are no hidden nodes),
each hidden node connected to all output nodes.
(Recurrent genomes will also include node self-connections.) | Below is the the instruction that describes the task:
### Input:
Compute connections for a fully-connected feed-forward genome--each
input connected to all hidden nodes
(and output nodes if ``direct`` is set or there are no hidden nodes),
each hidden node connected to all output nodes.
(Recurrent genomes will also include node self-connections.)
### Response:
def compute_full_connections(self, config, direct):
"""
Compute connections for a fully-connected feed-forward genome--each
input connected to all hidden nodes
(and output nodes if ``direct`` is set or there are no hidden nodes),
each hidden node connected to all output nodes.
(Recurrent genomes will also include node self-connections.)
"""
hidden = [i for i in iterkeys(self.nodes) if i not in config.output_keys]
output = [i for i in iterkeys(self.nodes) if i in config.output_keys]
connections = []
if hidden:
for input_id in config.input_keys:
for h in hidden:
connections.append((input_id, h))
for h in hidden:
for output_id in output:
connections.append((h, output_id))
if direct or (not hidden):
for input_id in config.input_keys:
for output_id in output:
connections.append((input_id, output_id))
# For recurrent genomes, include node self-connections.
if not config.feed_forward:
for i in iterkeys(self.nodes):
connections.append((i, i))
return connections |
def parse_options(options):
"""
:type options: list of str
:rtype: list of dict
"""
if options is None:
return []
else:
return [
convert_single_option(key.strip(), value.strip())
for key, value
in [option.split('=', 1) for option in options]
] | :type options: list of str
:rtype: list of dict | Below is the the instruction that describes the task:
### Input:
:type options: list of str
:rtype: list of dict
### Response:
def parse_options(options):
"""
:type options: list of str
:rtype: list of dict
"""
if options is None:
return []
else:
return [
convert_single_option(key.strip(), value.strip())
for key, value
in [option.split('=', 1) for option in options]
] |
def dump_environment(self):
""" Try to dump memory
Not currently implemented feature
:return: None
"""
# Dump the Alignak configuration to a temporary ini file
path = os.path.join(tempfile.gettempdir(),
'dump-env-%s-%s-%d.ini' % (self.type, self.name, int(time.time())))
try:
with open(path, "w") as out_file:
self.alignak_env.write(out_file)
except Exception as exp: # pylint: disable=broad-except
logger.error("Dumping daemon environment raised an error: %s. ", exp) | Try to dump memory
Not currently implemented feature
:return: None | Below is the the instruction that describes the task:
### Input:
Try to dump memory
Not currently implemented feature
:return: None
### Response:
def dump_environment(self):
""" Try to dump memory
Not currently implemented feature
:return: None
"""
# Dump the Alignak configuration to a temporary ini file
path = os.path.join(tempfile.gettempdir(),
'dump-env-%s-%s-%d.ini' % (self.type, self.name, int(time.time())))
try:
with open(path, "w") as out_file:
self.alignak_env.write(out_file)
except Exception as exp: # pylint: disable=broad-except
logger.error("Dumping daemon environment raised an error: %s. ", exp) |
def saliency_map(output, input, name="saliency_map"):
"""
Produce a saliency map as described in the paper:
`Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps
<https://arxiv.org/abs/1312.6034>`_.
The saliency map is the gradient of the max element in output w.r.t input.
Returns:
tf.Tensor: the saliency map. Has the same shape as input.
"""
max_outp = tf.reduce_max(output, 1)
saliency_op = tf.gradients(max_outp, input)[:][0]
return tf.identity(saliency_op, name=name) | Produce a saliency map as described in the paper:
`Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps
<https://arxiv.org/abs/1312.6034>`_.
The saliency map is the gradient of the max element in output w.r.t input.
Returns:
tf.Tensor: the saliency map. Has the same shape as input. | Below is the the instruction that describes the task:
### Input:
Produce a saliency map as described in the paper:
`Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps
<https://arxiv.org/abs/1312.6034>`_.
The saliency map is the gradient of the max element in output w.r.t input.
Returns:
tf.Tensor: the saliency map. Has the same shape as input.
### Response:
def saliency_map(output, input, name="saliency_map"):
"""
Produce a saliency map as described in the paper:
`Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps
<https://arxiv.org/abs/1312.6034>`_.
The saliency map is the gradient of the max element in output w.r.t input.
Returns:
tf.Tensor: the saliency map. Has the same shape as input.
"""
max_outp = tf.reduce_max(output, 1)
saliency_op = tf.gradients(max_outp, input)[:][0]
return tf.identity(saliency_op, name=name) |
def _set_intf_ipv6_router_isis(self, v, load=False):
"""
Setter method for intf_ipv6_router_isis, mapped from YANG variable /interface/ethernet/ipv6/intf_ipv6_router_isis (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_intf_ipv6_router_isis is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_intf_ipv6_router_isis() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=intf_ipv6_router_isis.intf_ipv6_router_isis, is_container='container', presence=False, yang_name="intf-ipv6-router-isis", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IsisInterfaceIpv6Router'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """intf_ipv6_router_isis must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=intf_ipv6_router_isis.intf_ipv6_router_isis, is_container='container', presence=False, yang_name="intf-ipv6-router-isis", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IsisInterfaceIpv6Router'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)""",
})
self.__intf_ipv6_router_isis = t
if hasattr(self, '_set'):
self._set() | Setter method for intf_ipv6_router_isis, mapped from YANG variable /interface/ethernet/ipv6/intf_ipv6_router_isis (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_intf_ipv6_router_isis is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_intf_ipv6_router_isis() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for intf_ipv6_router_isis, mapped from YANG variable /interface/ethernet/ipv6/intf_ipv6_router_isis (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_intf_ipv6_router_isis is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_intf_ipv6_router_isis() directly.
### Response:
def _set_intf_ipv6_router_isis(self, v, load=False):
"""
Setter method for intf_ipv6_router_isis, mapped from YANG variable /interface/ethernet/ipv6/intf_ipv6_router_isis (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_intf_ipv6_router_isis is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_intf_ipv6_router_isis() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=intf_ipv6_router_isis.intf_ipv6_router_isis, is_container='container', presence=False, yang_name="intf-ipv6-router-isis", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IsisInterfaceIpv6Router'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """intf_ipv6_router_isis must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=intf_ipv6_router_isis.intf_ipv6_router_isis, is_container='container', presence=False, yang_name="intf-ipv6-router-isis", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IsisInterfaceIpv6Router'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)""",
})
self.__intf_ipv6_router_isis = t
if hasattr(self, '_set'):
self._set() |
def process_result_value(self, value, dialect):
"""
Processes DateTimes from the DB making sure it is always
returning UTC. Not using timezone.convert_to_utc as that
converts to configured TIMEZONE while the DB might be
running with some other setting. We assume UTC datetimes
in the database.
"""
if value is not None:
if value.tzinfo is None:
value = value.replace(tzinfo=utc)
else:
value = value.astimezone(utc)
return value | Processes DateTimes from the DB making sure it is always
returning UTC. Not using timezone.convert_to_utc as that
converts to configured TIMEZONE while the DB might be
running with some other setting. We assume UTC datetimes
in the database. | Below is the the instruction that describes the task:
### Input:
Processes DateTimes from the DB making sure it is always
returning UTC. Not using timezone.convert_to_utc as that
converts to configured TIMEZONE while the DB might be
running with some other setting. We assume UTC datetimes
in the database.
### Response:
def process_result_value(self, value, dialect):
"""
Processes DateTimes from the DB making sure it is always
returning UTC. Not using timezone.convert_to_utc as that
converts to configured TIMEZONE while the DB might be
running with some other setting. We assume UTC datetimes
in the database.
"""
if value is not None:
if value.tzinfo is None:
value = value.replace(tzinfo=utc)
else:
value = value.astimezone(utc)
return value |
def _slice(attrs, inputs, proto_obj):
"""Returns a slice of the input tensor along multiple axes."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis',
'ends' : 'end',
'starts' : 'begin'})
# onnx slice provides slicing on multiple axis. Adding multiple slice_axis operator
# for multiple axes from mxnet
begin = new_attrs.get('begin')
end = new_attrs.get('end')
axes = new_attrs.get('axis', tuple(range(len(begin))))
slice_op = symbol.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])
if len(axes) > 1:
for i, axis in enumerate(axes):
slice_op = symbol.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])
return slice_op, new_attrs, inputs | Returns a slice of the input tensor along multiple axes. | Below is the the instruction that describes the task:
### Input:
Returns a slice of the input tensor along multiple axes.
### Response:
def _slice(attrs, inputs, proto_obj):
"""Returns a slice of the input tensor along multiple axes."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis',
'ends' : 'end',
'starts' : 'begin'})
# onnx slice provides slicing on multiple axis. Adding multiple slice_axis operator
# for multiple axes from mxnet
begin = new_attrs.get('begin')
end = new_attrs.get('end')
axes = new_attrs.get('axis', tuple(range(len(begin))))
slice_op = symbol.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])
if len(axes) > 1:
for i, axis in enumerate(axes):
slice_op = symbol.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])
return slice_op, new_attrs, inputs |
def generate_heightmap(self, buffer=False, as_array=False):
"""Return a heightmap, representing the highest solid blocks in this chunk."""
non_solids = [0, 8, 9, 10, 11, 38, 37, 32, 31]
if buffer:
return BytesIO(pack(">i", 256)+self.generate_heightmap()) # Length + Heightmap, ready for insertion into Chunk NBT
else:
bytes = []
for z in range(16):
for x in range(16):
for y in range(127, -1, -1):
offset = y + z*128 + x*128*16
if (self.blocksList[offset] not in non_solids or y == 0):
bytes.append(y+1)
break
if (as_array):
return bytes
else:
return array.array('B', bytes).tostring() | Return a heightmap, representing the highest solid blocks in this chunk. | Below is the the instruction that describes the task:
### Input:
Return a heightmap, representing the highest solid blocks in this chunk.
### Response:
def generate_heightmap(self, buffer=False, as_array=False):
"""Return a heightmap, representing the highest solid blocks in this chunk."""
non_solids = [0, 8, 9, 10, 11, 38, 37, 32, 31]
if buffer:
return BytesIO(pack(">i", 256)+self.generate_heightmap()) # Length + Heightmap, ready for insertion into Chunk NBT
else:
bytes = []
for z in range(16):
for x in range(16):
for y in range(127, -1, -1):
offset = y + z*128 + x*128*16
if (self.blocksList[offset] not in non_solids or y == 0):
bytes.append(y+1)
break
if (as_array):
return bytes
else:
return array.array('B', bytes).tostring() |
def _get_opus_maximum(self):
"""Instantiate an opus maximum type."""
label = """The opux maximum of a given author
that is, the only preserved work by that
author or the most known one."""
opmax = self.session.get_resource(
BASE_URI_TYPES % "opmax",
self.session.get_class(surf.ns.ECRM['E55_Type'])
)
if opmax.is_present():
return opmax
else:
opmax.rdfs_label.append(Literal(label, "en"))
logger.debug("Created a new opus maximum type instance")
opmax.save()
return opmax | Instantiate an opus maximum type. | Below is the the instruction that describes the task:
### Input:
Instantiate an opus maximum type.
### Response:
def _get_opus_maximum(self):
"""Instantiate an opus maximum type."""
label = """The opux maximum of a given author
that is, the only preserved work by that
author or the most known one."""
opmax = self.session.get_resource(
BASE_URI_TYPES % "opmax",
self.session.get_class(surf.ns.ECRM['E55_Type'])
)
if opmax.is_present():
return opmax
else:
opmax.rdfs_label.append(Literal(label, "en"))
logger.debug("Created a new opus maximum type instance")
opmax.save()
return opmax |
def ecdh(self, identity, pubkey):
"""Get shared session key using Elliptic Curve Diffie-Hellman."""
curve_name = identity.get_curve_name(ecdh=True)
log.debug('"%s" shared session key (%s) for %r from %s',
identity.to_string(), curve_name, pubkey, self)
try:
result = self._defs.get_ecdh_session_key(
self.conn,
identity=self._identity_proto(identity),
peer_public_key=pubkey,
ecdsa_curve_name=curve_name)
log.debug('result: %s', result)
assert len(result.session_key) in {65, 33} # NIST256 or Curve25519
assert result.session_key[:1] == b'\x04'
return bytes(result.session_key)
except self._defs.TrezorFailure as e:
msg = '{} error: {}'.format(self, e)
log.debug(msg, exc_info=True)
raise interface.DeviceError(msg) | Get shared session key using Elliptic Curve Diffie-Hellman. | Below is the the instruction that describes the task:
### Input:
Get shared session key using Elliptic Curve Diffie-Hellman.
### Response:
def ecdh(self, identity, pubkey):
"""Get shared session key using Elliptic Curve Diffie-Hellman."""
curve_name = identity.get_curve_name(ecdh=True)
log.debug('"%s" shared session key (%s) for %r from %s',
identity.to_string(), curve_name, pubkey, self)
try:
result = self._defs.get_ecdh_session_key(
self.conn,
identity=self._identity_proto(identity),
peer_public_key=pubkey,
ecdsa_curve_name=curve_name)
log.debug('result: %s', result)
assert len(result.session_key) in {65, 33} # NIST256 or Curve25519
assert result.session_key[:1] == b'\x04'
return bytes(result.session_key)
except self._defs.TrezorFailure as e:
msg = '{} error: {}'.format(self, e)
log.debug(msg, exc_info=True)
raise interface.DeviceError(msg) |
def strel_pair(x, y):
"""Create a structing element composed of the origin and another pixel
x, y - x and y offsets of the other pixel
returns a structuring element
"""
x_center = int(np.abs(x))
y_center = int(np.abs(y))
result = np.zeros((y_center * 2 + 1, x_center * 2 + 1), bool)
result[y_center, x_center] = True
result[y_center + int(y), x_center + int(x)] = True
return result | Create a structing element composed of the origin and another pixel
x, y - x and y offsets of the other pixel
returns a structuring element | Below is the the instruction that describes the task:
### Input:
Create a structing element composed of the origin and another pixel
x, y - x and y offsets of the other pixel
returns a structuring element
### Response:
def strel_pair(x, y):
"""Create a structing element composed of the origin and another pixel
x, y - x and y offsets of the other pixel
returns a structuring element
"""
x_center = int(np.abs(x))
y_center = int(np.abs(y))
result = np.zeros((y_center * 2 + 1, x_center * 2 + 1), bool)
result[y_center, x_center] = True
result[y_center + int(y), x_center + int(x)] = True
return result |
def _parse_expression(s):
"""Parse boolean expression containing and/or operators"""
# Converters for opeartor clauses
operators = {
'and': And,
'or': Or,
None: lambda *args: args[0]
}
# Pairing of end group symbols with start group symbols
group_pairs = {
')': '(',
']': '['
}
scanner = re.compile(r'''
(\s+) | # space
(\(|\[) | # group_start
(\)|\]) | # group_end
((?:or|and)\b) | # operator
([^\s\(\)\[\]]+) | # variable
(\Z) | # end
(.) # error
''', re.DOTALL | re.VERBOSE | re.UNICODE | re.IGNORECASE)
# Parsed using two states and a stack of open clauses
# At state 0 (not expect_operator): Expect variable, or parenthesis group
# start.
# At state 1 (expect_operator): Expect operator, parenthesis group end, or
# end.
expect_operator = False
clause_stack = []
current_clause = []
clause_operator = None
clause_symbol = None
def close():
prev_op, prev_symbol, prev_clause = clause_stack.pop()
prev_clause.append(operators[clause_operator](*current_clause))
return prev_op, prev_symbol, prev_clause
for match in re.finditer(scanner, s):
(space, group_start, group_end, operator, variable, end,
error) = match.groups()
if error is not None:
raise ParseError('Invalid token in expression string: {}'.format(
repr(match.group(0))), span=(match.start(), match.end()))
elif space is not None:
continue
elif expect_operator and operator is not None:
operator = operator.lower()
if operator == 'and' and clause_operator != 'and':
prev_term = current_clause.pop()
clause_stack.append(
(clause_operator, clause_symbol, current_clause))
current_clause = [prev_term]
elif operator == 'or' and clause_operator == 'and':
clause_operator, clause_symbol, current_clause = close()
clause_operator = operator
expect_operator = False
elif expect_operator and group_end is not None:
if clause_operator == 'and':
clause_operator, clause_symbol, current_clause = close()
if len(clause_stack) == 0:
raise ParseError(
'Unbalanced parenthesis group in expression',
span=(match.start(), match.end()))
if group_pairs[group_end] != clause_symbol:
raise ParseError(
'Group started with {} ended with {}'.format(
clause_symbol, group_end),
span=(match.start(), match.end()))
clause_operator, clause_symbol, current_clause = close()
elif expect_operator and end is not None:
if clause_operator == 'and':
clause_operator, clause_symbol, current_clause = close()
elif not expect_operator and variable is not None:
current_clause.append(Variable(variable))
expect_operator = True
elif not expect_operator and group_start is not None:
clause_stack.append(
(clause_operator, clause_symbol, current_clause))
current_clause = []
clause_operator = None
clause_symbol = group_start
else:
raise ParseError(
'Invalid token in expression string: {!r}'.format(
match.group(0)),
span=(match.start(), match.end()))
if len(clause_stack) > 0:
raise ParseError('Unbalanced parenthesis group in expression')
expr = operators[clause_operator](*current_clause)
return expr | Parse boolean expression containing and/or operators | Below is the the instruction that describes the task:
### Input:
Parse boolean expression containing and/or operators
### Response:
def _parse_expression(s):
"""Parse boolean expression containing and/or operators"""
# Converters for opeartor clauses
operators = {
'and': And,
'or': Or,
None: lambda *args: args[0]
}
# Pairing of end group symbols with start group symbols
group_pairs = {
')': '(',
']': '['
}
scanner = re.compile(r'''
(\s+) | # space
(\(|\[) | # group_start
(\)|\]) | # group_end
((?:or|and)\b) | # operator
([^\s\(\)\[\]]+) | # variable
(\Z) | # end
(.) # error
''', re.DOTALL | re.VERBOSE | re.UNICODE | re.IGNORECASE)
# Parsed using two states and a stack of open clauses
# At state 0 (not expect_operator): Expect variable, or parenthesis group
# start.
# At state 1 (expect_operator): Expect operator, parenthesis group end, or
# end.
expect_operator = False
clause_stack = []
current_clause = []
clause_operator = None
clause_symbol = None
def close():
prev_op, prev_symbol, prev_clause = clause_stack.pop()
prev_clause.append(operators[clause_operator](*current_clause))
return prev_op, prev_symbol, prev_clause
for match in re.finditer(scanner, s):
(space, group_start, group_end, operator, variable, end,
error) = match.groups()
if error is not None:
raise ParseError('Invalid token in expression string: {}'.format(
repr(match.group(0))), span=(match.start(), match.end()))
elif space is not None:
continue
elif expect_operator and operator is not None:
operator = operator.lower()
if operator == 'and' and clause_operator != 'and':
prev_term = current_clause.pop()
clause_stack.append(
(clause_operator, clause_symbol, current_clause))
current_clause = [prev_term]
elif operator == 'or' and clause_operator == 'and':
clause_operator, clause_symbol, current_clause = close()
clause_operator = operator
expect_operator = False
elif expect_operator and group_end is not None:
if clause_operator == 'and':
clause_operator, clause_symbol, current_clause = close()
if len(clause_stack) == 0:
raise ParseError(
'Unbalanced parenthesis group in expression',
span=(match.start(), match.end()))
if group_pairs[group_end] != clause_symbol:
raise ParseError(
'Group started with {} ended with {}'.format(
clause_symbol, group_end),
span=(match.start(), match.end()))
clause_operator, clause_symbol, current_clause = close()
elif expect_operator and end is not None:
if clause_operator == 'and':
clause_operator, clause_symbol, current_clause = close()
elif not expect_operator and variable is not None:
current_clause.append(Variable(variable))
expect_operator = True
elif not expect_operator and group_start is not None:
clause_stack.append(
(clause_operator, clause_symbol, current_clause))
current_clause = []
clause_operator = None
clause_symbol = group_start
else:
raise ParseError(
'Invalid token in expression string: {!r}'.format(
match.group(0)),
span=(match.start(), match.end()))
if len(clause_stack) > 0:
raise ParseError('Unbalanced parenthesis group in expression')
expr = operators[clause_operator](*current_clause)
return expr |
def decode(cls, line):
"""
Remove backslash escaping from line.value, then split on commas.
"""
if line.encoded:
line.value = stringToTextValues(line.value,
listSeparator=cls.listSeparator)
line.encoded=False | Remove backslash escaping from line.value, then split on commas. | Below is the the instruction that describes the task:
### Input:
Remove backslash escaping from line.value, then split on commas.
### Response:
def decode(cls, line):
"""
Remove backslash escaping from line.value, then split on commas.
"""
if line.encoded:
line.value = stringToTextValues(line.value,
listSeparator=cls.listSeparator)
line.encoded=False |
def writePatternLine(self, step_milliseconds, color, pos, led_number=0):
"""Write a color & step time color pattern line to RAM
:param step_milliseconds: how long for this pattern line to take
:param color: LED color
:param pos: color pattern line number (0-15)
:param led_number: LED to adjust, 0=all, 1=LEDA, 2=LEDB
"""
if ( self.dev == None ): return ''
self.setLedN(led_number)
red, green, blue = self.color_to_rgb(color)
r, g, b = self.cc(red, green, blue)
step_time = int(step_milliseconds / 10)
th = (step_time & 0xff00) >> 8
tl = step_time & 0x00ff
buf = [REPORT_ID, ord('P'), int(r), int(g), int(b), th,tl, pos, 0]
return self.write(buf); | Write a color & step time color pattern line to RAM
:param step_milliseconds: how long for this pattern line to take
:param color: LED color
:param pos: color pattern line number (0-15)
:param led_number: LED to adjust, 0=all, 1=LEDA, 2=LEDB | Below is the the instruction that describes the task:
### Input:
Write a color & step time color pattern line to RAM
:param step_milliseconds: how long for this pattern line to take
:param color: LED color
:param pos: color pattern line number (0-15)
:param led_number: LED to adjust, 0=all, 1=LEDA, 2=LEDB
### Response:
def writePatternLine(self, step_milliseconds, color, pos, led_number=0):
"""Write a color & step time color pattern line to RAM
:param step_milliseconds: how long for this pattern line to take
:param color: LED color
:param pos: color pattern line number (0-15)
:param led_number: LED to adjust, 0=all, 1=LEDA, 2=LEDB
"""
if ( self.dev == None ): return ''
self.setLedN(led_number)
red, green, blue = self.color_to_rgb(color)
r, g, b = self.cc(red, green, blue)
step_time = int(step_milliseconds / 10)
th = (step_time & 0xff00) >> 8
tl = step_time & 0x00ff
buf = [REPORT_ID, ord('P'), int(r), int(g), int(b), th,tl, pos, 0]
return self.write(buf); |
def using(self, keyspace=None, connection=None):
    """
    Change the context on-the-fly of the Model class (keyspace, connection)
    """
    if connection and self._batch:
        raise CQLEngineException("Cannot specify a connection on model in batch mode.")
    # Work on a deep copy so the original queryset keeps its context.
    new_queryset = copy.deepcopy(self)
    if keyspace:
        # Imported lazily to avoid a circular import at module load time.
        from cassandra.cqlengine.models import _clone_model_class
        new_queryset.model = _clone_model_class(self.model, {'__keyspace__': keyspace})
    if connection:
        new_queryset._connection = connection
    return new_queryset
### Input:
Change the context on-the-fly of the Model class (keyspace, connection)
### Response:
def using(self, keyspace=None, connection=None):
"""
Change the context on-the-fly of the Model class (keyspace, connection)
"""
if connection and self._batch:
raise CQLEngineException("Cannot specify a connection on model in batch mode.")
clone = copy.deepcopy(self)
if keyspace:
from cassandra.cqlengine.models import _clone_model_class
clone.model = _clone_model_class(self.model, {'__keyspace__': keyspace})
if connection:
clone._connection = connection
return clone |
def fft_coefficient(x, param):
    """
    Calculates the fourier coefficients of the one-dimensional discrete Fourier Transform for real input by fast
    fourier transformation algorithm
    .. math::
        A_k = \\sum_{m=0}^{n-1} a_m \\exp \\left \\{ -2 \\pi i \\frac{m k}{n} \\right \\}, \\qquad k = 0,
        \\ldots , n-1.
    The resulting coefficients will be complex, this feature calculator can return the real part (attr=="real"),
    the imaginary part (attr=="imag"), the absolute value (attr=="abs") and the angle in degrees (attr=="angle").
    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"coeff": x, "attr": s} with x int and x >= 0, s str and in ["real", "imag",
        "abs", "angle"]
    :type param: list
    :return: the different feature values as (index, value) pairs
    :return type: pandas.Series
    """
    assert min([config["coeff"] for config in param]) >= 0, "Coefficients must be positive or zero."
    assert set([config["attr"] for config in param]) <= set(["imag", "real", "abs", "angle"]), \
        'Attribute must be "real", "imag", "angle" or "abs"'
    fft = np.fft.rfft(x)

    def complex_agg(value, agg):
        # Reduce one complex coefficient to the requested real-valued view.
        # (Parameter renamed from `x` to avoid shadowing the outer series.)
        if agg == "real":
            return value.real
        elif agg == "imag":
            return value.imag
        elif agg == "abs":
            return np.abs(value)
        elif agg == "angle":
            return np.angle(value, deg=True)

    # np.nan instead of np.NaN: the upper-case alias was removed in NumPy 2.0.
    # Coefficients beyond the rfft output length yield NaN.
    res = [complex_agg(fft[config["coeff"]], config["attr"]) if config["coeff"] < len(fft)
           else np.nan for config in param]
    index = ['coeff_{}__attr_"{}"'.format(config["coeff"], config["attr"]) for config in param]
    return zip(index, res)
fourier transformation algorithm
.. math::
A_k = \\sum_{m=0}^{n-1} a_m \\exp \\left \\{ -2 \\pi i \\frac{m k}{n} \\right \\}, \\qquad k = 0,
\\ldots , n-1.
The resulting coefficients will be complex, this feature calculator can return the real part (attr=="real"),
the imaginary part (attr=="imag), the absolute value (attr=""abs) and the angle in degrees (attr=="angle).
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"coeff": x, "attr": s} with x int and x >= 0, s str and in ["real", "imag",
"abs", "angle"]
:type param: list
:return: the different feature values
:return type: pandas.Series | Below is the the instruction that describes the task:
### Input:
Calculates the fourier coefficients of the one-dimensional discrete Fourier Transform for real input by fast
fourier transformation algorithm
.. math::
A_k = \\sum_{m=0}^{n-1} a_m \\exp \\left \\{ -2 \\pi i \\frac{m k}{n} \\right \\}, \\qquad k = 0,
\\ldots , n-1.
The resulting coefficients will be complex, this feature calculator can return the real part (attr=="real"),
the imaginary part (attr=="imag), the absolute value (attr=""abs) and the angle in degrees (attr=="angle).
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"coeff": x, "attr": s} with x int and x >= 0, s str and in ["real", "imag",
"abs", "angle"]
:type param: list
:return: the different feature values
:return type: pandas.Series
### Response:
def fft_coefficient(x, param):
"""
Calculates the fourier coefficients of the one-dimensional discrete Fourier Transform for real input by fast
fourier transformation algorithm
.. math::
A_k = \\sum_{m=0}^{n-1} a_m \\exp \\left \\{ -2 \\pi i \\frac{m k}{n} \\right \\}, \\qquad k = 0,
\\ldots , n-1.
The resulting coefficients will be complex, this feature calculator can return the real part (attr=="real"),
the imaginary part (attr=="imag), the absolute value (attr=""abs) and the angle in degrees (attr=="angle).
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"coeff": x, "attr": s} with x int and x >= 0, s str and in ["real", "imag",
"abs", "angle"]
:type param: list
:return: the different feature values
:return type: pandas.Series
"""
assert min([config["coeff"] for config in param]) >= 0, "Coefficients must be positive or zero."
assert set([config["attr"] for config in param]) <= set(["imag", "real", "abs", "angle"]), \
'Attribute must be "real", "imag", "angle" or "abs"'
fft = np.fft.rfft(x)
def complex_agg(x, agg):
if agg == "real":
return x.real
elif agg == "imag":
return x.imag
elif agg == "abs":
return np.abs(x)
elif agg == "angle":
return np.angle(x, deg=True)
res = [complex_agg(fft[config["coeff"]], config["attr"]) if config["coeff"] < len(fft)
else np.NaN for config in param]
index = ['coeff_{}__attr_"{}"'.format(config["coeff"], config["attr"]) for config in param]
return zip(index, res) |
def section(self, section_title):
    '''
    Get the plain text content of a section from `self.sections`.
    Returns None if `section_title` isn't found, otherwise returns a whitespace stripped string.
    This is a convenience method that wraps self.content.
    .. warning:: Calling `section` on a section that has subheadings will NOT return
                 the full text of all of the subsections. It only gets the text between
                 `section_title` and the next subheading, which is often empty.
    '''
    heading = u"== {} ==".format(section_title)
    try:
        start = self.content.index(heading) + len(heading)
    except ValueError:
        # Heading not present in the article text.
        return None
    try:
        end = self.content.index("==", start)
    except ValueError:
        # Last section of the page: run through to the end of the content.
        end = len(self.content)
    return self.content[start:end].lstrip("=").strip()
Returns None if `section_title` isn't found, otherwise returns a whitespace stripped string.
This is a convenience method that wraps self.content.
.. warning:: Calling `section` on a section that has subheadings will NOT return
the full text of all of the subsections. It only gets the text between
`section_title` and the next subheading, which is often empty. | Below is the the instruction that describes the task:
### Input:
Get the plain text content of a section from `self.sections`.
Returns None if `section_title` isn't found, otherwise returns a whitespace stripped string.
This is a convenience method that wraps self.content.
.. warning:: Calling `section` on a section that has subheadings will NOT return
the full text of all of the subsections. It only gets the text between
`section_title` and the next subheading, which is often empty.
### Response:
def section(self, section_title):
'''
Get the plain text content of a section from `self.sections`.
Returns None if `section_title` isn't found, otherwise returns a whitespace stripped string.
This is a convenience method that wraps self.content.
.. warning:: Calling `section` on a section that has subheadings will NOT return
the full text of all of the subsections. It only gets the text between
`section_title` and the next subheading, which is often empty.
'''
section = u"== {} ==".format(section_title)
try:
index = self.content.index(section) + len(section)
except ValueError:
return None
try:
next_index = self.content.index("==", index)
except ValueError:
next_index = len(self.content)
return self.content[index:next_index].lstrip("=").strip() |
def render_template(self):
    """Render and save API doc in openapi.yml."""
    # Collect the path specs first, then hand the template context off
    # to the writer.
    self._parse_paths()
    context = {"napp": self._napp.__dict__, "paths": self._paths}
    self._save(context)
### Input:
Render and save API doc in openapi.yml.
### Response:
def render_template(self):
"""Render and save API doc in openapi.yml."""
self._parse_paths()
context = dict(napp=self._napp.__dict__, paths=self._paths)
self._save(context) |
def worktree_prune(cwd,
                   dry_run=False,
                   verbose=True,
                   expire=None,
                   opts='',
                   git_opts='',
                   user=None,
                   password=None,
                   ignore_retcode=False,
                   output_encoding=None):
    '''
    .. versionadded:: 2015.8.0
    Interface to `git-worktree(1)`_, prunes stale worktree administrative data
    from the gitdir
    cwd
        The path to the main git checkout or a linked worktree
    dry_run : False
        If ``True``, then this function will report what would have been
        pruned, but no changes will be made.
    verbose : True
        Report all changes made. Set to ``False`` to suppress this output.
    expire
        Only prune unused worktree data older than a specific period of time.
        The date format for this parameter is described in the documentation
        for the ``gc.pruneWorktreesExpire`` config param in the
        `git-config(1)`_ manpage.
    opts
        Any additional options to add to the command line, in a single string
        .. note::
            On the Salt CLI, if the opts are preceded with a dash, it is
            necessary to precede them with ``opts=`` to avoid causing errors
            with Salt's own argument parsing.
        All CLI options for pruning worktrees as of Git 2.5.0 are already
        supported by this function as of Salt 2015.8.0, so using this
        argument is unnecessary unless new CLI arguments are added to
        `git-worktree(1)`_ and are not yet supported in Salt.
    git_opts
        Any additional options to add to git command itself (not the
        ``worktree`` subcommand), in a single string. This is useful for
        passing ``-c`` to run git with temporary changes to the git
        configuration.
        .. versionadded:: 2017.7.0
        .. note::
            This is only supported in git 1.7.2 and newer.
    user
        User under which to run the git command. By default, the command is run
        by the user under which the minion is running.
    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.
        .. versionadded:: 2016.3.4
    ignore_retcode : False
        If ``True``, do not log an error to the minion log if the git command
        returns a nonzero exit status.
        .. versionadded:: 2015.8.0
    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.
        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.
        .. versionadded:: 2018.3.1
    .. _`git-worktree(1)`: http://git-scm.com/docs/git-worktree
    .. _`git-config(1)`: http://git-scm.com/docs/git-config/2.5.1
    CLI Examples:
    .. code-block:: bash
        salt myminion git.worktree_prune /path/to/repo
        salt myminion git.worktree_prune /path/to/repo dry_run=True
        salt myminion git.worktree_prune /path/to/repo expire=1.day.ago
    '''
    # Bail out early if the installed git lacks the worktree subcommand.
    _check_worktree_support()
    # Expand ~ and environment markers relative to the invoking user.
    cwd = _expand_path(cwd, user)
    # Global git options (e.g. ``-c key=value``) must precede the subcommand.
    command = ['git'] + _format_git_opts(git_opts)
    command.extend(['worktree', 'prune'])
    if dry_run:
        command.append('--dry-run')
    if verbose:
        command.append('--verbose')
    if expire:
        command.extend(['--expire', expire])
    # Free-form user opts are appended after the flags managed above.
    command.extend(_format_opts(opts))
    # Only stdout is returned; errors are handled inside _git_run.
    return _git_run(command,
                    cwd=cwd,
                    user=user,
                    password=password,
                    ignore_retcode=ignore_retcode,
                    output_encoding=output_encoding)['stdout']
Interface to `git-worktree(1)`_, prunes stale worktree administrative data
from the gitdir
cwd
The path to the main git checkout or a linked worktree
dry_run : False
If ``True``, then this function will report what would have been
pruned, but no changes will be made.
verbose : True
Report all changes made. Set to ``False`` to suppress this output.
expire
Only prune unused worktree data older than a specific period of time.
The date format for this parameter is described in the documentation
for the ``gc.pruneWorktreesExpire`` config param in the
`git-config(1)`_ manpage.
opts
Any additional options to add to the command line, in a single string
.. note::
On the Salt CLI, if the opts are preceded with a dash, it is
necessary to precede them with ``opts=`` to avoid causing errors
with Salt's own argument parsing.
All CLI options for pruning worktrees as of Git 2.5.0 are already
supported by this function as of Salt 2015.8.0, so using this
argument is unnecessary unless new CLI arguments are added to
`git-worktree(1)`_ and are not yet supported in Salt.
git_opts
Any additional options to add to git command itself (not the
``worktree`` subcommand), in a single string. This is useful for
passing ``-c`` to run git with temporary changes to the git
configuration.
.. versionadded:: 2017.7.0
.. note::
This is only supported in git 1.7.2 and newer.
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
.. _`git-worktree(1)`: http://git-scm.com/docs/git-worktree
.. _`git-config(1)`: http://git-scm.com/docs/git-config/2.5.1
CLI Examples:
.. code-block:: bash
salt myminion git.worktree_prune /path/to/repo
salt myminion git.worktree_prune /path/to/repo dry_run=True
salt myminion git.worktree_prune /path/to/repo expire=1.day.ago | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2015.8.0
Interface to `git-worktree(1)`_, prunes stale worktree administrative data
from the gitdir
cwd
The path to the main git checkout or a linked worktree
dry_run : False
If ``True``, then this function will report what would have been
pruned, but no changes will be made.
verbose : True
Report all changes made. Set to ``False`` to suppress this output.
expire
Only prune unused worktree data older than a specific period of time.
The date format for this parameter is described in the documentation
for the ``gc.pruneWorktreesExpire`` config param in the
`git-config(1)`_ manpage.
opts
Any additional options to add to the command line, in a single string
.. note::
On the Salt CLI, if the opts are preceded with a dash, it is
necessary to precede them with ``opts=`` to avoid causing errors
with Salt's own argument parsing.
All CLI options for pruning worktrees as of Git 2.5.0 are already
supported by this function as of Salt 2015.8.0, so using this
argument is unnecessary unless new CLI arguments are added to
`git-worktree(1)`_ and are not yet supported in Salt.
git_opts
Any additional options to add to git command itself (not the
``worktree`` subcommand), in a single string. This is useful for
passing ``-c`` to run git with temporary changes to the git
configuration.
.. versionadded:: 2017.7.0
.. note::
This is only supported in git 1.7.2 and newer.
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
.. _`git-worktree(1)`: http://git-scm.com/docs/git-worktree
.. _`git-config(1)`: http://git-scm.com/docs/git-config/2.5.1
CLI Examples:
.. code-block:: bash
salt myminion git.worktree_prune /path/to/repo
salt myminion git.worktree_prune /path/to/repo dry_run=True
salt myminion git.worktree_prune /path/to/repo expire=1.day.ago
### Response:
def worktree_prune(cwd,
dry_run=False,
verbose=True,
expire=None,
opts='',
git_opts='',
user=None,
password=None,
ignore_retcode=False,
output_encoding=None):
'''
.. versionadded:: 2015.8.0
Interface to `git-worktree(1)`_, prunes stale worktree administrative data
from the gitdir
cwd
The path to the main git checkout or a linked worktree
dry_run : False
If ``True``, then this function will report what would have been
pruned, but no changes will be made.
verbose : True
Report all changes made. Set to ``False`` to suppress this output.
expire
Only prune unused worktree data older than a specific period of time.
The date format for this parameter is described in the documentation
for the ``gc.pruneWorktreesExpire`` config param in the
`git-config(1)`_ manpage.
opts
Any additional options to add to the command line, in a single string
.. note::
On the Salt CLI, if the opts are preceded with a dash, it is
necessary to precede them with ``opts=`` to avoid causing errors
with Salt's own argument parsing.
All CLI options for pruning worktrees as of Git 2.5.0 are already
supported by this function as of Salt 2015.8.0, so using this
argument is unnecessary unless new CLI arguments are added to
`git-worktree(1)`_ and are not yet supported in Salt.
git_opts
Any additional options to add to git command itself (not the
``worktree`` subcommand), in a single string. This is useful for
passing ``-c`` to run git with temporary changes to the git
configuration.
.. versionadded:: 2017.7.0
.. note::
This is only supported in git 1.7.2 and newer.
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
.. _`git-worktree(1)`: http://git-scm.com/docs/git-worktree
.. _`git-config(1)`: http://git-scm.com/docs/git-config/2.5.1
CLI Examples:
.. code-block:: bash
salt myminion git.worktree_prune /path/to/repo
salt myminion git.worktree_prune /path/to/repo dry_run=True
salt myminion git.worktree_prune /path/to/repo expire=1.day.ago
'''
_check_worktree_support()
cwd = _expand_path(cwd, user)
command = ['git'] + _format_git_opts(git_opts)
command.extend(['worktree', 'prune'])
if dry_run:
command.append('--dry-run')
if verbose:
command.append('--verbose')
if expire:
command.extend(['--expire', expire])
command.extend(_format_opts(opts))
return _git_run(command,
cwd=cwd,
user=user,
password=password,
ignore_retcode=ignore_retcode,
output_encoding=output_encoding)['stdout'] |
def qos(self, prefetch_size, prefetch_count, apply_global=False):
    """Request specific Quality of Service.

    Forwards the prefetch window/count settings straight to the
    underlying channel's ``basic_qos``.
    """
    self.channel.basic_qos(prefetch_size, prefetch_count, apply_global)
### Input:
Request specific Quality of Service.
### Response:
def qos(self, prefetch_size, prefetch_count, apply_global=False):
"""Request specific Quality of Service."""
self.channel.basic_qos(prefetch_size, prefetch_count,
apply_global) |
def first(self, limit=1, columns=None):
    """
    Execute the query and get the first results
    :param limit: The number of results to get
    :type limit: int
    :param columns: The columns to get
    :type columns: list
    :return: The result
    :rtype: mixed
    """
    # Falsy columns (None or empty) fall back to selecting everything.
    selected = columns or ['*']
    rows = self.take(limit).get(selected)
    return rows[0] if len(rows) > 0 else None
:param limit: The number of results to get
:type limit: int
:param columns: The columns to get
:type columns: list
:return: The result
:rtype: mixed | Below is the the instruction that describes the task:
### Input:
Execute the query and get the first results
:param limit: The number of results to get
:type limit: int
:param columns: The columns to get
:type columns: list
:return: The result
:rtype: mixed
### Response:
def first(self, limit=1, columns=None):
"""
Execute the query and get the first results
:param limit: The number of results to get
:type limit: int
:param columns: The columns to get
:type columns: list
:return: The result
:rtype: mixed
"""
if not columns:
columns = ['*']
results = self.take(limit).get(columns)
if len(results) > 0:
return results[0]
return |
def show_zoning_enabled_configuration_input_request_type_get_next_request_last_rcvd_zone_name(self, **kwargs):
    """Auto Generated Code

    Build the XML payload for a show-zoning-enabled-configuration
    get-next request and dispatch it via the callback.

    ``kwargs`` must contain ``last_rcvd_zone_name`` (the zone name to
    resume iteration after) and may contain ``callback`` to override the
    default dispatcher.  Returns whatever the callback returns.
    """
    config = ET.Element("config")
    show_zoning_enabled_configuration = ET.Element("show_zoning_enabled_configuration")
    # NOTE(review): this rebinding makes the RPC element itself the payload
    # that is dispatched; the initial "config" element above is discarded.
    config = show_zoning_enabled_configuration
    input = ET.SubElement(show_zoning_enabled_configuration, "input")
    request_type = ET.SubElement(input, "request-type")
    get_next_request = ET.SubElement(request_type, "get-next-request")
    last_rcvd_zone_name = ET.SubElement(get_next_request, "last-rcvd-zone-name")
    last_rcvd_zone_name.text = kwargs.pop('last_rcvd_zone_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
### Input:
Auto Generated Code
### Response:
def show_zoning_enabled_configuration_input_request_type_get_next_request_last_rcvd_zone_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_zoning_enabled_configuration = ET.Element("show_zoning_enabled_configuration")
config = show_zoning_enabled_configuration
input = ET.SubElement(show_zoning_enabled_configuration, "input")
request_type = ET.SubElement(input, "request-type")
get_next_request = ET.SubElement(request_type, "get-next-request")
last_rcvd_zone_name = ET.SubElement(get_next_request, "last-rcvd-zone-name")
last_rcvd_zone_name.text = kwargs.pop('last_rcvd_zone_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _validate_codeblock_size(self, cparams):
"""
Code block dimensions must satisfy certain restrictions.
They must both be a power of 2 and the total area defined by the width
and height cannot be either too great or too small for the codec.
"""
if cparams.cblockw_init != 0 and cparams.cblockh_init != 0:
# These fields ARE zero if uninitialized.
width = cparams.cblockw_init
height = cparams.cblockh_init
if height * width > 4096 or height < 4 or width < 4:
msg = ("The code block area is specified as "
"{height} x {width} = {area} square pixels. "
"Code block area cannot exceed 4096 square pixels. "
"Code block height and width dimensions must be larger "
"than 4 pixels.")
msg = msg.format(height=height, width=width,
area=height * width)
raise IOError(msg)
if ((math.log(height, 2) != math.floor(math.log(height, 2)) or
math.log(width, 2) != math.floor(math.log(width, 2)))):
msg = ("Bad code block size ({height} x {width}). "
"The dimensions must be powers of 2.")
msg = msg.format(height=height, width=width)
raise IOError(msg) | Code block dimensions must satisfy certain restrictions.
They must both be a power of 2 and the total area defined by the width
and height cannot be either too great or too small for the codec. | Below is the the instruction that describes the task:
### Input:
Code block dimensions must satisfy certain restrictions.
They must both be a power of 2 and the total area defined by the width
and height cannot be either too great or too small for the codec.
### Response:
def _validate_codeblock_size(self, cparams):
"""
Code block dimensions must satisfy certain restrictions.
They must both be a power of 2 and the total area defined by the width
and height cannot be either too great or too small for the codec.
"""
if cparams.cblockw_init != 0 and cparams.cblockh_init != 0:
# These fields ARE zero if uninitialized.
width = cparams.cblockw_init
height = cparams.cblockh_init
if height * width > 4096 or height < 4 or width < 4:
msg = ("The code block area is specified as "
"{height} x {width} = {area} square pixels. "
"Code block area cannot exceed 4096 square pixels. "
"Code block height and width dimensions must be larger "
"than 4 pixels.")
msg = msg.format(height=height, width=width,
area=height * width)
raise IOError(msg)
if ((math.log(height, 2) != math.floor(math.log(height, 2)) or
math.log(width, 2) != math.floor(math.log(width, 2)))):
msg = ("Bad code block size ({height} x {width}). "
"The dimensions must be powers of 2.")
msg = msg.format(height=height, width=width)
raise IOError(msg) |
def _point_in_tectonic_region(self, polygon):
'''
Returns the region type and area according to the tectonic
region
:param polygon: Dictionary containing the following attributes -
'long_lims' - Longitude limits (West, East)
'lat_lims' - Latitude limits (South, North)
'region_type' - Tectonic region type (str)
'area' - Area of cell in m ^ 2
'''
marker = np.zeros(self.strain.get_number_observations(), dtype=bool)
idlong = np.logical_and(
self.strain.data['longitude'] >= polygon['long_lims'][0],
self.strain.data['longitude'] < polygon['long_lims'][1])
id0 = np.where(np.logical_and(idlong, np.logical_and(
self.strain.data['latitude'] >= polygon['lat_lims'][0],
self.strain.data['latitude'] < polygon['lat_lims'][1])))[0]
if len(id0) > 0:
marker[id0] = True
for iloc in id0:
self.strain.data['region'][iloc] = \
polygon['region_type']
self.strain.data['area'][iloc] = polygon['area']
marker = np.logical_not(marker)
return marker | Returns the region type and area according to the tectonic
region
:param polygon: Dictionary containing the following attributes -
'long_lims' - Longitude limits (West, East)
'lat_lims' - Latitude limits (South, North)
'region_type' - Tectonic region type (str)
'area' - Area of cell in m ^ 2 | Below is the the instruction that describes the task:
### Input:
Returns the region type and area according to the tectonic
region
:param polygon: Dictionary containing the following attributes -
'long_lims' - Longitude limits (West, East)
'lat_lims' - Latitude limits (South, North)
'region_type' - Tectonic region type (str)
'area' - Area of cell in m ^ 2
### Response:
def _point_in_tectonic_region(self, polygon):
'''
Returns the region type and area according to the tectonic
region
:param polygon: Dictionary containing the following attributes -
'long_lims' - Longitude limits (West, East)
'lat_lims' - Latitude limits (South, North)
'region_type' - Tectonic region type (str)
'area' - Area of cell in m ^ 2
'''
marker = np.zeros(self.strain.get_number_observations(), dtype=bool)
idlong = np.logical_and(
self.strain.data['longitude'] >= polygon['long_lims'][0],
self.strain.data['longitude'] < polygon['long_lims'][1])
id0 = np.where(np.logical_and(idlong, np.logical_and(
self.strain.data['latitude'] >= polygon['lat_lims'][0],
self.strain.data['latitude'] < polygon['lat_lims'][1])))[0]
if len(id0) > 0:
marker[id0] = True
for iloc in id0:
self.strain.data['region'][iloc] = \
polygon['region_type']
self.strain.data['area'][iloc] = polygon['area']
marker = np.logical_not(marker)
return marker |
def _nonzero(self):
    """Equivalent of numpy's ``nonzero``, but returns a tuple of Variables."""
    # TODO we should replace dask's native nonzero
    # after https://github.com/dask/dask/issues/1076 is implemented.
    nonzero_indices = np.nonzero(self.data)
    variables = [
        Variable((dim), indices)
        for indices, dim in zip(nonzero_indices, self.dims)
    ]
    return tuple(variables)
### Input:
Equivalent numpy's nonzero but returns a tuple of Varibles.
### Response:
def _nonzero(self):
""" Equivalent numpy's nonzero but returns a tuple of Varibles. """
# TODO we should replace dask's native nonzero
# after https://github.com/dask/dask/issues/1076 is implemented.
nonzeros = np.nonzero(self.data)
return tuple(Variable((dim), nz) for nz, dim
in zip(nonzeros, self.dims)) |
def from_doi(doi):
    """
    Get the arXiv eprint id for a given DOI.
    .. note::
        Uses arXiv API. Will not return anything if arXiv is not aware of the
        associated DOI.
    :param doi: The DOI of the resource to look for.
    :returns: The arXiv eprint id, or ``None`` if not found.
    >>> from_doi('10.1209/0295-5075/111/40005')
    # Note: Test do not pass due to an arXiv API bug.
    '1506.06690'
    """
    try:
        request = requests.get("http://export.arxiv.org/api/query",
                               params={
                                   "search_query": "doi:%s" % (doi,),
                                   "max_results": 1
                               },
                               # Avoid hanging forever if arXiv is slow or
                               # unreachable.  requests.Timeout subclasses
                               # RequestException, so a timeout is handled
                               # by the except clause below.
                               timeout=10)
        request.raise_for_status()
    except RequestException:
        return None
    root = xml.etree.ElementTree.fromstring(request.content)
    for entry in root.iter("{http://www.w3.org/2005/Atom}entry"):
        arxiv_id = entry.find("{http://www.w3.org/2005/Atom}id").text
        # arxiv_id is an arXiv full URL. We only want the id which is the last
        # URL component.
        return arxiv_id.split("/")[-1]
    return None
.. note::
Uses arXiv API. Will not return anything if arXiv is not aware of the
associated DOI.
:param doi: The DOI of the resource to look for.
:returns: The arXiv eprint id, or ``None`` if not found.
>>> from_doi('10.1209/0295-5075/111/40005')
# Note: Test do not pass due to an arXiv API bug.
'1506.06690' | Below is the the instruction that describes the task:
### Input:
Get the arXiv eprint id for a given DOI.
.. note::
Uses arXiv API. Will not return anything if arXiv is not aware of the
associated DOI.
:param doi: The DOI of the resource to look for.
:returns: The arXiv eprint id, or ``None`` if not found.
>>> from_doi('10.1209/0295-5075/111/40005')
# Note: Test do not pass due to an arXiv API bug.
'1506.06690'
### Response:
def from_doi(doi):
"""
Get the arXiv eprint id for a given DOI.
.. note::
Uses arXiv API. Will not return anything if arXiv is not aware of the
associated DOI.
:param doi: The DOI of the resource to look for.
:returns: The arXiv eprint id, or ``None`` if not found.
>>> from_doi('10.1209/0295-5075/111/40005')
# Note: Test do not pass due to an arXiv API bug.
'1506.06690'
"""
try:
request = requests.get("http://export.arxiv.org/api/query",
params={
"search_query": "doi:%s" % (doi,),
"max_results": 1
})
request.raise_for_status()
except RequestException:
return None
root = xml.etree.ElementTree.fromstring(request.content)
for entry in root.iter("{http://www.w3.org/2005/Atom}entry"):
arxiv_id = entry.find("{http://www.w3.org/2005/Atom}id").text
# arxiv_id is an arXiv full URL. We only want the id which is the last
# URL component.
return arxiv_id.split("/")[-1]
return None |
def write_to_file(chats, chatfile):
    """Persist the chat ids to *chatfile*, one per line.

    Called every time chats are modified.
    """
    contents = '\n'.join(str(chat_id) for chat_id in chats)
    with open(chatfile, 'w') as handler:
        handler.write(contents)
### Input:
called every time chats are modified
### Response:
def write_to_file(chats, chatfile):
"""called every time chats are modified"""
with open(chatfile, 'w') as handler:
handler.write('\n'.join((str(id_) for id_ in chats))) |
def simulate(batch_env, algo, log=True, reset=False):
"""Simulation step of a vectorized algorithm with in-graph environments.
Integrates the operations implemented by the algorithm and the environments
into a combined operation.
Args:
batch_env: In-graph batch environment.
algo: Algorithm instance implementing required operations.
log: Tensor indicating whether to compute and return summaries.
reset: Tensor causing all environments to reset.
Returns:
Tuple of tensors containing done flags for the current episodes, possibly
intermediate scores for the episodes, and a summary tensor.
"""
def _define_begin_episode(agent_indices):
"""Reset environments, intermediate scores and durations for new episodes.
Args:
agent_indices: Tensor containing batch indices starting an episode.
Returns:
Summary tensor.
"""
assert agent_indices.shape.ndims == 1
zero_scores = tf.zeros_like(agent_indices, tf.float32)
zero_durations = tf.zeros_like(agent_indices)
reset_ops = [
batch_env.reset(agent_indices),
tf.scatter_update(score, agent_indices, zero_scores),
tf.scatter_update(length, agent_indices, zero_durations)]
with tf.control_dependencies(reset_ops):
return algo.begin_episode(agent_indices)
def _define_step():
"""Request actions from the algorithm and apply them to the environments.
Increments the lengths of all episodes and increases their scores by the
current reward. After stepping the environments, provides the full
transition tuple to the algorithm.
Returns:
Summary tensor.
"""
prevob = batch_env.observ + 0 # Ensure a copy of the variable value.
agent_indices = tf.range(len(batch_env))
action, step_summary = algo.perform(agent_indices, prevob)
action.set_shape(batch_env.action.shape)
with tf.control_dependencies([batch_env.simulate(action)]):
add_score = score.assign_add(batch_env.reward)
inc_length = length.assign_add(tf.ones(len(batch_env), tf.int32))
with tf.control_dependencies([add_score, inc_length]):
agent_indices = tf.range(len(batch_env))
experience_summary = algo.experience(
agent_indices, prevob, batch_env.action, batch_env.reward,
batch_env.done, batch_env.observ)
return tf.summary.merge([step_summary, experience_summary])
def _define_end_episode(agent_indices):
"""Notify the algorithm of ending episodes.
Also updates the mean score and length counters used for summaries.
Args:
agent_indices: Tensor holding batch indices that end their episodes.
Returns:
Summary tensor.
"""
assert agent_indices.shape.ndims == 1
submit_score = mean_score.submit(tf.gather(score, agent_indices))
submit_length = mean_length.submit(
tf.cast(tf.gather(length, agent_indices), tf.float32))
with tf.control_dependencies([submit_score, submit_length]):
return algo.end_episode(agent_indices)
def _define_summaries():
"""Reset the average score and duration, and return them as summary.
Returns:
Summary string.
"""
score_summary = tf.cond(
tf.logical_and(log, tf.cast(mean_score.count, tf.bool)),
lambda: tf.summary.scalar('mean_score', mean_score.clear()), str)
length_summary = tf.cond(
tf.logical_and(log, tf.cast(mean_length.count, tf.bool)),
lambda: tf.summary.scalar('mean_length', mean_length.clear()), str)
return tf.summary.merge([score_summary, length_summary])
with tf.name_scope('simulate'):
log = tf.convert_to_tensor(log)
reset = tf.convert_to_tensor(reset)
with tf.variable_scope('simulate_temporary'):
score = tf.Variable(
lambda: tf.zeros(len(batch_env), dtype=tf.float32),
trainable=False, name='score')
length = tf.Variable(
lambda: tf.zeros(len(batch_env), dtype=tf.int32),
trainable=False, name='length')
mean_score = streaming_mean.StreamingMean((), tf.float32)
mean_length = streaming_mean.StreamingMean((), tf.float32)
agent_indices = tf.cond(
reset,
lambda: tf.range(len(batch_env)),
lambda: tf.cast(tf.where(batch_env.done)[:, 0], tf.int32))
begin_episode = tf.cond(
tf.cast(tf.shape(agent_indices)[0], tf.bool),
lambda: _define_begin_episode(agent_indices), str)
with tf.control_dependencies([begin_episode]):
step = _define_step()
with tf.control_dependencies([step]):
agent_indices = tf.cast(tf.where(batch_env.done)[:, 0], tf.int32)
end_episode = tf.cond(
tf.cast(tf.shape(agent_indices)[0], tf.bool),
lambda: _define_end_episode(agent_indices), str)
with tf.control_dependencies([end_episode]):
summary = tf.summary.merge([
_define_summaries(), begin_episode, step, end_episode])
with tf.control_dependencies([summary]):
done, score = tf.identity(batch_env.done), tf.identity(score)
return done, score, summary | Simulation step of a vectorized algorithm with in-graph environments.
Integrates the operations implemented by the algorithm and the environments
into a combined operation.
Args:
batch_env: In-graph batch environment.
algo: Algorithm instance implementing required operations.
log: Tensor indicating whether to compute and return summaries.
reset: Tensor causing all environments to reset.
Returns:
Tuple of tensors containing done flags for the current episodes, possibly
intermediate scores for the episodes, and a summary tensor. | Below is the the instruction that describes the task:
### Input:
Simulation step of a vectorized algorithm with in-graph environments.
Integrates the operations implemented by the algorithm and the environments
into a combined operation.
Args:
batch_env: In-graph batch environment.
algo: Algorithm instance implementing required operations.
log: Tensor indicating whether to compute and return summaries.
reset: Tensor causing all environments to reset.
Returns:
Tuple of tensors containing done flags for the current episodes, possibly
intermediate scores for the episodes, and a summary tensor.
### Response:
def simulate(batch_env, algo, log=True, reset=False):
"""Simulation step of a vectorized algorithm with in-graph environments.
Integrates the operations implemented by the algorithm and the environments
into a combined operation.
Args:
batch_env: In-graph batch environment.
algo: Algorithm instance implementing required operations.
log: Tensor indicating whether to compute and return summaries.
reset: Tensor causing all environments to reset.
Returns:
Tuple of tensors containing done flags for the current episodes, possibly
intermediate scores for the episodes, and a summary tensor.
"""
def _define_begin_episode(agent_indices):
"""Reset environments, intermediate scores and durations for new episodes.
Args:
agent_indices: Tensor containing batch indices starting an episode.
Returns:
Summary tensor.
"""
assert agent_indices.shape.ndims == 1
zero_scores = tf.zeros_like(agent_indices, tf.float32)
zero_durations = tf.zeros_like(agent_indices)
reset_ops = [
batch_env.reset(agent_indices),
tf.scatter_update(score, agent_indices, zero_scores),
tf.scatter_update(length, agent_indices, zero_durations)]
with tf.control_dependencies(reset_ops):
return algo.begin_episode(agent_indices)
def _define_step():
"""Request actions from the algorithm and apply them to the environments.
Increments the lengths of all episodes and increases their scores by the
current reward. After stepping the environments, provides the full
transition tuple to the algorithm.
Returns:
Summary tensor.
"""
prevob = batch_env.observ + 0 # Ensure a copy of the variable value.
agent_indices = tf.range(len(batch_env))
action, step_summary = algo.perform(agent_indices, prevob)
action.set_shape(batch_env.action.shape)
with tf.control_dependencies([batch_env.simulate(action)]):
add_score = score.assign_add(batch_env.reward)
inc_length = length.assign_add(tf.ones(len(batch_env), tf.int32))
with tf.control_dependencies([add_score, inc_length]):
agent_indices = tf.range(len(batch_env))
experience_summary = algo.experience(
agent_indices, prevob, batch_env.action, batch_env.reward,
batch_env.done, batch_env.observ)
return tf.summary.merge([step_summary, experience_summary])
def _define_end_episode(agent_indices):
"""Notify the algorithm of ending episodes.
Also updates the mean score and length counters used for summaries.
Args:
agent_indices: Tensor holding batch indices that end their episodes.
Returns:
Summary tensor.
"""
assert agent_indices.shape.ndims == 1
submit_score = mean_score.submit(tf.gather(score, agent_indices))
submit_length = mean_length.submit(
tf.cast(tf.gather(length, agent_indices), tf.float32))
with tf.control_dependencies([submit_score, submit_length]):
return algo.end_episode(agent_indices)
def _define_summaries():
"""Reset the average score and duration, and return them as summary.
Returns:
Summary string.
"""
score_summary = tf.cond(
tf.logical_and(log, tf.cast(mean_score.count, tf.bool)),
lambda: tf.summary.scalar('mean_score', mean_score.clear()), str)
length_summary = tf.cond(
tf.logical_and(log, tf.cast(mean_length.count, tf.bool)),
lambda: tf.summary.scalar('mean_length', mean_length.clear()), str)
return tf.summary.merge([score_summary, length_summary])
with tf.name_scope('simulate'):
log = tf.convert_to_tensor(log)
reset = tf.convert_to_tensor(reset)
with tf.variable_scope('simulate_temporary'):
score = tf.Variable(
lambda: tf.zeros(len(batch_env), dtype=tf.float32),
trainable=False, name='score')
length = tf.Variable(
lambda: tf.zeros(len(batch_env), dtype=tf.int32),
trainable=False, name='length')
mean_score = streaming_mean.StreamingMean((), tf.float32)
mean_length = streaming_mean.StreamingMean((), tf.float32)
agent_indices = tf.cond(
reset,
lambda: tf.range(len(batch_env)),
lambda: tf.cast(tf.where(batch_env.done)[:, 0], tf.int32))
begin_episode = tf.cond(
tf.cast(tf.shape(agent_indices)[0], tf.bool),
lambda: _define_begin_episode(agent_indices), str)
with tf.control_dependencies([begin_episode]):
step = _define_step()
with tf.control_dependencies([step]):
agent_indices = tf.cast(tf.where(batch_env.done)[:, 0], tf.int32)
end_episode = tf.cond(
tf.cast(tf.shape(agent_indices)[0], tf.bool),
lambda: _define_end_episode(agent_indices), str)
with tf.control_dependencies([end_episode]):
summary = tf.summary.merge([
_define_summaries(), begin_episode, step, end_episode])
with tf.control_dependencies([summary]):
done, score = tf.identity(batch_env.done), tf.identity(score)
return done, score, summary |
def on_ujson_dumps(self, ujson, config, dictionary, **kwargs):
""" The `ujson <https://pypi.org/project/ujson/>`_ dumps method.
:param module ujson: The ``ujson`` module
:param class config: The instance's config class
:param dict dictionary: The dictionary instance to serialize
:param int indent: The amount of spaces to use for indentation,
defaults to 0, optional
:returns: The json serialization of the given ``dictionary``
:rtype: str
"""
if not kwargs.get("indent", None):
kwargs["indent"] = 0
return ujson.dumps(dictionary, **kwargs) | The `ujson <https://pypi.org/project/ujson/>`_ dumps method.
:param module ujson: The ``ujson`` module
:param class config: The instance's config class
:param dict dictionary: The dictionary instance to serialize
:param int indent: The amount of spaces to use for indentation,
defaults to 0, optional
:returns: The json serialization of the given ``dictionary``
:rtype: str | Below is the the instruction that describes the task:
### Input:
The `ujson <https://pypi.org/project/ujson/>`_ dumps method.
:param module ujson: The ``ujson`` module
:param class config: The instance's config class
:param dict dictionary: The dictionary instance to serialize
:param int indent: The amount of spaces to use for indentation,
defaults to 0, optional
:returns: The json serialization of the given ``dictionary``
:rtype: str
### Response:
def on_ujson_dumps(self, ujson, config, dictionary, **kwargs):
""" The `ujson <https://pypi.org/project/ujson/>`_ dumps method.
:param module ujson: The ``ujson`` module
:param class config: The instance's config class
:param dict dictionary: The dictionary instance to serialize
:param int indent: The amount of spaces to use for indentation,
defaults to 0, optional
:returns: The json serialization of the given ``dictionary``
:rtype: str
"""
if not kwargs.get("indent", None):
kwargs["indent"] = 0
return ujson.dumps(dictionary, **kwargs) |
def unique(seq):
"""Helper function to include only unique monomials in a basis."""
seen = {}
result = []
for item in seq:
marker = item
if marker in seen:
continue
seen[marker] = 1
result.append(item)
return result | Helper function to include only unique monomials in a basis. | Below is the the instruction that describes the task:
### Input:
Helper function to include only unique monomials in a basis.
### Response:
def unique(seq):
"""Helper function to include only unique monomials in a basis."""
seen = {}
result = []
for item in seq:
marker = item
if marker in seen:
continue
seen[marker] = 1
result.append(item)
return result |
def fit_richness(self, atol=1.e-3, maxiter=50):
"""
Maximize the log-likelihood as a function of richness.
ADW 2018-06-04: Does it make sense to set the richness to the mle?
Parameters:
-----------
atol : absolute tolerence for conversion
maxiter : maximum number of iterations
Returns:
--------
loglike, richness, parabola : the maximum loglike, the mle, and the parabola
"""
# Check whether the signal probability for all objects are zero
# This can occur for finite kernels on the edge of the survey footprint
if np.isnan(self.u).any():
logger.warning("NaN signal probability found")
return 0., 0., None
if not np.any(self.u):
logger.warning("Signal probability is zero for all objects")
return 0., 0., None
if self.f == 0:
logger.warning("Observable fraction is zero")
return 0., 0., None
# Richness corresponding to 0, 1, and 10 observable stars
richness = np.array([0., 1./self.f, 10./self.f])
loglike = np.array([self.value(richness=r) for r in richness])
found_maximum = False
iteration = 0
while not found_maximum:
parabola = ugali.utils.parabola.Parabola(richness, 2.*loglike)
if parabola.vertex_x < 0.:
found_maximum = True
else:
richness = np.append(richness, parabola.vertex_x)
loglike = np.append(loglike, self.value(richness=richness[-1]))
if np.fabs(loglike[-1] - np.max(loglike[0: -1])) < atol:
found_maximum = True
iteration+=1
if iteration > maxiter:
logger.warning("Maximum number of iterations reached")
break
index = np.argmax(loglike)
return loglike[index], richness[index], parabola | Maximize the log-likelihood as a function of richness.
ADW 2018-06-04: Does it make sense to set the richness to the mle?
Parameters:
-----------
atol : absolute tolerence for conversion
maxiter : maximum number of iterations
Returns:
--------
loglike, richness, parabola : the maximum loglike, the mle, and the parabola | Below is the the instruction that describes the task:
### Input:
Maximize the log-likelihood as a function of richness.
ADW 2018-06-04: Does it make sense to set the richness to the mle?
Parameters:
-----------
atol : absolute tolerence for conversion
maxiter : maximum number of iterations
Returns:
--------
loglike, richness, parabola : the maximum loglike, the mle, and the parabola
### Response:
def fit_richness(self, atol=1.e-3, maxiter=50):
"""
Maximize the log-likelihood as a function of richness.
ADW 2018-06-04: Does it make sense to set the richness to the mle?
Parameters:
-----------
atol : absolute tolerence for conversion
maxiter : maximum number of iterations
Returns:
--------
loglike, richness, parabola : the maximum loglike, the mle, and the parabola
"""
# Check whether the signal probability for all objects are zero
# This can occur for finite kernels on the edge of the survey footprint
if np.isnan(self.u).any():
logger.warning("NaN signal probability found")
return 0., 0., None
if not np.any(self.u):
logger.warning("Signal probability is zero for all objects")
return 0., 0., None
if self.f == 0:
logger.warning("Observable fraction is zero")
return 0., 0., None
# Richness corresponding to 0, 1, and 10 observable stars
richness = np.array([0., 1./self.f, 10./self.f])
loglike = np.array([self.value(richness=r) for r in richness])
found_maximum = False
iteration = 0
while not found_maximum:
parabola = ugali.utils.parabola.Parabola(richness, 2.*loglike)
if parabola.vertex_x < 0.:
found_maximum = True
else:
richness = np.append(richness, parabola.vertex_x)
loglike = np.append(loglike, self.value(richness=richness[-1]))
if np.fabs(loglike[-1] - np.max(loglike[0: -1])) < atol:
found_maximum = True
iteration+=1
if iteration > maxiter:
logger.warning("Maximum number of iterations reached")
break
index = np.argmax(loglike)
return loglike[index], richness[index], parabola |
def receive_bytes(self, data):
"""Process bytes received from the network.
Arguments:
data (bytes): any length bytes received from a network connection
to a kafka broker.
Returns:
responses (list of (correlation_id, response)): any/all completed
responses, decoded from bytes to python objects.
Raises:
KafkaProtocolError: if the bytes received could not be decoded.
CorrelationIdError: if the response does not match the request
correlation id.
"""
i = 0
n = len(data)
responses = []
while i < n:
# Not receiving is the state of reading the payload header
if not self._receiving:
bytes_to_read = min(4 - self._header.tell(), n - i)
self._header.write(data[i:i+bytes_to_read])
i += bytes_to_read
if self._header.tell() == 4:
self._header.seek(0)
nbytes = Int32.decode(self._header)
# reset buffer and switch state to receiving payload bytes
self._rbuffer = KafkaBytes(nbytes)
self._receiving = True
elif self._header.tell() > 4:
raise Errors.KafkaError('this should not happen - are you threading?')
if self._receiving:
total_bytes = len(self._rbuffer)
staged_bytes = self._rbuffer.tell()
bytes_to_read = min(total_bytes - staged_bytes, n - i)
self._rbuffer.write(data[i:i+bytes_to_read])
i += bytes_to_read
staged_bytes = self._rbuffer.tell()
if staged_bytes > total_bytes:
raise Errors.KafkaError('Receive buffer has more bytes than expected?')
if staged_bytes != total_bytes:
break
self._receiving = False
self._rbuffer.seek(0)
resp = self._process_response(self._rbuffer)
responses.append(resp)
self._reset_buffer()
return responses | Process bytes received from the network.
Arguments:
data (bytes): any length bytes received from a network connection
to a kafka broker.
Returns:
responses (list of (correlation_id, response)): any/all completed
responses, decoded from bytes to python objects.
Raises:
KafkaProtocolError: if the bytes received could not be decoded.
CorrelationIdError: if the response does not match the request
correlation id. | Below is the the instruction that describes the task:
### Input:
Process bytes received from the network.
Arguments:
data (bytes): any length bytes received from a network connection
to a kafka broker.
Returns:
responses (list of (correlation_id, response)): any/all completed
responses, decoded from bytes to python objects.
Raises:
KafkaProtocolError: if the bytes received could not be decoded.
CorrelationIdError: if the response does not match the request
correlation id.
### Response:
def receive_bytes(self, data):
"""Process bytes received from the network.
Arguments:
data (bytes): any length bytes received from a network connection
to a kafka broker.
Returns:
responses (list of (correlation_id, response)): any/all completed
responses, decoded from bytes to python objects.
Raises:
KafkaProtocolError: if the bytes received could not be decoded.
CorrelationIdError: if the response does not match the request
correlation id.
"""
i = 0
n = len(data)
responses = []
while i < n:
# Not receiving is the state of reading the payload header
if not self._receiving:
bytes_to_read = min(4 - self._header.tell(), n - i)
self._header.write(data[i:i+bytes_to_read])
i += bytes_to_read
if self._header.tell() == 4:
self._header.seek(0)
nbytes = Int32.decode(self._header)
# reset buffer and switch state to receiving payload bytes
self._rbuffer = KafkaBytes(nbytes)
self._receiving = True
elif self._header.tell() > 4:
raise Errors.KafkaError('this should not happen - are you threading?')
if self._receiving:
total_bytes = len(self._rbuffer)
staged_bytes = self._rbuffer.tell()
bytes_to_read = min(total_bytes - staged_bytes, n - i)
self._rbuffer.write(data[i:i+bytes_to_read])
i += bytes_to_read
staged_bytes = self._rbuffer.tell()
if staged_bytes > total_bytes:
raise Errors.KafkaError('Receive buffer has more bytes than expected?')
if staged_bytes != total_bytes:
break
self._receiving = False
self._rbuffer.seek(0)
resp = self._process_response(self._rbuffer)
responses.append(resp)
self._reset_buffer()
return responses |
def setup_sfr_pars(self, par_cols=None, include_temporal_pars=False):
"""setup multiplier parameters for sfr segment data
Adding support for reachinput (and isfropt = 1)"""
assert self.m.sfr is not None, "can't find sfr package..."
if isinstance(par_cols, str):
par_cols = [par_cols]
reach_pars = False # default to False
seg_pars = True
par_dfs = {}
df = pyemu.gw_utils.setup_sfr_seg_parameters(self.m, par_cols=par_cols,
include_temporal_pars=include_temporal_pars) # now just pass model
# self.par_dfs["sfr"] = df
if df.empty:
warnings.warn("No sfr segment parameters have been set up", PyemuWarning)
par_dfs["sfr"] = []
seg_pars = False
else:
par_dfs["sfr"] = [df] # may need df for both segs and reaches
self.tpl_files.append("sfr_seg_pars.dat.tpl")
self.in_files.append("sfr_seg_pars.dat")
if include_temporal_pars:
self.tpl_files.append("sfr_seg_temporal_pars.dat.tpl")
self.in_files.append("sfr_seg_temporal_pars.dat")
if self.m.sfr.reachinput:
# if include_temporal_pars:
# raise NotImplementedError("temporal pars is not set up for reach data style")
df = pyemu.gw_utils.setup_sfr_reach_parameters(self.m, par_cols=par_cols)
if df.empty:
warnings.warn("No sfr reach parameters have been set up", PyemuWarning)
else:
self.tpl_files.append("sfr_reach_pars.dat.tpl")
self.in_files.append("sfr_reach_pars.dat")
reach_pars = True
par_dfs["sfr"].append(df)
if len(par_dfs["sfr"]) > 0:
self.par_dfs["sfr"] = pd.concat(par_dfs["sfr"])
self.frun_pre_lines.append(
"pyemu.gw_utils.apply_sfr_parameters(seg_pars={0}, reach_pars={1})".format(seg_pars, reach_pars))
else:
warnings.warn("No sfr parameters have been set up!", PyemuWarning) | setup multiplier parameters for sfr segment data
Adding support for reachinput (and isfropt = 1) | Below is the the instruction that describes the task:
### Input:
setup multiplier parameters for sfr segment data
Adding support for reachinput (and isfropt = 1)
### Response:
def setup_sfr_pars(self, par_cols=None, include_temporal_pars=False):
"""setup multiplier parameters for sfr segment data
Adding support for reachinput (and isfropt = 1)"""
assert self.m.sfr is not None, "can't find sfr package..."
if isinstance(par_cols, str):
par_cols = [par_cols]
reach_pars = False # default to False
seg_pars = True
par_dfs = {}
df = pyemu.gw_utils.setup_sfr_seg_parameters(self.m, par_cols=par_cols,
include_temporal_pars=include_temporal_pars) # now just pass model
# self.par_dfs["sfr"] = df
if df.empty:
warnings.warn("No sfr segment parameters have been set up", PyemuWarning)
par_dfs["sfr"] = []
seg_pars = False
else:
par_dfs["sfr"] = [df] # may need df for both segs and reaches
self.tpl_files.append("sfr_seg_pars.dat.tpl")
self.in_files.append("sfr_seg_pars.dat")
if include_temporal_pars:
self.tpl_files.append("sfr_seg_temporal_pars.dat.tpl")
self.in_files.append("sfr_seg_temporal_pars.dat")
if self.m.sfr.reachinput:
# if include_temporal_pars:
# raise NotImplementedError("temporal pars is not set up for reach data style")
df = pyemu.gw_utils.setup_sfr_reach_parameters(self.m, par_cols=par_cols)
if df.empty:
warnings.warn("No sfr reach parameters have been set up", PyemuWarning)
else:
self.tpl_files.append("sfr_reach_pars.dat.tpl")
self.in_files.append("sfr_reach_pars.dat")
reach_pars = True
par_dfs["sfr"].append(df)
if len(par_dfs["sfr"]) > 0:
self.par_dfs["sfr"] = pd.concat(par_dfs["sfr"])
self.frun_pre_lines.append(
"pyemu.gw_utils.apply_sfr_parameters(seg_pars={0}, reach_pars={1})".format(seg_pars, reach_pars))
else:
warnings.warn("No sfr parameters have been set up!", PyemuWarning) |
def _pkcs1imify(self, data):
"""
turn a 20-byte SHA1 hash into a blob of data as large as the key's N,
using PKCS1's \"emsa-pkcs1-v1_5\" encoding. totally bizarre.
"""
SHA1_DIGESTINFO = '\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'
size = len(util.deflate_long(self.n, 0))
filler = '\xff' * (size - len(SHA1_DIGESTINFO) - len(data) - 3)
return '\x00\x01' + filler + '\x00' + SHA1_DIGESTINFO + data | turn a 20-byte SHA1 hash into a blob of data as large as the key's N,
using PKCS1's \"emsa-pkcs1-v1_5\" encoding. totally bizarre. | Below is the the instruction that describes the task:
### Input:
turn a 20-byte SHA1 hash into a blob of data as large as the key's N,
using PKCS1's \"emsa-pkcs1-v1_5\" encoding. totally bizarre.
### Response:
def _pkcs1imify(self, data):
"""
turn a 20-byte SHA1 hash into a blob of data as large as the key's N,
using PKCS1's \"emsa-pkcs1-v1_5\" encoding. totally bizarre.
"""
SHA1_DIGESTINFO = '\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'
size = len(util.deflate_long(self.n, 0))
filler = '\xff' * (size - len(SHA1_DIGESTINFO) - len(data) - 3)
return '\x00\x01' + filler + '\x00' + SHA1_DIGESTINFO + data |
def __geo_point(lat, lon, elev):
"""
GeoJSON standard:
Create a geoJson Point-type dictionary
:param list lat:
:param list lon:
:return dict:
"""
logger_noaa_lpd.info("enter geo_point")
coordinates = []
geo_dict = OrderedDict()
geometry_dict = OrderedDict()
for index, point in enumerate(lat):
coordinates.append(lat[index])
coordinates.append(lon[index])
if elev:
coordinates = coordinates + elev
geometry_dict['type'] = 'Point'
geometry_dict['coordinates'] = coordinates
geo_dict['type'] = 'Feature'
geo_dict['geometry'] = geometry_dict
return geo_dict | GeoJSON standard:
Create a geoJson Point-type dictionary
:param list lat:
:param list lon:
:return dict: | Below is the the instruction that describes the task:
### Input:
GeoJSON standard:
Create a geoJson Point-type dictionary
:param list lat:
:param list lon:
:return dict:
### Response:
def __geo_point(lat, lon, elev):
"""
GeoJSON standard:
Create a geoJson Point-type dictionary
:param list lat:
:param list lon:
:return dict:
"""
logger_noaa_lpd.info("enter geo_point")
coordinates = []
geo_dict = OrderedDict()
geometry_dict = OrderedDict()
for index, point in enumerate(lat):
coordinates.append(lat[index])
coordinates.append(lon[index])
if elev:
coordinates = coordinates + elev
geometry_dict['type'] = 'Point'
geometry_dict['coordinates'] = coordinates
geo_dict['type'] = 'Feature'
geo_dict['geometry'] = geometry_dict
return geo_dict |
def add_callback(self, callback):
"""
Add a callback function to the listener.
The callback function will be called for each indication this listener
receives from any WBEM server.
If the callback function is already known to the listener, it will not
be added.
Parameters:
callback (:func:`~pywbem.callback_interface`):
Callable that is being called for each CIM indication that is
received while the listener threads are running.
"""
if callback not in self._callbacks:
self._callbacks.append(callback) | Add a callback function to the listener.
The callback function will be called for each indication this listener
receives from any WBEM server.
If the callback function is already known to the listener, it will not
be added.
Parameters:
callback (:func:`~pywbem.callback_interface`):
Callable that is being called for each CIM indication that is
received while the listener threads are running. | Below is the the instruction that describes the task:
### Input:
Add a callback function to the listener.
The callback function will be called for each indication this listener
receives from any WBEM server.
If the callback function is already known to the listener, it will not
be added.
Parameters:
callback (:func:`~pywbem.callback_interface`):
Callable that is being called for each CIM indication that is
received while the listener threads are running.
### Response:
def add_callback(self, callback):
"""
Add a callback function to the listener.
The callback function will be called for each indication this listener
receives from any WBEM server.
If the callback function is already known to the listener, it will not
be added.
Parameters:
callback (:func:`~pywbem.callback_interface`):
Callable that is being called for each CIM indication that is
received while the listener threads are running.
"""
if callback not in self._callbacks:
self._callbacks.append(callback) |
def with_params(self, params):
"""Create a new request with added query parameters
Parameters
----------
params: Mapping
the query parameters to add
"""
return self.replace(params=_merge_maps(self.params, params)) | Create a new request with added query parameters
Parameters
----------
params: Mapping
the query parameters to add | Below is the the instruction that describes the task:
### Input:
Create a new request with added query parameters
Parameters
----------
params: Mapping
the query parameters to add
### Response:
def with_params(self, params):
"""Create a new request with added query parameters
Parameters
----------
params: Mapping
the query parameters to add
"""
return self.replace(params=_merge_maps(self.params, params)) |
def leaves(self):
""" Returns a :class:`QuerySet` of all leaf nodes (nodes with no
children).
:return: A :class:`QuerySet` of all leaf nodes (nodes with no
children).
"""
# We need to read the _cte_node_children attribute, so ensure it exists.
self._ensure_parameters()
return self.exclude(
**{"%s__id__in" % self.model._cte_node_children: self.all()}
) | Returns a :class:`QuerySet` of all leaf nodes (nodes with no
children).
:return: A :class:`QuerySet` of all leaf nodes (nodes with no
children). | Below is the the instruction that describes the task:
### Input:
Returns a :class:`QuerySet` of all leaf nodes (nodes with no
children).
:return: A :class:`QuerySet` of all leaf nodes (nodes with no
children).
### Response:
def leaves(self):
""" Returns a :class:`QuerySet` of all leaf nodes (nodes with no
children).
:return: A :class:`QuerySet` of all leaf nodes (nodes with no
children).
"""
# We need to read the _cte_node_children attribute, so ensure it exists.
self._ensure_parameters()
return self.exclude(
**{"%s__id__in" % self.model._cte_node_children: self.all()}
) |
def _computeSky(image, skypars, memmap=False):
"""
Compute the sky value for the data array passed to the function
image is a fits object which contains the data and the header
for one image extension
skypars is passed in as paramDict
"""
#this object contains the returned values from the image stats routine
_tmp = imagestats.ImageStats(image.data,
fields = skypars['skystat'],
lower = skypars['skylower'],
upper = skypars['skyupper'],
nclip = skypars['skyclip'],
lsig = skypars['skylsigma'],
usig = skypars['skyusigma'],
binwidth = skypars['skywidth']
)
_skyValue = _extractSkyValue(_tmp,skypars['skystat'].lower())
log.info(" Computed sky value/pixel for %s: %s "%
(image.rootname, _skyValue))
del _tmp
return _skyValue | Compute the sky value for the data array passed to the function
image is a fits object which contains the data and the header
for one image extension
skypars is passed in as paramDict | Below is the the instruction that describes the task:
### Input:
Compute the sky value for the data array passed to the function
image is a fits object which contains the data and the header
for one image extension
skypars is passed in as paramDict
### Response:
def _computeSky(image, skypars, memmap=False):
"""
Compute the sky value for the data array passed to the function
image is a fits object which contains the data and the header
for one image extension
skypars is passed in as paramDict
"""
#this object contains the returned values from the image stats routine
_tmp = imagestats.ImageStats(image.data,
fields = skypars['skystat'],
lower = skypars['skylower'],
upper = skypars['skyupper'],
nclip = skypars['skyclip'],
lsig = skypars['skylsigma'],
usig = skypars['skyusigma'],
binwidth = skypars['skywidth']
)
_skyValue = _extractSkyValue(_tmp,skypars['skystat'].lower())
log.info(" Computed sky value/pixel for %s: %s "%
(image.rootname, _skyValue))
del _tmp
return _skyValue |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.