code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def initialize_media_descriptor(self) -> None:
"""
Returns the media descriptor for the first media descriptor where
the file can be found.
"""
for md in self.media_descriptors:
media_path = self.get_media_path(md)
if media_path.is_file():
self.media_descriptor = md
return
raise FileNotFoundError(
"""Cannot find media file corresponding to {}.
Tried looking for the following files: {}.
""".format(self.eaf_path, [self.get_media_path(md)
for md in self.media_descriptors])) | Returns the media descriptor for the first media descriptor where
the file can be found. | Below is the instruction that describes the task:
### Input:
Returns the media descriptor for the first media descriptor where
the file can be found.
### Response:
def initialize_media_descriptor(self) -> None:
"""
Returns the media descriptor for the first media descriptor where
the file can be found.
"""
for md in self.media_descriptors:
media_path = self.get_media_path(md)
if media_path.is_file():
self.media_descriptor = md
return
raise FileNotFoundError(
"""Cannot find media file corresponding to {}.
Tried looking for the following files: {}.
""".format(self.eaf_path, [self.get_media_path(md)
for md in self.media_descriptors])) |
def clear_time_value(self):
"""stub"""
if (self.get_time_value_metadata().is_read_only() or
self.get_time_value_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['timeValue'] = \
        dict(self.get_time_value_metadata().get_default_duration_values()[0]) | stub | Below is the instruction that describes the task:
### Input:
stub
### Response:
def clear_time_value(self):
"""stub"""
if (self.get_time_value_metadata().is_read_only() or
self.get_time_value_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['timeValue'] = \
dict(self.get_time_value_metadata().get_default_duration_values()[0]) |
def get_image_hash(image):
'''
Returns an MD5 hash of the image file
Handles images stored locally and on AWS
I know this code is ugly.
Please don't ask.
The rabbit hole is deep.
'''
md5 = hashlib.md5()
try:
for chunk in image.file.chunks():
md5.update(chunk)
return md5.hexdigest()
# this should only occur in tests
except ValueError:
# see link below for why we try not to use .open()
# https://docs.djangoproject.com/en/1.9/ref/files/uploads/#django.core.files.uploadedfile.UploadedFile.chunks # noqa
image.file.open()
for chunk in image.file.chunks():
md5.update(chunk)
return md5.hexdigest()
finally:
image.file.close() | Returns an MD5 hash of the image file
Handles images stored locally and on AWS
I know this code is ugly.
Please don't ask.
    The rabbit hole is deep. | Below is the instruction that describes the task:
### Input:
Returns an MD5 hash of the image file
Handles images stored locally and on AWS
I know this code is ugly.
Please don't ask.
The rabbit hole is deep.
### Response:
def get_image_hash(image):
'''
Returns an MD5 hash of the image file
Handles images stored locally and on AWS
I know this code is ugly.
Please don't ask.
The rabbit hole is deep.
'''
md5 = hashlib.md5()
try:
for chunk in image.file.chunks():
md5.update(chunk)
return md5.hexdigest()
# this should only occur in tests
except ValueError:
# see link below for why we try not to use .open()
# https://docs.djangoproject.com/en/1.9/ref/files/uploads/#django.core.files.uploadedfile.UploadedFile.chunks # noqa
image.file.open()
for chunk in image.file.chunks():
md5.update(chunk)
return md5.hexdigest()
finally:
image.file.close() |
def get_vnetwork_dvpgs_output_vnetwork_dvpgs_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs")
config = get_vnetwork_dvpgs
output = ET.SubElement(get_vnetwork_dvpgs, "output")
vnetwork_dvpgs = ET.SubElement(output, "vnetwork-dvpgs")
name = ET.SubElement(vnetwork_dvpgs, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_vnetwork_dvpgs_output_vnetwork_dvpgs_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs")
config = get_vnetwork_dvpgs
output = ET.SubElement(get_vnetwork_dvpgs, "output")
vnetwork_dvpgs = ET.SubElement(output, "vnetwork-dvpgs")
name = ET.SubElement(vnetwork_dvpgs, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _find_video(self):
"""
Lookup and populate ``pybrightcove.video.Video`` object given a video
id or reference_id.
"""
data = None
if self.id:
data = self.connection.get_item(
'find_video_by_id', video_id=self.id)
elif self.reference_id:
data = self.connection.get_item(
'find_video_by_reference_id', reference_id=self.reference_id)
if data:
self._load(data) | Lookup and populate ``pybrightcove.video.Video`` object given a video
        id or reference_id. | Below is the instruction that describes the task:
### Input:
Lookup and populate ``pybrightcove.video.Video`` object given a video
id or reference_id.
### Response:
def _find_video(self):
"""
Lookup and populate ``pybrightcove.video.Video`` object given a video
id or reference_id.
"""
data = None
if self.id:
data = self.connection.get_item(
'find_video_by_id', video_id=self.id)
elif self.reference_id:
data = self.connection.get_item(
'find_video_by_reference_id', reference_id=self.reference_id)
if data:
self._load(data) |
def stage(draft, discard, repo_directory, release_name, release_description):
"""
Stages a release
"""
with work_in(repo_directory):
if discard:
stage_command.discard(release_name, release_description)
else:
            stage_command.stage(draft, release_name, release_description) | Stages a release | Below is the instruction that describes the task:
### Input:
Stages a release
### Response:
def stage(draft, discard, repo_directory, release_name, release_description):
"""
Stages a release
"""
with work_in(repo_directory):
if discard:
stage_command.discard(release_name, release_description)
else:
stage_command.stage(draft, release_name, release_description) |
def get_residue_mapping(self):
'''Returns a mapping between the sequences ONLY IF there are exactly two. This restriction makes the code much simpler.'''
if len(self.sequence_ids) == 2:
if not self.alignment_output:
self.align()
assert(self.alignment_output)
return self._create_residue_map(self._get_alignment_lines(), self.sequence_ids[1], self.sequence_ids[2])
else:
            return None | Returns a mapping between the sequences ONLY IF there are exactly two. This restriction makes the code much simpler. | Below is the instruction that describes the task:
### Input:
Returns a mapping between the sequences ONLY IF there are exactly two. This restriction makes the code much simpler.
### Response:
def get_residue_mapping(self):
'''Returns a mapping between the sequences ONLY IF there are exactly two. This restriction makes the code much simpler.'''
if len(self.sequence_ids) == 2:
if not self.alignment_output:
self.align()
assert(self.alignment_output)
return self._create_residue_map(self._get_alignment_lines(), self.sequence_ids[1], self.sequence_ids[2])
else:
return None |
def rename(self, oldpath, newpath):
"""
Rename a file or folder from ``oldpath`` to ``newpath``.
.. note::
This method implements 'standard' SFTP ``RENAME`` behavior; those
seeking the OpenSSH "POSIX rename" extension behavior should use
`posix_rename`.
:param str oldpath:
existing name of the file or folder
:param str newpath:
new name for the file or folder, must not exist already
:raises:
``IOError`` -- if ``newpath`` is a folder, or something else goes
wrong
"""
oldpath = self._adjust_cwd(oldpath)
newpath = self._adjust_cwd(newpath)
self._log(DEBUG, "rename({!r}, {!r})".format(oldpath, newpath))
self._request(CMD_RENAME, oldpath, newpath) | Rename a file or folder from ``oldpath`` to ``newpath``.
.. note::
This method implements 'standard' SFTP ``RENAME`` behavior; those
seeking the OpenSSH "POSIX rename" extension behavior should use
`posix_rename`.
:param str oldpath:
existing name of the file or folder
:param str newpath:
new name for the file or folder, must not exist already
:raises:
``IOError`` -- if ``newpath`` is a folder, or something else goes
        wrong | Below is the instruction that describes the task:
### Input:
Rename a file or folder from ``oldpath`` to ``newpath``.
.. note::
This method implements 'standard' SFTP ``RENAME`` behavior; those
seeking the OpenSSH "POSIX rename" extension behavior should use
`posix_rename`.
:param str oldpath:
existing name of the file or folder
:param str newpath:
new name for the file or folder, must not exist already
:raises:
``IOError`` -- if ``newpath`` is a folder, or something else goes
wrong
### Response:
def rename(self, oldpath, newpath):
"""
Rename a file or folder from ``oldpath`` to ``newpath``.
.. note::
This method implements 'standard' SFTP ``RENAME`` behavior; those
seeking the OpenSSH "POSIX rename" extension behavior should use
`posix_rename`.
:param str oldpath:
existing name of the file or folder
:param str newpath:
new name for the file or folder, must not exist already
:raises:
``IOError`` -- if ``newpath`` is a folder, or something else goes
wrong
"""
oldpath = self._adjust_cwd(oldpath)
newpath = self._adjust_cwd(newpath)
self._log(DEBUG, "rename({!r}, {!r})".format(oldpath, newpath))
self._request(CMD_RENAME, oldpath, newpath) |
def pipeline_control_new(rst, clk, rx_rdy, rx_vld, tx_rdy, tx_vld, stage_enable, stop_rx=None, stop_tx=None):
""" Pipeline control unit
rx_rdy, rx_vld, - (o)(i) handshake at the pipeline input (front of the pipeline)
tx_rdy, tx_vld, - (i)(o) handshake at the pipeline output (back of the pipeline)
stage_enable - (o) vector of enable signals, one signal per stage, that controls the data registration in the stages;
The length of this vector determines the number of stages in the pipeline
stop_rx - (i) optional, vector of signals, one signal per stage; when asserted, the corresponding stage stops consuming data;
allows for multicycle execution in a stage (e.g. consume a data, then process it multiple cycles)
stop_tx - (i) optional, vector of signals, one signal per stage; when asserted, the corresponding stage stops producing data;
allows for multicycle execution in a stage (consume multiple data to produce single data )
stop_rx and stop_tx - If you do not need them, then do not connect them
"""
NUM_STAGES = len(stage_enable)
if (stop_rx == None):
stop_rx = Signal(intbv(0)[NUM_STAGES:])
if (stop_tx == None):
stop_tx = Signal(intbv(0)[NUM_STAGES:])
assert (len(stop_rx)==NUM_STAGES), "pipeline_control: expects len(stop_rx)=len(stage_enable), but len(stop_rx)={} len(stage_enable)={}".format(len(stop_rx),NUM_STAGES)
assert (len(stop_tx)==NUM_STAGES), "pipeline_control: expects len(stop_tx)=len(stage_enable), but len(stop_tx)={} len(stage_enable)={}".format(len(stop_tx),NUM_STAGES)
rdy = [Signal(bool(0)) for _ in range(NUM_STAGES+1)]
vld = [Signal(bool(0)) for _ in range(NUM_STAGES+1)]
BC = NUM_STAGES*[False]
en = [Signal(bool(0)) for _ in range(NUM_STAGES)]
stop_rx_s = [Signal(bool(0)) for _ in range(NUM_STAGES)]
stop_tx_s = [Signal(bool(0)) for _ in range(NUM_STAGES)]
rdy[0] = rx_rdy
vld[0] = rx_vld
rdy[-1] = tx_rdy
vld[-1] = tx_vld
BC[-1] = True
stg = [None for _ in range(NUM_STAGES)]
for i in range(NUM_STAGES):
stg[i] = _stage_ctrl(rst = rst,
clk = clk,
rx_rdy = rdy[i],
rx_vld = vld[i],
tx_rdy = rdy[i+1],
tx_vld = vld[i+1],
stage_en = en[i],
stop_rx = stop_rx_s[i],
stop_tx = stop_tx_s[i],
BC = BC[i])
x = en[0] if NUM_STAGES==1 else ConcatSignal(*reversed(en))
@always_comb
def _comb():
stage_enable.next = x
for i in range(NUM_STAGES):
stop_rx_s[i].next = stop_rx[i]
stop_tx_s[i].next = stop_tx[i]
return instances() | Pipeline control unit
rx_rdy, rx_vld, - (o)(i) handshake at the pipeline input (front of the pipeline)
tx_rdy, tx_vld, - (i)(o) handshake at the pipeline output (back of the pipeline)
stage_enable - (o) vector of enable signals, one signal per stage, that controls the data registration in the stages;
The length of this vector determines the number of stages in the pipeline
stop_rx - (i) optional, vector of signals, one signal per stage; when asserted, the corresponding stage stops consuming data;
allows for multicycle execution in a stage (e.g. consume a data, then process it multiple cycles)
stop_tx - (i) optional, vector of signals, one signal per stage; when asserted, the corresponding stage stops producing data;
allows for multicycle execution in a stage (consume multiple data to produce single data )
    stop_rx and stop_tx - If you do not need them, then do not connect them | Below is the instruction that describes the task:
### Input:
Pipeline control unit
rx_rdy, rx_vld, - (o)(i) handshake at the pipeline input (front of the pipeline)
tx_rdy, tx_vld, - (i)(o) handshake at the pipeline output (back of the pipeline)
stage_enable - (o) vector of enable signals, one signal per stage, that controls the data registration in the stages;
The length of this vector determines the number of stages in the pipeline
stop_rx - (i) optional, vector of signals, one signal per stage; when asserted, the corresponding stage stops consuming data;
allows for multicycle execution in a stage (e.g. consume a data, then process it multiple cycles)
stop_tx - (i) optional, vector of signals, one signal per stage; when asserted, the corresponding stage stops producing data;
allows for multicycle execution in a stage (consume multiple data to produce single data )
stop_rx and stop_tx - If you do not need them, then do not connect them
### Response:
def pipeline_control_new(rst, clk, rx_rdy, rx_vld, tx_rdy, tx_vld, stage_enable, stop_rx=None, stop_tx=None):
""" Pipeline control unit
rx_rdy, rx_vld, - (o)(i) handshake at the pipeline input (front of the pipeline)
tx_rdy, tx_vld, - (i)(o) handshake at the pipeline output (back of the pipeline)
stage_enable - (o) vector of enable signals, one signal per stage, that controls the data registration in the stages;
The length of this vector determines the number of stages in the pipeline
stop_rx - (i) optional, vector of signals, one signal per stage; when asserted, the corresponding stage stops consuming data;
allows for multicycle execution in a stage (e.g. consume a data, then process it multiple cycles)
stop_tx - (i) optional, vector of signals, one signal per stage; when asserted, the corresponding stage stops producing data;
allows for multicycle execution in a stage (consume multiple data to produce single data )
stop_rx and stop_tx - If you do not need them, then do not connect them
"""
NUM_STAGES = len(stage_enable)
if (stop_rx == None):
stop_rx = Signal(intbv(0)[NUM_STAGES:])
if (stop_tx == None):
stop_tx = Signal(intbv(0)[NUM_STAGES:])
assert (len(stop_rx)==NUM_STAGES), "pipeline_control: expects len(stop_rx)=len(stage_enable), but len(stop_rx)={} len(stage_enable)={}".format(len(stop_rx),NUM_STAGES)
assert (len(stop_tx)==NUM_STAGES), "pipeline_control: expects len(stop_tx)=len(stage_enable), but len(stop_tx)={} len(stage_enable)={}".format(len(stop_tx),NUM_STAGES)
rdy = [Signal(bool(0)) for _ in range(NUM_STAGES+1)]
vld = [Signal(bool(0)) for _ in range(NUM_STAGES+1)]
BC = NUM_STAGES*[False]
en = [Signal(bool(0)) for _ in range(NUM_STAGES)]
stop_rx_s = [Signal(bool(0)) for _ in range(NUM_STAGES)]
stop_tx_s = [Signal(bool(0)) for _ in range(NUM_STAGES)]
rdy[0] = rx_rdy
vld[0] = rx_vld
rdy[-1] = tx_rdy
vld[-1] = tx_vld
BC[-1] = True
stg = [None for _ in range(NUM_STAGES)]
for i in range(NUM_STAGES):
stg[i] = _stage_ctrl(rst = rst,
clk = clk,
rx_rdy = rdy[i],
rx_vld = vld[i],
tx_rdy = rdy[i+1],
tx_vld = vld[i+1],
stage_en = en[i],
stop_rx = stop_rx_s[i],
stop_tx = stop_tx_s[i],
BC = BC[i])
x = en[0] if NUM_STAGES==1 else ConcatSignal(*reversed(en))
@always_comb
def _comb():
stage_enable.next = x
for i in range(NUM_STAGES):
stop_rx_s[i].next = stop_rx[i]
stop_tx_s[i].next = stop_tx[i]
return instances() |
def _wrapped(self):
"""
Wrap this udf with a function and attach docstring from func
"""
# It is possible for a callable instance without __name__ attribute or/and
# __module__ attribute to be wrapped here. For example, functools.partial. In this case,
# we should avoid wrapping the attributes from the wrapped function to the wrapper
# function. So, we take out these attribute names from the default names to set and
# then manually assign it after being wrapped.
assignments = tuple(
a for a in functools.WRAPPER_ASSIGNMENTS if a != '__name__' and a != '__module__')
@functools.wraps(self.func, assigned=assignments)
def wrapper(*args):
return self(*args)
wrapper.__name__ = self._name
wrapper.__module__ = (self.func.__module__ if hasattr(self.func, '__module__')
else self.func.__class__.__module__)
wrapper.func = self.func
wrapper.returnType = self.returnType
wrapper.evalType = self.evalType
wrapper.deterministic = self.deterministic
wrapper.asNondeterministic = functools.wraps(
self.asNondeterministic)(lambda: self.asNondeterministic()._wrapped())
        return wrapper | Wrap this udf with a function and attach docstring from func | Below is the instruction that describes the task:
### Input:
Wrap this udf with a function and attach docstring from func
### Response:
def _wrapped(self):
"""
Wrap this udf with a function and attach docstring from func
"""
# It is possible for a callable instance without __name__ attribute or/and
# __module__ attribute to be wrapped here. For example, functools.partial. In this case,
# we should avoid wrapping the attributes from the wrapped function to the wrapper
# function. So, we take out these attribute names from the default names to set and
# then manually assign it after being wrapped.
assignments = tuple(
a for a in functools.WRAPPER_ASSIGNMENTS if a != '__name__' and a != '__module__')
@functools.wraps(self.func, assigned=assignments)
def wrapper(*args):
return self(*args)
wrapper.__name__ = self._name
wrapper.__module__ = (self.func.__module__ if hasattr(self.func, '__module__')
else self.func.__class__.__module__)
wrapper.func = self.func
wrapper.returnType = self.returnType
wrapper.evalType = self.evalType
wrapper.deterministic = self.deterministic
wrapper.asNondeterministic = functools.wraps(
self.asNondeterministic)(lambda: self.asNondeterministic()._wrapped())
return wrapper |
def remove_mapping(agent, prefix, ip):
"""Removes a mapping with a contract.
It has high latency but gives some kind of guarantee."""
return _broadcast(agent, RemoveMappingManager,
RecordType.record_A, prefix, ip) | Removes a mapping with a contract.
    It has high latency but gives some kind of guarantee. | Below is the instruction that describes the task:
### Input:
Removes a mapping with a contract.
It has high latency but gives some kind of guarantee.
### Response:
def remove_mapping(agent, prefix, ip):
"""Removes a mapping with a contract.
It has high latency but gives some kind of guarantee."""
return _broadcast(agent, RemoveMappingManager,
RecordType.record_A, prefix, ip) |
def set_canari_mode(mode=CanariMode.Unknown):
"""
Sets the global operating mode for Canari. This is used to alter the behaviour of dangerous classes like the
CanariConfigParser.
:param mode: the numeric Canari operating mode (CanariMode.Local, CanariMode.Remote, etc.).
:return: previous operating mode.
"""
global canari_mode
old_mode = canari_mode
canari_mode = mode
return old_mode | Sets the global operating mode for Canari. This is used to alter the behaviour of dangerous classes like the
CanariConfigParser.
:param mode: the numeric Canari operating mode (CanariMode.Local, CanariMode.Remote, etc.).
    :return: previous operating mode. | Below is the instruction that describes the task:
### Input:
Sets the global operating mode for Canari. This is used to alter the behaviour of dangerous classes like the
CanariConfigParser.
:param mode: the numeric Canari operating mode (CanariMode.Local, CanariMode.Remote, etc.).
:return: previous operating mode.
### Response:
def set_canari_mode(mode=CanariMode.Unknown):
"""
Sets the global operating mode for Canari. This is used to alter the behaviour of dangerous classes like the
CanariConfigParser.
:param mode: the numeric Canari operating mode (CanariMode.Local, CanariMode.Remote, etc.).
:return: previous operating mode.
"""
global canari_mode
old_mode = canari_mode
canari_mode = mode
return old_mode |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'intent') and self.intent is not None:
_dict['intent'] = self.intent
if hasattr(self, 'confidence') and self.confidence is not None:
_dict['confidence'] = self.confidence
        return _dict | Return a json dictionary representing this model. | Below is the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'intent') and self.intent is not None:
_dict['intent'] = self.intent
if hasattr(self, 'confidence') and self.confidence is not None:
_dict['confidence'] = self.confidence
return _dict |
def get_property_value(self, name):
"""Return the value of a property.
name:
the property name in Clark notation.
return value:
may have different types, depending on the status:
- string or unicode: for standard property values.
- lxml.etree.Element: for complex values.
If the property is not available, a DAVError is raised.
This default implementation handles ``{DAV:}lockdiscovery`` and
``{DAV:}supportedlock`` using the associated lock manager.
All other *live* properties (i.e. name starts with ``{DAV:}``) are
delegated to the self.xxx() getters.
Finally, other properties are considered *dead*, and are handled by
the associated property manager.
"""
refUrl = self.get_ref_url()
# lock properties
lm = self.provider.lock_manager
if lm and name == "{DAV:}lockdiscovery":
# TODO: we return HTTP_NOT_FOUND if no lockmanager is present.
# Correct?
activelocklist = lm.get_url_lock_list(refUrl)
lockdiscoveryEL = etree.Element(name)
for lock in activelocklist:
activelockEL = etree.SubElement(lockdiscoveryEL, "{DAV:}activelock")
locktypeEL = etree.SubElement(activelockEL, "{DAV:}locktype")
# Note: make sure `{DAV:}` is not handled as format tag:
etree.SubElement(locktypeEL, "{}{}".format("{DAV:}", lock["type"]))
lockscopeEL = etree.SubElement(activelockEL, "{DAV:}lockscope")
# Note: make sure `{DAV:}` is not handled as format tag:
etree.SubElement(lockscopeEL, "{}{}".format("{DAV:}", lock["scope"]))
etree.SubElement(activelockEL, "{DAV:}depth").text = lock["depth"]
if lock["owner"]:
# lock["owner"] is an XML string
# owner may be empty (#64)
ownerEL = xml_tools.string_to_xml(lock["owner"])
activelockEL.append(ownerEL)
timeout = lock["timeout"]
if timeout < 0:
timeout = "Infinite"
else:
# The time remaining on the lock
expire = lock["expire"]
timeout = "Second-" + str(int(expire - time.time()))
etree.SubElement(activelockEL, "{DAV:}timeout").text = timeout
locktokenEL = etree.SubElement(activelockEL, "{DAV:}locktoken")
etree.SubElement(locktokenEL, "{DAV:}href").text = lock["token"]
# TODO: this is ugly:
# res.get_property_value("{DAV:}lockdiscovery")
#
# lockRoot = self.get_href(self.provider.ref_url_to_path(lock["root"]))
lockPath = self.provider.ref_url_to_path(lock["root"])
lockRes = self.provider.get_resource_inst(lockPath, self.environ)
# FIXME: test for None
lockHref = lockRes.get_href()
lockrootEL = etree.SubElement(activelockEL, "{DAV:}lockroot")
etree.SubElement(lockrootEL, "{DAV:}href").text = lockHref
return lockdiscoveryEL
elif lm and name == "{DAV:}supportedlock":
# TODO: we return HTTP_NOT_FOUND if no lockmanager is present. Correct?
# TODO: the lockmanager should decide about it's features
supportedlockEL = etree.Element(name)
lockentryEL = etree.SubElement(supportedlockEL, "{DAV:}lockentry")
lockscopeEL = etree.SubElement(lockentryEL, "{DAV:}lockscope")
etree.SubElement(lockscopeEL, "{DAV:}exclusive")
locktypeEL = etree.SubElement(lockentryEL, "{DAV:}locktype")
etree.SubElement(locktypeEL, "{DAV:}write")
lockentryEL = etree.SubElement(supportedlockEL, "{DAV:}lockentry")
lockscopeEL = etree.SubElement(lockentryEL, "{DAV:}lockscope")
etree.SubElement(lockscopeEL, "{DAV:}shared")
locktypeEL = etree.SubElement(lockentryEL, "{DAV:}locktype")
etree.SubElement(locktypeEL, "{DAV:}write")
return supportedlockEL
elif name.startswith("{DAV:}"):
# Standard live property (raises HTTP_NOT_FOUND if not supported)
if name == "{DAV:}creationdate" and self.get_creation_date() is not None:
# Note: uses RFC3339 format (ISO 8601)
return util.get_rfc3339_time(self.get_creation_date())
elif name == "{DAV:}getcontenttype" and self.get_content_type() is not None:
return self.get_content_type()
elif name == "{DAV:}resourcetype":
if self.is_collection:
resourcetypeEL = etree.Element(name)
etree.SubElement(resourcetypeEL, "{DAV:}collection")
return resourcetypeEL
return ""
elif (
name == "{DAV:}getlastmodified" and self.get_last_modified() is not None
):
# Note: uses RFC1123 format
return util.get_rfc1123_time(self.get_last_modified())
elif (
name == "{DAV:}getcontentlength"
and self.get_content_length() is not None
):
# Note: must be a numeric string
return str(self.get_content_length())
elif name == "{DAV:}getetag" and self.get_etag() is not None:
return self.get_etag()
elif name == "{DAV:}displayname" and self.get_display_name() is not None:
return self.get_display_name()
# Unsupported, no persistence available, or property not found
raise DAVError(HTTP_NOT_FOUND)
# Dead property
pm = self.provider.prop_manager
if pm:
value = pm.get_property(refUrl, name, self.environ)
if value is not None:
return xml_tools.string_to_xml(value)
# No persistence available, or property not found
raise DAVError(HTTP_NOT_FOUND) | Return the value of a property.
name:
the property name in Clark notation.
return value:
may have different types, depending on the status:
- string or unicode: for standard property values.
- lxml.etree.Element: for complex values.
If the property is not available, a DAVError is raised.
This default implementation handles ``{DAV:}lockdiscovery`` and
``{DAV:}supportedlock`` using the associated lock manager.
All other *live* properties (i.e. name starts with ``{DAV:}``) are
delegated to the self.xxx() getters.
Finally, other properties are considered *dead*, and are handled by
        the associated property manager. | Below is the instruction that describes the task:
### Input:
Return the value of a property.
name:
the property name in Clark notation.
return value:
may have different types, depending on the status:
- string or unicode: for standard property values.
- lxml.etree.Element: for complex values.
If the property is not available, a DAVError is raised.
This default implementation handles ``{DAV:}lockdiscovery`` and
``{DAV:}supportedlock`` using the associated lock manager.
All other *live* properties (i.e. name starts with ``{DAV:}``) are
delegated to the self.xxx() getters.
Finally, other properties are considered *dead*, and are handled by
the associated property manager.
### Response:
def get_property_value(self, name):
"""Return the value of a property.
name:
the property name in Clark notation.
return value:
may have different types, depending on the status:
- string or unicode: for standard property values.
- lxml.etree.Element: for complex values.
If the property is not available, a DAVError is raised.
This default implementation handles ``{DAV:}lockdiscovery`` and
``{DAV:}supportedlock`` using the associated lock manager.
All other *live* properties (i.e. name starts with ``{DAV:}``) are
delegated to the self.xxx() getters.
Finally, other properties are considered *dead*, and are handled by
the associated property manager.
"""
refUrl = self.get_ref_url()
# lock properties
lm = self.provider.lock_manager
if lm and name == "{DAV:}lockdiscovery":
# TODO: we return HTTP_NOT_FOUND if no lockmanager is present.
# Correct?
activelocklist = lm.get_url_lock_list(refUrl)
lockdiscoveryEL = etree.Element(name)
for lock in activelocklist:
activelockEL = etree.SubElement(lockdiscoveryEL, "{DAV:}activelock")
locktypeEL = etree.SubElement(activelockEL, "{DAV:}locktype")
# Note: make sure `{DAV:}` is not handled as format tag:
etree.SubElement(locktypeEL, "{}{}".format("{DAV:}", lock["type"]))
lockscopeEL = etree.SubElement(activelockEL, "{DAV:}lockscope")
# Note: make sure `{DAV:}` is not handled as format tag:
etree.SubElement(lockscopeEL, "{}{}".format("{DAV:}", lock["scope"]))
etree.SubElement(activelockEL, "{DAV:}depth").text = lock["depth"]
if lock["owner"]:
# lock["owner"] is an XML string
# owner may be empty (#64)
ownerEL = xml_tools.string_to_xml(lock["owner"])
activelockEL.append(ownerEL)
timeout = lock["timeout"]
if timeout < 0:
timeout = "Infinite"
else:
# The time remaining on the lock
expire = lock["expire"]
timeout = "Second-" + str(int(expire - time.time()))
etree.SubElement(activelockEL, "{DAV:}timeout").text = timeout
locktokenEL = etree.SubElement(activelockEL, "{DAV:}locktoken")
etree.SubElement(locktokenEL, "{DAV:}href").text = lock["token"]
# TODO: this is ugly:
# res.get_property_value("{DAV:}lockdiscovery")
#
# lockRoot = self.get_href(self.provider.ref_url_to_path(lock["root"]))
lockPath = self.provider.ref_url_to_path(lock["root"])
lockRes = self.provider.get_resource_inst(lockPath, self.environ)
# FIXME: test for None
lockHref = lockRes.get_href()
lockrootEL = etree.SubElement(activelockEL, "{DAV:}lockroot")
etree.SubElement(lockrootEL, "{DAV:}href").text = lockHref
return lockdiscoveryEL
elif lm and name == "{DAV:}supportedlock":
# TODO: we return HTTP_NOT_FOUND if no lockmanager is present. Correct?
# TODO: the lockmanager should decide about it's features
supportedlockEL = etree.Element(name)
lockentryEL = etree.SubElement(supportedlockEL, "{DAV:}lockentry")
lockscopeEL = etree.SubElement(lockentryEL, "{DAV:}lockscope")
etree.SubElement(lockscopeEL, "{DAV:}exclusive")
locktypeEL = etree.SubElement(lockentryEL, "{DAV:}locktype")
etree.SubElement(locktypeEL, "{DAV:}write")
lockentryEL = etree.SubElement(supportedlockEL, "{DAV:}lockentry")
lockscopeEL = etree.SubElement(lockentryEL, "{DAV:}lockscope")
etree.SubElement(lockscopeEL, "{DAV:}shared")
locktypeEL = etree.SubElement(lockentryEL, "{DAV:}locktype")
etree.SubElement(locktypeEL, "{DAV:}write")
return supportedlockEL
elif name.startswith("{DAV:}"):
# Standard live property (raises HTTP_NOT_FOUND if not supported)
if name == "{DAV:}creationdate" and self.get_creation_date() is not None:
# Note: uses RFC3339 format (ISO 8601)
return util.get_rfc3339_time(self.get_creation_date())
elif name == "{DAV:}getcontenttype" and self.get_content_type() is not None:
return self.get_content_type()
elif name == "{DAV:}resourcetype":
if self.is_collection:
resourcetypeEL = etree.Element(name)
etree.SubElement(resourcetypeEL, "{DAV:}collection")
return resourcetypeEL
return ""
elif (
name == "{DAV:}getlastmodified" and self.get_last_modified() is not None
):
# Note: uses RFC1123 format
return util.get_rfc1123_time(self.get_last_modified())
elif (
name == "{DAV:}getcontentlength"
and self.get_content_length() is not None
):
# Note: must be a numeric string
return str(self.get_content_length())
elif name == "{DAV:}getetag" and self.get_etag() is not None:
return self.get_etag()
elif name == "{DAV:}displayname" and self.get_display_name() is not None:
return self.get_display_name()
# Unsupported, no persistence available, or property not found
raise DAVError(HTTP_NOT_FOUND)
# Dead property
pm = self.provider.prop_manager
if pm:
value = pm.get_property(refUrl, name, self.environ)
if value is not None:
return xml_tools.string_to_xml(value)
# No persistence available, or property not found
raise DAVError(HTTP_NOT_FOUND) |
def annual_heating_design_day_990(self):
"""A design day object representing the annual 99.0% heating design day."""
self._load_header_check()
if bool(self._heating_dict) is True:
avg_press = self.atmospheric_station_pressure.average
avg_press = None if avg_press == 999999 else avg_press
return DesignDay.from_ashrae_dict_heating(
self._heating_dict, self.location, True, avg_press)
else:
return None | A design day object representing the annual 99.0% heating design day. | Below is the the instruction that describes the task:
### Input:
A design day object representing the annual 99.0% heating design day.
### Response:
def annual_heating_design_day_990(self):
"""A design day object representing the annual 99.0% heating design day."""
self._load_header_check()
if bool(self._heating_dict) is True:
avg_press = self.atmospheric_station_pressure.average
avg_press = None if avg_press == 999999 else avg_press
return DesignDay.from_ashrae_dict_heating(
self._heating_dict, self.location, True, avg_press)
else:
return None |
def _supports(self, item):
"""Supports everything of parent class and csr, csc, bsr, and dia sparse matrices."""
if SparseParameter._is_supported_matrix(item):
return True
else:
return super(SparseResult, self)._supports(item) | Supports everything of parent class and csr, csc, bsr, and dia sparse matrices. | Below is the the instruction that describes the task:
### Input:
Supports everything of parent class and csr, csc, bsr, and dia sparse matrices.
### Response:
def _supports(self, item):
"""Supports everything of parent class and csr, csc, bsr, and dia sparse matrices."""
if SparseParameter._is_supported_matrix(item):
return True
else:
return super(SparseResult, self)._supports(item) |
def _transfer_output_files(self, tool_output_dir, working_dir, output_dir, patterns):
"""Transfer files created by the tool in the container to the output directory.
@param tool_output_dir: The directory under which all tool output files are created.
@param working_dir: The absolute working directory of the tool in the container.
@param output_dir: the directory where to write result files
@param patterns: a list of patterns of files to retrieve as result files
"""
assert output_dir and patterns
if any(os.path.isabs(pattern) for pattern in patterns):
base_dir = tool_output_dir
else:
base_dir = tool_output_dir + working_dir
def transfer_file(abs_file):
assert abs_file.startswith(base_dir)
# We ignore (empty) directories, because we create them for hidden dirs etc.
# We ignore device nodes, because overlayfs creates them.
# We also ignore all other files (symlinks, fifos etc.),
# because they are probably irrelevant, and just handle regular files.
file = os.path.join("/", os.path.relpath(abs_file, base_dir))
if (os.path.isfile(abs_file) and not os.path.islink(abs_file) and
not container.is_container_system_config_file(file)):
target = output_dir + file
logging.debug("Transferring output file %s to %s", abs_file, target)
try:
os.makedirs(os.path.dirname(target))
except EnvironmentError:
pass # exist_ok=True not supported on Python 2
try:
# move is more efficient than copy in case both abs_file and target
# are on the same filesystem, and it avoids matching the file again
# with the next pattern.
shutil.move(abs_file, target)
except EnvironmentError as e:
logging.warning("Could not retrieve output file '%s': %s", file, e)
for pattern in patterns:
if os.path.isabs(pattern):
pattern = tool_output_dir + pattern
else:
pattern = tool_output_dir + os.path.join(working_dir, pattern)
# normalize pattern for preventing directory traversal attacks:
for abs_file in util.maybe_recursive_iglob(os.path.normpath(pattern), recursive=True):
# Recursive matching is only supported starting with Python 3.5,
# so we allow the user to match directories and transfer them recursively.
if os.path.isdir(abs_file):
for root, unused_dirs, files in os.walk(abs_file):
for file in files:
transfer_file(os.path.join(root, file))
else:
transfer_file(abs_file) | Transfer files created by the tool in the container to the output directory.
@param tool_output_dir: The directory under which all tool output files are created.
@param working_dir: The absolute working directory of the tool in the container.
@param output_dir: the directory where to write result files
@param patterns: a list of patterns of files to retrieve as result files | Below is the the instruction that describes the task:
### Input:
Transfer files created by the tool in the container to the output directory.
@param tool_output_dir: The directory under which all tool output files are created.
@param working_dir: The absolute working directory of the tool in the container.
@param output_dir: the directory where to write result files
@param patterns: a list of patterns of files to retrieve as result files
### Response:
def _transfer_output_files(self, tool_output_dir, working_dir, output_dir, patterns):
"""Transfer files created by the tool in the container to the output directory.
@param tool_output_dir: The directory under which all tool output files are created.
@param working_dir: The absolute working directory of the tool in the container.
@param output_dir: the directory where to write result files
@param patterns: a list of patterns of files to retrieve as result files
"""
assert output_dir and patterns
if any(os.path.isabs(pattern) for pattern in patterns):
base_dir = tool_output_dir
else:
base_dir = tool_output_dir + working_dir
def transfer_file(abs_file):
assert abs_file.startswith(base_dir)
# We ignore (empty) directories, because we create them for hidden dirs etc.
# We ignore device nodes, because overlayfs creates them.
# We also ignore all other files (symlinks, fifos etc.),
# because they are probably irrelevant, and just handle regular files.
file = os.path.join("/", os.path.relpath(abs_file, base_dir))
if (os.path.isfile(abs_file) and not os.path.islink(abs_file) and
not container.is_container_system_config_file(file)):
target = output_dir + file
logging.debug("Transferring output file %s to %s", abs_file, target)
try:
os.makedirs(os.path.dirname(target))
except EnvironmentError:
pass # exist_ok=True not supported on Python 2
try:
# move is more efficient than copy in case both abs_file and target
# are on the same filesystem, and it avoids matching the file again
# with the next pattern.
shutil.move(abs_file, target)
except EnvironmentError as e:
logging.warning("Could not retrieve output file '%s': %s", file, e)
for pattern in patterns:
if os.path.isabs(pattern):
pattern = tool_output_dir + pattern
else:
pattern = tool_output_dir + os.path.join(working_dir, pattern)
# normalize pattern for preventing directory traversal attacks:
for abs_file in util.maybe_recursive_iglob(os.path.normpath(pattern), recursive=True):
# Recursive matching is only supported starting with Python 3.5,
# so we allow the user to match directories and transfer them recursively.
if os.path.isdir(abs_file):
for root, unused_dirs, files in os.walk(abs_file):
for file in files:
transfer_file(os.path.join(root, file))
else:
transfer_file(abs_file) |
def fft_plan(shape, dtype=np.complex64, axes=None, fast_math=True):
"""returns an reikna plan/FFT obj of shape dshape
"""
# if not axes is None and any([a<0 for a in axes]):
# raise NotImplementedError("indices of axes have to be non negative, but are: %s"%str(axes))
axes = _convert_axes_to_absolute(shape, axes)
mock_buffer = MockBuffer(dtype, shape)
fft_plan = FFT(mock_buffer, axes=axes).compile(cluda.ocl_api().Thread(get_device().queue),
fast_math=fast_math)
return fft_plan | returns an reikna plan/FFT obj of shape dshape | Below is the the instruction that describes the task:
### Input:
returns an reikna plan/FFT obj of shape dshape
### Response:
def fft_plan(shape, dtype=np.complex64, axes=None, fast_math=True):
"""returns an reikna plan/FFT obj of shape dshape
"""
# if not axes is None and any([a<0 for a in axes]):
# raise NotImplementedError("indices of axes have to be non negative, but are: %s"%str(axes))
axes = _convert_axes_to_absolute(shape, axes)
mock_buffer = MockBuffer(dtype, shape)
fft_plan = FFT(mock_buffer, axes=axes).compile(cluda.ocl_api().Thread(get_device().queue),
fast_math=fast_math)
return fft_plan |
def surface_ras_shift(self):
"""Freesurfer uses two coordinate systems: one for volumes ("RAS") and
one for surfaces ("tkReg", "tkRAS", and "Surface RAS").
To get from surface to volume coordinates, add this numbers.
To get from volume to surface coordinates, substract this numbers.
"""
T1_path = self.dir / 'mri' / 'T1.mgz'
assert T1_path.exists()
T1 = nload(str(T1_path))
return T1.header['Pxyz_c'] | Freesurfer uses two coordinate systems: one for volumes ("RAS") and
one for surfaces ("tkReg", "tkRAS", and "Surface RAS").
To get from surface to volume coordinates, add this numbers.
To get from volume to surface coordinates, substract this numbers. | Below is the the instruction that describes the task:
### Input:
Freesurfer uses two coordinate systems: one for volumes ("RAS") and
one for surfaces ("tkReg", "tkRAS", and "Surface RAS").
To get from surface to volume coordinates, add this numbers.
To get from volume to surface coordinates, substract this numbers.
### Response:
def surface_ras_shift(self):
"""Freesurfer uses two coordinate systems: one for volumes ("RAS") and
one for surfaces ("tkReg", "tkRAS", and "Surface RAS").
To get from surface to volume coordinates, add this numbers.
To get from volume to surface coordinates, substract this numbers.
"""
T1_path = self.dir / 'mri' / 'T1.mgz'
assert T1_path.exists()
T1 = nload(str(T1_path))
return T1.header['Pxyz_c'] |
def camelcase_to_underline(param_dict):
"""
将驼峰命名的参数字典键转换为下划线参数
:param:
* param_dict: (dict) 请求参数字典
:return:
* temp_dict: (dict) 转换后的参数字典
举例如下::
print('--- transform_hump_to_underline demo---')
hump_param_dict = {'firstName': 'Python', 'Second_Name': 'san', 'right_name': 'name'}
underline_param_dict = transform_hump_to_underline(hump_param_dict )
print(underline_param_dict )
print('---')
执行结果::
--- transform_hump_to_underline demo---
{'first_name': 'Python', 'second_name': 'san', 'right_name': 'name'}
---
"""
temp_dict = copy.deepcopy(param_dict)
# 正则
hump_to_underline = re.compile(r'([a-z]|\d)([A-Z])')
for key in list(param_dict.keys()):
# 将驼峰值替换为下划线
underline_sub = re.sub(hump_to_underline, r'\1_\2', key).lower()
temp_dict[underline_sub] = temp_dict.pop(key)
return temp_dict | 将驼峰命名的参数字典键转换为下划线参数
:param:
* param_dict: (dict) 请求参数字典
:return:
* temp_dict: (dict) 转换后的参数字典
举例如下::
print('--- transform_hump_to_underline demo---')
hump_param_dict = {'firstName': 'Python', 'Second_Name': 'san', 'right_name': 'name'}
underline_param_dict = transform_hump_to_underline(hump_param_dict )
print(underline_param_dict )
print('---')
执行结果::
--- transform_hump_to_underline demo---
{'first_name': 'Python', 'second_name': 'san', 'right_name': 'name'}
--- | Below is the the instruction that describes the task:
### Input:
将驼峰命名的参数字典键转换为下划线参数
:param:
* param_dict: (dict) 请求参数字典
:return:
* temp_dict: (dict) 转换后的参数字典
举例如下::
print('--- transform_hump_to_underline demo---')
hump_param_dict = {'firstName': 'Python', 'Second_Name': 'san', 'right_name': 'name'}
underline_param_dict = transform_hump_to_underline(hump_param_dict )
print(underline_param_dict )
print('---')
执行结果::
--- transform_hump_to_underline demo---
{'first_name': 'Python', 'second_name': 'san', 'right_name': 'name'}
---
### Response:
def camelcase_to_underline(param_dict):
"""
将驼峰命名的参数字典键转换为下划线参数
:param:
* param_dict: (dict) 请求参数字典
:return:
* temp_dict: (dict) 转换后的参数字典
举例如下::
print('--- transform_hump_to_underline demo---')
hump_param_dict = {'firstName': 'Python', 'Second_Name': 'san', 'right_name': 'name'}
underline_param_dict = transform_hump_to_underline(hump_param_dict )
print(underline_param_dict )
print('---')
执行结果::
--- transform_hump_to_underline demo---
{'first_name': 'Python', 'second_name': 'san', 'right_name': 'name'}
---
"""
temp_dict = copy.deepcopy(param_dict)
# 正则
hump_to_underline = re.compile(r'([a-z]|\d)([A-Z])')
for key in list(param_dict.keys()):
# 将驼峰值替换为下划线
underline_sub = re.sub(hump_to_underline, r'\1_\2', key).lower()
temp_dict[underline_sub] = temp_dict.pop(key)
return temp_dict |
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value | Schema for all additional properties, or False. | Below is the the instruction that describes the task:
### Input:
Schema for all additional properties, or False.
### Response:
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value |
def search(self):
"""Handle the search request."""
search = self.document_class().search() # pylint: disable=not-callable
search = self.custom_filter(search)
search = self.filter_search(search)
search = self.order_search(search)
search = self.filter_permissions(search)
if search.count() > ELASTICSEARCH_SIZE:
limit = self.paginator.get_limit(self.request)
if not limit or limit > ELASTICSEARCH_SIZE:
raise TooManyResults()
search = search.extra(size=ELASTICSEARCH_SIZE)
return search | Handle the search request. | Below is the the instruction that describes the task:
### Input:
Handle the search request.
### Response:
def search(self):
"""Handle the search request."""
search = self.document_class().search() # pylint: disable=not-callable
search = self.custom_filter(search)
search = self.filter_search(search)
search = self.order_search(search)
search = self.filter_permissions(search)
if search.count() > ELASTICSEARCH_SIZE:
limit = self.paginator.get_limit(self.request)
if not limit or limit > ELASTICSEARCH_SIZE:
raise TooManyResults()
search = search.extra(size=ELASTICSEARCH_SIZE)
return search |
def dpms(self):
"""
Display a colorful state of DPMS.
"""
if "DPMS is Enabled" in self.py3.command_output("xset -q"):
_format = self.icon_on
color = self.color_on
else:
_format = self.icon_off
color = self.color_off
icon = self.py3.safe_format(_format)
return {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(self.format, {"icon": icon}),
"color": color,
} | Display a colorful state of DPMS. | Below is the the instruction that describes the task:
### Input:
Display a colorful state of DPMS.
### Response:
def dpms(self):
"""
Display a colorful state of DPMS.
"""
if "DPMS is Enabled" in self.py3.command_output("xset -q"):
_format = self.icon_on
color = self.color_on
else:
_format = self.icon_off
color = self.color_off
icon = self.py3.safe_format(_format)
return {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(self.format, {"icon": icon}),
"color": color,
} |
def _writeCic(self, filelike, specfile, compress):
"""Writes the ``.cic`` container entry of the specified specfile to the
``mrc_cic`` format. For details see
:func:`maspy.auxiliary.writeBinaryItemContainer()`
:param filelike: path to a file (str) or a file-like object
:param specfile: name of an ms-run file present in ``self.info``
:param compress: bool, True to use zip file compression
"""
aux.writeBinaryItemContainer(filelike, self.cic[specfile], compress) | Writes the ``.cic`` container entry of the specified specfile to the
``mrc_cic`` format. For details see
:func:`maspy.auxiliary.writeBinaryItemContainer()`
:param filelike: path to a file (str) or a file-like object
:param specfile: name of an ms-run file present in ``self.info``
:param compress: bool, True to use zip file compression | Below is the the instruction that describes the task:
### Input:
Writes the ``.cic`` container entry of the specified specfile to the
``mrc_cic`` format. For details see
:func:`maspy.auxiliary.writeBinaryItemContainer()`
:param filelike: path to a file (str) or a file-like object
:param specfile: name of an ms-run file present in ``self.info``
:param compress: bool, True to use zip file compression
### Response:
def _writeCic(self, filelike, specfile, compress):
"""Writes the ``.cic`` container entry of the specified specfile to the
``mrc_cic`` format. For details see
:func:`maspy.auxiliary.writeBinaryItemContainer()`
:param filelike: path to a file (str) or a file-like object
:param specfile: name of an ms-run file present in ``self.info``
:param compress: bool, True to use zip file compression
"""
aux.writeBinaryItemContainer(filelike, self.cic[specfile], compress) |
def get_type_name(type_name, sub_type=None):
""" Returns a go type according to a spec type
"""
if type_name in ("string", "enum"):
return "string"
if type_name == "float":
return "float64"
if type_name == "boolean":
return "bool"
if type_name == "list":
st = get_type_name(type_name=sub_type, sub_type=None) if sub_type else "interface{}"
return "[]%s" % st
if type_name == "integer":
return "int"
if type_name == "time":
return "float64"
return "interface{}" | Returns a go type according to a spec type | Below is the the instruction that describes the task:
### Input:
Returns a go type according to a spec type
### Response:
def get_type_name(type_name, sub_type=None):
""" Returns a go type according to a spec type
"""
if type_name in ("string", "enum"):
return "string"
if type_name == "float":
return "float64"
if type_name == "boolean":
return "bool"
if type_name == "list":
st = get_type_name(type_name=sub_type, sub_type=None) if sub_type else "interface{}"
return "[]%s" % st
if type_name == "integer":
return "int"
if type_name == "time":
return "float64"
return "interface{}" |
def read_large_int(self, bits, signed=True):
"""Reads a n-bits long integer value."""
return int.from_bytes(
self.read(bits // 8), byteorder='little', signed=signed) | Reads a n-bits long integer value. | Below is the the instruction that describes the task:
### Input:
Reads a n-bits long integer value.
### Response:
def read_large_int(self, bits, signed=True):
"""Reads a n-bits long integer value."""
return int.from_bytes(
self.read(bits // 8), byteorder='little', signed=signed) |
def write(self, data):
"""
write single molecule into file
"""
m = self._convert_structure(data)
self._file.write(self._format_mol(*m))
self._file.write('M END\n')
for k, v in data.meta.items():
self._file.write(f'> <{k}>\n{v}\n')
self._file.write('$$$$\n') | write single molecule into file | Below is the the instruction that describes the task:
### Input:
write single molecule into file
### Response:
def write(self, data):
"""
write single molecule into file
"""
m = self._convert_structure(data)
self._file.write(self._format_mol(*m))
self._file.write('M END\n')
for k, v in data.meta.items():
self._file.write(f'> <{k}>\n{v}\n')
self._file.write('$$$$\n') |
def _create_simulated_annealing_expander(schedule):
'''
Creates an expander that has a random chance to choose a node that is worse
than the current (first) node, but that chance decreases with time.
'''
def _expander(fringe, iteration, viewer):
T = schedule(iteration)
current = fringe[0]
neighbors = current.expand(local_search=True)
if viewer:
viewer.event('expanded', [current], [neighbors])
if neighbors:
succ = random.choice(neighbors)
delta_e = succ.value - current.value
if delta_e > 0 or random.random() < math.exp(delta_e / T):
fringe.pop()
fringe.append(succ)
if viewer:
viewer.event('chosen_node', succ)
return _expander | Creates an expander that has a random chance to choose a node that is worse
than the current (first) node, but that chance decreases with time. | Below is the the instruction that describes the task:
### Input:
Creates an expander that has a random chance to choose a node that is worse
than the current (first) node, but that chance decreases with time.
### Response:
def _create_simulated_annealing_expander(schedule):
'''
Creates an expander that has a random chance to choose a node that is worse
than the current (first) node, but that chance decreases with time.
'''
def _expander(fringe, iteration, viewer):
T = schedule(iteration)
current = fringe[0]
neighbors = current.expand(local_search=True)
if viewer:
viewer.event('expanded', [current], [neighbors])
if neighbors:
succ = random.choice(neighbors)
delta_e = succ.value - current.value
if delta_e > 0 or random.random() < math.exp(delta_e / T):
fringe.pop()
fringe.append(succ)
if viewer:
viewer.event('chosen_node', succ)
return _expander |
def find_clusters(network, mask=[], t_labels=False):
r"""
Identify connected clusters of pores in the network. This method can
also return a list of throat cluster numbers, which correspond to the
cluster numbers of the pores to which the throat is connected. Either
site and bond percolation can be considered, see description of input
arguments for details.
Parameters
----------
network : OpenPNM Network Object
The network
mask : array_like, boolean
A list of active bonds or sites (throats or pores). If the mask is
Np long, then the method will perform a site percolation, and if
the mask is Nt long bond percolation will be performed.
Returns
-------
A tuple containing an Np long list of pore cluster labels, and an Nt-long
list of throat cluster labels. The label numbers correspond such that
pores and throats with the same label are part of the same cluster.
Examples
--------
>>> import openpnm as op
>>> from scipy import rand
>>> pn = op.network.Cubic(shape=[25, 25, 1])
>>> pn['pore.seed'] = rand(pn.Np)
>>> pn['throat.seed'] = rand(pn.Nt)
"""
# Parse the input arguments
mask = sp.array(mask, ndmin=1)
if mask.dtype != bool:
raise Exception('Mask must be a boolean array of Np or Nt length')
# If pore mask was given perform site percolation
if sp.size(mask) == network.Np:
(p_clusters, t_clusters) = _site_percolation(network, mask)
# If pore mask was given perform bond percolation
elif sp.size(mask) == network.Nt:
(p_clusters, t_clusters) = _bond_percolation(network, mask)
else:
raise Exception('Mask received was neither Nt nor Np long')
return (p_clusters, t_clusters) | r"""
Identify connected clusters of pores in the network. This method can
also return a list of throat cluster numbers, which correspond to the
cluster numbers of the pores to which the throat is connected. Either
site and bond percolation can be considered, see description of input
arguments for details.
Parameters
----------
network : OpenPNM Network Object
The network
mask : array_like, boolean
A list of active bonds or sites (throats or pores). If the mask is
Np long, then the method will perform a site percolation, and if
the mask is Nt long bond percolation will be performed.
Returns
-------
A tuple containing an Np long list of pore cluster labels, and an Nt-long
list of throat cluster labels. The label numbers correspond such that
pores and throats with the same label are part of the same cluster.
Examples
--------
>>> import openpnm as op
>>> from scipy import rand
>>> pn = op.network.Cubic(shape=[25, 25, 1])
>>> pn['pore.seed'] = rand(pn.Np)
>>> pn['throat.seed'] = rand(pn.Nt) | Below is the the instruction that describes the task:
### Input:
r"""
Identify connected clusters of pores in the network. This method can
also return a list of throat cluster numbers, which correspond to the
cluster numbers of the pores to which the throat is connected. Either
site and bond percolation can be considered, see description of input
arguments for details.
Parameters
----------
network : OpenPNM Network Object
The network
mask : array_like, boolean
A list of active bonds or sites (throats or pores). If the mask is
Np long, then the method will perform a site percolation, and if
the mask is Nt long bond percolation will be performed.
Returns
-------
A tuple containing an Np long list of pore cluster labels, and an Nt-long
list of throat cluster labels. The label numbers correspond such that
pores and throats with the same label are part of the same cluster.
Examples
--------
>>> import openpnm as op
>>> from scipy import rand
>>> pn = op.network.Cubic(shape=[25, 25, 1])
>>> pn['pore.seed'] = rand(pn.Np)
>>> pn['throat.seed'] = rand(pn.Nt)
### Response:
def find_clusters(network, mask=[], t_labels=False):
r"""
Identify connected clusters of pores in the network. This method can
also return a list of throat cluster numbers, which correspond to the
cluster numbers of the pores to which the throat is connected. Either
site and bond percolation can be considered, see description of input
arguments for details.
Parameters
----------
network : OpenPNM Network Object
The network
mask : array_like, boolean
A list of active bonds or sites (throats or pores). If the mask is
Np long, then the method will perform a site percolation, and if
the mask is Nt long bond percolation will be performed.
Returns
-------
A tuple containing an Np long list of pore cluster labels, and an Nt-long
list of throat cluster labels. The label numbers correspond such that
pores and throats with the same label are part of the same cluster.
Examples
--------
>>> import openpnm as op
>>> from scipy import rand
>>> pn = op.network.Cubic(shape=[25, 25, 1])
>>> pn['pore.seed'] = rand(pn.Np)
>>> pn['throat.seed'] = rand(pn.Nt)
"""
# Parse the input arguments
mask = sp.array(mask, ndmin=1)
if mask.dtype != bool:
raise Exception('Mask must be a boolean array of Np or Nt length')
# If pore mask was given perform site percolation
if sp.size(mask) == network.Np:
(p_clusters, t_clusters) = _site_percolation(network, mask)
# If pore mask was given perform bond percolation
elif sp.size(mask) == network.Nt:
(p_clusters, t_clusters) = _bond_percolation(network, mask)
else:
raise Exception('Mask received was neither Nt nor Np long')
return (p_clusters, t_clusters) |
def discover(self):
"""Method to send a discovery message
"""
if self.transport:
if self.discovery_countdown <= 0:
self.discovery_countdown = self.discovery_interval
msg = GetService(BROADCAST_MAC, self.source_id, seq_num=0, payload={}, ack_requested=False, response_requested=True)
self.transport.sendto(msg.generate_packed_message(), (self.broadcast_ip, UDP_BROADCAST_PORT))
else:
self.discovery_countdown -= self.discovery_step
self.loop.call_later(self.discovery_step, self.discover) | Method to send a discovery message | Below is the the instruction that describes the task:
### Input:
Method to send a discovery message
### Response:
def discover(self):
"""Method to send a discovery message
"""
if self.transport:
if self.discovery_countdown <= 0:
self.discovery_countdown = self.discovery_interval
msg = GetService(BROADCAST_MAC, self.source_id, seq_num=0, payload={}, ack_requested=False, response_requested=True)
self.transport.sendto(msg.generate_packed_message(), (self.broadcast_ip, UDP_BROADCAST_PORT))
else:
self.discovery_countdown -= self.discovery_step
self.loop.call_later(self.discovery_step, self.discover) |
def shutdown(self):
""" Shuts down the daemon process.
"""
if not self._exited:
self._exited = True
# signal task runner to terminate via SIGTERM
if self._task_runner.is_alive():
self._task_runner.terminate()
# if command server is running, then block until
# task runner completes so it has time to use
# the command server to clean up root plugins
if self._command_server.is_alive():
if self._task_runner.is_alive():
self._task_runner.join()
_shutdown_pipe(self._pipe)
self._task.stop() | Shuts down the daemon process. | Below is the the instruction that describes the task:
### Input:
Shuts down the daemon process.
### Response:
def shutdown(self):
""" Shuts down the daemon process.
"""
if not self._exited:
self._exited = True
# signal task runner to terminate via SIGTERM
if self._task_runner.is_alive():
self._task_runner.terminate()
# if command server is running, then block until
# task runner completes so it has time to use
# the command server to clean up root plugins
if self._command_server.is_alive():
if self._task_runner.is_alive():
self._task_runner.join()
_shutdown_pipe(self._pipe)
self._task.stop() |
def newNsPropEatName(self, ns, name, value):
"""Create a new property tagged with a namespace and carried
by a node. """
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewNsPropEatName(self._o, ns__o, name, value)
if ret is None:raise treeError('xmlNewNsPropEatName() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp | Create a new property tagged with a namespace and carried
by a node. | Below is the the instruction that describes the task:
### Input:
Create a new property tagged with a namespace and carried
by a node.
### Response:
def newNsPropEatName(self, ns, name, value):
"""Create a new property tagged with a namespace and carried
by a node. """
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewNsPropEatName(self._o, ns__o, name, value)
if ret is None:raise treeError('xmlNewNsPropEatName() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp |
def remove_accounts_from_project(accounts_query, project):
""" Remove accounts from project. """
query = accounts_query.filter(date_deleted__isnull=True)
for account in query:
remove_account_from_project(account, project) | Remove accounts from project. | Below is the the instruction that describes the task:
### Input:
Remove accounts from project.
### Response:
def remove_accounts_from_project(accounts_query, project):
""" Remove accounts from project. """
query = accounts_query.filter(date_deleted__isnull=True)
for account in query:
remove_account_from_project(account, project) |
def otsu(data, min_threshold=None, max_threshold=None,bins=256):
"""Compute a threshold using Otsu's method
data - an array of intensity values between zero and one
min_threshold - only consider thresholds above this minimum value
max_threshold - only consider thresholds below this maximum value
bins - we bin the data into this many equally-spaced bins, then pick
the bin index that optimizes the metric
"""
assert min_threshold is None or max_threshold is None or min_threshold < max_threshold
def constrain(threshold):
if not min_threshold is None and threshold < min_threshold:
threshold = min_threshold
if not max_threshold is None and threshold > max_threshold:
threshold = max_threshold
return threshold
data = np.atleast_1d(data)
data = data[~ np.isnan(data)]
if len(data) == 0:
return (min_threshold if not min_threshold is None
else max_threshold if not max_threshold is None
else 0)
elif len(data) == 1:
return constrain(data[0])
if bins > len(data):
bins = len(data)
data.sort()
var = running_variance(data)
rvar = np.flipud(running_variance(np.flipud(data)))
thresholds = data[1:len(data):len(data)//bins]
score_low = (var[0:len(data)-1:len(data)//bins] *
np.arange(0,len(data)-1,len(data)//bins))
score_high = (rvar[1:len(data):len(data)//bins] *
(len(data) - np.arange(1,len(data),len(data)//bins)))
scores = score_low + score_high
if len(scores) == 0:
return constrain(thresholds[0])
index = np.argwhere(scores == scores.min()).flatten()
if len(index)==0:
return constrain(thresholds[0])
#
# Take the average of the thresholds to either side of
# the chosen value to get an intermediate in cases where there is
# a steep step between the background and foreground
index = index[0]
if index == 0:
index_low = 0
else:
index_low = index-1
if index == len(thresholds)-1:
index_high = len(thresholds)-1
else:
index_high = index+1
return constrain((thresholds[index_low]+thresholds[index_high]) / 2) | Compute a threshold using Otsu's method
data - an array of intensity values between zero and one
min_threshold - only consider thresholds above this minimum value
max_threshold - only consider thresholds below this maximum value
bins - we bin the data into this many equally-spaced bins, then pick
the bin index that optimizes the metric | Below is the the instruction that describes the task:
### Input:
Compute a threshold using Otsu's method
data - an array of intensity values between zero and one
min_threshold - only consider thresholds above this minimum value
max_threshold - only consider thresholds below this maximum value
bins - we bin the data into this many equally-spaced bins, then pick
the bin index that optimizes the metric
### Response:
def otsu(data, min_threshold=None, max_threshold=None,bins=256):
"""Compute a threshold using Otsu's method
data - an array of intensity values between zero and one
min_threshold - only consider thresholds above this minimum value
max_threshold - only consider thresholds below this maximum value
bins - we bin the data into this many equally-spaced bins, then pick
the bin index that optimizes the metric
"""
assert min_threshold is None or max_threshold is None or min_threshold < max_threshold
def constrain(threshold):
if not min_threshold is None and threshold < min_threshold:
threshold = min_threshold
if not max_threshold is None and threshold > max_threshold:
threshold = max_threshold
return threshold
data = np.atleast_1d(data)
data = data[~ np.isnan(data)]
if len(data) == 0:
return (min_threshold if not min_threshold is None
else max_threshold if not max_threshold is None
else 0)
elif len(data) == 1:
return constrain(data[0])
if bins > len(data):
bins = len(data)
data.sort()
var = running_variance(data)
rvar = np.flipud(running_variance(np.flipud(data)))
thresholds = data[1:len(data):len(data)//bins]
score_low = (var[0:len(data)-1:len(data)//bins] *
np.arange(0,len(data)-1,len(data)//bins))
score_high = (rvar[1:len(data):len(data)//bins] *
(len(data) - np.arange(1,len(data),len(data)//bins)))
scores = score_low + score_high
if len(scores) == 0:
return constrain(thresholds[0])
index = np.argwhere(scores == scores.min()).flatten()
if len(index)==0:
return constrain(thresholds[0])
#
# Take the average of the thresholds to either side of
# the chosen value to get an intermediate in cases where there is
# a steep step between the background and foreground
index = index[0]
if index == 0:
index_low = 0
else:
index_low = index-1
if index == len(thresholds)-1:
index_high = len(thresholds)-1
else:
index_high = index+1
return constrain((thresholds[index_low]+thresholds[index_high]) / 2) |
def get_factory_context(cls):
# type: (type) -> FactoryContext
"""
Retrieves the factory context object associated to a factory. Creates it
if needed
:param cls: The factory class
:return: The factory class context
"""
context = getattr(cls, constants.IPOPO_FACTORY_CONTEXT, None)
if context is None:
# Class not yet manipulated
context = FactoryContext()
elif is_from_parent(cls, constants.IPOPO_FACTORY_CONTEXT):
# Create a copy the context
context = context.copy(True)
# * Manipulation has not been applied yet
context.completed = False
else:
# Nothing special to do
return context
# Context has been created or copied, inject the new bean
setattr(cls, constants.IPOPO_FACTORY_CONTEXT, context)
return context | Retrieves the factory context object associated to a factory. Creates it
if needed
:param cls: The factory class
:return: The factory class context | Below is the instruction that describes the task:
### Input:
Retrieves the factory context object associated to a factory. Creates it
if needed
:param cls: The factory class
:return: The factory class context
### Response:
def get_factory_context(cls):
# type: (type) -> FactoryContext
"""
Retrieves the factory context object associated to a factory. Creates it
if needed
:param cls: The factory class
:return: The factory class context
"""
context = getattr(cls, constants.IPOPO_FACTORY_CONTEXT, None)
if context is None:
# Class not yet manipulated
context = FactoryContext()
elif is_from_parent(cls, constants.IPOPO_FACTORY_CONTEXT):
# Create a copy the context
context = context.copy(True)
# * Manipulation has not been applied yet
context.completed = False
else:
# Nothing special to do
return context
# Context has been created or copied, inject the new bean
setattr(cls, constants.IPOPO_FACTORY_CONTEXT, context)
return context |
def translate_connect_args(self, names=[], **kw):
"""Translate url attributes into a dictionary of connection arguments.
Returns attributes of this url (`host`, `database`, `username`,
`password`, `port`) as a plain dictionary. The attribute names are
used as the keys by default. Unset or false attributes are omitted
from the final dictionary.
:param \**kw: Optional, alternate key names for url attributes.
:param names: Deprecated. Same purpose as the keyword-based alternate
names, but correlates the name to the original positionally.
"""
translated = {}
attribute_names = ["host", "database", "username", "password", "port"]
for sname in attribute_names:
if names:
name = names.pop(0)
elif sname in kw:
name = kw[sname]
else:
name = sname
if name is not None and getattr(self, sname, False):
translated[name] = getattr(self, sname)
return translated | Translate url attributes into a dictionary of connection arguments.
Returns attributes of this url (`host`, `database`, `username`,
`password`, `port`) as a plain dictionary. The attribute names are
used as the keys by default. Unset or false attributes are omitted
from the final dictionary.
:param \**kw: Optional, alternate key names for url attributes.
:param names: Deprecated. Same purpose as the keyword-based alternate
names, but correlates the name to the original positionally. | Below is the instruction that describes the task:
### Input:
Translate url attributes into a dictionary of connection arguments.
Returns attributes of this url (`host`, `database`, `username`,
`password`, `port`) as a plain dictionary. The attribute names are
used as the keys by default. Unset or false attributes are omitted
from the final dictionary.
:param \**kw: Optional, alternate key names for url attributes.
:param names: Deprecated. Same purpose as the keyword-based alternate
names, but correlates the name to the original positionally.
### Response:
def translate_connect_args(self, names=[], **kw):
"""Translate url attributes into a dictionary of connection arguments.
Returns attributes of this url (`host`, `database`, `username`,
`password`, `port`) as a plain dictionary. The attribute names are
used as the keys by default. Unset or false attributes are omitted
from the final dictionary.
:param \**kw: Optional, alternate key names for url attributes.
:param names: Deprecated. Same purpose as the keyword-based alternate
names, but correlates the name to the original positionally.
"""
translated = {}
attribute_names = ["host", "database", "username", "password", "port"]
for sname in attribute_names:
if names:
name = names.pop(0)
elif sname in kw:
name = kw[sname]
else:
name = sname
if name is not None and getattr(self, sname, False):
translated[name] = getattr(self, sname)
return translated |
def composed_deps(self):
"""Dependencies of this build target."""
if 'deps' in self.params:
param_deps = self.params['deps'] or []
deps = [self.makeaddress(dep) for dep in param_deps]
return deps
else:
return None | Dependencies of this build target. | Below is the instruction that describes the task:
### Input:
Dependencies of this build target.
### Response:
def composed_deps(self):
"""Dependencies of this build target."""
if 'deps' in self.params:
param_deps = self.params['deps'] or []
deps = [self.makeaddress(dep) for dep in param_deps]
return deps
else:
return None |
def _cnn_filter(in_file, vrn_files, data):
"""Perform CNN filtering on input VCF using pre-trained models.
"""
#tensor_type = "reference" # 1D, reference sequence
tensor_type = "read_tensor" # 2D, reads, flags, mapping quality
score_file = _cnn_score_variants(in_file, tensor_type, data)
return _cnn_tranch_filtering(score_file, vrn_files, tensor_type, data) | Perform CNN filtering on input VCF using pre-trained models. | Below is the instruction that describes the task:
### Input:
Perform CNN filtering on input VCF using pre-trained models.
### Response:
def _cnn_filter(in_file, vrn_files, data):
"""Perform CNN filtering on input VCF using pre-trained models.
"""
#tensor_type = "reference" # 1D, reference sequence
tensor_type = "read_tensor" # 2D, reads, flags, mapping quality
score_file = _cnn_score_variants(in_file, tensor_type, data)
return _cnn_tranch_filtering(score_file, vrn_files, tensor_type, data) |
def clear(path):
'''
Causes the all attributes on the file/directory to be removed
:param str path: The file(s) to get attributes from
:return: True if successful, otherwise False
:raises: CommandExecutionError on file not found or any other unknown error
CLI Example:
.. code-block:: bash
salt '*' xattr.delete /path/to/file "com.test.attr"
'''
cmd = 'xattr -c "{0}"'.format(path)
try:
salt.utils.mac_utils.execute_return_success(cmd)
except CommandExecutionError as exc:
if 'No such file' in exc.strerror:
raise CommandExecutionError('File not found: {0}'.format(path))
raise CommandExecutionError('Unknown Error: {0}'.format(exc.strerror))
return list_(path) == {} | Causes the all attributes on the file/directory to be removed
:param str path: The file(s) to get attributes from
:return: True if successful, otherwise False
:raises: CommandExecutionError on file not found or any other unknown error
CLI Example:
.. code-block:: bash
salt '*' xattr.delete /path/to/file "com.test.attr" | Below is the instruction that describes the task:
### Input:
Causes the all attributes on the file/directory to be removed
:param str path: The file(s) to get attributes from
:return: True if successful, otherwise False
:raises: CommandExecutionError on file not found or any other unknown error
CLI Example:
.. code-block:: bash
salt '*' xattr.delete /path/to/file "com.test.attr"
### Response:
def clear(path):
'''
Causes the all attributes on the file/directory to be removed
:param str path: The file(s) to get attributes from
:return: True if successful, otherwise False
:raises: CommandExecutionError on file not found or any other unknown error
CLI Example:
.. code-block:: bash
salt '*' xattr.delete /path/to/file "com.test.attr"
'''
cmd = 'xattr -c "{0}"'.format(path)
try:
salt.utils.mac_utils.execute_return_success(cmd)
except CommandExecutionError as exc:
if 'No such file' in exc.strerror:
raise CommandExecutionError('File not found: {0}'.format(path))
raise CommandExecutionError('Unknown Error: {0}'.format(exc.strerror))
return list_(path) == {} |
def set_date(date):
'''
Set the current month, day, and year
:param str date: The date to set. Valid date formats are:
- %m:%d:%y
- %m:%d:%Y
- %m/%d/%y
- %m/%d/%Y
:return: True if successful, False if not
:rtype: bool
:raises: SaltInvocationError on Invalid Date format
:raises: CommandExecutionError on failure
CLI Example:
.. code-block:: bash
salt '*' timezone.set_date 1/13/2016
'''
date_format = _get_date_time_format(date)
dt_obj = datetime.strptime(date, date_format)
cmd = 'systemsetup -setdate {0}'.format(dt_obj.strftime('%m:%d:%Y'))
return salt.utils.mac_utils.execute_return_success(cmd) | Set the current month, day, and year
:param str date: The date to set. Valid date formats are:
- %m:%d:%y
- %m:%d:%Y
- %m/%d/%y
- %m/%d/%Y
:return: True if successful, False if not
:rtype: bool
:raises: SaltInvocationError on Invalid Date format
:raises: CommandExecutionError on failure
CLI Example:
.. code-block:: bash
salt '*' timezone.set_date 1/13/2016 | Below is the instruction that describes the task:
### Input:
Set the current month, day, and year
:param str date: The date to set. Valid date formats are:
- %m:%d:%y
- %m:%d:%Y
- %m/%d/%y
- %m/%d/%Y
:return: True if successful, False if not
:rtype: bool
:raises: SaltInvocationError on Invalid Date format
:raises: CommandExecutionError on failure
CLI Example:
.. code-block:: bash
salt '*' timezone.set_date 1/13/2016
### Response:
def set_date(date):
'''
Set the current month, day, and year
:param str date: The date to set. Valid date formats are:
- %m:%d:%y
- %m:%d:%Y
- %m/%d/%y
- %m/%d/%Y
:return: True if successful, False if not
:rtype: bool
:raises: SaltInvocationError on Invalid Date format
:raises: CommandExecutionError on failure
CLI Example:
.. code-block:: bash
salt '*' timezone.set_date 1/13/2016
'''
date_format = _get_date_time_format(date)
dt_obj = datetime.strptime(date, date_format)
cmd = 'systemsetup -setdate {0}'.format(dt_obj.strftime('%m:%d:%Y'))
return salt.utils.mac_utils.execute_return_success(cmd) |
def auto_message(self, args):
"""Try guess the message by the args passed
args: a set of args passed on the wrapper __call__ in
the definition above.
if the object already have some message (defined in __init__),
we don't change that. If the first arg is a function, so is decorated
without argument, use the func name as the message.
If not self.message anyway, use the default_message global,
another else use the default self.message already
"""
if any(args) and callable(args[0]) and not self.message:
return args[0].__name__
elif not self.message:
return self.default_message
else:
return self.message | Try guess the message by the args passed
args: a set of args passed on the wrapper __call__ in
the definition above.
if the object already have some message (defined in __init__),
we don't change that. If the first arg is a function, so is decorated
without argument, use the func name as the message.
If not self.message anyway, use the default_message global,
another else use the default self.message already | Below is the instruction that describes the task:
### Input:
Try guess the message by the args passed
args: a set of args passed on the wrapper __call__ in
the definition above.
if the object already have some message (defined in __init__),
we don't change that. If the first arg is a function, so is decorated
without argument, use the func name as the message.
If not self.message anyway, use the default_message global,
another else use the default self.message already
### Response:
def auto_message(self, args):
"""Try guess the message by the args passed
args: a set of args passed on the wrapper __call__ in
the definition above.
if the object already have some message (defined in __init__),
we don't change that. If the first arg is a function, so is decorated
without argument, use the func name as the message.
If not self.message anyway, use the default_message global,
another else use the default self.message already
"""
if any(args) and callable(args[0]) and not self.message:
return args[0].__name__
elif not self.message:
return self.default_message
else:
return self.message |
def print_structure(self, tostring=False):
"""
## FOR DEBUGGING ONLY ##
Pretty-prints the structure of the tree.
If tostring is true, prints nothing and returns a string.
:rtype: None or str
"""
if self.top_node:
return self.top_node.print_structure(tostring=tostring)
else:
result = "<empty IntervalTree>"
if not tostring:
print(result)
else:
return result | ## FOR DEBUGGING ONLY ##
Pretty-prints the structure of the tree.
If tostring is true, prints nothing and returns a string.
:rtype: None or str | Below is the instruction that describes the task:
### Input:
## FOR DEBUGGING ONLY ##
Pretty-prints the structure of the tree.
If tostring is true, prints nothing and returns a string.
:rtype: None or str
### Response:
def print_structure(self, tostring=False):
"""
## FOR DEBUGGING ONLY ##
Pretty-prints the structure of the tree.
If tostring is true, prints nothing and returns a string.
:rtype: None or str
"""
if self.top_node:
return self.top_node.print_structure(tostring=tostring)
else:
result = "<empty IntervalTree>"
if not tostring:
print(result)
else:
return result |
def from_spherical_coords(theta_phi, phi=None):
"""Return the quaternion corresponding to these spherical coordinates
Assumes the spherical coordinates correspond to the quaternion R via
R = exp(phi*z/2) * exp(theta*y/2)
The angles naturally must be in radians for this to make any sense.
Note that this quaternion rotates `z` onto the point with the given
spherical coordinates, but also rotates `x` and `y` onto the usual basis
vectors (theta and phi, respectively) at that point.
Parameters
----------
theta_phi: float or array of floats
This argument may either contain an array with last dimension of
size 2, where those two elements describe the (theta, phi) values in
radians for each point; or it may contain just the theta values in
radians, in which case the next argument must also be given.
phi: None, float, or array of floats
If this array is given, it must be able to broadcast against the
first argument.
Returns
-------
R: quaternion array
If the second argument is not given to this function, the shape
will be the same as the input shape except for the last dimension,
which will be removed. If the second argument is given, this
output array will have the shape resulting from broadcasting the
two input arrays against each other.
"""
# Figure out the input angles from either type of input
if phi is None:
theta_phi = np.asarray(theta_phi, dtype=np.double)
theta = theta_phi[..., 0]
phi = theta_phi[..., 1]
else:
theta = np.asarray(theta_phi, dtype=np.double)
phi = np.asarray(phi, dtype=np.double)
# Set up the output array
R = np.empty(np.broadcast(theta, phi).shape + (4,), dtype=np.double)
# Compute the actual values of the quaternion components
R[..., 0] = np.cos(phi/2)*np.cos(theta/2) # scalar quaternion components
R[..., 1] = -np.sin(phi/2)*np.sin(theta/2) # x quaternion components
R[..., 2] = np.cos(phi/2)*np.sin(theta/2) # y quaternion components
R[..., 3] = np.sin(phi/2)*np.cos(theta/2) # z quaternion components
return as_quat_array(R) | Return the quaternion corresponding to these spherical coordinates
Assumes the spherical coordinates correspond to the quaternion R via
R = exp(phi*z/2) * exp(theta*y/2)
The angles naturally must be in radians for this to make any sense.
Note that this quaternion rotates `z` onto the point with the given
spherical coordinates, but also rotates `x` and `y` onto the usual basis
vectors (theta and phi, respectively) at that point.
Parameters
----------
theta_phi: float or array of floats
This argument may either contain an array with last dimension of
size 2, where those two elements describe the (theta, phi) values in
radians for each point; or it may contain just the theta values in
radians, in which case the next argument must also be given.
phi: None, float, or array of floats
If this array is given, it must be able to broadcast against the
first argument.
Returns
-------
R: quaternion array
If the second argument is not given to this function, the shape
will be the same as the input shape except for the last dimension,
which will be removed. If the second argument is given, this
output array will have the shape resulting from broadcasting the
two input arrays against each other. | Below is the instruction that describes the task:
### Input:
Return the quaternion corresponding to these spherical coordinates
Assumes the spherical coordinates correspond to the quaternion R via
R = exp(phi*z/2) * exp(theta*y/2)
The angles naturally must be in radians for this to make any sense.
Note that this quaternion rotates `z` onto the point with the given
spherical coordinates, but also rotates `x` and `y` onto the usual basis
vectors (theta and phi, respectively) at that point.
Parameters
----------
theta_phi: float or array of floats
This argument may either contain an array with last dimension of
size 2, where those two elements describe the (theta, phi) values in
radians for each point; or it may contain just the theta values in
radians, in which case the next argument must also be given.
phi: None, float, or array of floats
If this array is given, it must be able to broadcast against the
first argument.
Returns
-------
R: quaternion array
If the second argument is not given to this function, the shape
will be the same as the input shape except for the last dimension,
which will be removed. If the second argument is given, this
output array will have the shape resulting from broadcasting the
two input arrays against each other.
### Response:
def from_spherical_coords(theta_phi, phi=None):
"""Return the quaternion corresponding to these spherical coordinates
Assumes the spherical coordinates correspond to the quaternion R via
R = exp(phi*z/2) * exp(theta*y/2)
The angles naturally must be in radians for this to make any sense.
Note that this quaternion rotates `z` onto the point with the given
spherical coordinates, but also rotates `x` and `y` onto the usual basis
vectors (theta and phi, respectively) at that point.
Parameters
----------
theta_phi: float or array of floats
This argument may either contain an array with last dimension of
size 2, where those two elements describe the (theta, phi) values in
radians for each point; or it may contain just the theta values in
radians, in which case the next argument must also be given.
phi: None, float, or array of floats
If this array is given, it must be able to broadcast against the
first argument.
Returns
-------
R: quaternion array
If the second argument is not given to this function, the shape
will be the same as the input shape except for the last dimension,
which will be removed. If the second argument is given, this
output array will have the shape resulting from broadcasting the
two input arrays against each other.
"""
# Figure out the input angles from either type of input
if phi is None:
theta_phi = np.asarray(theta_phi, dtype=np.double)
theta = theta_phi[..., 0]
phi = theta_phi[..., 1]
else:
theta = np.asarray(theta_phi, dtype=np.double)
phi = np.asarray(phi, dtype=np.double)
# Set up the output array
R = np.empty(np.broadcast(theta, phi).shape + (4,), dtype=np.double)
# Compute the actual values of the quaternion components
R[..., 0] = np.cos(phi/2)*np.cos(theta/2) # scalar quaternion components
R[..., 1] = -np.sin(phi/2)*np.sin(theta/2) # x quaternion components
R[..., 2] = np.cos(phi/2)*np.sin(theta/2) # y quaternion components
R[..., 3] = np.sin(phi/2)*np.cos(theta/2) # z quaternion components
return as_quat_array(R) |
def _set_trunk_private_vlan_classification(self, v, load=False):
"""
Setter method for trunk_private_vlan_classification, mapped from YANG variable /interface/port_channel/switchport/trunk_private_vlan_classification (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_trunk_private_vlan_classification is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_trunk_private_vlan_classification() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=trunk_private_vlan_classification.trunk_private_vlan_classification, is_container='container', presence=False, yang_name="trunk-private-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'ctag-pvlan-classification-phy-config'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """trunk_private_vlan_classification must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=trunk_private_vlan_classification.trunk_private_vlan_classification, is_container='container', presence=False, yang_name="trunk-private-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'ctag-pvlan-classification-phy-config'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__trunk_private_vlan_classification = t
if hasattr(self, '_set'):
self._set() | Setter method for trunk_private_vlan_classification, mapped from YANG variable /interface/port_channel/switchport/trunk_private_vlan_classification (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_trunk_private_vlan_classification is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_trunk_private_vlan_classification() directly. | Below is the instruction that describes the task:
### Input:
Setter method for trunk_private_vlan_classification, mapped from YANG variable /interface/port_channel/switchport/trunk_private_vlan_classification (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_trunk_private_vlan_classification is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_trunk_private_vlan_classification() directly.
### Response:
def _set_trunk_private_vlan_classification(self, v, load=False):
"""
Setter method for trunk_private_vlan_classification, mapped from YANG variable /interface/port_channel/switchport/trunk_private_vlan_classification (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_trunk_private_vlan_classification is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_trunk_private_vlan_classification() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=trunk_private_vlan_classification.trunk_private_vlan_classification, is_container='container', presence=False, yang_name="trunk-private-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'ctag-pvlan-classification-phy-config'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """trunk_private_vlan_classification must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=trunk_private_vlan_classification.trunk_private_vlan_classification, is_container='container', presence=False, yang_name="trunk-private-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'ctag-pvlan-classification-phy-config'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__trunk_private_vlan_classification = t
if hasattr(self, '_set'):
self._set() |
def Copy(self, name=None):
"""Returns a copy of this Cdf.
Args:
name: string name for the new Cdf
"""
if name is None:
name = self.name
return Cdf(list(self.xs), list(self.ps), name) | Returns a copy of this Cdf.
Args:
name: string name for the new Cdf | Below is the instruction that describes the task:
### Input:
Returns a copy of this Cdf.
Args:
name: string name for the new Cdf
### Response:
def Copy(self, name=None):
"""Returns a copy of this Cdf.
Args:
name: string name for the new Cdf
"""
if name is None:
name = self.name
return Cdf(list(self.xs), list(self.ps), name) |
def get_log_lookup_session(self, proxy):
"""Gets the ``OsidSession`` associated with the log lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.logging.LogLookupSession) - a ``LogLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_log_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_log_lookup()`` is ``true``.*
"""
if not self.supports_log_lookup():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.LogLookupSession(proxy=proxy, runtime=self._runtime) | Gets the ``OsidSession`` associated with the log lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.logging.LogLookupSession) - a ``LogLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_log_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_log_lookup()`` is ``true``.* | Below is the instruction that describes the task:
### Input:
Gets the ``OsidSession`` associated with the log lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.logging.LogLookupSession) - a ``LogLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_log_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_log_lookup()`` is ``true``.*
### Response:
def get_log_lookup_session(self, proxy):
"""Gets the ``OsidSession`` associated with the log lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.logging.LogLookupSession) - a ``LogLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_log_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_log_lookup()`` is ``true``.*
"""
if not self.supports_log_lookup():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.LogLookupSession(proxy=proxy, runtime=self._runtime) |
def accept(kind, doc=None, error_text=None, exception_handlers=empty.dict, accept_context=False):
"""Allows quick wrapping of any Python type cast function for use as a hug type annotation"""
return create(
doc,
error_text,
exception_handlers=exception_handlers,
chain=False,
accept_context=accept_context
)(kind) | Allows quick wrapping of any Python type cast function for use as a hug type annotation | Below is the instruction that describes the task:
### Input:
Allows quick wrapping of any Python type cast function for use as a hug type annotation
### Response:
def accept(kind, doc=None, error_text=None, exception_handlers=empty.dict, accept_context=False):
"""Allows quick wrapping of any Python type cast function for use as a hug type annotation"""
return create(
doc,
error_text,
exception_handlers=exception_handlers,
chain=False,
accept_context=accept_context
)(kind) |
def inputhook_wx2():
"""Run the wx event loop, polling for stdin.
This version runs the wx eventloop for an undetermined amount of time,
during which it periodically checks to see if anything is ready on
stdin. If anything is ready on stdin, the event loop exits.
The argument to elr.Run controls how often the event loop looks at stdin.
This determines the responsiveness at the keyboard. A setting of 1000
enables a user to type at most 1 char per second. I have found that a
setting of 10 gives good keyboard response. We can shorten it further,
but eventually performance would suffer from calling select/kbhit too
often.
"""
try:
app = wx.GetApp() # @UndefinedVariable
if app is not None:
assert wx.Thread_IsMain() # @UndefinedVariable
elr = EventLoopRunner()
# As this time is made shorter, keyboard response improves, but idle
# CPU load goes up. 10 ms seems like a good compromise.
elr.Run(time=10) # CHANGE time here to control polling interval
except KeyboardInterrupt:
pass
return 0 | Run the wx event loop, polling for stdin.
This version runs the wx eventloop for an undetermined amount of time,
during which it periodically checks to see if anything is ready on
stdin. If anything is ready on stdin, the event loop exits.
The argument to elr.Run controls how often the event loop looks at stdin.
This determines the responsiveness at the keyboard. A setting of 1000
enables a user to type at most 1 char per second. I have found that a
setting of 10 gives good keyboard response. We can shorten it further,
but eventually performance would suffer from calling select/kbhit too
often. | Below is the instruction that describes the task:
### Input:
Run the wx event loop, polling for stdin.
This version runs the wx eventloop for an undetermined amount of time,
during which it periodically checks to see if anything is ready on
stdin. If anything is ready on stdin, the event loop exits.
The argument to elr.Run controls how often the event loop looks at stdin.
This determines the responsiveness at the keyboard. A setting of 1000
enables a user to type at most 1 char per second. I have found that a
setting of 10 gives good keyboard response. We can shorten it further,
but eventually performance would suffer from calling select/kbhit too
often.
### Response:
def inputhook_wx2():
"""Run the wx event loop, polling for stdin.
This version runs the wx eventloop for an undetermined amount of time,
during which it periodically checks to see if anything is ready on
stdin. If anything is ready on stdin, the event loop exits.
The argument to elr.Run controls how often the event loop looks at stdin.
This determines the responsiveness at the keyboard. A setting of 1000
enables a user to type at most 1 char per second. I have found that a
setting of 10 gives good keyboard response. We can shorten it further,
but eventually performance would suffer from calling select/kbhit too
often.
"""
try:
app = wx.GetApp() # @UndefinedVariable
if app is not None:
assert wx.Thread_IsMain() # @UndefinedVariable
elr = EventLoopRunner()
# As this time is made shorter, keyboard response improves, but idle
# CPU load goes up. 10 ms seems like a good compromise.
elr.Run(time=10) # CHANGE time here to control polling interval
except KeyboardInterrupt:
pass
return 0 |
def cov(self, other, min_periods=None):
"""
Compute covariance with Series, excluding missing values.
Parameters
----------
other : Series
Series with which to compute the covariance.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
Returns
-------
float
Covariance between Series and other normalized by N-1
(unbiased estimator).
Examples
--------
>>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])
>>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])
>>> s1.cov(s2)
-0.01685762652715874
"""
this, other = self.align(other, join='inner', copy=False)
if len(this) == 0:
return np.nan
return nanops.nancov(this.values, other.values,
min_periods=min_periods) | Compute covariance with Series, excluding missing values.
Parameters
----------
other : Series
Series with which to compute the covariance.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
Returns
-------
float
Covariance between Series and other normalized by N-1
(unbiased estimator).
Examples
--------
>>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])
>>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])
>>> s1.cov(s2)
-0.01685762652715874 | Below is the the instruction that describes the task:
### Input:
Compute covariance with Series, excluding missing values.
Parameters
----------
other : Series
Series with which to compute the covariance.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
Returns
-------
float
Covariance between Series and other normalized by N-1
(unbiased estimator).
Examples
--------
>>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])
>>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])
>>> s1.cov(s2)
-0.01685762652715874
### Response:
def cov(self, other, min_periods=None):
"""
Compute covariance with Series, excluding missing values.
Parameters
----------
other : Series
Series with which to compute the covariance.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
Returns
-------
float
Covariance between Series and other normalized by N-1
(unbiased estimator).
Examples
--------
>>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])
>>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])
>>> s1.cov(s2)
-0.01685762652715874
"""
this, other = self.align(other, join='inner', copy=False)
if len(this) == 0:
return np.nan
return nanops.nancov(this.values, other.values,
min_periods=min_periods) |
def isSuperTagSetOf(self, tagSet):
"""Test type relationship against given *TagSet*
The callee is considered to be a supertype of given *TagSet*
tag-wise if all tags in *TagSet* are present in the callee and
they are in the same order.
Parameters
----------
tagSet: :class:`~pyasn1.type.tag.TagSet`
*TagSet* object to evaluate against the callee
Returns
-------
: :py:class:`bool`
`True` if callee is a supertype of *tagSet*
"""
if len(tagSet) < self.__lenOfSuperTags:
return False
return self.__superTags == tagSet[:self.__lenOfSuperTags] | Test type relationship against given *TagSet*
The callee is considered to be a supertype of given *TagSet*
tag-wise if all tags in *TagSet* are present in the callee and
they are in the same order.
Parameters
----------
tagSet: :class:`~pyasn1.type.tag.TagSet`
*TagSet* object to evaluate against the callee
Returns
-------
: :py:class:`bool`
`True` if callee is a supertype of *tagSet* | Below is the the instruction that describes the task:
### Input:
Test type relationship against given *TagSet*
The callee is considered to be a supertype of given *TagSet*
tag-wise if all tags in *TagSet* are present in the callee and
they are in the same order.
Parameters
----------
tagSet: :class:`~pyasn1.type.tag.TagSet`
*TagSet* object to evaluate against the callee
Returns
-------
: :py:class:`bool`
`True` if callee is a supertype of *tagSet*
### Response:
def isSuperTagSetOf(self, tagSet):
"""Test type relationship against given *TagSet*
The callee is considered to be a supertype of given *TagSet*
tag-wise if all tags in *TagSet* are present in the callee and
they are in the same order.
Parameters
----------
tagSet: :class:`~pyasn1.type.tag.TagSet`
*TagSet* object to evaluate against the callee
Returns
-------
: :py:class:`bool`
`True` if callee is a supertype of *tagSet*
"""
if len(tagSet) < self.__lenOfSuperTags:
return False
return self.__superTags == tagSet[:self.__lenOfSuperTags] |
def setup_types(self):
"""
The Message object has a circular reference on itself, thus we have to allow
Type referencing by name. Here we lookup any Types referenced by name and
replace with the real class.
"""
def load(t):
from TelegramBotAPI.types.type import Type
if isinstance(t, str):
return Type._type(t)
assert issubclass(t, Type)
return t
self.types = [load(t) for t in self.types] | The Message object has a circular reference on itself, thus we have to allow
Type referencing by name. Here we lookup any Types referenced by name and
replace with the real class. | Below is the the instruction that describes the task:
### Input:
The Message object has a circular reference on itself, thus we have to allow
Type referencing by name. Here we lookup any Types referenced by name and
replace with the real class.
### Response:
def setup_types(self):
"""
The Message object has a circular reference on itself, thus we have to allow
Type referencing by name. Here we lookup any Types referenced by name and
replace with the real class.
"""
def load(t):
from TelegramBotAPI.types.type import Type
if isinstance(t, str):
return Type._type(t)
assert issubclass(t, Type)
return t
self.types = [load(t) for t in self.types] |
def _set_reverse_metric_info(self, v, load=False):
"""
Setter method for reverse_metric_info, mapped from YANG variable /isis_state/interface_detail/isis_intf/reverse_metric_info (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_reverse_metric_info is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_reverse_metric_info() directly.
YANG Description: ISIS interface reverse-metric configuration
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=reverse_metric_info.reverse_metric_info, is_container='container', presence=False, yang_name="reverse-metric-info", rest_name="reverse-metric-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-reverse-metric-interface', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """reverse_metric_info must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=reverse_metric_info.reverse_metric_info, is_container='container', presence=False, yang_name="reverse-metric-info", rest_name="reverse-metric-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-reverse-metric-interface', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
})
self.__reverse_metric_info = t
if hasattr(self, '_set'):
self._set() | Setter method for reverse_metric_info, mapped from YANG variable /isis_state/interface_detail/isis_intf/reverse_metric_info (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_reverse_metric_info is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_reverse_metric_info() directly.
YANG Description: ISIS interface reverse-metric configuration | Below is the the instruction that describes the task:
### Input:
Setter method for reverse_metric_info, mapped from YANG variable /isis_state/interface_detail/isis_intf/reverse_metric_info (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_reverse_metric_info is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_reverse_metric_info() directly.
YANG Description: ISIS interface reverse-metric configuration
### Response:
def _set_reverse_metric_info(self, v, load=False):
"""
Setter method for reverse_metric_info, mapped from YANG variable /isis_state/interface_detail/isis_intf/reverse_metric_info (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_reverse_metric_info is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_reverse_metric_info() directly.
YANG Description: ISIS interface reverse-metric configuration
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=reverse_metric_info.reverse_metric_info, is_container='container', presence=False, yang_name="reverse-metric-info", rest_name="reverse-metric-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-reverse-metric-interface', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """reverse_metric_info must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=reverse_metric_info.reverse_metric_info, is_container='container', presence=False, yang_name="reverse-metric-info", rest_name="reverse-metric-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-reverse-metric-interface', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
})
self.__reverse_metric_info = t
if hasattr(self, '_set'):
self._set() |
def get_prop(self, prop):
""" Calls the getter with no arguments and returns its value """
if self._parser is None:
raise ConfigurationError('Cannot call ParserProperty."get_prop" with no parser configured')
return self._parser(prop) if prop else self._parser() | Calls the getter with no arguments and returns its value | Below is the the instruction that describes the task:
### Input:
Calls the getter with no arguments and returns its value
### Response:
def get_prop(self, prop):
""" Calls the getter with no arguments and returns its value """
if self._parser is None:
raise ConfigurationError('Cannot call ParserProperty."get_prop" with no parser configured')
return self._parser(prop) if prop else self._parser() |
def is_gene_list(bed_file):
"""Check if the file is only a list of genes, not a BED
"""
with utils.open_gzipsafe(bed_file) as in_handle:
for line in in_handle:
if not line.startswith("#"):
if len(line.split()) == 1:
return True
else:
return False | Check if the file is only a list of genes, not a BED | Below is the the instruction that describes the task:
### Input:
Check if the file is only a list of genes, not a BED
### Response:
def is_gene_list(bed_file):
"""Check if the file is only a list of genes, not a BED
"""
with utils.open_gzipsafe(bed_file) as in_handle:
for line in in_handle:
if not line.startswith("#"):
if len(line.split()) == 1:
return True
else:
return False |
def authenticate(self, session: Session, listener):
"""
This method call the authenticate method on registered plugins to test user authentication.
User is considered authenticated if all plugins called returns True.
Plugins authenticate() method are supposed to return :
- True if user is authentication succeed
- False if user authentication fails
- None if authentication can't be achieved (then plugin result is then ignored)
:param session:
:param listener:
:return:
"""
auth_plugins = None
auth_config = self.config.get('auth', None)
if auth_config:
auth_plugins = auth_config.get('plugins', None)
returns = yield from self.plugins_manager.map_plugin_coro(
"authenticate",
session=session,
filter_plugins=auth_plugins)
auth_result = True
if returns:
for plugin in returns:
res = returns[plugin]
if res is False:
auth_result = False
self.logger.debug("Authentication failed due to '%s' plugin result: %s" % (plugin.name, res))
else:
self.logger.debug("'%s' plugin result: %s" % (plugin.name, res))
# If all plugins returned True, authentication is success
return auth_result | This method call the authenticate method on registered plugins to test user authentication.
User is considered authenticated if all plugins called returns True.
Plugins authenticate() method are supposed to return :
- True if user is authentication succeed
- False if user authentication fails
- None if authentication can't be achieved (then plugin result is then ignored)
:param session:
:param listener:
:return: | Below is the the instruction that describes the task:
### Input:
This method call the authenticate method on registered plugins to test user authentication.
User is considered authenticated if all plugins called returns True.
Plugins authenticate() method are supposed to return :
- True if user is authentication succeed
- False if user authentication fails
- None if authentication can't be achieved (then plugin result is then ignored)
:param session:
:param listener:
:return:
### Response:
def authenticate(self, session: Session, listener):
"""
This method call the authenticate method on registered plugins to test user authentication.
User is considered authenticated if all plugins called returns True.
Plugins authenticate() method are supposed to return :
- True if user is authentication succeed
- False if user authentication fails
- None if authentication can't be achieved (then plugin result is then ignored)
:param session:
:param listener:
:return:
"""
auth_plugins = None
auth_config = self.config.get('auth', None)
if auth_config:
auth_plugins = auth_config.get('plugins', None)
returns = yield from self.plugins_manager.map_plugin_coro(
"authenticate",
session=session,
filter_plugins=auth_plugins)
auth_result = True
if returns:
for plugin in returns:
res = returns[plugin]
if res is False:
auth_result = False
self.logger.debug("Authentication failed due to '%s' plugin result: %s" % (plugin.name, res))
else:
self.logger.debug("'%s' plugin result: %s" % (plugin.name, res))
# If all plugins returned True, authentication is success
return auth_result |
def generate_random_string(cls, length):
"""
Generatesa a [length] characters alpha numeric secret
"""
# avoid things that could be mistaken ex: 'I' and '1'
letters = "23456789ABCDEFGHJKLMNPQRSTUVWXYZ"
return "".join([random.choice(letters) for _ in range(length)]) | Generatesa a [length] characters alpha numeric secret | Below is the the instruction that describes the task:
### Input:
Generatesa a [length] characters alpha numeric secret
### Response:
def generate_random_string(cls, length):
"""
Generatesa a [length] characters alpha numeric secret
"""
# avoid things that could be mistaken ex: 'I' and '1'
letters = "23456789ABCDEFGHJKLMNPQRSTUVWXYZ"
return "".join([random.choice(letters) for _ in range(length)]) |
def _build_default_options(self):
""""Provide the default value for all allowed fields.
Custom FactoryOptions classes should override this method
to update() its return value.
"""
return [
OptionDefault('model', None, inherit=True),
OptionDefault('abstract', False, inherit=False),
OptionDefault('strategy', enums.CREATE_STRATEGY, inherit=True),
OptionDefault('inline_args', (), inherit=True),
OptionDefault('exclude', (), inherit=True),
OptionDefault('rename', {}, inherit=True),
] | Provide the default value for all allowed fields.
Custom FactoryOptions classes should override this method
to update() its return value. | Below is the the instruction that describes the task:
### Input:
Provide the default value for all allowed fields.
Custom FactoryOptions classes should override this method
to update() its return value.
### Response:
def _build_default_options(self):
""""Provide the default value for all allowed fields.
Custom FactoryOptions classes should override this method
to update() its return value.
"""
return [
OptionDefault('model', None, inherit=True),
OptionDefault('abstract', False, inherit=False),
OptionDefault('strategy', enums.CREATE_STRATEGY, inherit=True),
OptionDefault('inline_args', (), inherit=True),
OptionDefault('exclude', (), inherit=True),
OptionDefault('rename', {}, inherit=True),
] |
def has_client_id(self, id):
"""Returns True if we have a client with a certain integer identifier"""
return self.query(Client).filter(Client.id==id).count() != 0 | Returns True if we have a client with a certain integer identifier | Below is the the instruction that describes the task:
### Input:
Returns True if we have a client with a certain integer identifier
### Response:
def has_client_id(self, id):
"""Returns True if we have a client with a certain integer identifier"""
return self.query(Client).filter(Client.id==id).count() != 0 |
def detect(self):
"""Detect and return the IP address."""
if PY3: # py23
import subprocess # noqa: S404 @UnresolvedImport pylint: disable=import-error
else:
import commands as subprocess # @UnresolvedImport pylint: disable=import-error
try:
theip = subprocess.getoutput(self.opts_command) # noqa: S605
except Exception:
theip = None
self.set_current_value(theip)
return theip | Detect and return the IP address. | Below is the the instruction that describes the task:
### Input:
Detect and return the IP address.
### Response:
def detect(self):
"""Detect and return the IP address."""
if PY3: # py23
import subprocess # noqa: S404 @UnresolvedImport pylint: disable=import-error
else:
import commands as subprocess # @UnresolvedImport pylint: disable=import-error
try:
theip = subprocess.getoutput(self.opts_command) # noqa: S605
except Exception:
theip = None
self.set_current_value(theip)
return theip |
def set_user_method(self, user_methods, forced=False):
r'''Method to set the T, P, and composition dependent property methods
desired for consideration by the user. Can be used to exclude certain
methods which might have unacceptable accuracy.
As a side effect, the previously selected method is removed when
this method is called to ensure user methods are tried in the desired
order.
Parameters
----------
user_methods : str or list
Methods by name to be considered for calculation of the mixture
property, ordered by preference.
forced : bool, optional
If True, only the user specified methods will ever be considered;
if False, other methods will be considered if no user methods
suceed.
'''
# Accept either a string or a list of methods, and whether
# or not to only consider the false methods
if isinstance(user_methods, str):
user_methods = [user_methods]
# The user's order matters and is retained for use by select_valid_methods
self.user_methods = user_methods
self.forced = forced
# Validate that the user's specified methods are actual methods
if set(self.user_methods).difference(self.all_methods):
raise Exception("One of the given methods is not available for this mixture")
if not self.user_methods and self.forced:
raise Exception('Only user specified methods are considered when forced is True, but no methods were provided')
# Remove previously selected methods
self.method = None
self.sorted_valid_methods = []
self.TP_zs_ws_cached = (None, None, None, None) | r'''Method to set the T, P, and composition dependent property methods
desired for consideration by the user. Can be used to exclude certain
methods which might have unacceptable accuracy.
As a side effect, the previously selected method is removed when
this method is called to ensure user methods are tried in the desired
order.
Parameters
----------
user_methods : str or list
Methods by name to be considered for calculation of the mixture
property, ordered by preference.
forced : bool, optional
If True, only the user specified methods will ever be considered;
if False, other methods will be considered if no user methods
suceed. | Below is the the instruction that describes the task:
### Input:
r'''Method to set the T, P, and composition dependent property methods
desired for consideration by the user. Can be used to exclude certain
methods which might have unacceptable accuracy.
As a side effect, the previously selected method is removed when
this method is called to ensure user methods are tried in the desired
order.
Parameters
----------
user_methods : str or list
Methods by name to be considered for calculation of the mixture
property, ordered by preference.
forced : bool, optional
If True, only the user specified methods will ever be considered;
if False, other methods will be considered if no user methods
suceed.
### Response:
def set_user_method(self, user_methods, forced=False):
r'''Method to set the T, P, and composition dependent property methods
desired for consideration by the user. Can be used to exclude certain
methods which might have unacceptable accuracy.
As a side effect, the previously selected method is removed when
this method is called to ensure user methods are tried in the desired
order.
Parameters
----------
user_methods : str or list
Methods by name to be considered for calculation of the mixture
property, ordered by preference.
forced : bool, optional
If True, only the user specified methods will ever be considered;
if False, other methods will be considered if no user methods
suceed.
'''
# Accept either a string or a list of methods, and whether
# or not to only consider the false methods
if isinstance(user_methods, str):
user_methods = [user_methods]
# The user's order matters and is retained for use by select_valid_methods
self.user_methods = user_methods
self.forced = forced
# Validate that the user's specified methods are actual methods
if set(self.user_methods).difference(self.all_methods):
raise Exception("One of the given methods is not available for this mixture")
if not self.user_methods and self.forced:
raise Exception('Only user specified methods are considered when forced is True, but no methods were provided')
# Remove previously selected methods
self.method = None
self.sorted_valid_methods = []
self.TP_zs_ws_cached = (None, None, None, None) |
def get(package_name, pypi_server="https://pypi.python.org/pypi/"):
"""
Constructs a request to the PyPI server and returns a
:class:`yarg.package.Package`.
:param package_name: case sensitive name of the package on the PyPI server.
:param pypi_server: (option) URL to the PyPI server.
>>> import yarg
>>> package = yarg.get('yarg')
<Package yarg>
"""
if not pypi_server.endswith("/"):
pypi_server = pypi_server + "/"
response = requests.get("{0}{1}/json".format(pypi_server,
package_name))
if response.status_code >= 300:
raise HTTPError(status_code=response.status_code,
reason=response.reason)
if hasattr(response.content, 'decode'):
return json2package(response.content.decode())
else:
return json2package(response.content) | Constructs a request to the PyPI server and returns a
:class:`yarg.package.Package`.
:param package_name: case sensitive name of the package on the PyPI server.
:param pypi_server: (option) URL to the PyPI server.
>>> import yarg
>>> package = yarg.get('yarg')
<Package yarg> | Below is the the instruction that describes the task:
### Input:
Constructs a request to the PyPI server and returns a
:class:`yarg.package.Package`.
:param package_name: case sensitive name of the package on the PyPI server.
:param pypi_server: (option) URL to the PyPI server.
>>> import yarg
>>> package = yarg.get('yarg')
<Package yarg>
### Response:
def get(package_name, pypi_server="https://pypi.python.org/pypi/"):
"""
Constructs a request to the PyPI server and returns a
:class:`yarg.package.Package`.
:param package_name: case sensitive name of the package on the PyPI server.
:param pypi_server: (option) URL to the PyPI server.
>>> import yarg
>>> package = yarg.get('yarg')
<Package yarg>
"""
if not pypi_server.endswith("/"):
pypi_server = pypi_server + "/"
response = requests.get("{0}{1}/json".format(pypi_server,
package_name))
if response.status_code >= 300:
raise HTTPError(status_code=response.status_code,
reason=response.reason)
if hasattr(response.content, 'decode'):
return json2package(response.content.decode())
else:
return json2package(response.content) |
def _output_ret(self, ret, out, retcode=0):
'''
Print the output from a single return to the terminal
'''
import salt.output
# Handle special case commands
if self.config['fun'] == 'sys.doc' and not isinstance(ret, Exception):
self._print_docs(ret)
else:
# Determine the proper output method and run it
salt.output.display_output(ret,
out=out,
opts=self.config,
_retcode=retcode)
if not ret:
sys.stderr.write('ERROR: No return received\n')
sys.exit(2) | Print the output from a single return to the terminal | Below is the the instruction that describes the task:
### Input:
Print the output from a single return to the terminal
### Response:
def _output_ret(self, ret, out, retcode=0):
'''
Print the output from a single return to the terminal
'''
import salt.output
# Handle special case commands
if self.config['fun'] == 'sys.doc' and not isinstance(ret, Exception):
self._print_docs(ret)
else:
# Determine the proper output method and run it
salt.output.display_output(ret,
out=out,
opts=self.config,
_retcode=retcode)
if not ret:
sys.stderr.write('ERROR: No return received\n')
sys.exit(2) |
def secret(self, s):
"""
Parse text either a private key or a private hierarchical key.
Return a subclass of :class:`Key <pycoin.key.Key>`, or None.
"""
s = parseable_str(s)
for f in [self.private_key, self.hierarchical_key]:
v = f(s)
if v:
return v | Parse text either a private key or a private hierarchical key.
Return a subclass of :class:`Key <pycoin.key.Key>`, or None. | Below is the the instruction that describes the task:
### Input:
Parse text either a private key or a private hierarchical key.
Return a subclass of :class:`Key <pycoin.key.Key>`, or None.
### Response:
def secret(self, s):
"""
Parse text either a private key or a private hierarchical key.
Return a subclass of :class:`Key <pycoin.key.Key>`, or None.
"""
s = parseable_str(s)
for f in [self.private_key, self.hierarchical_key]:
v = f(s)
if v:
return v |
def timedelta_to_seconds(value, with_microseconds=False):
"""
Convert datetime.timedelta to seconds
:param value: timedelta to convert
:type value: datetime.timedelta
:param with_microseconds:
:type with_microseconds: bool
:return: seconds/seconds with microseconds or None if val is None
:rtype: int/float/None
:raise: TypeError when val is not timedelta
"""
if value is None:
return None
if not isinstance(value, timedelta):
raise TypeError('value must be a datetime.timedelta object')
microseconds = value.microseconds / MICROSECONDS_IN_SECOND \
if with_microseconds else 0
return value.days * SECONDS_IN_DAY + value.seconds + microseconds | Convert datetime.timedelta to seconds
:param value: timedelta to convert
:type value: datetime.timedelta
:param with_microseconds:
:type with_microseconds: bool
:return: seconds/seconds with microseconds or None if val is None
:rtype: int/float/None
:raise: TypeError when val is not timedelta | Below is the the instruction that describes the task:
### Input:
Convert datetime.timedelta to seconds
:param value: timedelta to convert
:type value: datetime.timedelta
:param with_microseconds:
:type with_microseconds: bool
:return: seconds/seconds with microseconds or None if val is None
:rtype: int/float/None
:raise: TypeError when val is not timedelta
### Response:
def timedelta_to_seconds(value, with_microseconds=False):
"""
Convert datetime.timedelta to seconds
:param value: timedelta to convert
:type value: datetime.timedelta
:param with_microseconds:
:type with_microseconds: bool
:return: seconds/seconds with microseconds or None if val is None
:rtype: int/float/None
:raise: TypeError when val is not timedelta
"""
if value is None:
return None
if not isinstance(value, timedelta):
raise TypeError('value must be a datetime.timedelta object')
microseconds = value.microseconds / MICROSECONDS_IN_SECOND \
if with_microseconds else 0
return value.days * SECONDS_IN_DAY + value.seconds + microseconds |
def update(self, dictionary=None, **kwargs):
"""
Adds/overwrites all the keys and values from the dictionary.
"""
if not dictionary == None: kwargs.update(dictionary)
for k in list(kwargs.keys()): self[k] = kwargs[k] | Adds/overwrites all the keys and values from the dictionary. | Below is the the instruction that describes the task:
### Input:
Adds/overwrites all the keys and values from the dictionary.
### Response:
def update(self, dictionary=None, **kwargs):
"""
Adds/overwrites all the keys and values from the dictionary.
"""
if not dictionary == None: kwargs.update(dictionary)
for k in list(kwargs.keys()): self[k] = kwargs[k] |
def custom_conf(self, conf):
'''custom apikey and http parameters'''
if conf:
for (key, val) in conf.items():
self.__conf[key] = val
return self | custom apikey and http parameters | Below is the the instruction that describes the task:
### Input:
custom apikey and http parameters
### Response:
def custom_conf(self, conf):
'''custom apikey and http parameters'''
if conf:
for (key, val) in conf.items():
self.__conf[key] = val
return self |
def post_transaction(self, transaction, mode):
"""Submit a valid transaction to the mempool."""
if not mode or mode not in self.mode_list:
raise ValidationError('Mode must be one of the following {}.'
.format(', '.join(self.mode_list)))
tx_dict = transaction.tx_dict if transaction.tx_dict else transaction.to_dict()
payload = {
'method': mode,
'jsonrpc': '2.0',
'params': [encode_transaction(tx_dict)],
'id': str(uuid4())
}
# TODO: handle connection errors!
return requests.post(self.endpoint, json=payload) | Submit a valid transaction to the mempool. | Below is the the instruction that describes the task:
### Input:
Submit a valid transaction to the mempool.
### Response:
def post_transaction(self, transaction, mode):
"""Submit a valid transaction to the mempool."""
if not mode or mode not in self.mode_list:
raise ValidationError('Mode must be one of the following {}.'
.format(', '.join(self.mode_list)))
tx_dict = transaction.tx_dict if transaction.tx_dict else transaction.to_dict()
payload = {
'method': mode,
'jsonrpc': '2.0',
'params': [encode_transaction(tx_dict)],
'id': str(uuid4())
}
# TODO: handle connection errors!
return requests.post(self.endpoint, json=payload) |
def remove_bad_particles(st, min_rad='calc', max_rad='calc', min_edge_dist=2.0,
check_rad_cutoff=[3.5, 15], check_outside_im=True,
tries=50, im_change_frac=0.2, **kwargs):
"""
Removes improperly-featured particles from the state, based on a
combination of particle size and the change in error on removal.
Parameters
-----------
st : :class:`peri.states.State`
The state to remove bad particles from.
min_rad : Float, optional
All particles with radius below min_rad are automatically deleted.
Set to 'calc' to make it the median rad - 25* radius std.
Default is 'calc'.
max_rad : Float, optional
All particles with radius above max_rad are automatically deleted.
Set to 'calc' to make it the median rad + 15* radius std.
Default is 'calc'.
min_edge_dist : Float, optional
All particles within min_edge_dist of the (padded) image
edges are automatically deleted. Default is 2.0
check_rad_cutoff : 2-element list of floats, optional
Particles with radii < check_rad_cutoff[0] or > check_rad_cutoff[1]
are checked if they should be deleted. Set to 'calc' to make it the
median rad +- 3.5 * radius std. Default is [3.5, 15].
check_outside_im : Bool, optional
If True, checks if particles located outside the unpadded image
should be deleted. Default is True.
tries : Int, optional
The maximum number of particles with radii < check_rad_cutoff
to try to remove. Checks in increasing order of radius size.
Default is 50.
im_change_frac : Float, , optional
Number between 0 and 1. If removing a particle decreases the
error by less than im_change_frac*the change in the image, then
the particle is deleted. Default is 0.2
Returns
-------
removed: Int
The cumulative number of particles removed.
"""
is_near_im_edge = lambda pos, pad: (((pos + st.pad) < pad) | (pos >
np.array(st.ishape.shape) + st.pad - pad)).any(axis=1)
# returns True if the position is within 'pad' of the _outer_ image edge
removed = 0
attempts = 0
n_tot_part = st.obj_get_positions().shape[0]
q10 = int(0.1 * n_tot_part) # 10% quartile
r_sig = np.sort(st.obj_get_radii())[q10:-q10].std()
r_med = np.median(st.obj_get_radii())
if max_rad == 'calc':
max_rad = r_med + 15*r_sig
if min_rad == 'calc':
min_rad = r_med - 25*r_sig
if check_rad_cutoff == 'calc':
check_rad_cutoff = [r_med - 7.5*r_sig, r_med + 7.5*r_sig]
# 1. Automatic deletion:
rad_wrong_size = np.nonzero(
(st.obj_get_radii() < min_rad) | (st.obj_get_radii() > max_rad))[0]
near_im_edge = np.nonzero(is_near_im_edge(st.obj_get_positions(),
min_edge_dist - st.pad))[0]
delete_inds = np.unique(np.append(rad_wrong_size, near_im_edge)).tolist()
delete_poses = st.obj_get_positions()[delete_inds].tolist()
message = ('-'*27 + 'SUBTRACTING' + '-'*28 +
'\n Z\t Y\t X\t R\t|\t ERR0\t\t ERR1')
with log.noformat():
CLOG.info(message)
for pos in delete_poses:
ind = st.obj_closest_particle(pos)
old_err = st.error
p, r = st.obj_remove_particle(ind)
p = p[0]
r = r[0]
part_msg = '%2.2f\t%3.2f\t%3.2f\t%3.2f\t|\t%4.3f \t%4.3f' % (
tuple(p) + (r,) + (old_err, st.error))
with log.noformat():
CLOG.info(part_msg)
removed += 1
# 2. Conditional deletion:
check_rad_inds = np.nonzero((st.obj_get_radii() < check_rad_cutoff[0]) |
(st.obj_get_radii() > check_rad_cutoff[1]))[0]
if check_outside_im:
check_edge_inds = np.nonzero(
is_near_im_edge(st.obj_get_positions(), st.pad))[0]
check_inds = np.unique(np.append(check_rad_inds, check_edge_inds))
else:
check_inds = check_rad_inds
check_inds = check_inds[np.argsort(st.obj_get_radii()[check_inds])]
tries = np.min([tries, check_inds.size])
check_poses = st.obj_get_positions()[check_inds[:tries]].copy()
for pos in check_poses:
old_err = st.error
ind = st.obj_closest_particle(pos)
killed, p, r = check_remove_particle(
st, ind, im_change_frac=im_change_frac)
if killed:
removed += 1
check_inds[check_inds > ind] -= 1 # cleaning up indices....
delete_poses.append(pos)
part_msg = '%2.2f\t%3.2f\t%3.2f\t%3.2f\t|\t%4.3f \t%4.3f' % (
p + r + (old_err, st.error))
with log.noformat():
CLOG.info(part_msg)
return removed, delete_poses | Removes improperly-featured particles from the state, based on a
combination of particle size and the change in error on removal.
Parameters
-----------
st : :class:`peri.states.State`
The state to remove bad particles from.
min_rad : Float, optional
All particles with radius below min_rad are automatically deleted.
Set to 'calc' to make it the median rad - 25* radius std.
Default is 'calc'.
max_rad : Float, optional
All particles with radius above max_rad are automatically deleted.
Set to 'calc' to make it the median rad + 15* radius std.
Default is 'calc'.
min_edge_dist : Float, optional
All particles within min_edge_dist of the (padded) image
edges are automatically deleted. Default is 2.0
check_rad_cutoff : 2-element list of floats, optional
Particles with radii < check_rad_cutoff[0] or > check_rad_cutoff[1]
are checked if they should be deleted. Set to 'calc' to make it the
median rad +- 3.5 * radius std. Default is [3.5, 15].
check_outside_im : Bool, optional
If True, checks if particles located outside the unpadded image
should be deleted. Default is True.
tries : Int, optional
The maximum number of particles with radii < check_rad_cutoff
to try to remove. Checks in increasing order of radius size.
Default is 50.
im_change_frac : Float, , optional
Number between 0 and 1. If removing a particle decreases the
error by less than im_change_frac*the change in the image, then
the particle is deleted. Default is 0.2
Returns
-------
removed: Int
The cumulative number of particles removed. | Below is the the instruction that describes the task:
### Input:
Removes improperly-featured particles from the state, based on a
combination of particle size and the change in error on removal.
Parameters
-----------
st : :class:`peri.states.State`
The state to remove bad particles from.
min_rad : Float, optional
All particles with radius below min_rad are automatically deleted.
Set to 'calc' to make it the median rad - 25* radius std.
Default is 'calc'.
max_rad : Float, optional
All particles with radius above max_rad are automatically deleted.
Set to 'calc' to make it the median rad + 15* radius std.
Default is 'calc'.
min_edge_dist : Float, optional
All particles within min_edge_dist of the (padded) image
edges are automatically deleted. Default is 2.0
check_rad_cutoff : 2-element list of floats, optional
Particles with radii < check_rad_cutoff[0] or > check_rad_cutoff[1]
are checked if they should be deleted. Set to 'calc' to make it the
median rad +- 3.5 * radius std. Default is [3.5, 15].
check_outside_im : Bool, optional
If True, checks if particles located outside the unpadded image
should be deleted. Default is True.
tries : Int, optional
The maximum number of particles with radii < check_rad_cutoff
to try to remove. Checks in increasing order of radius size.
Default is 50.
im_change_frac : Float, , optional
Number between 0 and 1. If removing a particle decreases the
error by less than im_change_frac*the change in the image, then
the particle is deleted. Default is 0.2
Returns
-------
removed: Int
The cumulative number of particles removed.
### Response:
def remove_bad_particles(st, min_rad='calc', max_rad='calc', min_edge_dist=2.0,
check_rad_cutoff=[3.5, 15], check_outside_im=True,
tries=50, im_change_frac=0.2, **kwargs):
"""
Removes improperly-featured particles from the state, based on a
combination of particle size and the change in error on removal.
Parameters
-----------
st : :class:`peri.states.State`
The state to remove bad particles from.
min_rad : Float, optional
All particles with radius below min_rad are automatically deleted.
Set to 'calc' to make it the median rad - 25* radius std.
Default is 'calc'.
max_rad : Float, optional
All particles with radius above max_rad are automatically deleted.
Set to 'calc' to make it the median rad + 15* radius std.
Default is 'calc'.
min_edge_dist : Float, optional
All particles within min_edge_dist of the (padded) image
edges are automatically deleted. Default is 2.0
check_rad_cutoff : 2-element list of floats, optional
Particles with radii < check_rad_cutoff[0] or > check_rad_cutoff[1]
are checked if they should be deleted. Set to 'calc' to make it the
median rad +- 3.5 * radius std. Default is [3.5, 15].
check_outside_im : Bool, optional
If True, checks if particles located outside the unpadded image
should be deleted. Default is True.
tries : Int, optional
The maximum number of particles with radii < check_rad_cutoff
to try to remove. Checks in increasing order of radius size.
Default is 50.
im_change_frac : Float, , optional
Number between 0 and 1. If removing a particle decreases the
error by less than im_change_frac*the change in the image, then
the particle is deleted. Default is 0.2
Returns
-------
removed: Int
The cumulative number of particles removed.
"""
is_near_im_edge = lambda pos, pad: (((pos + st.pad) < pad) | (pos >
np.array(st.ishape.shape) + st.pad - pad)).any(axis=1)
# returns True if the position is within 'pad' of the _outer_ image edge
removed = 0
attempts = 0
n_tot_part = st.obj_get_positions().shape[0]
q10 = int(0.1 * n_tot_part) # 10% quartile
r_sig = np.sort(st.obj_get_radii())[q10:-q10].std()
r_med = np.median(st.obj_get_radii())
if max_rad == 'calc':
max_rad = r_med + 15*r_sig
if min_rad == 'calc':
min_rad = r_med - 25*r_sig
if check_rad_cutoff == 'calc':
check_rad_cutoff = [r_med - 7.5*r_sig, r_med + 7.5*r_sig]
# 1. Automatic deletion:
rad_wrong_size = np.nonzero(
(st.obj_get_radii() < min_rad) | (st.obj_get_radii() > max_rad))[0]
near_im_edge = np.nonzero(is_near_im_edge(st.obj_get_positions(),
min_edge_dist - st.pad))[0]
delete_inds = np.unique(np.append(rad_wrong_size, near_im_edge)).tolist()
delete_poses = st.obj_get_positions()[delete_inds].tolist()
message = ('-'*27 + 'SUBTRACTING' + '-'*28 +
'\n Z\t Y\t X\t R\t|\t ERR0\t\t ERR1')
with log.noformat():
CLOG.info(message)
for pos in delete_poses:
ind = st.obj_closest_particle(pos)
old_err = st.error
p, r = st.obj_remove_particle(ind)
p = p[0]
r = r[0]
part_msg = '%2.2f\t%3.2f\t%3.2f\t%3.2f\t|\t%4.3f \t%4.3f' % (
tuple(p) + (r,) + (old_err, st.error))
with log.noformat():
CLOG.info(part_msg)
removed += 1
# 2. Conditional deletion:
check_rad_inds = np.nonzero((st.obj_get_radii() < check_rad_cutoff[0]) |
(st.obj_get_radii() > check_rad_cutoff[1]))[0]
if check_outside_im:
check_edge_inds = np.nonzero(
is_near_im_edge(st.obj_get_positions(), st.pad))[0]
check_inds = np.unique(np.append(check_rad_inds, check_edge_inds))
else:
check_inds = check_rad_inds
check_inds = check_inds[np.argsort(st.obj_get_radii()[check_inds])]
tries = np.min([tries, check_inds.size])
check_poses = st.obj_get_positions()[check_inds[:tries]].copy()
for pos in check_poses:
old_err = st.error
ind = st.obj_closest_particle(pos)
killed, p, r = check_remove_particle(
st, ind, im_change_frac=im_change_frac)
if killed:
removed += 1
check_inds[check_inds > ind] -= 1 # cleaning up indices....
delete_poses.append(pos)
part_msg = '%2.2f\t%3.2f\t%3.2f\t%3.2f\t|\t%4.3f \t%4.3f' % (
p + r + (old_err, st.error))
with log.noformat():
CLOG.info(part_msg)
return removed, delete_poses |
def l2traceroute_input_vlan_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
l2traceroute = ET.Element("l2traceroute")
config = l2traceroute
input = ET.SubElement(l2traceroute, "input")
vlan_id = ET.SubElement(input, "vlan-id")
vlan_id.text = kwargs.pop('vlan_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def l2traceroute_input_vlan_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
l2traceroute = ET.Element("l2traceroute")
config = l2traceroute
input = ET.SubElement(l2traceroute, "input")
vlan_id = ET.SubElement(input, "vlan-id")
vlan_id.text = kwargs.pop('vlan_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def extender(self, edge):
"See what edges can be extended by this edge."
(j, k, B, _, _) = edge
for (i, j, A, alpha, B1b) in self.chart[j]:
if B1b and B == B1b[0]:
self.add_edge([i, k, A, alpha + [edge], B1b[1:]]) | See what edges can be extended by this edge. | Below is the the instruction that describes the task:
### Input:
See what edges can be extended by this edge.
### Response:
def extender(self, edge):
"See what edges can be extended by this edge."
(j, k, B, _, _) = edge
for (i, j, A, alpha, B1b) in self.chart[j]:
if B1b and B == B1b[0]:
self.add_edge([i, k, A, alpha + [edge], B1b[1:]]) |
def list_instance_profiles(path_prefix='/', region=None, key=None,
keyid=None, profile=None):
'''
List all IAM instance profiles, starting at the optional path.
.. versionadded:: 2016.11.0
CLI Example:
salt-call boto_iam.list_instance_profiles
'''
p = get_all_instance_profiles(path_prefix, region, key, keyid, profile)
return [i['instance_profile_name'] for i in p] | List all IAM instance profiles, starting at the optional path.
.. versionadded:: 2016.11.0
CLI Example:
salt-call boto_iam.list_instance_profiles | Below is the the instruction that describes the task:
### Input:
List all IAM instance profiles, starting at the optional path.
.. versionadded:: 2016.11.0
CLI Example:
salt-call boto_iam.list_instance_profiles
### Response:
def list_instance_profiles(path_prefix='/', region=None, key=None,
keyid=None, profile=None):
'''
List all IAM instance profiles, starting at the optional path.
.. versionadded:: 2016.11.0
CLI Example:
salt-call boto_iam.list_instance_profiles
'''
p = get_all_instance_profiles(path_prefix, region, key, keyid, profile)
return [i['instance_profile_name'] for i in p] |
def _get_nd_basic_indexing(self, key):
"""This function is called when key is a slice, or an integer,
or a tuple of slices or integers"""
shape = self.shape
if isinstance(key, integer_types):
if key > shape[0] - 1:
raise IndexError(
'index {} is out of bounds for axis 0 with size {}'.format(
key, shape[0]))
return self._at(key)
elif isinstance(key, py_slice):
if key.step is not None and key.step != 1:
if key.step == 0:
raise ValueError("slice step cannot be zero")
return op.slice(self, begin=(key.start,), end=(key.stop,), step=(key.step,))
elif key.start is not None or key.stop is not None:
return self._slice(key.start, key.stop)
else:
return self
if not isinstance(key, tuple):
raise ValueError('index=%s must be a slice, or an ineger, or a tuple'
' of slices and integers to use basic indexing, received type=%s'
% (str(key), str(type(key))))
assert len(key) != 0, 'basic index cannot be an empty tuple'
begin = []
end = []
step = []
kept_axes = [] # axes where slice_i is a slice
i = -1
for i, slice_i in enumerate(key):
if isinstance(slice_i, integer_types):
begin.append(slice_i)
end.append(slice_i+1 if slice_i != -1 else self.shape[i])
step.append(1)
elif isinstance(slice_i, py_slice):
if slice_i.step == 0:
raise ValueError('basic index=%s cannot have slice=%s with step = 0'
% (str(key), str(slice_i)))
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
kept_axes.append(i)
else:
raise ValueError('basic_indexing does not support slicing with '
'index=%s of type=%s.' % (str(slice_i), str(type(slice_i))))
kept_axes.extend(range(i+1, len(shape)))
sliced_nd = op.slice(self, begin, end, step)
if len(kept_axes) == len(shape):
return sliced_nd
# squeeze sliced_shape to remove the axes indexed by integers
oshape = []
sliced_shape = sliced_nd.shape
for axis in kept_axes:
oshape.append(sliced_shape[axis])
# if key is a tuple of integers, still need to keep 1 dim
# while in Numpy, the output will become an value instead of an ndarray
if len(oshape) == 0:
oshape.append(1)
oshape = tuple(oshape)
assert np.prod(oshape) == np.prod(sliced_shape), 'oshape=%s has different size'\
' than sliced_shape=%s'\
% (oshape, sliced_shape)
return sliced_nd.reshape(oshape) | This function is called when key is a slice, or an integer,
or a tuple of slices or integers | Below is the the instruction that describes the task:
### Input:
This function is called when key is a slice, or an integer,
or a tuple of slices or integers
### Response:
def _get_nd_basic_indexing(self, key):
"""This function is called when key is a slice, or an integer,
or a tuple of slices or integers"""
shape = self.shape
if isinstance(key, integer_types):
if key > shape[0] - 1:
raise IndexError(
'index {} is out of bounds for axis 0 with size {}'.format(
key, shape[0]))
return self._at(key)
elif isinstance(key, py_slice):
if key.step is not None and key.step != 1:
if key.step == 0:
raise ValueError("slice step cannot be zero")
return op.slice(self, begin=(key.start,), end=(key.stop,), step=(key.step,))
elif key.start is not None or key.stop is not None:
return self._slice(key.start, key.stop)
else:
return self
if not isinstance(key, tuple):
raise ValueError('index=%s must be a slice, or an ineger, or a tuple'
' of slices and integers to use basic indexing, received type=%s'
% (str(key), str(type(key))))
assert len(key) != 0, 'basic index cannot be an empty tuple'
begin = []
end = []
step = []
kept_axes = [] # axes where slice_i is a slice
i = -1
for i, slice_i in enumerate(key):
if isinstance(slice_i, integer_types):
begin.append(slice_i)
end.append(slice_i+1 if slice_i != -1 else self.shape[i])
step.append(1)
elif isinstance(slice_i, py_slice):
if slice_i.step == 0:
raise ValueError('basic index=%s cannot have slice=%s with step = 0'
% (str(key), str(slice_i)))
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
kept_axes.append(i)
else:
raise ValueError('basic_indexing does not support slicing with '
'index=%s of type=%s.' % (str(slice_i), str(type(slice_i))))
kept_axes.extend(range(i+1, len(shape)))
sliced_nd = op.slice(self, begin, end, step)
if len(kept_axes) == len(shape):
return sliced_nd
# squeeze sliced_shape to remove the axes indexed by integers
oshape = []
sliced_shape = sliced_nd.shape
for axis in kept_axes:
oshape.append(sliced_shape[axis])
# if key is a tuple of integers, still need to keep 1 dim
# while in Numpy, the output will become an value instead of an ndarray
if len(oshape) == 0:
oshape.append(1)
oshape = tuple(oshape)
assert np.prod(oshape) == np.prod(sliced_shape), 'oshape=%s has different size'\
' than sliced_shape=%s'\
% (oshape, sliced_shape)
return sliced_nd.reshape(oshape) |
def cmd_register(phone):
"""
Войти в приложение с помощью SMS.
"""
if phone is None:
phone = click.prompt("Номер телефона")
r = rocket.devices.register.post(data={"phone": phone})
r = handle_error(r)
id = r.json()["sms_verification"]["id"]
code = click.prompt("Введите код из SMS", type=int)
r = rocket.sms_verifications[id]["verify"].patch(data={"code": code})
r = handle_error(r)
j = r.json()
click.secho("Добро пожаловать, {}!".format(j["user"]["first_name"]), fg="green")
config.email = j["user"]["email"]
config.write() | Войти в приложение с помощью SMS. | Below is the the instruction that describes the task:
### Input:
Войти в приложение с помощью SMS.
### Response:
def cmd_register(phone):
"""
Войти в приложение с помощью SMS.
"""
if phone is None:
phone = click.prompt("Номер телефона")
r = rocket.devices.register.post(data={"phone": phone})
r = handle_error(r)
id = r.json()["sms_verification"]["id"]
code = click.prompt("Введите код из SMS", type=int)
r = rocket.sms_verifications[id]["verify"].patch(data={"code": code})
r = handle_error(r)
j = r.json()
click.secho("Добро пожаловать, {}!".format(j["user"]["first_name"]), fg="green")
config.email = j["user"]["email"]
config.write() |
def append_delta_index(self, ts_data=None, data_delta=None, key=KEY_DATA):
"""Append columns with ∆T between rows to data."""
reasign = False
if data_delta is None:
if self.data is not None:
data_delta = self.data[key]
else:
self.printif('NO HAY DATOS PARA AÑADIR DELTA', 'error')
return None
reasign = True
data_delta['delta'] = data_delta.index.tz_convert('UTC')
if ts_data is not None:
data_delta['delta'] = (data_delta['delta'] - data_delta['delta'].shift(1)).fillna(ts_data)
data_delta['delta_T'] = data_delta['delta'].apply(lambda x: pd.Timedelta(x).seconds) / ts_data
else:
data_delta['delta'] = (data_delta['delta'] - data_delta['delta'].shift(1)).fillna(0)
if reasign:
self.data[key] = data_delta
else:
return data_delta | Append columns with ∆T between rows to data. | Below is the the instruction that describes the task:
### Input:
Append columns with ∆T between rows to data.
### Response:
def append_delta_index(self, ts_data=None, data_delta=None, key=KEY_DATA):
"""Append columns with ∆T between rows to data."""
reasign = False
if data_delta is None:
if self.data is not None:
data_delta = self.data[key]
else:
self.printif('NO HAY DATOS PARA AÑADIR DELTA', 'error')
return None
reasign = True
data_delta['delta'] = data_delta.index.tz_convert('UTC')
if ts_data is not None:
data_delta['delta'] = (data_delta['delta'] - data_delta['delta'].shift(1)).fillna(ts_data)
data_delta['delta_T'] = data_delta['delta'].apply(lambda x: pd.Timedelta(x).seconds) / ts_data
else:
data_delta['delta'] = (data_delta['delta'] - data_delta['delta'].shift(1)).fillna(0)
if reasign:
self.data[key] = data_delta
else:
return data_delta |
def get_files_map(self):
"""stub"""
files_map = {}
if self.has_files():
for label in self.my_osid_object._my_map['fileIds']:
asset_content = self._get_asset_content(
Id(self.my_osid_object._my_map['fileIds'][label]['assetId']),
Type(self.my_osid_object._my_map['fileIds'][label]['assetContentTypeId']))
try:
files_map[label] = asset_content._my_map['base64']
except KeyError:
files_map[label] = base64.b64encode(asset_content.get_data().read())
return files_map
raise IllegalState('no files_map') | stub | Below is the the instruction that describes the task:
### Input:
stub
### Response:
def get_files_map(self):
"""stub"""
files_map = {}
if self.has_files():
for label in self.my_osid_object._my_map['fileIds']:
asset_content = self._get_asset_content(
Id(self.my_osid_object._my_map['fileIds'][label]['assetId']),
Type(self.my_osid_object._my_map['fileIds'][label]['assetContentTypeId']))
try:
files_map[label] = asset_content._my_map['base64']
except KeyError:
files_map[label] = base64.b64encode(asset_content.get_data().read())
return files_map
raise IllegalState('no files_map') |
def bovy_ars(domain,isDomainFinite,abcissae,hx,hpx,nsamples=1,
hxparams=(),maxn=100):
"""bovy_ars: Implementation of the Adaptive-Rejection Sampling
algorithm by Gilks & Wild (1992): Adaptive Rejection Sampling
for Gibbs Sampling, Applied Statistics, 41, 337
Based on Wild & Gilks (1993), Algorithm AS 287: Adaptive Rejection
Sampling from Log-concave Density Functions, Applied Statistics, 42, 701
Input:
domain - [.,.] upper and lower limit to the domain
isDomainFinite - [.,.] is there a lower/upper limit to the domain?
abcissae - initial list of abcissae (must lie on either side of the peak in hx if the domain is unbounded
hx - function that evaluates h(x) = ln g(x)
hpx - function that evaluates hp(x) = d h(x) / d x
nsamples - (optional) number of desired samples (default=1)
hxparams - (optional) a tuple of parameters for h(x) and h'(x)
maxn - (optional) maximum number of updates to the hull (default=100)
Output:
list with nsamples of samples from exp(h(x))
External dependencies:
math
scipy
scipy.stats
History:
2009-05-21 - Written - Bovy (NYU)
"""
#First set-up the upper and lower hulls
hull=setup_hull(domain,isDomainFinite,abcissae,hx,hpx,hxparams)
#Then start sampling: call sampleone repeatedly
out= []
nupdates= 0
for ii in range(int(nsamples)):
thissample, hull, nupdates= sampleone(hull,hx,hpx,domain,isDomainFinite,maxn,nupdates,hxparams)
out.append(thissample)
return out | bovy_ars: Implementation of the Adaptive-Rejection Sampling
algorithm by Gilks & Wild (1992): Adaptive Rejection Sampling
for Gibbs Sampling, Applied Statistics, 41, 337
Based on Wild & Gilks (1993), Algorithm AS 287: Adaptive Rejection
Sampling from Log-concave Density Functions, Applied Statistics, 42, 701
Input:
domain - [.,.] upper and lower limit to the domain
isDomainFinite - [.,.] is there a lower/upper limit to the domain?
abcissae - initial list of abcissae (must lie on either side of the peak in hx if the domain is unbounded
hx - function that evaluates h(x) = ln g(x)
hpx - function that evaluates hp(x) = d h(x) / d x
nsamples - (optional) number of desired samples (default=1)
hxparams - (optional) a tuple of parameters for h(x) and h'(x)
maxn - (optional) maximum number of updates to the hull (default=100)
Output:
list with nsamples of samples from exp(h(x))
External dependencies:
math
scipy
scipy.stats
History:
2009-05-21 - Written - Bovy (NYU) | Below is the the instruction that describes the task:
### Input:
bovy_ars: Implementation of the Adaptive-Rejection Sampling
algorithm by Gilks & Wild (1992): Adaptive Rejection Sampling
for Gibbs Sampling, Applied Statistics, 41, 337
Based on Wild & Gilks (1993), Algorithm AS 287: Adaptive Rejection
Sampling from Log-concave Density Functions, Applied Statistics, 42, 701
Input:
domain - [.,.] upper and lower limit to the domain
isDomainFinite - [.,.] is there a lower/upper limit to the domain?
abcissae - initial list of abcissae (must lie on either side of the peak in hx if the domain is unbounded
hx - function that evaluates h(x) = ln g(x)
hpx - function that evaluates hp(x) = d h(x) / d x
nsamples - (optional) number of desired samples (default=1)
hxparams - (optional) a tuple of parameters for h(x) and h'(x)
maxn - (optional) maximum number of updates to the hull (default=100)
Output:
list with nsamples of samples from exp(h(x))
External dependencies:
math
scipy
scipy.stats
History:
2009-05-21 - Written - Bovy (NYU)
### Response:
def bovy_ars(domain,isDomainFinite,abcissae,hx,hpx,nsamples=1,
hxparams=(),maxn=100):
"""bovy_ars: Implementation of the Adaptive-Rejection Sampling
algorithm by Gilks & Wild (1992): Adaptive Rejection Sampling
for Gibbs Sampling, Applied Statistics, 41, 337
Based on Wild & Gilks (1993), Algorithm AS 287: Adaptive Rejection
Sampling from Log-concave Density Functions, Applied Statistics, 42, 701
Input:
domain - [.,.] upper and lower limit to the domain
isDomainFinite - [.,.] is there a lower/upper limit to the domain?
abcissae - initial list of abcissae (must lie on either side of the peak in hx if the domain is unbounded
hx - function that evaluates h(x) = ln g(x)
hpx - function that evaluates hp(x) = d h(x) / d x
nsamples - (optional) number of desired samples (default=1)
hxparams - (optional) a tuple of parameters for h(x) and h'(x)
maxn - (optional) maximum number of updates to the hull (default=100)
Output:
list with nsamples of samples from exp(h(x))
External dependencies:
math
scipy
scipy.stats
History:
2009-05-21 - Written - Bovy (NYU)
"""
#First set-up the upper and lower hulls
hull=setup_hull(domain,isDomainFinite,abcissae,hx,hpx,hxparams)
#Then start sampling: call sampleone repeatedly
out= []
nupdates= 0
for ii in range(int(nsamples)):
thissample, hull, nupdates= sampleone(hull,hx,hpx,domain,isDomainFinite,maxn,nupdates,hxparams)
out.append(thissample)
return out |
def valid(self, inplace=False, **kwargs):
"""
Return Series without null values.
.. deprecated:: 0.23.0
Use :meth:`Series.dropna` instead.
"""
warnings.warn("Method .valid will be removed in a future version. "
"Use .dropna instead.", FutureWarning, stacklevel=2)
return self.dropna(inplace=inplace, **kwargs) | Return Series without null values.
.. deprecated:: 0.23.0
Use :meth:`Series.dropna` instead. | Below is the the instruction that describes the task:
### Input:
Return Series without null values.
.. deprecated:: 0.23.0
Use :meth:`Series.dropna` instead.
### Response:
def valid(self, inplace=False, **kwargs):
"""
Return Series without null values.
.. deprecated:: 0.23.0
Use :meth:`Series.dropna` instead.
"""
warnings.warn("Method .valid will be removed in a future version. "
"Use .dropna instead.", FutureWarning, stacklevel=2)
return self.dropna(inplace=inplace, **kwargs) |
def get_configuration(filename):
""" Read configuration file
:type filename: str
:param filename: Path to the configuration file
"""
logger.debug('Reading configuration from {}'.format(filename))
conf = SafeConfigParser()
conf.read(filename)
if not conf:
logger.error('Configuration file {} not found'.format(filename))
sys.exit(1)
if not conf.has_section('general'):
logger.error('Missing [general] section in the configuration file')
sys.exit(1)
try:
config = {
'access-key-id': conf.get('general', 'access-key-id'),
'secret-access-key': conf.get('general', 'secret-access-key'),
'region': conf.get('general', 'region'),
}
except NoOptionError as err:
logger.error('Error in config file: {}'.format(err))
sys.exit(1)
return config | Read configuration file
:type filename: str
:param filename: Path to the configuration file | Below is the the instruction that describes the task:
### Input:
Read configuration file
:type filename: str
:param filename: Path to the configuration file
### Response:
def get_configuration(filename):
""" Read configuration file
:type filename: str
:param filename: Path to the configuration file
"""
logger.debug('Reading configuration from {}'.format(filename))
conf = SafeConfigParser()
conf.read(filename)
if not conf:
logger.error('Configuration file {} not found'.format(filename))
sys.exit(1)
if not conf.has_section('general'):
logger.error('Missing [general] section in the configuration file')
sys.exit(1)
try:
config = {
'access-key-id': conf.get('general', 'access-key-id'),
'secret-access-key': conf.get('general', 'secret-access-key'),
'region': conf.get('general', 'region'),
}
except NoOptionError as err:
logger.error('Error in config file: {}'.format(err))
sys.exit(1)
return config |
def parsed_forensic_reports_to_csv(reports):
"""
Converts one or more parsed forensic reports to flat CSV format, including
headers
Args:
reports: A parsed forensic report or list of parsed forensic reports
Returns:
str: Parsed forensic report data in flat CSV format, including headers
"""
fields = ["feedback_type", "user_agent", "version", "original_envelope_id",
"original_mail_from", "original_rcpt_to", "arrival_date",
"arrival_date_utc", "subject", "message_id",
"authentication_results", "dkim_domain", "source_ip_address",
"source_country", "source_reverse_dns", "source_base_domain",
"delivery_result", "auth_failure", "reported_domain",
"authentication_mechanisms", "sample_headers_only"]
if type(reports) == OrderedDict:
reports = [reports]
csv_file = StringIO()
csv_writer = DictWriter(csv_file, fieldnames=fields)
csv_writer.writeheader()
for report in reports:
row = report.copy()
row["source_ip_address"] = report["source"]["ip_address"]
row["source_reverse_dns"] = report["source"]["reverse_dns"]
row["source_base_domain"] = report["source"]["base_domain"]
row["source_country"] = report["source"]["country"]
del row["source"]
row["subject"] = report["parsed_sample"]["subject"]
row["auth_failure"] = ",".join(report["auth_failure"])
authentication_mechanisms = report["authentication_mechanisms"]
row["authentication_mechanisms"] = ",".join(
authentication_mechanisms)
del row["sample"]
del row["parsed_sample"]
csv_writer.writerow(row)
return csv_file.getvalue() | Converts one or more parsed forensic reports to flat CSV format, including
headers
Args:
reports: A parsed forensic report or list of parsed forensic reports
Returns:
str: Parsed forensic report data in flat CSV format, including headers | Below is the the instruction that describes the task:
### Input:
Converts one or more parsed forensic reports to flat CSV format, including
headers
Args:
reports: A parsed forensic report or list of parsed forensic reports
Returns:
str: Parsed forensic report data in flat CSV format, including headers
### Response:
def parsed_forensic_reports_to_csv(reports):
"""
Converts one or more parsed forensic reports to flat CSV format, including
headers
Args:
reports: A parsed forensic report or list of parsed forensic reports
Returns:
str: Parsed forensic report data in flat CSV format, including headers
"""
fields = ["feedback_type", "user_agent", "version", "original_envelope_id",
"original_mail_from", "original_rcpt_to", "arrival_date",
"arrival_date_utc", "subject", "message_id",
"authentication_results", "dkim_domain", "source_ip_address",
"source_country", "source_reverse_dns", "source_base_domain",
"delivery_result", "auth_failure", "reported_domain",
"authentication_mechanisms", "sample_headers_only"]
if type(reports) == OrderedDict:
reports = [reports]
csv_file = StringIO()
csv_writer = DictWriter(csv_file, fieldnames=fields)
csv_writer.writeheader()
for report in reports:
row = report.copy()
row["source_ip_address"] = report["source"]["ip_address"]
row["source_reverse_dns"] = report["source"]["reverse_dns"]
row["source_base_domain"] = report["source"]["base_domain"]
row["source_country"] = report["source"]["country"]
del row["source"]
row["subject"] = report["parsed_sample"]["subject"]
row["auth_failure"] = ",".join(report["auth_failure"])
authentication_mechanisms = report["authentication_mechanisms"]
row["authentication_mechanisms"] = ",".join(
authentication_mechanisms)
del row["sample"]
del row["parsed_sample"]
csv_writer.writerow(row)
return csv_file.getvalue() |
def get_field_keys(self, pattern=None):
"""
Builds a set of all field keys used in the pattern including nested fields.
:param pattern: The kmatch pattern to get field keys from or None to use self.pattern
:type pattern: list or None
:returns: A set object of all field keys used in the pattern
:rtype: set
"""
# Use own pattern or passed in argument for recursion
pattern = pattern or self.pattern
# Validate the pattern so we can make assumptions about the data
self._validate(pattern)
keys = set()
# Valid pattern length can only be 2 or 3
# With key filters, field key is second item just like 3 item patterns
if len(pattern) == 2 and pattern[0] not in self._KEY_FILTER_MAP:
if pattern[0] in ('&', '|', '^'):
# Pass each nested pattern to get_field_keys
for filter_item in pattern[1]:
keys = keys.union(self.get_field_keys(filter_item))
else:
# pattern[0] == '!'
keys = keys.union(self.get_field_keys(pattern[1]))
else:
# Pattern length is 3
keys.add(pattern[1])
return keys | Builds a set of all field keys used in the pattern including nested fields.
:param pattern: The kmatch pattern to get field keys from or None to use self.pattern
:type pattern: list or None
:returns: A set object of all field keys used in the pattern
:rtype: set | Below is the the instruction that describes the task:
### Input:
Builds a set of all field keys used in the pattern including nested fields.
:param pattern: The kmatch pattern to get field keys from or None to use self.pattern
:type pattern: list or None
:returns: A set object of all field keys used in the pattern
:rtype: set
### Response:
def get_field_keys(self, pattern=None):
"""
Builds a set of all field keys used in the pattern including nested fields.
:param pattern: The kmatch pattern to get field keys from or None to use self.pattern
:type pattern: list or None
:returns: A set object of all field keys used in the pattern
:rtype: set
"""
# Use own pattern or passed in argument for recursion
pattern = pattern or self.pattern
# Validate the pattern so we can make assumptions about the data
self._validate(pattern)
keys = set()
# Valid pattern length can only be 2 or 3
# With key filters, field key is second item just like 3 item patterns
if len(pattern) == 2 and pattern[0] not in self._KEY_FILTER_MAP:
if pattern[0] in ('&', '|', '^'):
# Pass each nested pattern to get_field_keys
for filter_item in pattern[1]:
keys = keys.union(self.get_field_keys(filter_item))
else:
# pattern[0] == '!'
keys = keys.union(self.get_field_keys(pattern[1]))
else:
# Pattern length is 3
keys.add(pattern[1])
return keys |
def split(input_file, file_1, file_2, no_in_first_file):
'''
Split a geojson in two separate files.
Args:
input_file (str): Input filename.
file_1 (str): Output file name 1.
file_2 (str): Output file name 2.
no_features (int): Number of features in input_file to go to file_1.
output_file (str): Output file name.
'''
# get feature collection
with open(input_file) as f:
feat_collection = geojson.load(f)
features = feat_collection['features']
feat_collection_1 = geojson.FeatureCollection(features[0:no_in_first_file])
feat_collection_2 = geojson.FeatureCollection(features[no_in_first_file:])
with open(file_1, 'w') as f:
geojson.dump(feat_collection_1, f)
with open(file_2, 'w') as f:
geojson.dump(feat_collection_2, f) | Split a geojson in two separate files.
Args:
input_file (str): Input filename.
file_1 (str): Output file name 1.
file_2 (str): Output file name 2.
no_features (int): Number of features in input_file to go to file_1.
output_file (str): Output file name. | Below is the the instruction that describes the task:
### Input:
Split a geojson in two separate files.
Args:
input_file (str): Input filename.
file_1 (str): Output file name 1.
file_2 (str): Output file name 2.
no_features (int): Number of features in input_file to go to file_1.
output_file (str): Output file name.
### Response:
def split(input_file, file_1, file_2, no_in_first_file):
'''
Split a geojson in two separate files.
Args:
input_file (str): Input filename.
file_1 (str): Output file name 1.
file_2 (str): Output file name 2.
no_features (int): Number of features in input_file to go to file_1.
output_file (str): Output file name.
'''
# get feature collection
with open(input_file) as f:
feat_collection = geojson.load(f)
features = feat_collection['features']
feat_collection_1 = geojson.FeatureCollection(features[0:no_in_first_file])
feat_collection_2 = geojson.FeatureCollection(features[no_in_first_file:])
with open(file_1, 'w') as f:
geojson.dump(feat_collection_1, f)
with open(file_2, 'w') as f:
geojson.dump(feat_collection_2, f) |
def list_song_standby(self, song, onlyone=True):
"""try to list all valid standby
Search a song in all providers. The typical usage scenario is when a
song is not available in one provider, we can try to acquire it from other
providers.
Standby choosing strategy: search from all providers, select two song from each provide.
Those standby song should have same title and artist name.
TODO: maybe we should read a strategy from user config, user
knows which provider owns copyright about an artist.
FIXME: this method will send several network requests,
which may block the caller.
:param song: song model
:param exclude: exclude providers list
:return: list of songs (maximum count: 2)
"""
def get_score(standby):
score = 1
# 分数占比关系:
# title + album > artist
# artist > title > album
if song.artists_name != standby.artists_name:
score -= 0.4
if song.title != standby.title:
score -= 0.3
if song.album_name != standby.album_name:
score -= 0.2
return score
valid_sources = [p.identifier for p in self.list() if p.identifier != song.source]
q = '{} {}'.format(song.title, song.artists_name)
standby_list = []
for result in self.search(q, source_in=valid_sources, limit=10):
for standby in result.songs[:2]:
standby_list.append(standby)
standby_list = sorted(
standby_list,
key=lambda standby: get_score(standby), reverse=True
)
valid_standby_list = []
for standby in standby_list:
if standby.url:
valid_standby_list.append(standby)
if get_score(standby) == 1 or onlyone:
break
if len(valid_standby_list) >= 2:
break
return valid_standby_list | try to list all valid standby
Search a song in all providers. The typical usage scenario is when a
song is not available in one provider, we can try to acquire it from other
providers.
Standby choosing strategy: search from all providers, select two song from each provide.
Those standby song should have same title and artist name.
TODO: maybe we should read a strategy from user config, user
knows which provider owns copyright about an artist.
FIXME: this method will send several network requests,
which may block the caller.
:param song: song model
:param exclude: exclude providers list
:return: list of songs (maximum count: 2) | Below is the the instruction that describes the task:
### Input:
try to list all valid standby
Search a song in all providers. The typical usage scenario is when a
song is not available in one provider, we can try to acquire it from other
providers.
Standby choosing strategy: search from all providers, select two song from each provide.
Those standby song should have same title and artist name.
TODO: maybe we should read a strategy from user config, user
knows which provider owns copyright about an artist.
FIXME: this method will send several network requests,
which may block the caller.
:param song: song model
:param exclude: exclude providers list
:return: list of songs (maximum count: 2)
### Response:
def list_song_standby(self, song, onlyone=True):
"""try to list all valid standby
Search a song in all providers. The typical usage scenario is when a
song is not available in one provider, we can try to acquire it from other
providers.
Standby choosing strategy: search from all providers, select two song from each provide.
Those standby song should have same title and artist name.
TODO: maybe we should read a strategy from user config, user
knows which provider owns copyright about an artist.
FIXME: this method will send several network requests,
which may block the caller.
:param song: song model
:param exclude: exclude providers list
:return: list of songs (maximum count: 2)
"""
def get_score(standby):
score = 1
# 分数占比关系:
# title + album > artist
# artist > title > album
if song.artists_name != standby.artists_name:
score -= 0.4
if song.title != standby.title:
score -= 0.3
if song.album_name != standby.album_name:
score -= 0.2
return score
valid_sources = [p.identifier for p in self.list() if p.identifier != song.source]
q = '{} {}'.format(song.title, song.artists_name)
standby_list = []
for result in self.search(q, source_in=valid_sources, limit=10):
for standby in result.songs[:2]:
standby_list.append(standby)
standby_list = sorted(
standby_list,
key=lambda standby: get_score(standby), reverse=True
)
valid_standby_list = []
for standby in standby_list:
if standby.url:
valid_standby_list.append(standby)
if get_score(standby) == 1 or onlyone:
break
if len(valid_standby_list) >= 2:
break
return valid_standby_list |
def _compute_secondary(self):
"""Compute secondary axis min max and label positions"""
# secondary y axis support
if self.secondary_series and self._y_labels:
y_pos = list(zip(*self._y_labels))[1]
if self.include_x_axis:
ymin = min(self._secondary_min, 0)
ymax = max(self._secondary_max, 0)
else:
ymin = self._secondary_min
ymax = self._secondary_max
steps = len(y_pos)
left_range = abs(y_pos[-1] - y_pos[0])
right_range = abs(ymax - ymin) or 1
scale = right_range / ((steps - 1) or 1)
self._y_2nd_labels = [(self._y_format(ymin + i * scale), pos)
for i, pos in enumerate(y_pos)]
self._scale = left_range / right_range
self._scale_diff = y_pos[0]
self._scale_min_2nd = ymin | Compute secondary axis min max and label positions | Below is the the instruction that describes the task:
### Input:
Compute secondary axis min max and label positions
### Response:
def _compute_secondary(self):
"""Compute secondary axis min max and label positions"""
# secondary y axis support
if self.secondary_series and self._y_labels:
y_pos = list(zip(*self._y_labels))[1]
if self.include_x_axis:
ymin = min(self._secondary_min, 0)
ymax = max(self._secondary_max, 0)
else:
ymin = self._secondary_min
ymax = self._secondary_max
steps = len(y_pos)
left_range = abs(y_pos[-1] - y_pos[0])
right_range = abs(ymax - ymin) or 1
scale = right_range / ((steps - 1) or 1)
self._y_2nd_labels = [(self._y_format(ymin + i * scale), pos)
for i, pos in enumerate(y_pos)]
self._scale = left_range / right_range
self._scale_diff = y_pos[0]
self._scale_min_2nd = ymin |
def as_pseudo(cls, obj):
"""
Convert obj into a pseudo. Accepts:
* Pseudo object.
* string defining a valid path.
"""
return obj if isinstance(obj, cls) else cls.from_file(obj) | Convert obj into a pseudo. Accepts:
* Pseudo object.
* string defining a valid path. | Below is the the instruction that describes the task:
### Input:
Convert obj into a pseudo. Accepts:
* Pseudo object.
* string defining a valid path.
### Response:
def as_pseudo(cls, obj):
"""
Convert obj into a pseudo. Accepts:
* Pseudo object.
* string defining a valid path.
"""
return obj if isinstance(obj, cls) else cls.from_file(obj) |
def search_registered_query_deleted_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_deleted_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_registered_query_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_registered_query_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data | Lists the values of a specific facet over the customer's deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_deleted_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Lists the values of a specific facet over the customer's deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_deleted_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
### Response:
def search_registered_query_deleted_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_deleted_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_registered_query_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_registered_query_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data |
def send(self, message, envelope_from=None):
"""Verifies and sends message.
:param message: Message instance.
:param envelope_from: Email address to be used in MAIL FROM command.
"""
assert message.send_to, "No recipients have been added"
assert message.sender, (
"The message does not specify a sender and a default sender "
"has not been configured")
if message.has_bad_headers():
raise BadHeaderError
if message.date is None:
message.date = time.time()
ret = None
if self.host:
ret = self.host.sendmail(
sanitize_address(envelope_from or message.sender),
list(sanitize_addresses(message.send_to)),
message.as_bytes() if PY3 else message.as_string(),
message.mail_options,
message.rcpt_options
)
email_dispatched.send(message, app=current_app._get_current_object())
self.num_emails += 1
if self.num_emails == self.mail.max_emails:
self.num_emails = 0
if self.host:
self.host.quit()
self.host = self.configure_host()
return ret | Verifies and sends message.
:param message: Message instance.
:param envelope_from: Email address to be used in MAIL FROM command. | Below is the the instruction that describes the task:
### Input:
Verifies and sends message.
:param message: Message instance.
:param envelope_from: Email address to be used in MAIL FROM command.
### Response:
def send(self, message, envelope_from=None):
"""Verifies and sends message.
:param message: Message instance.
:param envelope_from: Email address to be used in MAIL FROM command.
"""
assert message.send_to, "No recipients have been added"
assert message.sender, (
"The message does not specify a sender and a default sender "
"has not been configured")
if message.has_bad_headers():
raise BadHeaderError
if message.date is None:
message.date = time.time()
ret = None
if self.host:
ret = self.host.sendmail(
sanitize_address(envelope_from or message.sender),
list(sanitize_addresses(message.send_to)),
message.as_bytes() if PY3 else message.as_string(),
message.mail_options,
message.rcpt_options
)
email_dispatched.send(message, app=current_app._get_current_object())
self.num_emails += 1
if self.num_emails == self.mail.max_emails:
self.num_emails = 0
if self.host:
self.host.quit()
self.host = self.configure_host()
return ret |
def convert_from_file(cls, input_file=None, output_file=None, output_format='json', indent=2, compact=False):
"""Convert to json, properties or yaml
:param input_file: input file, if not specified stdin
:param output_file: output file, if not specified stdout
:param output_format: json, properties or yaml
:return: json, properties or yaml string representation
"""
if input_file is None:
content = sys.stdin.read()
config = ConfigFactory.parse_string(content)
else:
config = ConfigFactory.parse_file(input_file)
res = cls.convert(config, output_format, indent, compact)
if output_file is None:
print(res)
else:
with open(output_file, "w") as fd:
fd.write(res) | Convert to json, properties or yaml
:param input_file: input file, if not specified stdin
:param output_file: output file, if not specified stdout
:param output_format: json, properties or yaml
:return: json, properties or yaml string representation | Below is the the instruction that describes the task:
### Input:
Convert to json, properties or yaml
:param input_file: input file, if not specified stdin
:param output_file: output file, if not specified stdout
:param output_format: json, properties or yaml
:return: json, properties or yaml string representation
### Response:
def convert_from_file(cls, input_file=None, output_file=None, output_format='json', indent=2, compact=False):
"""Convert to json, properties or yaml
:param input_file: input file, if not specified stdin
:param output_file: output file, if not specified stdout
:param output_format: json, properties or yaml
:return: json, properties or yaml string representation
"""
if input_file is None:
content = sys.stdin.read()
config = ConfigFactory.parse_string(content)
else:
config = ConfigFactory.parse_file(input_file)
res = cls.convert(config, output_format, indent, compact)
if output_file is None:
print(res)
else:
with open(output_file, "w") as fd:
fd.write(res) |
def _time_show(self):
"""Show the time marker window"""
if not self._time_visible:
self._time_visible = True
self._time_window = tk.Toplevel(self)
self._time_window.attributes("-topmost", True)
self._time_window.overrideredirect(True)
self._time_label = ttk.Label(self._time_window)
self._time_label.grid()
self._time_window.lift()
x, y = self.master.winfo_pointerxy()
geometry = "{0}x{1}+{2}+{3}".format(
self._time_label.winfo_width(),
self._time_label.winfo_height(),
x - 15,
self._canvas_ticks.winfo_rooty() - 10)
self._time_window.wm_geometry(geometry)
self._time_label.config(text=TimeLine.get_time_string(self.time, self._unit)) | Show the time marker window | Below is the the instruction that describes the task:
### Input:
Show the time marker window
### Response:
def _time_show(self):
"""Show the time marker window"""
if not self._time_visible:
self._time_visible = True
self._time_window = tk.Toplevel(self)
self._time_window.attributes("-topmost", True)
self._time_window.overrideredirect(True)
self._time_label = ttk.Label(self._time_window)
self._time_label.grid()
self._time_window.lift()
x, y = self.master.winfo_pointerxy()
geometry = "{0}x{1}+{2}+{3}".format(
self._time_label.winfo_width(),
self._time_label.winfo_height(),
x - 15,
self._canvas_ticks.winfo_rooty() - 10)
self._time_window.wm_geometry(geometry)
self._time_label.config(text=TimeLine.get_time_string(self.time, self._unit)) |
def open(self):
"""Turn the device ON."""
open_command = StandardSend(self._address,
COMMAND_LIGHT_ON_0X11_NONE, cmd2=0xff)
self._send_method(open_command, self._open_message_received) | Turn the device ON. | Below is the the instruction that describes the task:
### Input:
Turn the device ON.
### Response:
def open(self):
"""Turn the device ON."""
open_command = StandardSend(self._address,
COMMAND_LIGHT_ON_0X11_NONE, cmd2=0xff)
self._send_method(open_command, self._open_message_received) |
def load_module(self, name):
"""Load a module from a file.
"""
# Implementation inspired from pytest.rewrite and importlib
# If there is an existing module object named 'name' in
# sys.modules, the loader must use that existing module. (Otherwise,
# the reload() builtin will not work correctly.)
if name in sys.modules:
return sys.modules[name]
try:
# we have already done the search, an gone through package layers
# so we directly feed the latest module and correct path
# to reuse the logic for choosing the proper loading behavior
# TODO : double check maybe we do not need the loop here, already handled by finders in dir hierarchy
# TODO : use exec_module (recent, more tested API) from here
for name_idx, name_part in enumerate(name.split('.')):
pkgname = ".".join(name.split('.')[:name_idx+1])
if pkgname not in sys.modules:
if '.' in pkgname:
# parent has to be in sys.modules. make sure it is a package, else fails
if '__path__' in vars(sys.modules[pkgname.rpartition('.')[0]]):
path = sys.modules[pkgname.rpartition('.')[0]].__path__
else:
raise ImportError("{0} is not a package (no __path__ detected)".format(pkgname.rpartition('.')[0]))
else: # using __file__ instead. should always be there.
path = os.path.dirname(sys.modules[pkgname].__file__)if pkgname in sys.modules else None
try:
file, pathname, description = imp.find_module(pkgname.rpartition('.')[-1], path)
sys.modules[pkgname] = imp.load_module(pkgname, file, pathname, description)
finally:
if file:
file.close()
except:
# dont pollute the interpreter environment if we dont know what we are doing
if name in sys.modules:
del sys.modules[name]
raise
return sys.modules[name] | Load a module from a file. | Below is the the instruction that describes the task:
### Input:
Load a module from a file.
### Response:
def load_module(self, name):
"""Load a module from a file.
"""
# Implementation inspired from pytest.rewrite and importlib
# If there is an existing module object named 'name' in
# sys.modules, the loader must use that existing module. (Otherwise,
# the reload() builtin will not work correctly.)
if name in sys.modules:
return sys.modules[name]
try:
# we have already done the search, an gone through package layers
# so we directly feed the latest module and correct path
# to reuse the logic for choosing the proper loading behavior
# TODO : double check maybe we do not need the loop here, already handled by finders in dir hierarchy
# TODO : use exec_module (recent, more tested API) from here
for name_idx, name_part in enumerate(name.split('.')):
pkgname = ".".join(name.split('.')[:name_idx+1])
if pkgname not in sys.modules:
if '.' in pkgname:
# parent has to be in sys.modules. make sure it is a package, else fails
if '__path__' in vars(sys.modules[pkgname.rpartition('.')[0]]):
path = sys.modules[pkgname.rpartition('.')[0]].__path__
else:
raise ImportError("{0} is not a package (no __path__ detected)".format(pkgname.rpartition('.')[0]))
else: # using __file__ instead. should always be there.
path = os.path.dirname(sys.modules[pkgname].__file__)if pkgname in sys.modules else None
try:
file, pathname, description = imp.find_module(pkgname.rpartition('.')[-1], path)
sys.modules[pkgname] = imp.load_module(pkgname, file, pathname, description)
finally:
if file:
file.close()
except:
# dont pollute the interpreter environment if we dont know what we are doing
if name in sys.modules:
del sys.modules[name]
raise
return sys.modules[name] |
def validate(style, value, vectorized=True):
"""
Validates a style and associated value.
Arguments
---------
style: str
The style to validate (e.g. 'color', 'size' or 'marker')
value:
The style value to validate
vectorized: bool
Whether validator should allow vectorized setting
Returns
-------
valid: boolean or None
If validation is supported returns boolean, otherwise None
"""
validator = get_validator(style)
if validator is None:
return None
if isinstance(value, (np.ndarray, list)) and vectorized:
return all(validator(v) for v in value)
try:
valid = validator(value)
return False if valid == False else True
except:
return False | Validates a style and associated value.
Arguments
---------
style: str
The style to validate (e.g. 'color', 'size' or 'marker')
value:
The style value to validate
vectorized: bool
Whether validator should allow vectorized setting
Returns
-------
valid: boolean or None
If validation is supported returns boolean, otherwise None | Below is the the instruction that describes the task:
### Input:
Validates a style and associated value.
Arguments
---------
style: str
The style to validate (e.g. 'color', 'size' or 'marker')
value:
The style value to validate
vectorized: bool
Whether validator should allow vectorized setting
Returns
-------
valid: boolean or None
If validation is supported returns boolean, otherwise None
### Response:
def validate(style, value, vectorized=True):
"""
Validates a style and associated value.
Arguments
---------
style: str
The style to validate (e.g. 'color', 'size' or 'marker')
value:
The style value to validate
vectorized: bool
Whether validator should allow vectorized setting
Returns
-------
valid: boolean or None
If validation is supported returns boolean, otherwise None
"""
validator = get_validator(style)
if validator is None:
return None
if isinstance(value, (np.ndarray, list)) and vectorized:
return all(validator(v) for v in value)
try:
valid = validator(value)
return False if valid == False else True
except:
return False |
def num_gpus(): # type: () -> int
"""The number of gpus available in the current container.
Returns:
int: number of gpus available in the current container.
"""
try:
cmd = shlex.split('nvidia-smi --list-gpus')
output = subprocess.check_output(cmd).decode('utf-8')
return sum([1 for x in output.split('\n') if x.startswith('GPU ')])
except (OSError, subprocess.CalledProcessError):
logger.info('No GPUs detected (normal if no gpus installed)')
return 0 | The number of gpus available in the current container.
Returns:
int: number of gpus available in the current container. | Below is the the instruction that describes the task:
### Input:
The number of gpus available in the current container.
Returns:
int: number of gpus available in the current container.
### Response:
def num_gpus(): # type: () -> int
"""The number of gpus available in the current container.
Returns:
int: number of gpus available in the current container.
"""
try:
cmd = shlex.split('nvidia-smi --list-gpus')
output = subprocess.check_output(cmd).decode('utf-8')
return sum([1 for x in output.split('\n') if x.startswith('GPU ')])
except (OSError, subprocess.CalledProcessError):
logger.info('No GPUs detected (normal if no gpus installed)')
return 0 |
def reload(filename=None,
url=r"https://raw.githubusercontent.com/googlei18n/emoji4unicode/master/data/emoji4unicode.xml",
loader_class=None):
u"""reload google's `emoji4unicode` project's xml file. must call this method first to use `e4u` library."""
if loader_class is None:
loader_class = loader.Loader
global _loader
_loader = loader_class()
_loader.load(filename, url) | u"""reload google's `emoji4unicode` project's xml file. must call this method first to use `e4u` library. | Below is the the instruction that describes the task:
### Input:
u"""reload google's `emoji4unicode` project's xml file. must call this method first to use `e4u` library.
### Response:
def reload(filename=None,
url=r"https://raw.githubusercontent.com/googlei18n/emoji4unicode/master/data/emoji4unicode.xml",
loader_class=None):
u"""reload google's `emoji4unicode` project's xml file. must call this method first to use `e4u` library."""
if loader_class is None:
loader_class = loader.Loader
global _loader
_loader = loader_class()
_loader.load(filename, url) |
def get_homepath(self, ignore_session=False, force_cookieless=False):
"""
:param ignore_session: Ignore the cookieless session_id that should be put in the URL
:param force_cookieless: Force the cookieless session; the link will include the session_creator if needed.
"""
if not ignore_session and self._session.get("session_id") is not None and self._session.get("cookieless", False):
return web.ctx.homepath + "/@" + self._session.get("session_id") + "@"
elif not ignore_session and force_cookieless:
return web.ctx.homepath + "/@@"
else:
return web.ctx.homepath | :param ignore_session: Ignore the cookieless session_id that should be put in the URL
:param force_cookieless: Force the cookieless session; the link will include the session_creator if needed. | Below is the the instruction that describes the task:
### Input:
:param ignore_session: Ignore the cookieless session_id that should be put in the URL
:param force_cookieless: Force the cookieless session; the link will include the session_creator if needed.
### Response:
def get_homepath(self, ignore_session=False, force_cookieless=False):
"""
:param ignore_session: Ignore the cookieless session_id that should be put in the URL
:param force_cookieless: Force the cookieless session; the link will include the session_creator if needed.
"""
if not ignore_session and self._session.get("session_id") is not None and self._session.get("cookieless", False):
return web.ctx.homepath + "/@" + self._session.get("session_id") + "@"
elif not ignore_session and force_cookieless:
return web.ctx.homepath + "/@@"
else:
return web.ctx.homepath |
def getEndTag(self):
'''
getEndTag - returns the end tag representation as HTML string
@return - String of end tag
'''
# If this is a self-closing tag, we have no end tag (opens and closes in the start)
if self.isSelfClosing is True:
return ''
tagName = self.tagName
# Do not add any indentation to the end of preformatted tags.
if self._indent and tagName in PREFORMATTED_TAGS:
return "</%s>" %(tagName, )
# Otherwise, indent the end of this tag
return "%s</%s>" %(self._indent, tagName) | getEndTag - returns the end tag representation as HTML string
@return - String of end tag | Below is the the instruction that describes the task:
### Input:
getEndTag - returns the end tag representation as HTML string
@return - String of end tag
### Response:
def getEndTag(self):
    '''
    getEndTag - returns the end tag representation as HTML string

    @return - String of end tag
    '''
    # A self-closing tag opens and closes in its start tag, so there is
    # no separate end tag to emit.
    if self.isSelfClosing is True:
        return ''

    name = self.tagName

    # Preformatted content must not gain indentation before the closing tag.
    if self._indent and name in PREFORMATTED_TAGS:
        return "</{0}>".format(name)

    # Everything else gets the element's indentation before the end tag.
    return "{0}</{1}>".format(self._indent, name)
def getStatus(jobStoreName):
"""
Determine the status of a workflow.
If the jobstore does not exist, this returns 'QUEUED', assuming it has not been created yet.
Checks for the existence of files created in the toil.Leader.run(). In toil.Leader.run(), if a workflow completes
with failed jobs, 'failed.log' is created, otherwise 'succeeded.log' is written. If neither of these exist,
the leader is still running jobs.
:return: A string indicating the status of the workflow. ['COMPLETED', 'RUNNING', 'ERROR', 'QUEUED']
:rtype: str
"""
try:
jobstore = Toil.resumeJobStore(jobStoreName)
except NoSuchJobStoreException:
return 'QUEUED'
except NoSuchFileException:
return 'QUEUED'
try:
with jobstore.readSharedFileStream('succeeded.log') as successful:
pass
return 'COMPLETED'
except NoSuchFileException:
try:
with jobstore.readSharedFileStream('failed.log') as failed:
pass
return 'ERROR'
except NoSuchFileException:
pass
return 'RUNNING' | Determine the status of a workflow.
If the jobstore does not exist, this returns 'QUEUED', assuming it has not been created yet.
Checks for the existence of files created in the toil.Leader.run(). In toil.Leader.run(), if a workflow completes
with failed jobs, 'failed.log' is created, otherwise 'succeeded.log' is written. If neither of these exist,
the leader is still running jobs.
:return: A string indicating the status of the workflow. ['COMPLETED', 'RUNNING', 'ERROR', 'QUEUED']
:rtype: str | Below is the instruction that describes the task:
### Input:
Determine the status of a workflow.
If the jobstore does not exist, this returns 'QUEUED', assuming it has not been created yet.
Checks for the existence of files created in the toil.Leader.run(). In toil.Leader.run(), if a workflow completes
with failed jobs, 'failed.log' is created, otherwise 'succeeded.log' is written. If neither of these exist,
the leader is still running jobs.
:return: A string indicating the status of the workflow. ['COMPLETED', 'RUNNING', 'ERROR', 'QUEUED']
:rtype: str
### Response:
def getStatus(jobStoreName):
    """
    Determine the status of a workflow.

    If the jobstore does not exist, this returns 'QUEUED', assuming it has not been created yet.

    Checks for the existence of files created in the toil.Leader.run(). In toil.Leader.run(), if a workflow completes
    with failed jobs, 'failed.log' is created, otherwise 'succeeded.log' is written. If neither of these exist,
    the leader is still running jobs.

    :return: A string indicating the status of the workflow. ['COMPLETED', 'RUNNING', 'ERROR', 'QUEUED']
    :rtype: str
    """
    try:
        jobstore = Toil.resumeJobStore(jobStoreName)
    except (NoSuchJobStoreException, NoSuchFileException):
        # The job store has not been created yet, so the workflow has not started.
        return 'QUEUED'

    # toil.Leader.run() writes exactly one of these marker files on completion;
    # check success first, matching the original precedence.
    for markerFile, status in (('succeeded.log', 'COMPLETED'), ('failed.log', 'ERROR')):
        try:
            with jobstore.readSharedFileStream(markerFile):
                pass
        except NoSuchFileException:
            continue
        return status

    # Neither marker exists: the leader is still running jobs.
    return 'RUNNING'
def import_model(self, source):
"""Import and return model instance."""
model = super(NonstrictImporter, self).import_model(source)
sbml.convert_sbml_model(model)
return model | Import and return model instance. | Below is the instruction that describes the task:
### Input:
Import and return model instance.
### Response:
def import_model(self, source):
    """Import and return model instance."""
    # Delegate parsing to the strict importer, then relax the result in place.
    imported = super(NonstrictImporter, self).import_model(source)
    sbml.convert_sbml_model(imported)
    return imported
def triplify(self, data, parent=None):
""" Recursively generate statements from the data supplied. """
if data is None:
return
if self.is_object:
for res in self._triplify_object(data, parent):
yield res
elif self.is_array:
for item in data:
for res in self.items.triplify(item, parent):
yield res
else:
# TODO: figure out if I ever want to check for reverse here.
type_name = typecast.name(data)
obj = typecast.stringify(type_name, data)
if obj is not None:
obj = obj.strip()
yield (parent, self.predicate, obj, type_name) | Recursively generate statements from the data supplied. | Below is the instruction that describes the task:
### Input:
Recursively generate statements from the data supplied.
### Response:
def triplify(self, data, parent=None):
    """ Recursively generate statements from the data supplied. """
    if data is None:
        # Nothing to emit for absent data.
        return

    if self.is_object:
        # Objects know how to emit their own statements.
        for statement in self._triplify_object(data, parent):
            yield statement
        return

    if self.is_array:
        # Arrays delegate each element to the item schema.
        for element in data:
            for statement in self.items.triplify(element, parent):
                yield statement
        return

    # Literal value: cast it to a typed string representation.
    # TODO: figure out if I ever want to check for reverse here.
    type_name = typecast.name(data)
    literal = typecast.stringify(type_name, data)
    if literal is not None:
        literal = literal.strip()
    yield (parent, self.predicate, literal, type_name)
def runlogs_policy(log_group_ref):
"""Policy needed for Empire -> Cloudwatch logs to record run output."""
p = Policy(
Statement=[
Statement(
Effect=Allow,
Resource=[
Join('', [
'arn:aws:logs:*:*:log-group:',
log_group_ref,
':log-stream:*'])],
Action=[
logs.CreateLogStream,
logs.PutLogEvents,
])])
return p | Policy needed for Empire -> Cloudwatch logs to record run output. | Below is the instruction that describes the task:
### Input:
Policy needed for Empire -> Cloudwatch logs to record run output.
### Response:
def runlogs_policy(log_group_ref):
    """Policy needed for Empire -> Cloudwatch logs to record run output."""
    # Restrict the grant to log streams inside the given log group.
    log_stream_arn = Join('', [
        'arn:aws:logs:*:*:log-group:',
        log_group_ref,
        ':log-stream:*'])
    statement = Statement(
        Effect=Allow,
        Resource=[log_stream_arn],
        Action=[
            logs.CreateLogStream,
            logs.PutLogEvents,
        ])
    return Policy(Statement=[statement])
def get_tagged_tracks(self, tag, limit=None, cacheable=True):
"""Returns the tracks tagged by a user."""
params = self._get_params()
params["tag"] = tag
params["taggingtype"] = "track"
if limit:
params["limit"] = limit
doc = self._request(self.ws_prefix + ".getpersonaltags", cacheable, params)
return _extract_tracks(doc, self.network) | Returns the tracks tagged by a user. | Below is the instruction that describes the task:
### Input:
Returns the tracks tagged by a user.
### Response:
def get_tagged_tracks(self, tag, limit=None, cacheable=True):
    """Returns the tracks tagged by a user."""
    # Build the personal-tags query restricted to track taggings.
    params = self._get_params()
    params.update({"tag": tag, "taggingtype": "track"})
    if limit:
        params["limit"] = limit
    method = self.ws_prefix + ".getpersonaltags"
    response_doc = self._request(method, cacheable, params)
    return _extract_tracks(response_doc, self.network)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.