code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def left_stop(self):
"""allows left motor to coast to a stop"""
self.board.digital_write(L_CTRL_1, 0)
self.board.digital_write(L_CTRL_2, 0)
self.board.analog_write(PWM_L, 0) | allows left motor to coast to a stop | Below is the the instruction that describes the task:
### Input:
allows left motor to coast to a stop
### Response:
def left_stop(self):
"""allows left motor to coast to a stop"""
self.board.digital_write(L_CTRL_1, 0)
self.board.digital_write(L_CTRL_2, 0)
self.board.analog_write(PWM_L, 0) |
def SInt64(value, min_value=None, max_value=None, encoder=ENC_INT_DEFAULT, fuzzable=True, name=None, full_range=False):
'''Signed 64-bit field'''
return BitField(value, 64, signed=True, min_value=min_value, max_value=max_value, encoder=encoder, fuzzable=fuzzable, name=name, full_range=full_range) | Signed 64-bit field | Below is the the instruction that describes the task:
### Input:
Signed 64-bit field
### Response:
def SInt64(value, min_value=None, max_value=None, encoder=ENC_INT_DEFAULT, fuzzable=True, name=None, full_range=False):
'''Signed 64-bit field'''
return BitField(value, 64, signed=True, min_value=min_value, max_value=max_value, encoder=encoder, fuzzable=fuzzable, name=name, full_range=full_range) |
def get_snapshot_version_string(nexus_url, repository, group_id, artifact_id, packaging, version, classifier=None, username=None, password=None):
'''
Gets the specific version string of a snapshot of the desired version of the artifact
nexus_url
URL of nexus instance
repository
Snapshot repository in nexus to retrieve artifact from, for example: libs-snapshots
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
version
Version of the artifact
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
nexus username. Optional parameter.
password
nexus password. Optional parameter.
'''
log.debug('======================== MODULE FUNCTION: nexus.get_snapshot_version_string(nexus_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, version=%s, classifier=%s)',
nexus_url, repository, group_id, artifact_id, packaging, version, classifier)
headers = {}
if username and password:
headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', ''))
return _get_snapshot_url(nexus_url=nexus_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, packaging=packaging, classifier=classifier, just_get_version_string=True) | Gets the specific version string of a snapshot of the desired version of the artifact
nexus_url
URL of nexus instance
repository
Snapshot repository in nexus to retrieve artifact from, for example: libs-snapshots
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
version
Version of the artifact
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
nexus username. Optional parameter.
password
nexus password. Optional parameter. | Below is the the instruction that describes the task:
### Input:
Gets the specific version string of a snapshot of the desired version of the artifact
nexus_url
URL of nexus instance
repository
Snapshot repository in nexus to retrieve artifact from, for example: libs-snapshots
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
version
Version of the artifact
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
nexus username. Optional parameter.
password
nexus password. Optional parameter.
### Response:
def get_snapshot_version_string(nexus_url, repository, group_id, artifact_id, packaging, version, classifier=None, username=None, password=None):
'''
Gets the specific version string of a snapshot of the desired version of the artifact
nexus_url
URL of nexus instance
repository
Snapshot repository in nexus to retrieve artifact from, for example: libs-snapshots
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
version
Version of the artifact
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
nexus username. Optional parameter.
password
nexus password. Optional parameter.
'''
log.debug('======================== MODULE FUNCTION: nexus.get_snapshot_version_string(nexus_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, version=%s, classifier=%s)',
nexus_url, repository, group_id, artifact_id, packaging, version, classifier)
headers = {}
if username and password:
headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', ''))
return _get_snapshot_url(nexus_url=nexus_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, packaging=packaging, classifier=classifier, just_get_version_string=True) |
def list_nodes_min(kwargs=None, call=None):
'''
Return a list of all VMs and templates that are on the specified provider, with no details
CLI Example:
.. code-block:: bash
salt-cloud -f list_nodes_min my-vmware-config
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_min function must be called '
'with -f or --function.'
)
ret = {}
vm_properties = ["name"]
vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties)
for vm in vm_list:
ret[vm['name']] = {'state': 'Running', 'id': vm['name']}
return ret | Return a list of all VMs and templates that are on the specified provider, with no details
CLI Example:
.. code-block:: bash
salt-cloud -f list_nodes_min my-vmware-config | Below is the the instruction that describes the task:
### Input:
Return a list of all VMs and templates that are on the specified provider, with no details
CLI Example:
.. code-block:: bash
salt-cloud -f list_nodes_min my-vmware-config
### Response:
def list_nodes_min(kwargs=None, call=None):
'''
Return a list of all VMs and templates that are on the specified provider, with no details
CLI Example:
.. code-block:: bash
salt-cloud -f list_nodes_min my-vmware-config
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_min function must be called '
'with -f or --function.'
)
ret = {}
vm_properties = ["name"]
vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties)
for vm in vm_list:
ret[vm['name']] = {'state': 'Running', 'id': vm['name']}
return ret |
def assign_candidate(self, verse: Verse, candidate: str) -> Verse:
"""
Helper method; make sure that the verse object is properly packaged.
:param verse:
:param candidate:
:return:
"""
verse.scansion = candidate
verse.valid = True
verse.accented = self.formatter.merge_line_scansion(
verse.original, verse.scansion)
return verse | Helper method; make sure that the verse object is properly packaged.
:param verse:
:param candidate:
:return: | Below is the the instruction that describes the task:
### Input:
Helper method; make sure that the verse object is properly packaged.
:param verse:
:param candidate:
:return:
### Response:
def assign_candidate(self, verse: Verse, candidate: str) -> Verse:
"""
Helper method; make sure that the verse object is properly packaged.
:param verse:
:param candidate:
:return:
"""
verse.scansion = candidate
verse.valid = True
verse.accented = self.formatter.merge_line_scansion(
verse.original, verse.scansion)
return verse |
def _ProduceEvent(
self, parser_mediator, event_data, properties, property_name,
timestamp_description, error_description):
"""Produces an event.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
event_data (OpenXMLEventData): event data.
properties (dict[str, object]): properties.
property_name (str): name of the date and time property.
timestamp_description (str): description of the meaning of the timestamp
value.
error_description (str): description of the meaning of the timestamp
value for error reporting purposes.
"""
time_string = properties.get(property_name, None)
if not time_string:
return
# Date and time strings are in ISO 8601 format either with 1 second
# or 100th nano second precision. For example:
# 2012-11-07T23:29:00Z
# 2012-03-05T20:40:00.0000000Z
date_time = dfdatetime_time_elements.TimeElements()
try:
date_time.CopyFromStringISO8601(time_string)
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
except ValueError as exception:
parser_mediator.ProduceExtractionWarning(
'unsupported {0:s}: {1:s} with error: {2!s}'.format(
error_description, time_string, exception)) | Produces an event.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
event_data (OpenXMLEventData): event data.
properties (dict[str, object]): properties.
property_name (str): name of the date and time property.
timestamp_description (str): description of the meaning of the timestamp
value.
error_description (str): description of the meaning of the timestamp
value for error reporting purposes. | Below is the the instruction that describes the task:
### Input:
Produces an event.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
event_data (OpenXMLEventData): event data.
properties (dict[str, object]): properties.
property_name (str): name of the date and time property.
timestamp_description (str): description of the meaning of the timestamp
value.
error_description (str): description of the meaning of the timestamp
value for error reporting purposes.
### Response:
def _ProduceEvent(
self, parser_mediator, event_data, properties, property_name,
timestamp_description, error_description):
"""Produces an event.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
event_data (OpenXMLEventData): event data.
properties (dict[str, object]): properties.
property_name (str): name of the date and time property.
timestamp_description (str): description of the meaning of the timestamp
value.
error_description (str): description of the meaning of the timestamp
value for error reporting purposes.
"""
time_string = properties.get(property_name, None)
if not time_string:
return
# Date and time strings are in ISO 8601 format either with 1 second
# or 100th nano second precision. For example:
# 2012-11-07T23:29:00Z
# 2012-03-05T20:40:00.0000000Z
date_time = dfdatetime_time_elements.TimeElements()
try:
date_time.CopyFromStringISO8601(time_string)
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
except ValueError as exception:
parser_mediator.ProduceExtractionWarning(
'unsupported {0:s}: {1:s} with error: {2!s}'.format(
error_description, time_string, exception)) |
def parse_error(self, response):
"Parse an error response"
error_code = response.split(' ')[0]
if error_code in self.EXCEPTION_CLASSES:
response = response[len(error_code) + 1:]
exception_class = self.EXCEPTION_CLASSES[error_code]
if isinstance(exception_class, dict):
exception_class = exception_class.get(response, ResponseError)
return exception_class(response)
return ResponseError(response) | Parse an error response | Below is the the instruction that describes the task:
### Input:
Parse an error response
### Response:
def parse_error(self, response):
"Parse an error response"
error_code = response.split(' ')[0]
if error_code in self.EXCEPTION_CLASSES:
response = response[len(error_code) + 1:]
exception_class = self.EXCEPTION_CLASSES[error_code]
if isinstance(exception_class, dict):
exception_class = exception_class.get(response, ResponseError)
return exception_class(response)
return ResponseError(response) |
def execute_sql(self, query):
"""
Executes a given query string on an open postgres database.
"""
c = self.con.cursor()
c.execute(query)
result = []
if c.rowcount > 0:
try:
result = c.fetchall()
except psycopg2.ProgrammingError:
pass
return result | Executes a given query string on an open postgres database. | Below is the the instruction that describes the task:
### Input:
Executes a given query string on an open postgres database.
### Response:
def execute_sql(self, query):
"""
Executes a given query string on an open postgres database.
"""
c = self.con.cursor()
c.execute(query)
result = []
if c.rowcount > 0:
try:
result = c.fetchall()
except psycopg2.ProgrammingError:
pass
return result |
def post_unvote(self, post_id):
"""Action lets you unvote for a post (Requires login).
Parameters:
post_id (int):
"""
return self._get('posts/{0}/unvote.json'.format(post_id),
method='PUT', auth=True) | Action lets you unvote for a post (Requires login).
Parameters:
post_id (int): | Below is the the instruction that describes the task:
### Input:
Action lets you unvote for a post (Requires login).
Parameters:
post_id (int):
### Response:
def post_unvote(self, post_id):
"""Action lets you unvote for a post (Requires login).
Parameters:
post_id (int):
"""
return self._get('posts/{0}/unvote.json'.format(post_id),
method='PUT', auth=True) |
def get_session_indexes(request):
"""
Gets the SessionIndexes from the Logout Request
:param request: Logout Request Message
:type request: string|DOMDocument
:return: The SessionIndex value
:rtype: list
"""
if isinstance(request, etree._Element):
elem = request
else:
if isinstance(request, Document):
request = request.toxml()
elem = fromstring(request, forbid_dtd=True)
session_indexes = []
session_index_nodes = OneLogin_Saml2_Utils.query(elem, '/samlp:LogoutRequest/samlp:SessionIndex')
for session_index_node in session_index_nodes:
session_indexes.append(OneLogin_Saml2_Utils.element_text(session_index_node))
return session_indexes | Gets the SessionIndexes from the Logout Request
:param request: Logout Request Message
:type request: string|DOMDocument
:return: The SessionIndex value
:rtype: list | Below is the the instruction that describes the task:
### Input:
Gets the SessionIndexes from the Logout Request
:param request: Logout Request Message
:type request: string|DOMDocument
:return: The SessionIndex value
:rtype: list
### Response:
def get_session_indexes(request):
"""
Gets the SessionIndexes from the Logout Request
:param request: Logout Request Message
:type request: string|DOMDocument
:return: The SessionIndex value
:rtype: list
"""
if isinstance(request, etree._Element):
elem = request
else:
if isinstance(request, Document):
request = request.toxml()
elem = fromstring(request, forbid_dtd=True)
session_indexes = []
session_index_nodes = OneLogin_Saml2_Utils.query(elem, '/samlp:LogoutRequest/samlp:SessionIndex')
for session_index_node in session_index_nodes:
session_indexes.append(OneLogin_Saml2_Utils.element_text(session_index_node))
return session_indexes |
def reply_bytes(self, request):
"""Take a `Request` and return an OP_REPLY message as bytes."""
flags = struct.pack("<i", self._flags)
cursor_id = struct.pack("<q", self._cursor_id)
starting_from = struct.pack("<i", self._starting_from)
number_returned = struct.pack("<i", len(self._docs))
reply_id = random.randint(0, 1000000)
response_to = request.request_id
data = b''.join([flags, cursor_id, starting_from, number_returned])
data += b''.join([bson.BSON.encode(doc) for doc in self._docs])
message = struct.pack("<i", 16 + len(data))
message += struct.pack("<i", reply_id)
message += struct.pack("<i", response_to)
message += struct.pack("<i", OP_REPLY)
return message + data | Take a `Request` and return an OP_REPLY message as bytes. | Below is the the instruction that describes the task:
### Input:
Take a `Request` and return an OP_REPLY message as bytes.
### Response:
def reply_bytes(self, request):
"""Take a `Request` and return an OP_REPLY message as bytes."""
flags = struct.pack("<i", self._flags)
cursor_id = struct.pack("<q", self._cursor_id)
starting_from = struct.pack("<i", self._starting_from)
number_returned = struct.pack("<i", len(self._docs))
reply_id = random.randint(0, 1000000)
response_to = request.request_id
data = b''.join([flags, cursor_id, starting_from, number_returned])
data += b''.join([bson.BSON.encode(doc) for doc in self._docs])
message = struct.pack("<i", 16 + len(data))
message += struct.pack("<i", reply_id)
message += struct.pack("<i", response_to)
message += struct.pack("<i", OP_REPLY)
return message + data |
def check_entry_points(dist, attr, value):
"""Verify that entry_points map is parseable"""
try:
pkg_resources.EntryPoint.parse_map(value)
except ValueError, e:
raise DistutilsSetupError(e) | Verify that entry_points map is parseable | Below is the the instruction that describes the task:
### Input:
Verify that entry_points map is parseable
### Response:
def check_entry_points(dist, attr, value):
"""Verify that entry_points map is parseable"""
try:
pkg_resources.EntryPoint.parse_map(value)
except ValueError, e:
raise DistutilsSetupError(e) |
def string(self): # noqa: C901
"""
Return a human-readable version of the decoded report.
"""
lines = ["station: %s" % self.station_id]
if self.type:
lines.append("type: %s" % self.report_type())
if self.time:
lines.append("time: %s" % self.time.ctime())
if self.temp:
lines.append("temperature: %s" % self.temp.string("C"))
if self.dewpt:
lines.append("dew point: %s" % self.dewpt.string("C"))
if self.wind_speed:
lines.append("wind: %s" % self.wind())
if self.wind_speed_peak:
lines.append("peak wind: %s" % self.peak_wind())
if self.wind_shift_time:
lines.append("wind shift: %s" % self.wind_shift())
if self.vis:
lines.append("visibility: %s" % self.visibility())
if self.runway:
lines.append("visual range: %s" % self.runway_visual_range())
if self.press:
lines.append(f"pressure: {self.press.string('MB')} {self.press.string('IN')} {self.press.string('MM')}")
if self.weather:
lines.append("weather: %s" % self.present_weather())
if self.sky:
lines.append("sky: %s" % self.sky_conditions("\n "))
if self.press_sea_level:
lines.append("sea-level pressure: %s" % self.press_sea_level.string("mb"))
if self.max_temp_6hr:
lines.append("6-hour max temp: %s" % str(self.max_temp_6hr))
if self.max_temp_6hr:
lines.append("6-hour min temp: %s" % str(self.min_temp_6hr))
if self.max_temp_24hr:
lines.append("24-hour max temp: %s" % str(self.max_temp_24hr))
if self.max_temp_24hr:
lines.append("24-hour min temp: %s" % str(self.min_temp_24hr))
if self.precip_1hr:
lines.append("1-hour precipitation: %s" % str(self.precip_1hr))
if self.precip_3hr:
lines.append("3-hour precipitation: %s" % str(self.precip_3hr))
if self.precip_6hr:
lines.append("6-hour precipitation: %s" % str(self.precip_6hr))
if self.precip_24hr:
lines.append("24-hour precipitation: %s" % str(self.precip_24hr))
if self._remarks:
lines.append("remarks:")
lines.append("- " + self.remarks("\n- "))
if self._unparsed_remarks:
lines.append("- " + ' '.join(self._unparsed_remarks))
lines.append("METAR: " + self.code)
return "\n".join(lines) | Return a human-readable version of the decoded report. | Below is the the instruction that describes the task:
### Input:
Return a human-readable version of the decoded report.
### Response:
def string(self): # noqa: C901
"""
Return a human-readable version of the decoded report.
"""
lines = ["station: %s" % self.station_id]
if self.type:
lines.append("type: %s" % self.report_type())
if self.time:
lines.append("time: %s" % self.time.ctime())
if self.temp:
lines.append("temperature: %s" % self.temp.string("C"))
if self.dewpt:
lines.append("dew point: %s" % self.dewpt.string("C"))
if self.wind_speed:
lines.append("wind: %s" % self.wind())
if self.wind_speed_peak:
lines.append("peak wind: %s" % self.peak_wind())
if self.wind_shift_time:
lines.append("wind shift: %s" % self.wind_shift())
if self.vis:
lines.append("visibility: %s" % self.visibility())
if self.runway:
lines.append("visual range: %s" % self.runway_visual_range())
if self.press:
lines.append(f"pressure: {self.press.string('MB')} {self.press.string('IN')} {self.press.string('MM')}")
if self.weather:
lines.append("weather: %s" % self.present_weather())
if self.sky:
lines.append("sky: %s" % self.sky_conditions("\n "))
if self.press_sea_level:
lines.append("sea-level pressure: %s" % self.press_sea_level.string("mb"))
if self.max_temp_6hr:
lines.append("6-hour max temp: %s" % str(self.max_temp_6hr))
if self.max_temp_6hr:
lines.append("6-hour min temp: %s" % str(self.min_temp_6hr))
if self.max_temp_24hr:
lines.append("24-hour max temp: %s" % str(self.max_temp_24hr))
if self.max_temp_24hr:
lines.append("24-hour min temp: %s" % str(self.min_temp_24hr))
if self.precip_1hr:
lines.append("1-hour precipitation: %s" % str(self.precip_1hr))
if self.precip_3hr:
lines.append("3-hour precipitation: %s" % str(self.precip_3hr))
if self.precip_6hr:
lines.append("6-hour precipitation: %s" % str(self.precip_6hr))
if self.precip_24hr:
lines.append("24-hour precipitation: %s" % str(self.precip_24hr))
if self._remarks:
lines.append("remarks:")
lines.append("- " + self.remarks("\n- "))
if self._unparsed_remarks:
lines.append("- " + ' '.join(self._unparsed_remarks))
lines.append("METAR: " + self.code)
return "\n".join(lines) |
def object_build_methoddescriptor(node, member, localname):
"""create astroid for a living method descriptor object"""
# FIXME get arguments ?
func = build_function(
getattr(member, "__name__", None) or localname, doc=member.__doc__
)
# set node's arguments to None to notice that we have no information, not
# and empty argument list
func.args.args = None
node.add_local_node(func, localname)
_add_dunder_class(func, member) | create astroid for a living method descriptor object | Below is the the instruction that describes the task:
### Input:
create astroid for a living method descriptor object
### Response:
def object_build_methoddescriptor(node, member, localname):
"""create astroid for a living method descriptor object"""
# FIXME get arguments ?
func = build_function(
getattr(member, "__name__", None) or localname, doc=member.__doc__
)
# set node's arguments to None to notice that we have no information, not
# and empty argument list
func.args.args = None
node.add_local_node(func, localname)
_add_dunder_class(func, member) |
def remove_cached_item(self, path):
"""
Remove cached resource item
:param path: str
:return: PIL.Image
"""
item_path = '%s/%s' % (
self.cache_folder,
path.strip('/')
)
self.blob_service.delete_blob(self.container_name, item_path)
while self.blob_service.exists(self.container_name, item_path):
time.sleep(0.5)
return True | Remove cached resource item
:param path: str
:return: PIL.Image | Below is the the instruction that describes the task:
### Input:
Remove cached resource item
:param path: str
:return: PIL.Image
### Response:
def remove_cached_item(self, path):
"""
Remove cached resource item
:param path: str
:return: PIL.Image
"""
item_path = '%s/%s' % (
self.cache_folder,
path.strip('/')
)
self.blob_service.delete_blob(self.container_name, item_path)
while self.blob_service.exists(self.container_name, item_path):
time.sleep(0.5)
return True |
def _exec(cmd, cwd=None, env=None, ignored_error_codes=None,
encoding='utf-8'):
"""Run a command.
Execute `cmd` command in the directory set by `cwd`. Environment
variables can be set using the `env` dictionary. The output
data is returned as encoded bytes.
Commands which their returning status codes are non-zero will
be treated as failed. Error codes considered as valid can be
ignored giving them in the `ignored_error_codes` list.
:returns: the output of the command as encoded bytes
:raises RepositoryError: when an error occurs running the command
"""
if ignored_error_codes is None:
ignored_error_codes = []
logger.debug("Running command %s (cwd: %s, env: %s)",
' '.join(cmd), cwd, str(env))
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd, env=env)
(outs, errs) = proc.communicate()
except OSError as e:
raise RepositoryError(cause=str(e))
if proc.returncode != 0 and proc.returncode not in ignored_error_codes:
err = errs.decode(encoding, errors='surrogateescape')
cause = "git command - %s" % err
raise RepositoryError(cause=cause)
else:
logger.debug(errs.decode(encoding, errors='surrogateescape'))
return outs | Run a command.
Execute `cmd` command in the directory set by `cwd`. Environment
variables can be set using the `env` dictionary. The output
data is returned as encoded bytes.
Commands which their returning status codes are non-zero will
be treated as failed. Error codes considered as valid can be
ignored giving them in the `ignored_error_codes` list.
:returns: the output of the command as encoded bytes
:raises RepositoryError: when an error occurs running the command | Below is the the instruction that describes the task:
### Input:
Run a command.
Execute `cmd` command in the directory set by `cwd`. Environment
variables can be set using the `env` dictionary. The output
data is returned as encoded bytes.
Commands which their returning status codes are non-zero will
be treated as failed. Error codes considered as valid can be
ignored giving them in the `ignored_error_codes` list.
:returns: the output of the command as encoded bytes
:raises RepositoryError: when an error occurs running the command
### Response:
def _exec(cmd, cwd=None, env=None, ignored_error_codes=None,
encoding='utf-8'):
"""Run a command.
Execute `cmd` command in the directory set by `cwd`. Environment
variables can be set using the `env` dictionary. The output
data is returned as encoded bytes.
Commands which their returning status codes are non-zero will
be treated as failed. Error codes considered as valid can be
ignored giving them in the `ignored_error_codes` list.
:returns: the output of the command as encoded bytes
:raises RepositoryError: when an error occurs running the command
"""
if ignored_error_codes is None:
ignored_error_codes = []
logger.debug("Running command %s (cwd: %s, env: %s)",
' '.join(cmd), cwd, str(env))
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd, env=env)
(outs, errs) = proc.communicate()
except OSError as e:
raise RepositoryError(cause=str(e))
if proc.returncode != 0 and proc.returncode not in ignored_error_codes:
err = errs.decode(encoding, errors='surrogateescape')
cause = "git command - %s" % err
raise RepositoryError(cause=cause)
else:
logger.debug(errs.decode(encoding, errors='surrogateescape'))
return outs |
def calc(args):
"""
%prog calc [prot.fasta] cds.fasta > out.ks
Protein file is optional. If only one file is given, it is assumed to
be CDS sequences with correct frame (frame 0). Results will be written to
stdout. Both protein file and nucleotide file are assumed to be Fasta format,
with adjacent records as the pairs to compare.
Author: Haibao Tang <bao@uga.edu>, Brad Chapman, Jingping Li
Calculate synonymous mutation rates for gene pairs
This does the following:
1. Fetches a protein pair.
2. Aligns the protein pair with clustalw (default) or muscle.
3. Convert the output to Fasta format.
4. Use this alignment info to align gene sequences using PAL2NAL
5. Run PAML yn00 to calculate synonymous mutation rates.
"""
from jcvi.formats.fasta import translate
p = OptionParser(calc.__doc__)
p.add_option("--longest", action="store_true",
help="Get longest ORF, only works if no pep file, "\
"e.g. ESTs [default: %default]")
p.add_option("--msa", default="clustalw", choices=("clustalw", "muscle"),
help="software used to align the proteins [default: %default]")
p.add_option("--workdir", default=os.getcwd(), help="Work directory")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) == 1:
protein_file, dna_file = None, args[0]
elif len(args) == 2:
protein_file, dna_file = args
else:
print("Incorrect arguments", file=sys.stderr)
sys.exit(not p.print_help())
output_h = must_open(opts.outfile, "w")
print(fields, file=output_h)
work_dir = op.join(opts.workdir, "syn_analysis")
mkdir(work_dir)
if not protein_file:
protein_file = dna_file + ".pep"
translate_args = [dna_file, "--outfile=" + protein_file]
if opts.longest:
translate_args += ["--longest"]
dna_file, protein_file = translate(translate_args)
prot_iterator = SeqIO.parse(open(protein_file), "fasta")
dna_iterator = SeqIO.parse(open(dna_file), "fasta")
for p_rec_1, p_rec_2, n_rec_1, n_rec_2 in \
zip(prot_iterator, prot_iterator, dna_iterator, dna_iterator):
print("--------", p_rec_1.name, p_rec_2.name, file=sys.stderr)
if opts.msa == "clustalw":
align_fasta = clustal_align_protein((p_rec_1, p_rec_2), work_dir)
elif opts.msa == "muscle":
align_fasta = muscle_align_protein((p_rec_1, p_rec_2), work_dir)
mrtrans_fasta = run_mrtrans(align_fasta, (n_rec_1, n_rec_2), work_dir)
if mrtrans_fasta:
ds_subs_yn, dn_subs_yn, ds_subs_ng, dn_subs_ng = \
find_synonymous(mrtrans_fasta, work_dir)
if ds_subs_yn is not None:
pair_name = "%s;%s" % (p_rec_1.name, p_rec_2.name)
output_h.write("%s\n" % (",".join(str(x) for x in (pair_name,
ds_subs_yn, dn_subs_yn, ds_subs_ng, dn_subs_ng))))
output_h.flush()
# Clean-up
sh("rm -rf 2YN.t 2YN.dN 2YN.dS rst rub rst1 syn_analysis") | %prog calc [prot.fasta] cds.fasta > out.ks
Protein file is optional. If only one file is given, it is assumed to
be CDS sequences with correct frame (frame 0). Results will be written to
stdout. Both protein file and nucleotide file are assumed to be Fasta format,
with adjacent records as the pairs to compare.
Author: Haibao Tang <bao@uga.edu>, Brad Chapman, Jingping Li
Calculate synonymous mutation rates for gene pairs
This does the following:
1. Fetches a protein pair.
2. Aligns the protein pair with clustalw (default) or muscle.
3. Convert the output to Fasta format.
4. Use this alignment info to align gene sequences using PAL2NAL
5. Run PAML yn00 to calculate synonymous mutation rates. | Below is the the instruction that describes the task:
### Input:
%prog calc [prot.fasta] cds.fasta > out.ks
Protein file is optional. If only one file is given, it is assumed to
be CDS sequences with correct frame (frame 0). Results will be written to
stdout. Both protein file and nucleotide file are assumed to be Fasta format,
with adjacent records as the pairs to compare.
Author: Haibao Tang <bao@uga.edu>, Brad Chapman, Jingping Li
Calculate synonymous mutation rates for gene pairs
This does the following:
1. Fetches a protein pair.
2. Aligns the protein pair with clustalw (default) or muscle.
3. Convert the output to Fasta format.
4. Use this alignment info to align gene sequences using PAL2NAL
5. Run PAML yn00 to calculate synonymous mutation rates.
### Response:
def calc(args):
    """
    %prog calc [prot.fasta] cds.fasta > out.ks

    Protein file is optional. If only one file is given, it is assumed to
    be CDS sequences with correct frame (frame 0). Results will be written to
    stdout. Both protein file and nucleotide file are assumed to be Fasta format,
    with adjacent records as the pairs to compare.

    Author: Haibao Tang <bao@uga.edu>, Brad Chapman, Jingping Li

    Calculate synonymous mutation rates for gene pairs

    This does the following:
    1. Fetches a protein pair.
    2. Aligns the protein pair with clustalw (default) or muscle.
    3. Convert the output to Fasta format.
    4. Use this alignment info to align gene sequences using PAL2NAL
    5. Run PAML yn00 to calculate synonymous mutation rates.
    """
    from jcvi.formats.fasta import translate

    p = OptionParser(calc.__doc__)
    p.add_option("--longest", action="store_true",
            help="Get longest ORF, only works if no pep file, "\
                 "e.g. ESTs [default: %default]")
    p.add_option("--msa", default="clustalw", choices=("clustalw", "muscle"),
            help="software used to align the proteins [default: %default]")
    p.add_option("--workdir", default=os.getcwd(), help="Work directory")
    p.set_outfile()
    opts, args = p.parse_args(args)

    # One positional argument: CDS only (proteins derived by translation
    # below). Two arguments: explicit protein + CDS Fasta files.
    if len(args) == 1:
        protein_file, dna_file = None, args[0]
    elif len(args) == 2:
        protein_file, dna_file = args
    else:
        print("Incorrect arguments", file=sys.stderr)
        sys.exit(not p.print_help())

    output_h = must_open(opts.outfile, "w")
    # Header line. NOTE(review): `fields` is expected to be a module-level
    # column header string — not visible in this block, confirm at file top.
    print(fields, file=output_h)
    work_dir = op.join(opts.workdir, "syn_analysis")
    mkdir(work_dir)

    if not protein_file:
        # No protein file supplied: translate the CDS Fasta to proteins.
        protein_file = dna_file + ".pep"
        translate_args = [dna_file, "--outfile=" + protein_file]
        if opts.longest:
            translate_args += ["--longest"]
        dna_file, protein_file = translate(translate_args)

    prot_iterator = SeqIO.parse(open(protein_file), "fasta")
    dna_iterator = SeqIO.parse(open(dna_file), "fasta")
    # Zipping the same iterator with itself consumes records two at a time:
    # adjacent Fasta records form the pair to be compared.
    for p_rec_1, p_rec_2, n_rec_1, n_rec_2 in \
            zip(prot_iterator, prot_iterator, dna_iterator, dna_iterator):

        print("--------", p_rec_1.name, p_rec_2.name, file=sys.stderr)
        if opts.msa == "clustalw":
            align_fasta = clustal_align_protein((p_rec_1, p_rec_2), work_dir)
        elif opts.msa == "muscle":
            align_fasta = muscle_align_protein((p_rec_1, p_rec_2), work_dir)
        # PAL2NAL: back-translate the protein alignment onto the CDS pair.
        mrtrans_fasta = run_mrtrans(align_fasta, (n_rec_1, n_rec_2), work_dir)
        if mrtrans_fasta:
            # PAML yn00: Ks/Ka by both Yang-Nielsen and Nei-Gojobori methods.
            ds_subs_yn, dn_subs_yn, ds_subs_ng, dn_subs_ng = \
                find_synonymous(mrtrans_fasta, work_dir)
            if ds_subs_yn is not None:
                pair_name = "%s;%s" % (p_rec_1.name, p_rec_2.name)
                output_h.write("%s\n" % (",".join(str(x) for x in (pair_name,
                        ds_subs_yn, dn_subs_yn, ds_subs_ng, dn_subs_ng))))
                # Flush per pair so partial results survive an interruption.
                output_h.flush()

    # Clean-up: PAML scratch files are written to the current directory.
    sh("rm -rf 2YN.t 2YN.dN 2YN.dS rst rub rst1 syn_analysis")
def paging(data_list, group_number=1, group_size=10):
    """
    Return one page (group) of items from a list.

    :param:
        * data_list: (list) the full list to page through
        * group_number: (int) 1-based page number, default 1
        * group_size: (int) number of items per page, default 10
    :return:
        * group_data: (list) items on the requested page; may be shorter
          than group_size on the last page, or empty past the end
    :raises TypeError: if data_list is not a list, or the page arguments
        are not ints
    :raises ValueError: if group_number or group_size is not positive

    Example::

        paging([1, 2, 3, 4, 5])                                  # [1, 2, 3, 4, 5]
        paging(list(range(100)), group_number=5, group_size=15)  # [60, ..., 74]
        paging(list(range(100)), group_number=7, group_size=15)  # [90, ..., 99]
    """
    if not isinstance(data_list, list):
        raise TypeError('data_list should be a list, but we got {}'.format(type(data_list)))
    if not isinstance(group_number, int) or not isinstance(group_size, int):
        raise TypeError('group_number and group_size should be int, but we got group_number: {0}, '
                        'group_size: {1}'.format(type(group_number), type(group_size)))
    # The error message has always promised "positive int", but the original
    # check only rejected negatives: group_number=0 slipped through and
    # produced a nonsense slice (negative start index). Reject 0 as well.
    if group_number <= 0 or group_size <= 0:
        raise ValueError('group_number and group_size should be positive int, but we got '
                         'group_number: {0}, group_size: {1}'.format(group_number, group_size))
    start = (group_number - 1) * group_size
    return data_list[start:start + group_size]
:param:
* data_list: (list) 需要获取分组的数据列表
* group_number: (int) 分组信息,默认为 1
* group_size: (int) 分组大小,默认为 10
:return:
* group_data: (list) 分组数据
举例如下::
print('--- paging demo---')
all_records = [1, 2, 3, 4, 5]
print(get_group_list_data(all_records))
all_records1 = list(range(100))
print(get_group_list_data(all_records1, group_number=5, group_size=15))
print(get_group_list_data(all_records1, group_number=7, group_size=15))
print('---')
执行结果::
--- paging demo---
[1, 2, 3, 4, 5]
[60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74]
[90, 91, 92, 93, 94, 95, 96, 97, 98, 99]
--- | Below is the the instruction that describes the task:
### Input:
获取分组列表数据
:param:
* data_list: (list) 需要获取分组的数据列表
* group_number: (int) 分组信息,默认为 1
* group_size: (int) 分组大小,默认为 10
:return:
* group_data: (list) 分组数据
举例如下::
print('--- paging demo---')
all_records = [1, 2, 3, 4, 5]
print(get_group_list_data(all_records))
all_records1 = list(range(100))
print(get_group_list_data(all_records1, group_number=5, group_size=15))
print(get_group_list_data(all_records1, group_number=7, group_size=15))
print('---')
执行结果::
--- paging demo---
[1, 2, 3, 4, 5]
[60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74]
[90, 91, 92, 93, 94, 95, 96, 97, 98, 99]
---
### Response:
def paging(data_list, group_number=1, group_size=10):
    """
    Return one page (group) of items from a list.

    :param:
        * data_list: (list) the full list to page through
        * group_number: (int) 1-based page number, default 1
        * group_size: (int) number of items per page, default 10
    :return:
        * group_data: (list) items on the requested page; may be shorter
          than group_size on the last page, or empty past the end
    :raises TypeError: if data_list is not a list, or the page arguments
        are not ints
    :raises ValueError: if group_number or group_size is not positive

    Example::

        paging([1, 2, 3, 4, 5])                                  # [1, 2, 3, 4, 5]
        paging(list(range(100)), group_number=5, group_size=15)  # [60, ..., 74]
        paging(list(range(100)), group_number=7, group_size=15)  # [90, ..., 99]
    """
    if not isinstance(data_list, list):
        raise TypeError('data_list should be a list, but we got {}'.format(type(data_list)))
    if not isinstance(group_number, int) or not isinstance(group_size, int):
        raise TypeError('group_number and group_size should be int, but we got group_number: {0}, '
                        'group_size: {1}'.format(type(group_number), type(group_size)))
    # The error message has always promised "positive int", but the original
    # check only rejected negatives: group_number=0 slipped through and
    # produced a nonsense slice (negative start index). Reject 0 as well.
    if group_number <= 0 or group_size <= 0:
        raise ValueError('group_number and group_size should be positive int, but we got '
                         'group_number: {0}, group_size: {1}'.format(group_number, group_size))
    start = (group_number - 1) * group_size
    return data_list[start:start + group_size]
def logdet_symm_block_tridiag(H_diag, H_upper_diag):
    """
    Compute log|H| for a symmetric, positive-definite block-tridiagonal
    matrix H given by its diagonal blocks ``H_diag`` (T, D, D) and
    upper-diagonal blocks ``H_upper_diag`` (T-1, D, D).

    The Kalman information filter returns the Gaussian normalizer

        log Z = 1/2 h^T J^{-1} h - 1/2 log|J| + n/2 log(2*pi)

    Running it with h = 0 leaves -1/2 log|J| + n/2 log(2*pi), from which
    log|J| is recovered.
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)

    # All potentials are zero except the pairwise lower blocks and the
    # per-node precision blocks.
    zero_block = np.zeros((D, D))
    zero_vec = np.zeros(D)
    lower_blocks = np.transpose(H_upper_diag, (0, 2, 1))

    logZ, _, _ = kalman_info_filter(
        zero_block, zero_vec, 0,
        zero_block, lower_blocks, zero_block, zero_vec, zero_vec, 0,
        H_diag, np.zeros((T, D)), 0)

    # logZ = -1/2 log|J| + (T*D)/2 * log(2*pi)  =>  solve for log|J|.
    return -2 * (logZ - 0.5 * T * D * np.log(2 * np.pi))
symmetric block tridiag matrix. Use the Kalman
info filter to do so. Specifically, the KF computes
the normalizer:
log Z = 1/2 h^T J^{-1} h -1/2 log |J| +n/2 log 2 \pi
We set h=0 to get -1/2 log |J| + n/2 log 2 \pi and from
this we solve for log |J|. | Below is the the instruction that describes the task:
### Input:
compute the log determinant of a positive definite,
symmetric block tridiag matrix. Use the Kalman
info filter to do so. Specifically, the KF computes
the normalizer:
log Z = 1/2 h^T J^{-1} h -1/2 log |J| +n/2 log 2 \pi
We set h=0 to get -1/2 log |J| + n/2 log 2 \pi and from
this we solve for log |J|.
### Response:
def logdet_symm_block_tridiag(H_diag, H_upper_diag):
    """
    Compute log|H| for a symmetric, positive-definite block-tridiagonal
    matrix H given by its diagonal blocks ``H_diag`` (T, D, D) and
    upper-diagonal blocks ``H_upper_diag`` (T-1, D, D).

    The Kalman information filter returns the Gaussian normalizer

        log Z = 1/2 h^T J^{-1} h - 1/2 log|J| + n/2 log(2*pi)

    Running it with h = 0 leaves -1/2 log|J| + n/2 log(2*pi), from which
    log|J| is recovered.
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)

    # All potentials are zero except the pairwise lower blocks and the
    # per-node precision blocks.
    zero_block = np.zeros((D, D))
    zero_vec = np.zeros(D)
    lower_blocks = np.transpose(H_upper_diag, (0, 2, 1))

    logZ, _, _ = kalman_info_filter(
        zero_block, zero_vec, 0,
        zero_block, lower_blocks, zero_block, zero_vec, zero_vec, 0,
        H_diag, np.zeros((T, D)), 0)

    # logZ = -1/2 log|J| + (T*D)/2 * log(2*pi)  =>  solve for log|J|.
    return -2 * (logZ - 0.5 * T * D * np.log(2 * np.pi))
def not_as_alias_handler(names_list):
    """Return the plain names from *names_list*, ignoring any aliases."""
    return [alias.name for alias in names_list]
### Input:
Returns a list of names ignoring any aliases.
### Response:
def not_as_alias_handler(names_list):
    """Return the plain names from *names_list*, ignoring any aliases."""
    return [alias.name for alias in names_list]
def transformer_librispeech_tpu_v2():
    """HParams for training ASR model on Librispeech on TPU v2."""
    # Start from the Librispeech v2 config, then apply the generic TPU
    # adjustments plus a TPU-v2-sized batch and length settings.
    hp = transformer_librispeech_v2()
    update_hparams_for_tpu(hp)
    hp.batch_size = 16
    librispeech.set_librispeech_length_hparams(hp)
    return hp
### Input:
HParams for training ASR model on Librispeech on TPU v2.
### Response:
def transformer_librispeech_tpu_v2():
    """HParams for training ASR model on Librispeech on TPU v2."""
    # Start from the Librispeech v2 config, then apply the generic TPU
    # adjustments plus a TPU-v2-sized batch and length settings.
    hp = transformer_librispeech_v2()
    update_hparams_for_tpu(hp)
    hp.batch_size = 16
    librispeech.set_librispeech_length_hparams(hp)
    return hp
def get_url(self, url_or_dict):
    """
    Return the reversed URL given a viewname string or a dict of kwargs
    for ``reverse``; print an error to stderr when MENU_DEBUG is enabled.

    Returns None when the URL cannot be reversed.
    """
    # Original code used Python-2-only constructs (`basestring` and the
    # `print >>stderr` statement), which are a NameError/SyntaxError on
    # Python 3; both are replaced with their Python 3 equivalents.
    if isinstance(url_or_dict, str):
        url_or_dict = {'viewname': url_or_dict}
    try:
        return reverse(**url_or_dict)
    except NoReverseMatch:
        if MENU_DEBUG:
            print('Unable to reverse URL with kwargs %s' % url_or_dict,
                  file=stderr)
### Input:
Returns the reversed url given a string or dict and prints errors if MENU_DEBUG is enabled
### Response:
def get_url(self, url_or_dict):
    """
    Return the reversed URL given a viewname string or a dict of kwargs
    for ``reverse``; print an error to stderr when MENU_DEBUG is enabled.

    Returns None when the URL cannot be reversed.
    """
    # Original code used Python-2-only constructs (`basestring` and the
    # `print >>stderr` statement), which are a NameError/SyntaxError on
    # Python 3; both are replaced with their Python 3 equivalents.
    if isinstance(url_or_dict, str):
        url_or_dict = {'viewname': url_or_dict}
    try:
        return reverse(**url_or_dict)
    except NoReverseMatch:
        if MENU_DEBUG:
            print('Unable to reverse URL with kwargs %s' % url_or_dict,
                  file=stderr)
def load_users(path=settings.LOGIN_FILE):
    """
    Parse a passwd-style file and return a dict of users and their settings.

    Args:
        path (str, default settings.LOGIN_FILE): file to load
            (default :attr:`ftp.settings.LOGIN_FILE`).

    Returns:
        (dict): username: {pass_hash, uid, gid, full_name, home, shell}

    Example of returned data::

        {
            "xex": {
                "pass_hash": "$asd$aiosjdaiosjdásghwasdjo",
                "uid": "2000",
                "gid": "2000",
                "full_name": "ftftf",
                "home": "/home/ftp/xex",
                "shell": "/bin/false"
            }
        }
    """
    if not os.path.exists(path):
        return {}

    with open(path) as f:
        lines = f.read().splitlines()

    users = {}
    for cnt, raw_line in enumerate(lines, start=1):
        fields = raw_line.split(":")
        assert len(fields) == 7, "Bad number of fields in '%s', at line %d!" % (
            path,
            cnt
        )
        username, pass_hash, uid, gid, full_name, home, shell = fields
        users[username] = {
            "pass_hash": pass_hash,
            "uid": uid,
            "gid": gid,
            "full_name": full_name,
            "home": home,
            "shell": shell,
        }
    return users
Args:
path (str, default settings.LOGIN_FILE): path of the file,
which will be loaded (default :attr:`ftp.settings.LOGIN_FILE`).
Returns:
(dict): username: {pass_hash, uid, gid, full_name, home, shell}
Example of returned data::
{
"xex": {
"pass_hash": "$asd$aiosjdaiosjdásghwasdjo",
"uid": "2000",
"gid": "2000",
"full_name": "ftftf",
"home": "/home/ftp/xex",
"shell": "/bin/false"
}
} | Below is the the instruction that describes the task:
### Input:
Read passwd file and return dict with users and all their settings.
Args:
path (str, default settings.LOGIN_FILE): path of the file,
which will be loaded (default :attr:`ftp.settings.LOGIN_FILE`).
Returns:
(dict): username: {pass_hash, uid, gid, full_name, home, shell}
Example of returned data::
{
"xex": {
"pass_hash": "$asd$aiosjdaiosjdásghwasdjo",
"uid": "2000",
"gid": "2000",
"full_name": "ftftf",
"home": "/home/ftp/xex",
"shell": "/bin/false"
}
}
### Response:
def load_users(path=settings.LOGIN_FILE):
    """
    Parse a passwd-style file and return a dict of users and their settings.

    Args:
        path (str, default settings.LOGIN_FILE): file to load
            (default :attr:`ftp.settings.LOGIN_FILE`).

    Returns:
        (dict): username: {pass_hash, uid, gid, full_name, home, shell}

    Example of returned data::

        {
            "xex": {
                "pass_hash": "$asd$aiosjdaiosjdásghwasdjo",
                "uid": "2000",
                "gid": "2000",
                "full_name": "ftftf",
                "home": "/home/ftp/xex",
                "shell": "/bin/false"
            }
        }
    """
    if not os.path.exists(path):
        return {}

    with open(path) as f:
        lines = f.read().splitlines()

    users = {}
    for cnt, raw_line in enumerate(lines, start=1):
        fields = raw_line.split(":")
        assert len(fields) == 7, "Bad number of fields in '%s', at line %d!" % (
            path,
            cnt
        )
        username, pass_hash, uid, gid, full_name, home, shell = fields
        users[username] = {
            "pass_hash": pass_hash,
            "uid": uid,
            "gid": gid,
            "full_name": full_name,
            "home": home,
            "shell": shell,
        }
    return users
def set_option(self, key, value):
    """Sets general options used by plugins and streams originating
    from this session object.

    :param key: key of the option
    :param value: value to set the option to

    **Available options**:

    ======================== =========================================
    hds-live-edge            (float) Specify the time live HDS
                             streams will start from the edge of
                             stream, default: ``10.0``
    hds-segment-attempts     (int) How many attempts should be done
                             to download each HDS segment, default: ``3``
    hds-segment-threads      (int) The size of the thread pool used
                             to download segments, default: ``1``
    hds-segment-timeout      (float) HDS segment connect and read
                             timeout, default: ``10.0``
    hds-timeout              (float) Timeout for reading data from
                             HDS streams, default: ``60.0``
    hls-live-edge            (int) How many segments from the end
                             to start live streams on, default: ``3``
    hls-segment-attempts     (int) How many attempts should be done
                             to download each HLS segment, default: ``3``
    hls-segment-threads      (int) The size of the thread pool used
                             to download segments, default: ``1``
    hls-segment-timeout      (float) HLS segment connect and read
                             timeout, default: ``10.0``
    hls-timeout              (float) Timeout for reading data from
                             HLS streams, default: ``60.0``
    http-proxy               (str) Specify a HTTP proxy to use for
                             all HTTP requests
    https-proxy              (str) Specify a HTTPS proxy to use for
                             all HTTPS requests
    http-cookies             (dict or str) A dict or a semi-colon (;)
                             delimited str of cookies to add to each
                             HTTP request, e.g. ``foo=bar;baz=qux``
    http-headers             (dict or str) A dict or semi-colon (;)
                             delimited str of headers to add to each
                             HTTP request, e.g. ``foo=bar;baz=qux``
    http-query-params        (dict or str) A dict or a ampersand (&)
                             delimited string of query parameters to
                             add to each HTTP request,
                             e.g. ``foo=bar&baz=qux``
    http-trust-env           (bool) Trust HTTP settings set in the
                             environment, such as environment
                             variables (HTTP_PROXY, etc) and
                             ~/.netrc authentication
    http-ssl-verify          (bool) Verify SSL certificates,
                             default: ``True``
    http-ssl-cert            (str or tuple) SSL certificate to use,
                             can be either a .pem file (str) or a
                             .crt/.key pair (tuple)
    http-timeout             (float) General timeout used by all HTTP
                             requests except the ones covered by
                             other options, default: ``20.0``
    http-stream-timeout      (float) Timeout for reading data from
                             HTTP streams, default: ``60.0``
    subprocess-errorlog      (bool) Log errors from subprocesses to
                             a file located in the temp directory
    subprocess-errorlog-path (str) Log errors from subprocesses to
                             a specific file
    ringbuffer-size          (int) The size of the internal ring
                             buffer used by most stream types,
                             default: ``16777216`` (16MB)
    rtmp-proxy               (str) Specify a proxy (SOCKS) that RTMP
                             streams will use
    rtmp-rtmpdump            (str) Specify the location of the
                             rtmpdump executable used by RTMP streams,
                             e.g. ``/usr/local/bin/rtmpdump``
    rtmp-timeout             (float) Timeout for reading data from
                             RTMP streams, default: ``60.0``
    ffmpeg-ffmpeg            (str) Specify the location of the
                             ffmpeg executable use by Muxing streams
                             e.g. ``/usr/local/bin/ffmpeg``
    ffmpeg-verbose           (bool) Log stderr from ffmpeg to the
                             console
    ffmpeg-verbose-path      (str) Specify the location of the
                             ffmpeg stderr log file
    ffmpeg-video-transcode   (str) The codec to use if transcoding
                             video when muxing with ffmpeg
                             e.g. ``h264``
    ffmpeg-audio-transcode   (str) The codec to use if transcoding
                             audio when muxing with ffmpeg
                             e.g. ``aac``
    stream-segment-attempts  (int) How many attempts should be done
                             to download each segment, default: ``3``.
                             General option used by streams not
                             covered by other options.
    stream-segment-threads   (int) The size of the thread pool used
                             to download segments, default: ``1``.
                             General option used by streams not
                             covered by other options.
    stream-segment-timeout   (float) Segment connect and read
                             timeout, default: ``10.0``.
                             General option used by streams not
                             covered by other options.
    stream-timeout           (float) Timeout for reading data from
                             stream, default: ``60.0``.
                             General option used by streams not
                             covered by other options.
    locale                   (str) Locale setting, in the RFC 1766 format
                             eg. en_US or es_ES
                             default: ``system locale``.
    user-input-requester     (UserInputRequester) instance of UserInputRequester
                             to collect input from the user at runtime. Must be
                             set before the plugins are loaded.
                             default: ``UserInputRequester``.
    ======================== =========================================
    """
    # Backwards compatibility: map deprecated option names onto their
    # current equivalents before dispatching below.
    if key == "rtmpdump":
        key = "rtmp-rtmpdump"
    elif key == "rtmpdump-proxy":
        key = "rtmp-proxy"
    elif key == "errorlog":
        key = "subprocess-errorlog"
    elif key == "errorlog-path":
        key = "subprocess-errorlog-path"

    # Options that configure the shared HTTP session are applied to
    # ``self.http`` immediately; anything else is stored in ``self.options``
    # for plugins/streams to read later.
    if key == "http-proxy":
        self.http.proxies["http"] = update_scheme("http://", value)
    elif key == "https-proxy":
        self.http.proxies["https"] = update_scheme("https://", value)
    elif key == "http-cookies":
        if isinstance(value, dict):
            self.http.cookies.update(value)
        else:
            self.http.parse_cookies(value)
    elif key == "http-headers":
        if isinstance(value, dict):
            self.http.headers.update(value)
        else:
            self.http.parse_headers(value)
    elif key == "http-query-params":
        if isinstance(value, dict):
            self.http.params.update(value)
        else:
            self.http.parse_query_params(value)
    elif key == "http-trust-env":
        self.http.trust_env = value
    elif key == "http-ssl-verify":
        self.http.verify = value
    elif key == "http-disable-dh":
        if value:
            # NOTE(review): this mutates module-global urllib3 cipher
            # state, affecting every session in the process — confirm
            # that is intended.
            requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':!DH'
            try:
                requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST = \
                    requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS.encode("ascii")
            except AttributeError:
                # no ssl to disable the cipher on
                pass
    elif key == "http-ssl-cert":
        self.http.cert = value
    elif key == "http-timeout":
        self.http.timeout = value
    else:
        self.options.set(key, value)
from this session object.
:param key: key of the option
:param value: value to set the option to
**Available options**:
======================== =========================================
hds-live-edge (float) Specify the time live HDS
streams will start from the edge of
stream, default: ``10.0``
hds-segment-attempts (int) How many attempts should be done
to download each HDS segment, default: ``3``
hds-segment-threads (int) The size of the thread pool used
to download segments, default: ``1``
hds-segment-timeout (float) HDS segment connect and read
timeout, default: ``10.0``
hds-timeout (float) Timeout for reading data from
HDS streams, default: ``60.0``
hls-live-edge (int) How many segments from the end
to start live streams on, default: ``3``
hls-segment-attempts (int) How many attempts should be done
to download each HLS segment, default: ``3``
hls-segment-threads (int) The size of the thread pool used
to download segments, default: ``1``
hls-segment-timeout (float) HLS segment connect and read
timeout, default: ``10.0``
hls-timeout (float) Timeout for reading data from
HLS streams, default: ``60.0``
http-proxy (str) Specify a HTTP proxy to use for
all HTTP requests
https-proxy (str) Specify a HTTPS proxy to use for
all HTTPS requests
http-cookies (dict or str) A dict or a semi-colon (;)
delimited str of cookies to add to each
HTTP request, e.g. ``foo=bar;baz=qux``
http-headers (dict or str) A dict or semi-colon (;)
delimited str of headers to add to each
HTTP request, e.g. ``foo=bar;baz=qux``
http-query-params (dict or str) A dict or a ampersand (&)
delimited string of query parameters to
add to each HTTP request,
e.g. ``foo=bar&baz=qux``
http-trust-env (bool) Trust HTTP settings set in the
environment, such as environment
variables (HTTP_PROXY, etc) and
~/.netrc authentication
http-ssl-verify (bool) Verify SSL certificates,
default: ``True``
http-ssl-cert (str or tuple) SSL certificate to use,
can be either a .pem file (str) or a
.crt/.key pair (tuple)
http-timeout (float) General timeout used by all HTTP
requests except the ones covered by
other options, default: ``20.0``
http-stream-timeout (float) Timeout for reading data from
HTTP streams, default: ``60.0``
subprocess-errorlog (bool) Log errors from subprocesses to
a file located in the temp directory
subprocess-errorlog-path (str) Log errors from subprocesses to
a specific file
ringbuffer-size (int) The size of the internal ring
buffer used by most stream types,
default: ``16777216`` (16MB)
rtmp-proxy (str) Specify a proxy (SOCKS) that RTMP
streams will use
rtmp-rtmpdump (str) Specify the location of the
rtmpdump executable used by RTMP streams,
e.g. ``/usr/local/bin/rtmpdump``
rtmp-timeout (float) Timeout for reading data from
RTMP streams, default: ``60.0``
ffmpeg-ffmpeg (str) Specify the location of the
ffmpeg executable use by Muxing streams
e.g. ``/usr/local/bin/ffmpeg``
ffmpeg-verbose (bool) Log stderr from ffmpeg to the
console
ffmpeg-verbose-path (str) Specify the location of the
ffmpeg stderr log file
ffmpeg-video-transcode (str) The codec to use if transcoding
video when muxing with ffmpeg
e.g. ``h264``
ffmpeg-audio-transcode (str) The codec to use if transcoding
audio when muxing with ffmpeg
e.g. ``aac``
stream-segment-attempts (int) How many attempts should be done
to download each segment, default: ``3``.
General option used by streams not
covered by other options.
stream-segment-threads (int) The size of the thread pool used
to download segments, default: ``1``.
General option used by streams not
covered by other options.
stream-segment-timeout (float) Segment connect and read
timeout, default: ``10.0``.
General option used by streams not
covered by other options.
stream-timeout (float) Timeout for reading data from
stream, default: ``60.0``.
General option used by streams not
covered by other options.
locale (str) Locale setting, in the RFC 1766 format
eg. en_US or es_ES
default: ``system locale``.
user-input-requester (UserInputRequester) instance of UserInputRequester
to collect input from the user at runtime. Must be
set before the plugins are loaded.
default: ``UserInputRequester``.
======================== ========================================= | Below is the the instruction that describes the task:
### Input:
Sets general options used by plugins and streams originating
from this session object.
:param key: key of the option
:param value: value to set the option to
**Available options**:
======================== =========================================
hds-live-edge (float) Specify the time live HDS
streams will start from the edge of
stream, default: ``10.0``
hds-segment-attempts (int) How many attempts should be done
to download each HDS segment, default: ``3``
hds-segment-threads (int) The size of the thread pool used
to download segments, default: ``1``
hds-segment-timeout (float) HDS segment connect and read
timeout, default: ``10.0``
hds-timeout (float) Timeout for reading data from
HDS streams, default: ``60.0``
hls-live-edge (int) How many segments from the end
to start live streams on, default: ``3``
hls-segment-attempts (int) How many attempts should be done
to download each HLS segment, default: ``3``
hls-segment-threads (int) The size of the thread pool used
to download segments, default: ``1``
hls-segment-timeout (float) HLS segment connect and read
timeout, default: ``10.0``
hls-timeout (float) Timeout for reading data from
HLS streams, default: ``60.0``
http-proxy (str) Specify a HTTP proxy to use for
all HTTP requests
https-proxy (str) Specify a HTTPS proxy to use for
all HTTPS requests
http-cookies (dict or str) A dict or a semi-colon (;)
delimited str of cookies to add to each
HTTP request, e.g. ``foo=bar;baz=qux``
http-headers (dict or str) A dict or semi-colon (;)
delimited str of headers to add to each
HTTP request, e.g. ``foo=bar;baz=qux``
http-query-params (dict or str) A dict or a ampersand (&)
delimited string of query parameters to
add to each HTTP request,
e.g. ``foo=bar&baz=qux``
http-trust-env (bool) Trust HTTP settings set in the
environment, such as environment
variables (HTTP_PROXY, etc) and
~/.netrc authentication
http-ssl-verify (bool) Verify SSL certificates,
default: ``True``
http-ssl-cert (str or tuple) SSL certificate to use,
can be either a .pem file (str) or a
.crt/.key pair (tuple)
http-timeout (float) General timeout used by all HTTP
requests except the ones covered by
other options, default: ``20.0``
http-stream-timeout (float) Timeout for reading data from
HTTP streams, default: ``60.0``
subprocess-errorlog (bool) Log errors from subprocesses to
a file located in the temp directory
subprocess-errorlog-path (str) Log errors from subprocesses to
a specific file
ringbuffer-size (int) The size of the internal ring
buffer used by most stream types,
default: ``16777216`` (16MB)
rtmp-proxy (str) Specify a proxy (SOCKS) that RTMP
streams will use
rtmp-rtmpdump (str) Specify the location of the
rtmpdump executable used by RTMP streams,
e.g. ``/usr/local/bin/rtmpdump``
rtmp-timeout (float) Timeout for reading data from
RTMP streams, default: ``60.0``
ffmpeg-ffmpeg (str) Specify the location of the
ffmpeg executable use by Muxing streams
e.g. ``/usr/local/bin/ffmpeg``
ffmpeg-verbose (bool) Log stderr from ffmpeg to the
console
ffmpeg-verbose-path (str) Specify the location of the
ffmpeg stderr log file
ffmpeg-video-transcode (str) The codec to use if transcoding
video when muxing with ffmpeg
e.g. ``h264``
ffmpeg-audio-transcode (str) The codec to use if transcoding
audio when muxing with ffmpeg
e.g. ``aac``
stream-segment-attempts (int) How many attempts should be done
to download each segment, default: ``3``.
General option used by streams not
covered by other options.
stream-segment-threads (int) The size of the thread pool used
to download segments, default: ``1``.
General option used by streams not
covered by other options.
stream-segment-timeout (float) Segment connect and read
timeout, default: ``10.0``.
General option used by streams not
covered by other options.
stream-timeout (float) Timeout for reading data from
stream, default: ``60.0``.
General option used by streams not
covered by other options.
locale (str) Locale setting, in the RFC 1766 format
eg. en_US or es_ES
default: ``system locale``.
user-input-requester (UserInputRequester) instance of UserInputRequester
to collect input from the user at runtime. Must be
set before the plugins are loaded.
default: ``UserInputRequester``.
======================== =========================================
### Response:
def set_option(self, key, value):
    """Sets general options used by plugins and streams originating
    from this session object.

    :param key: key of the option
    :param value: value to set the option to

    **Available options**:

    ======================== =========================================
    hds-live-edge            (float) Specify the time live HDS
                             streams will start from the edge of
                             stream, default: ``10.0``
    hds-segment-attempts     (int) How many attempts should be done
                             to download each HDS segment, default: ``3``
    hds-segment-threads      (int) The size of the thread pool used
                             to download segments, default: ``1``
    hds-segment-timeout      (float) HDS segment connect and read
                             timeout, default: ``10.0``
    hds-timeout              (float) Timeout for reading data from
                             HDS streams, default: ``60.0``
    hls-live-edge            (int) How many segments from the end
                             to start live streams on, default: ``3``
    hls-segment-attempts     (int) How many attempts should be done
                             to download each HLS segment, default: ``3``
    hls-segment-threads      (int) The size of the thread pool used
                             to download segments, default: ``1``
    hls-segment-timeout      (float) HLS segment connect and read
                             timeout, default: ``10.0``
    hls-timeout              (float) Timeout for reading data from
                             HLS streams, default: ``60.0``
    http-proxy               (str) Specify a HTTP proxy to use for
                             all HTTP requests
    https-proxy              (str) Specify a HTTPS proxy to use for
                             all HTTPS requests
    http-cookies             (dict or str) A dict or a semi-colon (;)
                             delimited str of cookies to add to each
                             HTTP request, e.g. ``foo=bar;baz=qux``
    http-headers             (dict or str) A dict or semi-colon (;)
                             delimited str of headers to add to each
                             HTTP request, e.g. ``foo=bar;baz=qux``
    http-query-params        (dict or str) A dict or a ampersand (&)
                             delimited string of query parameters to
                             add to each HTTP request,
                             e.g. ``foo=bar&baz=qux``
    http-trust-env           (bool) Trust HTTP settings set in the
                             environment, such as environment
                             variables (HTTP_PROXY, etc) and
                             ~/.netrc authentication
    http-ssl-verify          (bool) Verify SSL certificates,
                             default: ``True``
    http-ssl-cert            (str or tuple) SSL certificate to use,
                             can be either a .pem file (str) or a
                             .crt/.key pair (tuple)
    http-timeout             (float) General timeout used by all HTTP
                             requests except the ones covered by
                             other options, default: ``20.0``
    http-stream-timeout      (float) Timeout for reading data from
                             HTTP streams, default: ``60.0``
    subprocess-errorlog      (bool) Log errors from subprocesses to
                             a file located in the temp directory
    subprocess-errorlog-path (str) Log errors from subprocesses to
                             a specific file
    ringbuffer-size          (int) The size of the internal ring
                             buffer used by most stream types,
                             default: ``16777216`` (16MB)
    rtmp-proxy               (str) Specify a proxy (SOCKS) that RTMP
                             streams will use
    rtmp-rtmpdump            (str) Specify the location of the
                             rtmpdump executable used by RTMP streams,
                             e.g. ``/usr/local/bin/rtmpdump``
    rtmp-timeout             (float) Timeout for reading data from
                             RTMP streams, default: ``60.0``
    ffmpeg-ffmpeg            (str) Specify the location of the
                             ffmpeg executable use by Muxing streams
                             e.g. ``/usr/local/bin/ffmpeg``
    ffmpeg-verbose           (bool) Log stderr from ffmpeg to the
                             console
    ffmpeg-verbose-path      (str) Specify the location of the
                             ffmpeg stderr log file
    ffmpeg-video-transcode   (str) The codec to use if transcoding
                             video when muxing with ffmpeg
                             e.g. ``h264``
    ffmpeg-audio-transcode   (str) The codec to use if transcoding
                             audio when muxing with ffmpeg
                             e.g. ``aac``
    stream-segment-attempts  (int) How many attempts should be done
                             to download each segment, default: ``3``.
                             General option used by streams not
                             covered by other options.
    stream-segment-threads   (int) The size of the thread pool used
                             to download segments, default: ``1``.
                             General option used by streams not
                             covered by other options.
    stream-segment-timeout   (float) Segment connect and read
                             timeout, default: ``10.0``.
                             General option used by streams not
                             covered by other options.
    stream-timeout           (float) Timeout for reading data from
                             stream, default: ``60.0``.
                             General option used by streams not
                             covered by other options.
    locale                   (str) Locale setting, in the RFC 1766 format
                             eg. en_US or es_ES
                             default: ``system locale``.
    user-input-requester     (UserInputRequester) instance of UserInputRequester
                             to collect input from the user at runtime. Must be
                             set before the plugins are loaded.
                             default: ``UserInputRequester``.
    ======================== =========================================
    """
    # Backwards compatibility: map deprecated option names onto their
    # current equivalents before dispatching below.
    if key == "rtmpdump":
        key = "rtmp-rtmpdump"
    elif key == "rtmpdump-proxy":
        key = "rtmp-proxy"
    elif key == "errorlog":
        key = "subprocess-errorlog"
    elif key == "errorlog-path":
        key = "subprocess-errorlog-path"

    # Options that configure the shared HTTP session are applied to
    # ``self.http`` immediately; anything else is stored in ``self.options``
    # for plugins/streams to read later.
    if key == "http-proxy":
        self.http.proxies["http"] = update_scheme("http://", value)
    elif key == "https-proxy":
        self.http.proxies["https"] = update_scheme("https://", value)
    elif key == "http-cookies":
        if isinstance(value, dict):
            self.http.cookies.update(value)
        else:
            self.http.parse_cookies(value)
    elif key == "http-headers":
        if isinstance(value, dict):
            self.http.headers.update(value)
        else:
            self.http.parse_headers(value)
    elif key == "http-query-params":
        if isinstance(value, dict):
            self.http.params.update(value)
        else:
            self.http.parse_query_params(value)
    elif key == "http-trust-env":
        self.http.trust_env = value
    elif key == "http-ssl-verify":
        self.http.verify = value
    elif key == "http-disable-dh":
        if value:
            # NOTE(review): this mutates module-global urllib3 cipher
            # state, affecting every session in the process — confirm
            # that is intended.
            requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':!DH'
            try:
                requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST = \
                    requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS.encode("ascii")
            except AttributeError:
                # no ssl to disable the cipher on
                pass
    elif key == "http-ssl-cert":
        self.http.cert = value
    elif key == "http-timeout":
        self.http.timeout = value
    else:
        self.options.set(key, value)
def _chk_docopts(self, kws):
    """Check for common user command-line errors.

    Args:
        kws (dict): Parsed docopt command-line arguments.
    """
    # outfile should contain .png, .png, etc.
    outfile = kws['outfile']
    # Only default obo and default outfile present -> no GO IDs were given.
    if len(kws) == 2 and os.path.basename(kws['obo']) == "go-basic.obo" and \
       kws['outfile'] == self.dflt_outfile:
        self._err("NO GO IDS SPECFIED", err=False)
    # 'obo' appearing in the outfile suggests arguments were mixed up.
    if 'obo' in outfile:
        self._err("BAD outfile({O})".format(O=outfile))
    # Annotations must come from exactly one source file, not both.
    if 'gaf' in kws and 'gene2go' in kws:
        self._err("SPECIFY ANNOTAIONS FROM ONE FILE")
    # NCBI's gene2go file covers many species, so a taxid is required.
    if 'gene2go' in kws:
        if 'taxid' not in kws:
            self._err("SPECIFIY taxid WHEN READ NCBI'S gene2go FILE")
### Input:
Check for common user command-line errors.
### Response:
def _chk_docopts(self, kws):
"""Check for common user command-line errors."""
# outfile should contain .png, .png, etc.
outfile = kws['outfile']
if len(kws) == 2 and os.path.basename(kws['obo']) == "go-basic.obo" and \
kws['outfile'] == self.dflt_outfile:
self._err("NO GO IDS SPECFIED", err=False)
if 'obo' in outfile:
self._err("BAD outfile({O})".format(O=outfile))
if 'gaf' in kws and 'gene2go' in kws:
self._err("SPECIFY ANNOTAIONS FROM ONE FILE")
if 'gene2go' in kws:
if 'taxid' not in kws:
self._err("SPECIFIY taxid WHEN READ NCBI'S gene2go FILE") |
def expand(self):
    """
    Builds a list of single dimensional variables representing current variable.

    Examples:
    For single dimensional variable, it is returned as is
    discrete of (0,2,4) -> discrete of (0,2,4)
    For multi dimensional variable, a list of variables is returned, each representing a single dimension
    continuous {0<=x<=1, 2<=y<=3} -> continuous {0<=x<=1}, continuous {2<=y<=3}
    """
    expanded_variables = []
    for i in range(self.dimensionality):
        # Each expanded variable is an independent deep copy restricted
        # to a single dimension.
        one_d_variable = deepcopy(self)
        one_d_variable.dimensionality = 1
        if self.dimensionality > 1:
            # Suffix with a 1-based index so expanded names stay unique.
            one_d_variable.name = '{}_{}'.format(self.name, i+1)
        else:
            one_d_variable.name = self.name
        one_d_variable.dimensionality_in_model = 1
        expanded_variables.append(one_d_variable)
    return expanded_variables
Examples:
For single dimensional variable, it is returned as is
discrete of (0,2,4) -> discrete of (0,2,4)
For multi dimensional variable, a list of variables is returned, each representing a single dimension
continuous {0<=x<=1, 2<=y<=3} -> continuous {0<=x<=1}, continuous {2<=y<=3} | Below is the the instruction that describes the task:
### Input:
Builds a list of single dimensional variables representing current variable.
Examples:
For single dimensional variable, it is returned as is
discrete of (0,2,4) -> discrete of (0,2,4)
For multi dimensional variable, a list of variables is returned, each representing a single dimension
continuous {0<=x<=1, 2<=y<=3} -> continuous {0<=x<=1}, continuous {2<=y<=3}
### Response:
def expand(self):
"""
Builds a list of single dimensional variables representing current variable.
Examples:
For single dimensional variable, it is returned as is
discrete of (0,2,4) -> discrete of (0,2,4)
For multi dimensional variable, a list of variables is returned, each representing a single dimension
continuous {0<=x<=1, 2<=y<=3} -> continuous {0<=x<=1}, continuous {2<=y<=3}
"""
expanded_variables = []
for i in range(self.dimensionality):
one_d_variable = deepcopy(self)
one_d_variable.dimensionality = 1
if self.dimensionality > 1:
one_d_variable.name = '{}_{}'.format(self.name, i+1)
else:
one_d_variable.name = self.name
one_d_variable.dimensionality_in_model = 1
expanded_variables.append(one_d_variable)
return expanded_variables |
def native(self):
    """
    The native Python datatype representation of this value

    :return:
        A unicode string or None
    """
    if self.contents is None:
        return None
    if self._native is None:
        # Cache the lookup: the integer form of the value indexes the map.
        self._native = self._map[self.__int__()]
    return self._native
:return:
A unicode string or None | Below is the the instruction that describes the task:
### Input:
The native Python datatype representation of this value
:return:
A unicode string or None
### Response:
def native(self):
"""
The native Python datatype representation of this value
:return:
A unicode string or None
"""
if self.contents is None:
return None
if self._native is None:
self._native = self._map[self.__int__()]
return self._native |
def convert_flatten(builder, layer, input_names, output_names, keras_layer):
    """Convert a flatten layer from keras to coreml.

    Parameters
    ----------
    builder: NeuralNetworkBuilder
        A neural network builder object.
    layer: str
        Name to give the generated Core ML layer.
    input_names: list of str
        Names of the input blobs; only the first is used.
    output_names: list of str
        Names of the output blobs; only the first is used.
    keras_layer: layer
        A keras layer object.
    """
    input_name, output_name = (input_names[0], output_names[0])
    # blob_order == 0 if the input blob needs not be rearranged
    # blob_order == 1 if the input blob needs to be rearranged
    blob_order = 0
    # using keras_layer.input.shape have a "?" (Dimension[None] at the front),
    # making a 3D tensor with unknown batch size 4D
    if len(keras_layer.input.shape) == 4:
        blob_order = 1
    builder.add_flatten(name=layer, mode=blob_order, input_name=input_name, output_name=output_name)
Parameters
keras_layer: layer
----------
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | Below is the the instruction that describes the task:
### Input:
Convert a flatten layer from keras to coreml.
Parameters
keras_layer: layer
----------
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
### Response:
def convert_flatten(builder, layer, input_names, output_names, keras_layer):
"""Convert a flatten layer from keras to coreml.
Parameters
keras_layer: layer
----------
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = (input_names[0], output_names[0])
# blob_order == 0 if the input blob needs not be rearranged
# blob_order == 1 if the input blob needs to be rearranged
blob_order = 0
# using keras_layer.input.shape have a "?" (Dimension[None] at the front),
# making a 3D tensor with unknown batch size 4D
if len(keras_layer.input.shape) == 4:
blob_order = 1
builder.add_flatten(name=layer, mode=blob_order, input_name=input_name, output_name=output_name) |
def matches_pattern(self, other):
    """Return if the current message matches a message template.

    Compare the current message to a template message to test matches
    to a pattern.

    :param other: template ``Message`` instance to compare against.
    :return: True only when ``other`` has the same code and every
        property of this message matches the template's value.
    """
    properties = self._message_properties()
    ismatch = False
    # Only a Message of the same code can match at all.
    if isinstance(other, Message) and self.code == other.code:
        for prop in properties:
            for key, prop_val in prop.items():
                if hasattr(other, key):
                    key_val = getattr(other, key)
                    ismatch = self._test_match(prop_val, key_val)
                else:
                    # Template lacks this property entirely: no match.
                    ismatch = False
                if not ismatch:
                    break
            if not ismatch:
                break
    return ismatch
Compare the current message to a template message to test matches
to a pattern. | Below is the the instruction that describes the task:
### Input:
Return if the current message matches a message template.
Compare the current message to a template message to test matches
to a pattern.
### Response:
def matches_pattern(self, other):
"""Return if the current message matches a message template.
Compare the current message to a template message to test matches
to a pattern.
"""
properties = self._message_properties()
ismatch = False
if isinstance(other, Message) and self.code == other.code:
for prop in properties:
for key, prop_val in prop.items():
if hasattr(other, key):
key_val = getattr(other, key)
ismatch = self._test_match(prop_val, key_val)
else:
ismatch = False
if not ismatch:
break
if not ismatch:
break
return ismatch |
def validate_positive_float(option, value):
    """Validate that ``value`` is a float, or can be converted to one, and
    is positive.

    :param option: Option name, used only in error messages.
    :param value: Candidate value; anything acceptable to ``float()``.
    :return: The value converted to ``float``.
    :raises ValueError: If conversion fails with ValueError or the result
        is outside the accepted range.
    :raises TypeError: If conversion fails with TypeError.
    """
    try:
        converted = float(value)
    except (ValueError, TypeError) as exc:
        # Re-raise with the same exception type the conversion produced.
        raise type(exc)("%s must be an integer or float" % (option,))
    # float('inf') doesn't work in 2.4 or 2.5 on Windows, so floats are
    # capped at one billion - a reasonable approximation for infinity.
    if not 0 < converted < 1e9:
        raise ValueError("%s must be greater than 0 and "
                         "less than one billion" % (option,))
    return converted
positive. | Below is the the instruction that describes the task:
### Input:
Validates that 'value' is a float, or can be converted to one, and is
positive.
### Response:
def validate_positive_float(option, value):
"""Validates that 'value' is a float, or can be converted to one, and is
positive.
"""
errmsg = "%s must be an integer or float" % (option,)
try:
value = float(value)
except ValueError:
raise ValueError(errmsg)
except TypeError:
raise TypeError(errmsg)
# float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at
# one billion - this is a reasonable approximation for infinity
if not 0 < value < 1e9:
raise ValueError("%s must be greater than 0 and "
"less than one billion" % (option,))
return value |
def _configure_logger_handler(cls, log_dest, log_filename):
    """
    Return a logging handler for the specified `log_dest`, or `None` if
    `log_dest` is `None`.

    :param log_dest: 'stderr', 'file', or None.
    :param log_filename: Path for the log file; required when
        `log_dest` is 'file'.
    :raises ValueError: For a missing filename or unknown destination.
    """
    if log_dest is None:
        return None
    msg_format = '%(asctime)s-%(name)s-%(message)s'
    if log_dest == 'stderr':
        # Note: sys.stderr is the default stream for StreamHandler
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(msg_format))
    elif log_dest == 'file':
        if not log_filename:
            raise ValueError("Log filename is required if log destination "
                             "is 'file'")
        handler = logging.FileHandler(log_filename, encoding="UTF-8")
        handler.setFormatter(logging.Formatter(msg_format))
    else:
        raise ValueError(
            _format("Invalid log destination: {0!A}; Must be one of: "
                    "{1!A}", log_dest, LOG_DESTINATIONS))
    return handler
`log_dest` is `None`. | Below is the the instruction that describes the task:
### Input:
Return a logging handler for the specified `log_dest`, or `None` if
`log_dest` is `None`.
### Response:
def _configure_logger_handler(cls, log_dest, log_filename):
"""
Return a logging handler for the specified `log_dest`, or `None` if
`log_dest` is `None`.
"""
if log_dest is None:
return None
msg_format = '%(asctime)s-%(name)s-%(message)s'
if log_dest == 'stderr':
# Note: sys.stderr is the default stream for StreamHandler
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(msg_format))
elif log_dest == 'file':
if not log_filename:
raise ValueError("Log filename is required if log destination "
"is 'file'")
handler = logging.FileHandler(log_filename, encoding="UTF-8")
handler.setFormatter(logging.Formatter(msg_format))
else:
raise ValueError(
_format("Invalid log destination: {0!A}; Must be one of: "
"{1!A}", log_dest, LOG_DESTINATIONS))
return handler |
def config(self, **kwargs):
    """
    Configure resources of the widget.

    To get the list of options for this widget, call the method :meth:`~Balloon.keys`.
    See :meth:`~Balloon.__init__` for a description of the widget specific option.
    """
    # Pop the Balloon-specific options so only standard ttk options
    # are forwarded to Frame.config below.
    self.__headertext = kwargs.pop("headertext", self.__headertext)
    self.__text = kwargs.pop("text", self.__text)
    self.__width = kwargs.pop("width", self.__width)
    self._timeout = kwargs.pop("timeout", self._timeout)
    self.__background = kwargs.pop("background", self.__background)
    if self._toplevel:
        # Balloon is currently shown: rebuild it so new values take effect.
        self._on_leave(None)
        self.show()
    ttk.Frame.config(self, **kwargs)
To get the list of options for this widget, call the method :meth:`~Balloon.keys`.
See :meth:`~Balloon.__init__` for a description of the widget specific option. | Below is the the instruction that describes the task:
### Input:
Configure resources of the widget.
To get the list of options for this widget, call the method :meth:`~Balloon.keys`.
See :meth:`~Balloon.__init__` for a description of the widget specific option.
### Response:
def config(self, **kwargs):
"""
Configure resources of the widget.
To get the list of options for this widget, call the method :meth:`~Balloon.keys`.
See :meth:`~Balloon.__init__` for a description of the widget specific option.
"""
self.__headertext = kwargs.pop("headertext", self.__headertext)
self.__text = kwargs.pop("text", self.__text)
self.__width = kwargs.pop("width", self.__width)
self._timeout = kwargs.pop("timeout", self._timeout)
self.__background = kwargs.pop("background", self.__background)
if self._toplevel:
self._on_leave(None)
self.show()
ttk.Frame.config(self, **kwargs) |
def _union_lcs(evaluated_sentences, reference_sentence, prev_union=None):
    """
    Returns LCS_u(r_i, C) which is the LCS score of the union longest common
    subsequence between reference sentence ri and candidate summary C.

    For example:
    if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8
    and c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1
    is "w1 w2" and the longest common subsequence of r_i and c2 is "w1 w3 w5".
    The union longest common subsequence of r_i, c1, and c2 is "w1 w2 w3 w5"
    and LCS_u(r_i, C) = 4/5.

    Args:
        evaluated_sentences: The sentences that have been picked by the
            summarizer
        reference_sentence: One of the sentences in the reference summaries
        prev_union: Set of words already counted for earlier reference
            sentences; only words not in it are counted as new.

    Returns:
        float: LCS_u(r_i, C)

    ValueError:
        Raises exception if a param has len <= 0
    """
    if prev_union is None:
        prev_union = set()
    if len(evaluated_sentences) <= 0:
        raise ValueError("Collections must contain at least 1 sentence.")
    lcs_union = prev_union
    prev_count = len(prev_union)
    reference_words = _split_into_words([reference_sentence])
    combined_lcs_length = 0
    for eval_s in evaluated_sentences:
        evaluated_words = _split_into_words([eval_s])
        # Accumulate the union of each candidate's LCS with the reference.
        lcs = set(_recon_lcs(reference_words, evaluated_words))
        combined_lcs_length += len(lcs)
        lcs_union = lcs_union.union(lcs)
    # Only words added beyond prev_union count toward this sentence's score.
    new_lcs_count = len(lcs_union) - prev_count
    return new_lcs_count, lcs_union
subsequence between reference sentence ri and candidate summary C.
For example:
if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8
and c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1
is "w1 w2" and the longest common subsequence of r_i and c2 is "w1 w3 w5".
The union longest common subsequence of r_i, c1, and c2 is "w1 w2 w3 w5"
and LCS_u(r_i, C) = 4/5.
Args:
evaluated_sentences: The sentences that have been picked by the
summarizer
reference_sentence: One of the sentences in the reference summaries
Returns:
float: LCS_u(r_i, C)
ValueError:
Raises exception if a param has len <= 0 | Below is the the instruction that describes the task:
### Input:
Returns LCS_u(r_i, C) which is the LCS score of the union longest common
subsequence between reference sentence ri and candidate summary C.
For example:
if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8
and c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1
is "w1 w2" and the longest common subsequence of r_i and c2 is "w1 w3 w5".
The union longest common subsequence of r_i, c1, and c2 is "w1 w2 w3 w5"
and LCS_u(r_i, C) = 4/5.
Args:
evaluated_sentences: The sentences that have been picked by the
summarizer
reference_sentence: One of the sentences in the reference summaries
Returns:
float: LCS_u(r_i, C)
ValueError:
Raises exception if a param has len <= 0
### Response:
def _union_lcs(evaluated_sentences, reference_sentence, prev_union=None):
"""
Returns LCS_u(r_i, C) which is the LCS score of the union longest common
subsequence between reference sentence ri and candidate summary C.
For example:
if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8
and c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1
is "w1 w2" and the longest common subsequence of r_i and c2 is "w1 w3 w5".
The union longest common subsequence of r_i, c1, and c2 is "w1 w2 w3 w5"
and LCS_u(r_i, C) = 4/5.
Args:
evaluated_sentences: The sentences that have been picked by the
summarizer
reference_sentence: One of the sentences in the reference summaries
Returns:
float: LCS_u(r_i, C)
ValueError:
Raises exception if a param has len <= 0
"""
if prev_union is None:
prev_union = set()
if len(evaluated_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
lcs_union = prev_union
prev_count = len(prev_union)
reference_words = _split_into_words([reference_sentence])
combined_lcs_length = 0
for eval_s in evaluated_sentences:
evaluated_words = _split_into_words([eval_s])
lcs = set(_recon_lcs(reference_words, evaluated_words))
combined_lcs_length += len(lcs)
lcs_union = lcs_union.union(lcs)
new_lcs_count = len(lcs_union) - prev_count
return new_lcs_count, lcs_union |
def set_hkr_state(self, state):
    """Set the state of the thermostat.

    Possible values for state are: 'on', 'off', 'comfort', 'eco'.
    Unknown states are silently ignored.
    """
    try:
        # Map symbolic states to target-temperature codes; 0 and 100 are
        # the special values used for off/on.
        value = {
            'off': 0,
            'on': 100,
            'eco': self.eco_temperature,
            'comfort': self.comfort_temperature
        }[state]
    except KeyError:
        return
    self.set_target_temperature(value)
Possible values for state are: 'on', 'off', 'comfort', 'eco'. | Below is the the instruction that describes the task:
### Input:
Set the state of the thermostat.
Possible values for state are: 'on', 'off', 'comfort', 'eco'.
### Response:
def set_hkr_state(self, state):
"""Set the state of the thermostat.
Possible values for state are: 'on', 'off', 'comfort', 'eco'.
"""
try:
value = {
'off': 0,
'on': 100,
'eco': self.eco_temperature,
'comfort': self.comfort_temperature
}[state]
except KeyError:
return
self.set_target_temperature(value) |
def get_median(data_np):
    """Like :func:`get_mean` but for median.

    Non-finite entries (NaN, +/-inf) are excluded; NaN is returned when
    no finite values remain.
    """
    finite_mask = np.isfinite(data_np)
    if not finite_mask.any():
        return np.nan
    return np.median(data_np[finite_mask])
### Input:
Like :func:`get_mean` but for median.
### Response:
def get_median(data_np):
"""Like :func:`get_mean` but for median."""
i = np.isfinite(data_np)
if not np.any(i):
return np.nan
return np.median(data_np[i]) |
def product(self, other, inplace=True):
    """
    TODO: Make it work when using `*` instead of product.

    Returns the product of two gaussian distributions.

    Parameters
    ----------
    other: GaussianDistribution
        The GaussianDistribution to be multiplied.

    inplace: boolean
        If True, modifies the distribution itself, otherwise returns a new
        GaussianDistribution object.

    Returns
    -------
    CanonicalDistribution or None:
        if inplace=True (default) returns None.
        if inplace=False returns a new CanonicalDistribution instance.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.distributions import GaussianDistribution as GD
    >>> dis1 = GD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
    ...           np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
    >>> dis2 = GD(['x3', 'x4'], [1, 2], [[2, 3], [5, 6]])
    >>> dis3 = dis1.product(dis2, inplace=False)
    >>> dis3.covariance
    array([[ 3.6,  1. , -0.4, -0.6],
           [ 1. ,  2.5, -1. , -1.5],
           [-0.4, -1. ,  1.6,  2.4],
           [-1. , -2.5,  4. ,  4.5]])
    >>> dis3.mean
    array([[ 1.6],
           [-1.5],
           [ 1.6],
           [ 3.5]])
    """
    # Thin wrapper: the shared _operate method implements both product
    # and divide; here only the operation name is fixed.
    return self._operate(other, operation='product', inplace=inplace)
Returns the product of two gaussian distributions.
Parameters
----------
other: GaussianDistribution
The GaussianDistribution to be multiplied.
inplace: boolean
If True, modifies the distribution itself, otherwise returns a new
GaussianDistribution object.
Returns
-------
CanonicalDistribution or None:
if inplace=True (default) returns None.
if inplace=False returns a new CanonicalDistribution instance.
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.distributions import GaussianDistribution as GD
>>> dis1 = GD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
... np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
>>> dis2 = GD(['x3', 'x4'], [1, 2], [[2, 3], [5, 6]])
>>> dis3 = dis1.product(dis2, inplace=False)
>>> dis3.covariance
array([[ 3.6, 1. , -0.4, -0.6],
[ 1. , 2.5, -1. , -1.5],
[-0.4, -1. , 1.6, 2.4],
[-1. , -2.5, 4. , 4.5]])
>>> dis3.mean
array([[ 1.6],
[-1.5],
[ 1.6],
[ 3.5]]) | Below is the the instruction that describes the task:
### Input:
TODO: Make it work when using `*` instead of product.
Returns the product of two gaussian distributions.
Parameters
----------
other: GaussianDistribution
The GaussianDistribution to be multiplied.
inplace: boolean
If True, modifies the distribution itself, otherwise returns a new
GaussianDistribution object.
Returns
-------
CanonicalDistribution or None:
if inplace=True (default) returns None.
if inplace=False returns a new CanonicalDistribution instance.
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.distributions import GaussianDistribution as GD
>>> dis1 = GD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
... np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
>>> dis2 = GD(['x3', 'x4'], [1, 2], [[2, 3], [5, 6]])
>>> dis3 = dis1.product(dis2, inplace=False)
>>> dis3.covariance
array([[ 3.6, 1. , -0.4, -0.6],
[ 1. , 2.5, -1. , -1.5],
[-0.4, -1. , 1.6, 2.4],
[-1. , -2.5, 4. , 4.5]])
>>> dis3.mean
array([[ 1.6],
[-1.5],
[ 1.6],
[ 3.5]])
### Response:
def product(self, other, inplace=True):
"""
TODO: Make it work when using `*` instead of product.
Returns the product of two gaussian distributions.
Parameters
----------
other: GaussianDistribution
The GaussianDistribution to be multiplied.
inplace: boolean
If True, modifies the distribution itself, otherwise returns a new
GaussianDistribution object.
Returns
-------
CanonicalDistribution or None:
if inplace=True (default) returns None.
if inplace=False returns a new CanonicalDistribution instance.
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.distributions import GaussianDistribution as GD
>>> dis1 = GD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
... np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
>>> dis2 = GD(['x3', 'x4'], [1, 2], [[2, 3], [5, 6]])
>>> dis3 = dis1.product(dis2, inplace=False)
>>> dis3.covariance
array([[ 3.6, 1. , -0.4, -0.6],
[ 1. , 2.5, -1. , -1.5],
[-0.4, -1. , 1.6, 2.4],
[-1. , -2.5, 4. , 4.5]])
>>> dis3.mean
array([[ 1.6],
[-1.5],
[ 1.6],
[ 3.5]])
"""
return self._operate(other, operation='product', inplace=inplace) |
def get_statistics(ds, methods=None, features=None):
    """Compute statistics for an RT-DC dataset

    Parameters
    ----------
    ds: dclab.rtdc_dataset.RTDCBase
        The dataset for which to compute the statistics.
    methods: list of str or None
        The methods wih which to compute the statistics.
        The list of available methods is given with
        `dclab.statistics.Statistics.available_methods.keys()`
        If set to `None`, statistics for all methods are computed.
    features: list of str
        Feature name identifiers are defined in
        `dclab.definitions.scalar_feature_names`.
        If set to `None`, statistics for all axes are computed.

    Returns
    -------
    header: list of str
        The header (feature + method names) of the computed statistics.
    values: list of float
        The computed statistics.
    """
    if methods is None:
        cls = list(Statistics.available_methods.keys())
        # sort the features in a usable way: feature-independent methods
        # (e.g. event count) come before per-feature methods.
        avm = Statistics.available_methods
        me1 = [m for m in cls if not avm[m].req_feature]
        me2 = [m for m in cls if avm[m].req_feature]
        methods = me1 + me2
    if features is None:
        features = dfn.scalar_feature_names
    else:
        # Feature identifiers are matched case-insensitively.
        features = [a.lower() for a in features]
    header = []
    values = []
    # To make sure that all methods are computed for each feature in a block,
    # we loop over all features. It would be easier to loop over the methods,
    # but the resulting statistics would not be human-friendly.
    for ft in features:
        for mt in methods:
            meth = Statistics.available_methods[mt]
            if meth.req_feature:
                if ft in ds:
                    values.append(meth(ds=ds, feature=ft))
                else:
                    # Feature absent from this dataset: keep column alignment.
                    values.append(np.nan)
                header.append(" ".join([mt, dfn.feature_name2label[ft]]))
            else:
                # Prevent multiple entries of this method.
                if not header.count(mt):
                    values.append(meth(ds=ds))
                    header.append(mt)
    return header, values
Parameters
----------
ds: dclab.rtdc_dataset.RTDCBase
The dataset for which to compute the statistics.
methods: list of str or None
The methods wih which to compute the statistics.
The list of available methods is given with
`dclab.statistics.Statistics.available_methods.keys()`
If set to `None`, statistics for all methods are computed.
features: list of str
Feature name identifiers are defined in
`dclab.definitions.scalar_feature_names`.
If set to `None`, statistics for all axes are computed.
Returns
-------
header: list of str
The header (feature + method names) of the computed statistics.
values: list of float
The computed statistics. | Below is the the instruction that describes the task:
### Input:
Compute statistics for an RT-DC dataset
Parameters
----------
ds: dclab.rtdc_dataset.RTDCBase
The dataset for which to compute the statistics.
methods: list of str or None
The methods wih which to compute the statistics.
The list of available methods is given with
`dclab.statistics.Statistics.available_methods.keys()`
If set to `None`, statistics for all methods are computed.
features: list of str
Feature name identifiers are defined in
`dclab.definitions.scalar_feature_names`.
If set to `None`, statistics for all axes are computed.
Returns
-------
header: list of str
The header (feature + method names) of the computed statistics.
values: list of float
The computed statistics.
### Response:
def get_statistics(ds, methods=None, features=None):
"""Compute statistics for an RT-DC dataset
Parameters
----------
ds: dclab.rtdc_dataset.RTDCBase
The dataset for which to compute the statistics.
methods: list of str or None
The methods wih which to compute the statistics.
The list of available methods is given with
`dclab.statistics.Statistics.available_methods.keys()`
If set to `None`, statistics for all methods are computed.
features: list of str
Feature name identifiers are defined in
`dclab.definitions.scalar_feature_names`.
If set to `None`, statistics for all axes are computed.
Returns
-------
header: list of str
The header (feature + method names) of the computed statistics.
values: list of float
The computed statistics.
"""
if methods is None:
cls = list(Statistics.available_methods.keys())
# sort the features in a usable way
avm = Statistics.available_methods
me1 = [m for m in cls if not avm[m].req_feature]
me2 = [m for m in cls if avm[m].req_feature]
methods = me1 + me2
if features is None:
features = dfn.scalar_feature_names
else:
features = [a.lower() for a in features]
header = []
values = []
# To make sure that all methods are computed for each feature in a block,
# we loop over all features. It would be easier to loop over the methods,
# but the resulting statistics would not be human-friendly.
for ft in features:
for mt in methods:
meth = Statistics.available_methods[mt]
if meth.req_feature:
if ft in ds:
values.append(meth(ds=ds, feature=ft))
else:
values.append(np.nan)
header.append(" ".join([mt, dfn.feature_name2label[ft]]))
else:
# Prevent multiple entries of this method.
if not header.count(mt):
values.append(meth(ds=ds))
header.append(mt)
return header, values |
def main(args=None):
    """
    Entry point for the tag CLI.

    Isolated as a method so that the CLI can be called by other Python code
    (e.g. for testing), in which case the arguments are passed to the function.
    If no arguments are passed to the function, parse them from the command
    line.
    """
    if args is None:
        # Invoked as a script: parse arguments from sys.argv.
        args = tag.cli.parser().parse_args()
    assert args.cmd in mains
    # Dispatch to the selected subcommand's main function.
    mainmethod = mains[args.cmd]
    mainmethod(args)
Isolated as a method so that the CLI can be called by other Python code
(e.g. for testing), in which case the arguments are passed to the function.
If no arguments are passed to the function, parse them from the command
line. | Below is the the instruction that describes the task:
### Input:
Entry point for the tag CLI.
Isolated as a method so that the CLI can be called by other Python code
(e.g. for testing), in which case the arguments are passed to the function.
If no arguments are passed to the function, parse them from the command
line.
### Response:
def main(args=None):
"""
Entry point for the tag CLI.
Isolated as a method so that the CLI can be called by other Python code
(e.g. for testing), in which case the arguments are passed to the function.
If no arguments are passed to the function, parse them from the command
line.
"""
if args is None:
args = tag.cli.parser().parse_args()
assert args.cmd in mains
mainmethod = mains[args.cmd]
mainmethod(args) |
def fwd_chunk(self):
    """
    Returns the chunk following this chunk in the list of free chunks.

    Abstract: subclasses must override; this base implementation raises.
    """
    raise NotImplementedError("%s not implemented for %s" % (self.fwd_chunk.__func__.__name__,
                                                             self.__class__.__name__))
### Input:
Returns the chunk following this chunk in the list of free chunks.
### Response:
def fwd_chunk(self):
"""
Returns the chunk following this chunk in the list of free chunks.
"""
raise NotImplementedError("%s not implemented for %s" % (self.fwd_chunk.__func__.__name__,
self.__class__.__name__)) |
def populateFromRow(self, continuousSetRecord):
    """
    Populates the instance variables of this ContinuousSet from the
    specified DB row.

    :param continuousSetRecord: DB record providing ``dataurl`` and
        ``attributes`` fields.
    """
    self._filePath = continuousSetRecord.dataurl
    self.setAttributesJson(continuousSetRecord.attributes)
specified DB row. | Below is the the instruction that describes the task:
### Input:
Populates the instance variables of this ContinuousSet from the
specified DB row.
### Response:
def populateFromRow(self, continuousSetRecord):
"""
Populates the instance variables of this ContinuousSet from the
specified DB row.
"""
self._filePath = continuousSetRecord.dataurl
self.setAttributesJson(continuousSetRecord.attributes) |
def register_gate(name, gateclass, allow_overwrite=False):
    """Register new gate to gate set.

    Args:
        name (str): The name of gate.
        gateclass (type): The type object of gate.
        allow_overwrite (bool, optional): If True, allow to overwrite the existing gate.
            Otherwise, raise the ValueError.

    Raises:
        ValueError: The name is duplicated with existing gate.
            When `allow_overwrite=True`, this error is not raised.
    """
    # Gate names become Circuit attributes, so they must not clash with
    # existing attributes or the reserved 'run_with_' backend prefix.
    if hasattr(Circuit, name):
        if allow_overwrite:
            warnings.warn(f"Circuit has attribute `{name}`.")
        else:
            raise ValueError(f"Circuit has attribute `{name}`.")
    if name.startswith("run_with_"):
        if allow_overwrite:
            warnings.warn(f"Gate name `{name}` may conflict with run of backend.")
        else:
            raise ValueError(f"Gate name `{name}` shall not start with 'run_with_'.")
    if not allow_overwrite:
        # Reject duplicates against both registered gates and macros.
        if name in GATE_SET:
            raise ValueError(f"Gate '{name}' is already exists in gate set.")
        if name in GLOBAL_MACROS:
            raise ValueError(f"Macro '{name}' is already exists.")
    GATE_SET[name] = gateclass
Args:
name (str): The name of gate.
gateclass (type): The type object of gate.
allow_overwrite (bool, optional): If True, allow to overwrite the existing gate.
Otherwise, raise the ValueError.
Raises:
ValueError: The name is duplicated with existing gate.
When `allow_overwrite=True`, this error is not raised. | Below is the the instruction that describes the task:
### Input:
Register new gate to gate set.
Args:
name (str): The name of gate.
gateclass (type): The type object of gate.
allow_overwrite (bool, optional): If True, allow to overwrite the existing gate.
Otherwise, raise the ValueError.
Raises:
ValueError: The name is duplicated with existing gate.
When `allow_overwrite=True`, this error is not raised.
### Response:
def register_gate(name, gateclass, allow_overwrite=False):
"""Register new gate to gate set.
Args:
name (str): The name of gate.
gateclass (type): The type object of gate.
allow_overwrite (bool, optional): If True, allow to overwrite the existing gate.
Otherwise, raise the ValueError.
Raises:
ValueError: The name is duplicated with existing gate.
When `allow_overwrite=True`, this error is not raised.
"""
if hasattr(Circuit, name):
if allow_overwrite:
warnings.warn(f"Circuit has attribute `{name}`.")
else:
raise ValueError(f"Circuit has attribute `{name}`.")
if name.startswith("run_with_"):
if allow_overwrite:
warnings.warn(f"Gate name `{name}` may conflict with run of backend.")
else:
raise ValueError(f"Gate name `{name}` shall not start with 'run_with_'.")
if not allow_overwrite:
if name in GATE_SET:
raise ValueError(f"Gate '{name}' is already exists in gate set.")
if name in GLOBAL_MACROS:
raise ValueError(f"Macro '{name}' is already exists.")
GATE_SET[name] = gateclass |
def proto_0111(theABF):
"""protocol: IC ramp for AP shape analysis."""
abf=ABF(theABF)
abf.log.info("analyzing as an IC ramp")
# AP detection
ap=AP(abf)
ap.detect()
# also calculate derivative for each sweep
abf.derivative=True
# create the multi-plot figure
plt.figure(figsize=(SQUARESIZE,SQUARESIZE))
ax1=plt.subplot(221)
plt.ylabel(abf.units2)
ax2=plt.subplot(222,sharey=ax1)
ax3=plt.subplot(223)
plt.ylabel(abf.unitsD2)
ax4=plt.subplot(224,sharey=ax3)
# put data in each subplot
for sweep in range(abf.sweeps):
abf.setsweep(sweep)
ax1.plot(abf.sweepX,abf.sweepY,color='b',lw=.25)
ax2.plot(abf.sweepX,abf.sweepY,color='b')
ax3.plot(abf.sweepX,abf.sweepD,color='r',lw=.25)
ax4.plot(abf.sweepX,abf.sweepD,color='r')
# modify axis
for ax in [ax1,ax2,ax3,ax4]: # everything
ax.margins(0,.1)
ax.grid(alpha=.5)
for ax in [ax3,ax4]: # only derivative APs
ax.axhline(-100,color='r',alpha=.5,ls="--",lw=2)
for ax in [ax2,ax4]: # only zoomed in APs
ax.get_yaxis().set_visible(False)
if len(ap.APs):
firstAP=ap.APs[0]["T"]
ax2.axis([firstAP-.25,firstAP+.25,None,None])
ax4.axis([firstAP-.01,firstAP+.01,None,None])
# show message from first AP
if len(ap.APs):
firstAP=ap.APs[0]
msg="\n".join(["%s = %s"%(x,str(firstAP[x])) for x in sorted(firstAP.keys()) if not "I" in x[-2:]])
plt.subplot(221)
plt.gca().text(0.02, 0.98, msg, transform= plt.gca().transAxes, fontsize=10, verticalalignment='top', family='monospace')
# save it
plt.tight_layout()
frameAndSave(abf,"AP shape")
plt.close('all') | protocol: IC ramp for AP shape analysis. | Below is the the instruction that describes the task:
### Input:
protocol: IC ramp for AP shape analysis.
### Response:
def proto_0111(theABF):
"""protocol: IC ramp for AP shape analysis."""
abf=ABF(theABF)
abf.log.info("analyzing as an IC ramp")
# AP detection
ap=AP(abf)
ap.detect()
# also calculate derivative for each sweep
abf.derivative=True
# create the multi-plot figure
plt.figure(figsize=(SQUARESIZE,SQUARESIZE))
ax1=plt.subplot(221)
plt.ylabel(abf.units2)
ax2=plt.subplot(222,sharey=ax1)
ax3=plt.subplot(223)
plt.ylabel(abf.unitsD2)
ax4=plt.subplot(224,sharey=ax3)
# put data in each subplot
for sweep in range(abf.sweeps):
abf.setsweep(sweep)
ax1.plot(abf.sweepX,abf.sweepY,color='b',lw=.25)
ax2.plot(abf.sweepX,abf.sweepY,color='b')
ax3.plot(abf.sweepX,abf.sweepD,color='r',lw=.25)
ax4.plot(abf.sweepX,abf.sweepD,color='r')
# modify axis
for ax in [ax1,ax2,ax3,ax4]: # everything
ax.margins(0,.1)
ax.grid(alpha=.5)
for ax in [ax3,ax4]: # only derivative APs
ax.axhline(-100,color='r',alpha=.5,ls="--",lw=2)
for ax in [ax2,ax4]: # only zoomed in APs
ax.get_yaxis().set_visible(False)
if len(ap.APs):
firstAP=ap.APs[0]["T"]
ax2.axis([firstAP-.25,firstAP+.25,None,None])
ax4.axis([firstAP-.01,firstAP+.01,None,None])
# show message from first AP
if len(ap.APs):
firstAP=ap.APs[0]
msg="\n".join(["%s = %s"%(x,str(firstAP[x])) for x in sorted(firstAP.keys()) if not "I" in x[-2:]])
plt.subplot(221)
plt.gca().text(0.02, 0.98, msg, transform= plt.gca().transAxes, fontsize=10, verticalalignment='top', family='monospace')
# save it
plt.tight_layout()
frameAndSave(abf,"AP shape")
plt.close('all') |
def calc_std(c0, c1=[]):
""" Calculates the variance of the data."""
if c1 == []:
return numpy.std(c0, 0)
prop = float(len(c0)) / float(len(c1))
if prop < 1:
p0 = int(math.ceil(1 / prop))
p1 = 1
else:
p0 = 1
p1 = int(math.ceil(prop))
return numpy.std(numpy.vstack(p0 * [c0] + p1 * [c1]), 0) | Calculates the variance of the data. | Below is the the instruction that describes the task:
### Input:
Calculates the variance of the data.
### Response:
def calc_std(c0, c1=[]):
""" Calculates the variance of the data."""
if c1 == []:
return numpy.std(c0, 0)
prop = float(len(c0)) / float(len(c1))
if prop < 1:
p0 = int(math.ceil(1 / prop))
p1 = 1
else:
p0 = 1
p1 = int(math.ceil(prop))
return numpy.std(numpy.vstack(p0 * [c0] + p1 * [c1]), 0) |
def notify(title, message, retcode=None):
"""Sends message over Telegram using telegram-send, title is ignored."""
if not path.exists(config_file):
if not path.exists(config_dir):
makedirs(config_dir)
print("Follow the instructions to configure the Telegram backend.\n")
configure(config_file)
send(messages=[message], conf=config_file) | Sends message over Telegram using telegram-send, title is ignored. | Below is the the instruction that describes the task:
### Input:
Sends message over Telegram using telegram-send, title is ignored.
### Response:
def notify(title, message, retcode=None):
"""Sends message over Telegram using telegram-send, title is ignored."""
if not path.exists(config_file):
if not path.exists(config_dir):
makedirs(config_dir)
print("Follow the instructions to configure the Telegram backend.\n")
configure(config_file)
send(messages=[message], conf=config_file) |
def execute_fragment_under_context(self, ctx, start_label, end_label):
''' just like run but returns if moved outside of the specified fragment
# 4 different exectution results
# 0=normal, 1=return, 2=jump_outside, 3=errors
# execute_fragment_under_context returns:
# (return_value, typ, return_value/jump_loc/py_error)
# IMPARTANT: It is guaranteed that the length of the ctx.stack is unchanged.
'''
old_curr_ctx = self.current_ctx
self.ctx_depth += 1
old_stack_len = len(ctx.stack)
old_ret_len = len(self.return_locs)
old_ctx_len = len(self.contexts)
try:
self.current_ctx = ctx
return self._execute_fragment_under_context(
ctx, start_label, end_label)
except JsException as err:
if self.debug_mode:
self._on_fragment_exit("js errors")
# undo the things that were put on the stack (if any) to ensure a proper error recovery
del ctx.stack[old_stack_len:]
del self.return_locs[old_ret_len:]
del self.contexts[old_ctx_len :]
return undefined, 3, err
finally:
self.ctx_depth -= 1
self.current_ctx = old_curr_ctx
assert old_stack_len == len(ctx.stack) | just like run but returns if moved outside of the specified fragment
# 4 different exectution results
# 0=normal, 1=return, 2=jump_outside, 3=errors
# execute_fragment_under_context returns:
# (return_value, typ, return_value/jump_loc/py_error)
# IMPARTANT: It is guaranteed that the length of the ctx.stack is unchanged. | Below is the the instruction that describes the task:
### Input:
just like run but returns if moved outside of the specified fragment
# 4 different exectution results
# 0=normal, 1=return, 2=jump_outside, 3=errors
# execute_fragment_under_context returns:
# (return_value, typ, return_value/jump_loc/py_error)
# IMPARTANT: It is guaranteed that the length of the ctx.stack is unchanged.
### Response:
def execute_fragment_under_context(self, ctx, start_label, end_label):
''' just like run but returns if moved outside of the specified fragment
# 4 different exectution results
# 0=normal, 1=return, 2=jump_outside, 3=errors
# execute_fragment_under_context returns:
# (return_value, typ, return_value/jump_loc/py_error)
# IMPARTANT: It is guaranteed that the length of the ctx.stack is unchanged.
'''
old_curr_ctx = self.current_ctx
self.ctx_depth += 1
old_stack_len = len(ctx.stack)
old_ret_len = len(self.return_locs)
old_ctx_len = len(self.contexts)
try:
self.current_ctx = ctx
return self._execute_fragment_under_context(
ctx, start_label, end_label)
except JsException as err:
if self.debug_mode:
self._on_fragment_exit("js errors")
# undo the things that were put on the stack (if any) to ensure a proper error recovery
del ctx.stack[old_stack_len:]
del self.return_locs[old_ret_len:]
del self.contexts[old_ctx_len :]
return undefined, 3, err
finally:
self.ctx_depth -= 1
self.current_ctx = old_curr_ctx
assert old_stack_len == len(ctx.stack) |
def reply_all(self, reply_comment):
"""Replies to everyone on the email, including those on the CC line.
With great power, comes great responsibility.
Args:
reply_comment: The string comment to send to everyone on the email.
"""
payload = '{ "Comment": "' + reply_comment + '"}'
endpoint = 'https://outlook.office.com/api/v2.0/me/messages/{}/replyall'.format(self.message_id)
self._make_api_call('post', endpoint, data=payload) | Replies to everyone on the email, including those on the CC line.
With great power, comes great responsibility.
Args:
reply_comment: The string comment to send to everyone on the email. | Below is the the instruction that describes the task:
### Input:
Replies to everyone on the email, including those on the CC line.
With great power, comes great responsibility.
Args:
reply_comment: The string comment to send to everyone on the email.
### Response:
def reply_all(self, reply_comment):
"""Replies to everyone on the email, including those on the CC line.
With great power, comes great responsibility.
Args:
reply_comment: The string comment to send to everyone on the email.
"""
payload = '{ "Comment": "' + reply_comment + '"}'
endpoint = 'https://outlook.office.com/api/v2.0/me/messages/{}/replyall'.format(self.message_id)
self._make_api_call('post', endpoint, data=payload) |
def full2ph(trans, n_pstates):
"""
Convert a full transmat to the respective p-state and h-state transmats
"""
n_hstates = len(trans) / n_pstates
htrans = np.zeros((n_pstates, n_pstates, n_hstates, n_hstates))
for pidx1, pidx2 in product(range(n_pstates), range(n_pstates)):
idx1 = pidx1 * n_hstates
idx2 = pidx2 * n_hstates
htrans[pidx1, pidx2] = trans[idx1:idx1 + n_hstates, idx2:idx2 + n_hstates]
ptrans = normalize(htrans.sum(axis=-1).sum(axis=-1), axis=1)
htrans = normalize(htrans, axis=3)
return ptrans, htrans | Convert a full transmat to the respective p-state and h-state transmats | Below is the the instruction that describes the task:
### Input:
Convert a full transmat to the respective p-state and h-state transmats
### Response:
def full2ph(trans, n_pstates):
"""
Convert a full transmat to the respective p-state and h-state transmats
"""
n_hstates = len(trans) / n_pstates
htrans = np.zeros((n_pstates, n_pstates, n_hstates, n_hstates))
for pidx1, pidx2 in product(range(n_pstates), range(n_pstates)):
idx1 = pidx1 * n_hstates
idx2 = pidx2 * n_hstates
htrans[pidx1, pidx2] = trans[idx1:idx1 + n_hstates, idx2:idx2 + n_hstates]
ptrans = normalize(htrans.sum(axis=-1).sum(axis=-1), axis=1)
htrans = normalize(htrans, axis=3)
return ptrans, htrans |
def p_empty_statement(self, p):
"""empty_statement : SEMI"""
p[0] = self.asttypes.EmptyStatement(p[1])
p[0].setpos(p) | empty_statement : SEMI | Below is the the instruction that describes the task:
### Input:
empty_statement : SEMI
### Response:
def p_empty_statement(self, p):
"""empty_statement : SEMI"""
p[0] = self.asttypes.EmptyStatement(p[1])
p[0].setpos(p) |
def get_resource(self, resource_type, resource_id, depth=1):
"""
Retrieves a single resource of a particular type.
:param resource_type: The resource type: datacenter, image,
snapshot or ipblock.
:type resource_type: ``str``
:param resource_id: The unique ID of the resource.
:type resource_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
"""
response = self._perform_request(
'/um/resources/%s/%s?depth=%s' % (
resource_type, resource_id, str(depth)))
return response | Retrieves a single resource of a particular type.
:param resource_type: The resource type: datacenter, image,
snapshot or ipblock.
:type resource_type: ``str``
:param resource_id: The unique ID of the resource.
:type resource_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int`` | Below is the the instruction that describes the task:
### Input:
Retrieves a single resource of a particular type.
:param resource_type: The resource type: datacenter, image,
snapshot or ipblock.
:type resource_type: ``str``
:param resource_id: The unique ID of the resource.
:type resource_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
### Response:
def get_resource(self, resource_type, resource_id, depth=1):
"""
Retrieves a single resource of a particular type.
:param resource_type: The resource type: datacenter, image,
snapshot or ipblock.
:type resource_type: ``str``
:param resource_id: The unique ID of the resource.
:type resource_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
"""
response = self._perform_request(
'/um/resources/%s/%s?depth=%s' % (
resource_type, resource_id, str(depth)))
return response |
def dump_to_store(dataset, store, writer=None, encoder=None,
encoding=None, unlimited_dims=None):
"""Store dataset contents to a backends.*DataStore object."""
if writer is None:
writer = ArrayWriter()
if encoding is None:
encoding = {}
variables, attrs = conventions.encode_dataset_coordinates(dataset)
check_encoding = set()
for k, enc in encoding.items():
# no need to shallow copy the variable again; that already happened
# in encode_dataset_coordinates
variables[k].encoding = enc
check_encoding.add(k)
if encoder:
variables, attrs = encoder(variables, attrs)
store.store(variables, attrs, check_encoding, writer,
unlimited_dims=unlimited_dims) | Store dataset contents to a backends.*DataStore object. | Below is the the instruction that describes the task:
### Input:
Store dataset contents to a backends.*DataStore object.
### Response:
def dump_to_store(dataset, store, writer=None, encoder=None,
encoding=None, unlimited_dims=None):
"""Store dataset contents to a backends.*DataStore object."""
if writer is None:
writer = ArrayWriter()
if encoding is None:
encoding = {}
variables, attrs = conventions.encode_dataset_coordinates(dataset)
check_encoding = set()
for k, enc in encoding.items():
# no need to shallow copy the variable again; that already happened
# in encode_dataset_coordinates
variables[k].encoding = enc
check_encoding.add(k)
if encoder:
variables, attrs = encoder(variables, attrs)
store.store(variables, attrs, check_encoding, writer,
unlimited_dims=unlimited_dims) |
def requests_retry_session(
retries=3,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
session=None):
"""Create a requests session that handles errors by retrying.
Parameters
----------
retries : `int`, optional
Number of retries to attempt.
backoff_factor : `float`, optional
Backoff factor.
status_forcelist : sequence of `str`, optional
Status codes that must be retried.
session : `requests.Session`
An existing requests session to configure.
Returns
-------
session : `requests.Session`
Requests session that can take ``get`` and ``post`` methods, for
example.
Notes
-----
This function is based on
https://www.peterbe.com/plog/best-practice-with-retries-with-requests
by Peter Bengtsson.
"""
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session | Create a requests session that handles errors by retrying.
Parameters
----------
retries : `int`, optional
Number of retries to attempt.
backoff_factor : `float`, optional
Backoff factor.
status_forcelist : sequence of `str`, optional
Status codes that must be retried.
session : `requests.Session`
An existing requests session to configure.
Returns
-------
session : `requests.Session`
Requests session that can take ``get`` and ``post`` methods, for
example.
Notes
-----
This function is based on
https://www.peterbe.com/plog/best-practice-with-retries-with-requests
by Peter Bengtsson. | Below is the the instruction that describes the task:
### Input:
Create a requests session that handles errors by retrying.
Parameters
----------
retries : `int`, optional
Number of retries to attempt.
backoff_factor : `float`, optional
Backoff factor.
status_forcelist : sequence of `str`, optional
Status codes that must be retried.
session : `requests.Session`
An existing requests session to configure.
Returns
-------
session : `requests.Session`
Requests session that can take ``get`` and ``post`` methods, for
example.
Notes
-----
This function is based on
https://www.peterbe.com/plog/best-practice-with-retries-with-requests
by Peter Bengtsson.
### Response:
def requests_retry_session(
retries=3,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
session=None):
"""Create a requests session that handles errors by retrying.
Parameters
----------
retries : `int`, optional
Number of retries to attempt.
backoff_factor : `float`, optional
Backoff factor.
status_forcelist : sequence of `str`, optional
Status codes that must be retried.
session : `requests.Session`
An existing requests session to configure.
Returns
-------
session : `requests.Session`
Requests session that can take ``get`` and ``post`` methods, for
example.
Notes
-----
This function is based on
https://www.peterbe.com/plog/best-practice-with-retries-with-requests
by Peter Bengtsson.
"""
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session |
def indexOf(self, pattern, start_idx, bitlength):
"""
Return the start index of the pattern inside the input string in a
Bitvector representation, otherwise it returns -1 (always using a BitVector)
:param bitlength: size of the biitvector holding the result
"""
return StrIndexOf(self, pattern, start_idx, bitlength) | Return the start index of the pattern inside the input string in a
Bitvector representation, otherwise it returns -1 (always using a BitVector)
:param bitlength: size of the biitvector holding the result | Below is the the instruction that describes the task:
### Input:
Return the start index of the pattern inside the input string in a
Bitvector representation, otherwise it returns -1 (always using a BitVector)
:param bitlength: size of the biitvector holding the result
### Response:
def indexOf(self, pattern, start_idx, bitlength):
"""
Return the start index of the pattern inside the input string in a
Bitvector representation, otherwise it returns -1 (always using a BitVector)
:param bitlength: size of the biitvector holding the result
"""
return StrIndexOf(self, pattern, start_idx, bitlength) |
def create_argparser(self):
""" Factory for arg parser. Can be overridden as long as it returns
an ArgParser compatible instance. """
if self.desc:
if self.title:
fulldesc = '%s\n\n%s' % (self.title, self.desc)
else:
fulldesc = self.desc
else:
fulldesc = self.title
return self.ArgumentParser(command=self, prog=self.name,
description=fulldesc) | Factory for arg parser. Can be overridden as long as it returns
an ArgParser compatible instance. | Below is the the instruction that describes the task:
### Input:
Factory for arg parser. Can be overridden as long as it returns
an ArgParser compatible instance.
### Response:
def create_argparser(self):
""" Factory for arg parser. Can be overridden as long as it returns
an ArgParser compatible instance. """
if self.desc:
if self.title:
fulldesc = '%s\n\n%s' % (self.title, self.desc)
else:
fulldesc = self.desc
else:
fulldesc = self.title
return self.ArgumentParser(command=self, prog=self.name,
description=fulldesc) |
def wait(self):
"""wait until all jobs finish and return a list of pids
"""
finished_pids = [ ]
while self.running_procs:
finished_pids.extend(self.poll())
return finished_pids | wait until all jobs finish and return a list of pids | Below is the the instruction that describes the task:
### Input:
wait until all jobs finish and return a list of pids
### Response:
def wait(self):
"""wait until all jobs finish and return a list of pids
"""
finished_pids = [ ]
while self.running_procs:
finished_pids.extend(self.poll())
return finished_pids |
def allow_choice_custom_value(self):
"""
Returns boolean indicating whether choice parameter supports custom value.
If choice parameter supports custom value, user can provide parameter value
other than those provided in choice list.
"""
if 'choiceInfo' not in self.dto[self.name]:
raise GPException('not a choice parameter')
return self._is_string_true(self.dto[self.name]['choiceInfo']['choiceAllowCustom']) | Returns boolean indicating whether choice parameter supports custom value.
If choice parameter supports custom value, user can provide parameter value
other than those provided in choice list. | Below is the the instruction that describes the task:
### Input:
Returns boolean indicating whether choice parameter supports custom value.
If choice parameter supports custom value, user can provide parameter value
other than those provided in choice list.
### Response:
def allow_choice_custom_value(self):
"""
Returns boolean indicating whether choice parameter supports custom value.
If choice parameter supports custom value, user can provide parameter value
other than those provided in choice list.
"""
if 'choiceInfo' not in self.dto[self.name]:
raise GPException('not a choice parameter')
return self._is_string_true(self.dto[self.name]['choiceInfo']['choiceAllowCustom']) |
def tree_multiresolution(G, Nlevel, reduction_method='resistance_distance',
compute_full_eigen=False, root=None):
r"""Compute a multiresolution of trees
Parameters
----------
G : Graph
Graph structure of a tree.
Nlevel : Number of times to downsample and coarsen the tree
root : int
The index of the root of the tree. (default = 1)
reduction_method : str
The graph reduction method (default = 'resistance_distance')
compute_full_eigen : bool
To also compute the graph Laplacian eigenvalues for every tree in the sequence
Returns
-------
Gs : ndarray
Ndarray, with each element containing a graph structure represent a reduced tree.
subsampled_vertex_indices : ndarray
Indices of the vertices of the previous tree that are kept for the subsequent tree.
"""
if not root:
if hasattr(G, 'root'):
root = G.root
else:
root = 1
Gs = [G]
if compute_full_eigen:
Gs[0].compute_fourier_basis()
subsampled_vertex_indices = []
depths, parents = _tree_depths(G.A, root)
old_W = G.W
for lev in range(Nlevel):
# Identify the vertices in the even depths of the current tree
down_odd = round(depths) % 2
down_even = np.ones((Gs[lev].N)) - down_odd
keep_inds = np.where(down_even == 1)[0]
subsampled_vertex_indices.append(keep_inds)
# There will be one undirected edge in the new graph connecting each
# non-root subsampled vertex to its new parent. Here, we find the new
# indices of the new parents
non_root_keep_inds, new_non_root_inds = np.setdiff1d(keep_inds, root)
old_parents_of_non_root_keep_inds = parents[non_root_keep_inds]
old_grandparents_of_non_root_keep_inds = parents[old_parents_of_non_root_keep_inds]
# TODO new_non_root_parents = dsearchn(keep_inds, old_grandparents_of_non_root_keep_inds)
old_W_i_inds, old_W_j_inds, old_W_weights = sparse.find(old_W)
i_inds = np.concatenate((new_non_root_inds, new_non_root_parents))
j_inds = np.concatenate((new_non_root_parents, new_non_root_inds))
new_N = np.sum(down_even)
if reduction_method == "unweighted":
new_weights = np.ones(np.shape(i_inds))
elif reduction_method == "sum":
# TODO old_weights_to_parents_inds = dsearchn([old_W_i_inds,old_W_j_inds], [non_root_keep_inds, old_parents_of_non_root_keep_inds]);
old_weights_to_parents = old_W_weights[old_weights_to_parents_inds]
# old_W(non_root_keep_inds,old_parents_of_non_root_keep_inds);
# TODO old_weights_parents_to_grandparents_inds = dsearchn([old_W_i_inds, old_W_j_inds], [old_parents_of_non_root_keep_inds, old_grandparents_of_non_root_keep_inds])
old_weights_parents_to_grandparents = old_W_weights[old_weights_parents_to_grandparents_inds]
# old_W(old_parents_of_non_root_keep_inds,old_grandparents_of_non_root_keep_inds);
new_weights = old_weights_to_parents + old_weights_parents_to_grandparents
new_weights = np.concatenate((new_weights. new_weights))
elif reduction_method == "resistance_distance":
# TODO old_weights_to_parents_inds = dsearchn([old_W_i_inds, old_W_j_inds], [non_root_keep_inds, old_parents_of_non_root_keep_inds])
old_weights_to_parents = old_W_weight[sold_weights_to_parents_inds]
# old_W(non_root_keep_inds,old_parents_of_non_root_keep_inds);
# TODO old_weights_parents_to_grandparents_inds = dsearchn([old_W_i_inds, old_W_j_inds], [old_parents_of_non_root_keep_inds, old_grandparents_of_non_root_keep_inds])
old_weights_parents_to_grandparents = old_W_weights[old_weights_parents_to_grandparents_inds]
# old_W(old_parents_of_non_root_keep_inds,old_grandparents_of_non_root_keep_inds);
new_weights = 1./(1./old_weights_to_parents + 1./old_weights_parents_to_grandparents)
new_weights = np.concatenate(([new_weights, new_weights]))
else:
raise ValueError('Unknown graph reduction method.')
new_W = sparse.csc_matrix((new_weights, (i_inds, j_inds)),
shape=(new_N, new_N))
# Update parents
new_root = np.where(keep_inds == root)[0]
parents = np.zeros(np.shape(keep_inds)[0], np.shape(keep_inds)[0])
parents[:new_root - 1, new_root:] = new_non_root_parents
# Update depths
depths = depths[keep_inds]
depths = depths/2.
# Store new tree
Gtemp = graphs.Graph(new_W, coords=Gs[lev].coords[keep_inds], limits=G.limits, root=new_root)
#Gs[lev].copy_graph_attributes(Gtemp, False)
if compute_full_eigen:
Gs[lev + 1].compute_fourier_basis()
# Replace current adjacency matrix and root
Gs.append(Gtemp)
old_W = new_W
root = new_root
return Gs, subsampled_vertex_indices | r"""Compute a multiresolution of trees
Parameters
----------
G : Graph
Graph structure of a tree.
Nlevel : Number of times to downsample and coarsen the tree
root : int
The index of the root of the tree. (default = 1)
reduction_method : str
The graph reduction method (default = 'resistance_distance')
compute_full_eigen : bool
To also compute the graph Laplacian eigenvalues for every tree in the sequence
Returns
-------
Gs : ndarray
Ndarray, with each element containing a graph structure represent a reduced tree.
subsampled_vertex_indices : ndarray
Indices of the vertices of the previous tree that are kept for the subsequent tree. | Below is the the instruction that describes the task:
### Input:
r"""Compute a multiresolution of trees
Parameters
----------
G : Graph
Graph structure of a tree.
Nlevel : Number of times to downsample and coarsen the tree
root : int
The index of the root of the tree. (default = 1)
reduction_method : str
The graph reduction method (default = 'resistance_distance')
compute_full_eigen : bool
To also compute the graph Laplacian eigenvalues for every tree in the sequence
Returns
-------
Gs : ndarray
Ndarray, with each element containing a graph structure represent a reduced tree.
subsampled_vertex_indices : ndarray
Indices of the vertices of the previous tree that are kept for the subsequent tree.
### Response:
def tree_multiresolution(G, Nlevel, reduction_method='resistance_distance',
compute_full_eigen=False, root=None):
r"""Compute a multiresolution of trees
Parameters
----------
G : Graph
Graph structure of a tree.
Nlevel : Number of times to downsample and coarsen the tree
root : int
The index of the root of the tree. (default = 1)
reduction_method : str
The graph reduction method (default = 'resistance_distance')
compute_full_eigen : bool
To also compute the graph Laplacian eigenvalues for every tree in the sequence
Returns
-------
Gs : ndarray
Ndarray, with each element containing a graph structure represent a reduced tree.
subsampled_vertex_indices : ndarray
Indices of the vertices of the previous tree that are kept for the subsequent tree.
"""
if not root:
if hasattr(G, 'root'):
root = G.root
else:
root = 1
Gs = [G]
if compute_full_eigen:
Gs[0].compute_fourier_basis()
subsampled_vertex_indices = []
depths, parents = _tree_depths(G.A, root)
old_W = G.W
for lev in range(Nlevel):
# Identify the vertices in the even depths of the current tree
down_odd = round(depths) % 2
down_even = np.ones((Gs[lev].N)) - down_odd
keep_inds = np.where(down_even == 1)[0]
subsampled_vertex_indices.append(keep_inds)
# There will be one undirected edge in the new graph connecting each
# non-root subsampled vertex to its new parent. Here, we find the new
# indices of the new parents
non_root_keep_inds, new_non_root_inds = np.setdiff1d(keep_inds, root)
old_parents_of_non_root_keep_inds = parents[non_root_keep_inds]
old_grandparents_of_non_root_keep_inds = parents[old_parents_of_non_root_keep_inds]
# TODO new_non_root_parents = dsearchn(keep_inds, old_grandparents_of_non_root_keep_inds)
old_W_i_inds, old_W_j_inds, old_W_weights = sparse.find(old_W)
i_inds = np.concatenate((new_non_root_inds, new_non_root_parents))
j_inds = np.concatenate((new_non_root_parents, new_non_root_inds))
new_N = np.sum(down_even)
if reduction_method == "unweighted":
new_weights = np.ones(np.shape(i_inds))
elif reduction_method == "sum":
# TODO old_weights_to_parents_inds = dsearchn([old_W_i_inds,old_W_j_inds], [non_root_keep_inds, old_parents_of_non_root_keep_inds]);
old_weights_to_parents = old_W_weights[old_weights_to_parents_inds]
# old_W(non_root_keep_inds,old_parents_of_non_root_keep_inds);
# TODO old_weights_parents_to_grandparents_inds = dsearchn([old_W_i_inds, old_W_j_inds], [old_parents_of_non_root_keep_inds, old_grandparents_of_non_root_keep_inds])
old_weights_parents_to_grandparents = old_W_weights[old_weights_parents_to_grandparents_inds]
# old_W(old_parents_of_non_root_keep_inds,old_grandparents_of_non_root_keep_inds);
new_weights = old_weights_to_parents + old_weights_parents_to_grandparents
new_weights = np.concatenate((new_weights. new_weights))
elif reduction_method == "resistance_distance":
# TODO old_weights_to_parents_inds = dsearchn([old_W_i_inds, old_W_j_inds], [non_root_keep_inds, old_parents_of_non_root_keep_inds])
old_weights_to_parents = old_W_weight[sold_weights_to_parents_inds]
# old_W(non_root_keep_inds,old_parents_of_non_root_keep_inds);
# TODO old_weights_parents_to_grandparents_inds = dsearchn([old_W_i_inds, old_W_j_inds], [old_parents_of_non_root_keep_inds, old_grandparents_of_non_root_keep_inds])
old_weights_parents_to_grandparents = old_W_weights[old_weights_parents_to_grandparents_inds]
# old_W(old_parents_of_non_root_keep_inds,old_grandparents_of_non_root_keep_inds);
new_weights = 1./(1./old_weights_to_parents + 1./old_weights_parents_to_grandparents)
new_weights = np.concatenate(([new_weights, new_weights]))
else:
raise ValueError('Unknown graph reduction method.')
new_W = sparse.csc_matrix((new_weights, (i_inds, j_inds)),
shape=(new_N, new_N))
# Update parents
new_root = np.where(keep_inds == root)[0]
parents = np.zeros(np.shape(keep_inds)[0], np.shape(keep_inds)[0])
parents[:new_root - 1, new_root:] = new_non_root_parents
# Update depths
depths = depths[keep_inds]
depths = depths/2.
# Store new tree
Gtemp = graphs.Graph(new_W, coords=Gs[lev].coords[keep_inds], limits=G.limits, root=new_root)
#Gs[lev].copy_graph_attributes(Gtemp, False)
if compute_full_eigen:
Gs[lev + 1].compute_fourier_basis()
# Replace current adjacency matrix and root
Gs.append(Gtemp)
old_W = new_W
root = new_root
return Gs, subsampled_vertex_indices |
def before_start(self, checkers):
"""
Loads entry points named kibitzr.before_start
and call each one with two arguments:
1. Application instance;
2. List of configured checkers
"""
for point in entrypoints.get_group_all("kibitzr.before_start"):
entry = point.load()
entry(self, checkers) | Loads entry points named kibitzr.before_start
and call each one with two arguments:
1. Application instance;
    2. List of configured checkers | Below is the instruction that describes the task:
### Input:
Loads entry points named kibitzr.before_start
and call each one with two arguments:
1. Application instance;
2. List of configured checkers
### Response:
def before_start(self, checkers):
"""
Loads entry points named kibitzr.before_start
and call each one with two arguments:
1. Application instance;
2. List of configured checkers
"""
for point in entrypoints.get_group_all("kibitzr.before_start"):
entry = point.load()
entry(self, checkers) |
def distance_to(self, *args):
"""Return the distance to a rectangle or another point."""
if not len(args) > 0:
raise ValueError("at least one parameter must be given")
x = args[0]
if len(args) > 1:
unit = args[1]
else:
unit = "px"
u = {"px": (1.,1.), "in": (1.,72.), "cm": (2.54, 72.),
"mm": (25.4, 72.)}
f = u[unit][0] / u[unit][1]
if type(x) is Point:
return abs(self - x) * f
# from here on, x is a rectangle
# as a safeguard, make a finite copy of it
r = Rect(x.top_left, x.top_left)
r = r | x.bottom_right
if self in r:
return 0.0
if self.x > r.x1:
if self.y >= r.y1:
return self.distance_to(r.bottom_right, unit)
elif self.y <= r.y0:
return self.distance_to(r.top_right, unit)
else:
return (self.x - r.x1) * f
elif r.x0 <= self.x <= r.x1:
if self.y >= r.y1:
return (self.y - r.y1) * f
else:
return (r.y0 - self.y) * f
else:
if self.y >= r.y1:
return self.distance_to(r.bottom_left, unit)
elif self.y <= r.y0:
return self.distance_to(r.top_left, unit)
else:
                return (r.x0 - self.x) * f | Return the distance to a rectangle or another point. | Below is the instruction that describes the task:
### Input:
Return the distance to a rectangle or another point.
### Response:
def distance_to(self, *args):
"""Return the distance to a rectangle or another point."""
if not len(args) > 0:
raise ValueError("at least one parameter must be given")
x = args[0]
if len(args) > 1:
unit = args[1]
else:
unit = "px"
u = {"px": (1.,1.), "in": (1.,72.), "cm": (2.54, 72.),
"mm": (25.4, 72.)}
f = u[unit][0] / u[unit][1]
if type(x) is Point:
return abs(self - x) * f
# from here on, x is a rectangle
# as a safeguard, make a finite copy of it
r = Rect(x.top_left, x.top_left)
r = r | x.bottom_right
if self in r:
return 0.0
if self.x > r.x1:
if self.y >= r.y1:
return self.distance_to(r.bottom_right, unit)
elif self.y <= r.y0:
return self.distance_to(r.top_right, unit)
else:
return (self.x - r.x1) * f
elif r.x0 <= self.x <= r.x1:
if self.y >= r.y1:
return (self.y - r.y1) * f
else:
return (r.y0 - self.y) * f
else:
if self.y >= r.y1:
return self.distance_to(r.bottom_left, unit)
elif self.y <= r.y0:
return self.distance_to(r.top_left, unit)
else:
return (r.x0 - self.x) * f |
def _write_particle_information(gsd_file, structure, xyz, ref_distance,
ref_mass, ref_energy, rigid_bodies):
"""Write out the particle information.
"""
gsd_file.particles.N = len(structure.atoms)
gsd_file.particles.position = xyz / ref_distance
types = [atom.name if atom.type == '' else atom.type
for atom in structure.atoms]
unique_types = list(set(types))
unique_types.sort(key=natural_sort)
gsd_file.particles.types = unique_types
typeids = np.array([unique_types.index(t) for t in types])
gsd_file.particles.typeid = typeids
masses = np.array([atom.mass for atom in structure.atoms])
masses[masses==0] = 1.0
gsd_file.particles.mass = masses / ref_mass
charges = np.array([atom.charge for atom in structure.atoms])
e0 = 2.39725e-4
'''
Permittivity of free space = 2.39725e-4 e^2/((kcal/mol)(angstrom)),
where e is the elementary charge
'''
charge_factor = (4.0*np.pi*e0*ref_distance*ref_energy)**0.5
gsd_file.particles.charge = charges / charge_factor
if rigid_bodies:
rigid_bodies = [-1 if body is None else body for body in rigid_bodies]
        gsd_file.particles.body = rigid_bodies | Write out the particle information. | Below is the instruction that describes the task:
### Input:
Write out the particle information.
### Response:
def _write_particle_information(gsd_file, structure, xyz, ref_distance,
ref_mass, ref_energy, rigid_bodies):
"""Write out the particle information.
"""
gsd_file.particles.N = len(structure.atoms)
gsd_file.particles.position = xyz / ref_distance
types = [atom.name if atom.type == '' else atom.type
for atom in structure.atoms]
unique_types = list(set(types))
unique_types.sort(key=natural_sort)
gsd_file.particles.types = unique_types
typeids = np.array([unique_types.index(t) for t in types])
gsd_file.particles.typeid = typeids
masses = np.array([atom.mass for atom in structure.atoms])
masses[masses==0] = 1.0
gsd_file.particles.mass = masses / ref_mass
charges = np.array([atom.charge for atom in structure.atoms])
e0 = 2.39725e-4
'''
Permittivity of free space = 2.39725e-4 e^2/((kcal/mol)(angstrom)),
where e is the elementary charge
'''
charge_factor = (4.0*np.pi*e0*ref_distance*ref_energy)**0.5
gsd_file.particles.charge = charges / charge_factor
if rigid_bodies:
rigid_bodies = [-1 if body is None else body for body in rigid_bodies]
gsd_file.particles.body = rigid_bodies |
def random_walk_uniform_fn(scale=1., name=None):
"""Returns a callable that adds a random uniform perturbation to the input.
For more details on `random_walk_uniform_fn`, see
`random_walk_normal_fn`. `scale` might
be a `Tensor` or a list of `Tensor`s that should broadcast with state parts
of the `current_state`. The generated uniform perturbation is sampled as a
uniform point on the rectangle `[-scale, scale]`.
Args:
scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`
controlling the upper and lower bound of the uniform proposal
distribution.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'random_walk_uniform_fn'.
Returns:
random_walk_uniform_fn: A callable accepting a Python `list` of `Tensor`s
representing the state parts of the `current_state` and an `int`
representing the random seed used to generate the proposal. The callable
returns the same-type `list` of `Tensor`s as the input and represents the
proposal for the RWM algorithm.
"""
def _fn(state_parts, seed):
"""Adds a uniform perturbation to the input state.
Args:
state_parts: A list of `Tensor`s of any shape and real dtype representing
the state parts of the `current_state` of the Markov chain.
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied.
Default value: `None`.
Returns:
perturbed_state_parts: A Python `list` of The `Tensor`s. Has the same
shape and type as the `state_parts`.
Raises:
ValueError: if `scale` does not broadcast with `state_parts`.
"""
with tf.compat.v1.name_scope(
name, 'random_walk_uniform_fn', values=[state_parts, scale, seed]):
scales = scale if mcmc_util.is_list_like(scale) else [scale]
if len(scales) == 1:
scales *= len(state_parts)
if len(state_parts) != len(scales):
raise ValueError('`scale` must broadcast with `state_parts`.')
seed_stream = distributions.SeedStream(seed, salt='RandomWalkUniformFn')
next_state_parts = [
tf.random.uniform(
minval=state_part - scale_part,
maxval=state_part + scale_part,
shape=tf.shape(input=state_part),
dtype=state_part.dtype.base_dtype,
seed=seed_stream())
for scale_part, state_part in zip(scales, state_parts)
]
return next_state_parts
return _fn | Returns a callable that adds a random uniform perturbation to the input.
For more details on `random_walk_uniform_fn`, see
`random_walk_normal_fn`. `scale` might
be a `Tensor` or a list of `Tensor`s that should broadcast with state parts
of the `current_state`. The generated uniform perturbation is sampled as a
uniform point on the rectangle `[-scale, scale]`.
Args:
scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`
controlling the upper and lower bound of the uniform proposal
distribution.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'random_walk_uniform_fn'.
Returns:
random_walk_uniform_fn: A callable accepting a Python `list` of `Tensor`s
representing the state parts of the `current_state` and an `int`
representing the random seed used to generate the proposal. The callable
returns the same-type `list` of `Tensor`s as the input and represents the
      proposal for the RWM algorithm. | Below is the instruction that describes the task:
### Input:
Returns a callable that adds a random uniform perturbation to the input.
For more details on `random_walk_uniform_fn`, see
`random_walk_normal_fn`. `scale` might
be a `Tensor` or a list of `Tensor`s that should broadcast with state parts
of the `current_state`. The generated uniform perturbation is sampled as a
uniform point on the rectangle `[-scale, scale]`.
Args:
scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`
controlling the upper and lower bound of the uniform proposal
distribution.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'random_walk_uniform_fn'.
Returns:
random_walk_uniform_fn: A callable accepting a Python `list` of `Tensor`s
representing the state parts of the `current_state` and an `int`
representing the random seed used to generate the proposal. The callable
returns the same-type `list` of `Tensor`s as the input and represents the
proposal for the RWM algorithm.
### Response:
def random_walk_uniform_fn(scale=1., name=None):
"""Returns a callable that adds a random uniform perturbation to the input.
For more details on `random_walk_uniform_fn`, see
`random_walk_normal_fn`. `scale` might
be a `Tensor` or a list of `Tensor`s that should broadcast with state parts
of the `current_state`. The generated uniform perturbation is sampled as a
uniform point on the rectangle `[-scale, scale]`.
Args:
scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`
controlling the upper and lower bound of the uniform proposal
distribution.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'random_walk_uniform_fn'.
Returns:
random_walk_uniform_fn: A callable accepting a Python `list` of `Tensor`s
representing the state parts of the `current_state` and an `int`
representing the random seed used to generate the proposal. The callable
returns the same-type `list` of `Tensor`s as the input and represents the
proposal for the RWM algorithm.
"""
def _fn(state_parts, seed):
"""Adds a uniform perturbation to the input state.
Args:
state_parts: A list of `Tensor`s of any shape and real dtype representing
the state parts of the `current_state` of the Markov chain.
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied.
Default value: `None`.
Returns:
perturbed_state_parts: A Python `list` of The `Tensor`s. Has the same
shape and type as the `state_parts`.
Raises:
ValueError: if `scale` does not broadcast with `state_parts`.
"""
with tf.compat.v1.name_scope(
name, 'random_walk_uniform_fn', values=[state_parts, scale, seed]):
scales = scale if mcmc_util.is_list_like(scale) else [scale]
if len(scales) == 1:
scales *= len(state_parts)
if len(state_parts) != len(scales):
raise ValueError('`scale` must broadcast with `state_parts`.')
seed_stream = distributions.SeedStream(seed, salt='RandomWalkUniformFn')
next_state_parts = [
tf.random.uniform(
minval=state_part - scale_part,
maxval=state_part + scale_part,
shape=tf.shape(input=state_part),
dtype=state_part.dtype.base_dtype,
seed=seed_stream())
for scale_part, state_part in zip(scales, state_parts)
]
return next_state_parts
return _fn |
def render(request, templates, dictionary=None, context_instance=None,
**kwargs):
"""
Mimics ``django.shortcuts.render`` but uses a TemplateResponse for
``yacms.core.middleware.TemplateForDeviceMiddleware``
"""
warnings.warn(
"yacms.utils.views.render is deprecated and will be removed "
"in a future version. Please update your project to use Django's "
"TemplateResponse, which now provides equivalent functionality.",
DeprecationWarning
)
dictionary = dictionary or {}
if context_instance:
context_instance.update(dictionary)
else:
context_instance = RequestContext(request, dictionary)
return TemplateResponse(request, templates, context_instance, **kwargs) | Mimics ``django.shortcuts.render`` but uses a TemplateResponse for
    ``yacms.core.middleware.TemplateForDeviceMiddleware`` | Below is the instruction that describes the task:
### Input:
Mimics ``django.shortcuts.render`` but uses a TemplateResponse for
``yacms.core.middleware.TemplateForDeviceMiddleware``
### Response:
def render(request, templates, dictionary=None, context_instance=None,
**kwargs):
"""
Mimics ``django.shortcuts.render`` but uses a TemplateResponse for
``yacms.core.middleware.TemplateForDeviceMiddleware``
"""
warnings.warn(
"yacms.utils.views.render is deprecated and will be removed "
"in a future version. Please update your project to use Django's "
"TemplateResponse, which now provides equivalent functionality.",
DeprecationWarning
)
dictionary = dictionary or {}
if context_instance:
context_instance.update(dictionary)
else:
context_instance = RequestContext(request, dictionary)
return TemplateResponse(request, templates, context_instance, **kwargs) |
def list_not_state(subset=None, show_ip=False, show_ipv4=None):
'''
.. versionadded:: 2015.8.0
.. versionchanged:: 2019.2.0
The 'show_ipv4' argument has been renamed to 'show_ip' as it now
includes IPv6 addresses for IPv6-connected minions.
Print a list of all minions that are NOT up according to Salt's presence
detection (no commands will be sent to minions)
subset : None
Pass in a CIDR range to filter minions by IP address.
show_ip : False
Also show the IP address each minion is connecting from.
CLI Example:
.. code-block:: bash
salt-run manage.list_not_state
'''
show_ip = _show_ip_migration(show_ip, show_ipv4)
connected = list_state(subset=None, show_ip=show_ip)
key = salt.key.get_key(__opts__)
keys = key.list_keys()
not_connected = []
for minion in keys[key.ACC]:
if minion not in connected and (subset is None or minion in subset):
not_connected.append(minion)
return not_connected | .. versionadded:: 2015.8.0
.. versionchanged:: 2019.2.0
The 'show_ipv4' argument has been renamed to 'show_ip' as it now
includes IPv6 addresses for IPv6-connected minions.
Print a list of all minions that are NOT up according to Salt's presence
detection (no commands will be sent to minions)
subset : None
Pass in a CIDR range to filter minions by IP address.
show_ip : False
Also show the IP address each minion is connecting from.
CLI Example:
.. code-block:: bash
    salt-run manage.list_not_state | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2015.8.0
.. versionchanged:: 2019.2.0
The 'show_ipv4' argument has been renamed to 'show_ip' as it now
includes IPv6 addresses for IPv6-connected minions.
Print a list of all minions that are NOT up according to Salt's presence
detection (no commands will be sent to minions)
subset : None
Pass in a CIDR range to filter minions by IP address.
show_ip : False
Also show the IP address each minion is connecting from.
CLI Example:
.. code-block:: bash
salt-run manage.list_not_state
### Response:
def list_not_state(subset=None, show_ip=False, show_ipv4=None):
'''
.. versionadded:: 2015.8.0
.. versionchanged:: 2019.2.0
The 'show_ipv4' argument has been renamed to 'show_ip' as it now
includes IPv6 addresses for IPv6-connected minions.
Print a list of all minions that are NOT up according to Salt's presence
detection (no commands will be sent to minions)
subset : None
Pass in a CIDR range to filter minions by IP address.
show_ip : False
Also show the IP address each minion is connecting from.
CLI Example:
.. code-block:: bash
salt-run manage.list_not_state
'''
show_ip = _show_ip_migration(show_ip, show_ipv4)
connected = list_state(subset=None, show_ip=show_ip)
key = salt.key.get_key(__opts__)
keys = key.list_keys()
not_connected = []
for minion in keys[key.ACC]:
if minion not in connected and (subset is None or minion in subset):
not_connected.append(minion)
return not_connected |
def compute(self, x_arr, y_arr):
'''
Compute distance.
Args:
x_arr: `np.ndarray` of vectors.
y_arr: `np.ndarray` of vectors.
        Returns:
`np.ndarray` of distances.
'''
return np.linalg.norm(x_arr - y_arr, axis=-1) | Compute distance.
Args:
x_arr: `np.ndarray` of vectors.
y_arr: `np.ndarray` of vectors.
Returns:
    `np.ndarray` of distances. | Below is the instruction that describes the task:
### Input:
Compute distance.
Args:
x_arr: `np.ndarray` of vectors.
y_arr: `np.ndarray` of vectors.
Returns:
`np.ndarray` of distances.
### Response:
def compute(self, x_arr, y_arr):
'''
Compute distance.
Args:
x_arr: `np.ndarray` of vectors.
y_arr: `np.ndarray` of vectors.
    Returns:
`np.ndarray` of distances.
'''
return np.linalg.norm(x_arr - y_arr, axis=-1) |
def get_formatted_string(self, input_string):
"""Return formatted value for input_string.
get_formatted gets a context[key] value.
get_formatted_string is for any arbitrary string that is not in the
context.
Only valid if input_string is a type string.
Return a string interpolated from the context dictionary.
If input_string='Piping {key1} the {key2} wild'
And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'}
Then this will return string: "Piping down the valleys wild"
Args:
input_string: string to parse for substitutions.
Returns:
Formatted string.
Raises:
KeyNotInContextError: context[key] has {somekey} where somekey does
not exist in context dictionary.
TypeError: Attempt operation on a non-string type.
"""
if isinstance(input_string, str):
try:
return self.get_processed_string(input_string)
except KeyNotInContextError as err:
# Wrapping the KeyError into a less cryptic error for end-user
# friendliness
raise KeyNotInContextError(
f'Unable to format \'{input_string}\' because {err}'
) from err
elif isinstance(input_string, SpecialTagDirective):
return input_string.get_value(self)
else:
raise TypeError(f"can only format on strings. {input_string} is a "
f"{type(input_string)} instead.") | Return formatted value for input_string.
get_formatted gets a context[key] value.
get_formatted_string is for any arbitrary string that is not in the
context.
Only valid if input_string is a type string.
Return a string interpolated from the context dictionary.
If input_string='Piping {key1} the {key2} wild'
And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'}
Then this will return string: "Piping down the valleys wild"
Args:
input_string: string to parse for substitutions.
Returns:
Formatted string.
Raises:
KeyNotInContextError: context[key] has {somekey} where somekey does
not exist in context dictionary.
    TypeError: Attempt operation on a non-string type. | Below is the instruction that describes the task:
### Input:
Return formatted value for input_string.
get_formatted gets a context[key] value.
get_formatted_string is for any arbitrary string that is not in the
context.
Only valid if input_string is a type string.
Return a string interpolated from the context dictionary.
If input_string='Piping {key1} the {key2} wild'
And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'}
Then this will return string: "Piping down the valleys wild"
Args:
input_string: string to parse for substitutions.
Returns:
Formatted string.
Raises:
KeyNotInContextError: context[key] has {somekey} where somekey does
not exist in context dictionary.
TypeError: Attempt operation on a non-string type.
### Response:
def get_formatted_string(self, input_string):
"""Return formatted value for input_string.
get_formatted gets a context[key] value.
get_formatted_string is for any arbitrary string that is not in the
context.
Only valid if input_string is a type string.
Return a string interpolated from the context dictionary.
If input_string='Piping {key1} the {key2} wild'
And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'}
Then this will return string: "Piping down the valleys wild"
Args:
input_string: string to parse for substitutions.
Returns:
Formatted string.
Raises:
KeyNotInContextError: context[key] has {somekey} where somekey does
not exist in context dictionary.
TypeError: Attempt operation on a non-string type.
"""
if isinstance(input_string, str):
try:
return self.get_processed_string(input_string)
except KeyNotInContextError as err:
# Wrapping the KeyError into a less cryptic error for end-user
# friendliness
raise KeyNotInContextError(
f'Unable to format \'{input_string}\' because {err}'
) from err
elif isinstance(input_string, SpecialTagDirective):
return input_string.get_value(self)
else:
raise TypeError(f"can only format on strings. {input_string} is a "
f"{type(input_string)} instead.") |
def convert_random_normal(node, **kwargs):
"""Map MXNet's random_normal operator attributes to onnx's RandomNormal
operator and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
# Converting to float32
mean = float(attrs.get("loc", 0))
scale = float(attrs.get("scale", 1.0))
shape = convert_string_to_list(attrs.get('shape', '[]'))
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]
node = onnx.helper.make_node(
'RandomNormal',
input_nodes,
[name],
mean=mean,
scale=scale,
dtype=dtype,
shape=shape,
name=name
)
return [node] | Map MXNet's random_normal operator attributes to onnx's RandomNormal
operator and return the created node. | Below is the instruction that describes the task:
### Input:
Map MXNet's random_normal operator attributes to onnx's RandomNormal
operator and return the created node.
### Response:
def convert_random_normal(node, **kwargs):
"""Map MXNet's random_normal operator attributes to onnx's RandomNormal
operator and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
# Converting to float32
mean = float(attrs.get("loc", 0))
scale = float(attrs.get("scale", 1.0))
shape = convert_string_to_list(attrs.get('shape', '[]'))
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]
node = onnx.helper.make_node(
'RandomNormal',
input_nodes,
[name],
mean=mean,
scale=scale,
dtype=dtype,
shape=shape,
name=name
)
return [node] |
def create_from_stack(cls, shape, components, ylims, weights=None):
""" Combine the log-likelihoods from a number of components.
Parameters
----------
shape : tuple
The shape of the return array
components : [~fermipy.castro.CastroData_Base]
The components to be stacked
weights : array-like
Returns
-------
castro : `~fermipy.castro.CastroData`
"""
if len(components) == 0:
return None
norm_vals, nll_vals, nll_offsets = CastroData_Base.stack_nll(shape,
components,
ylims,
weights)
return cls(norm_vals, nll_vals,
components[0].refSpec,
components[0].norm_type) | Combine the log-likelihoods from a number of components.
Parameters
----------
shape : tuple
The shape of the return array
components : [~fermipy.castro.CastroData_Base]
The components to be stacked
weights : array-like
Returns
-------
    castro : `~fermipy.castro.CastroData` | Below is the instruction that describes the task:
### Input:
Combine the log-likelihoods from a number of components.
Parameters
----------
shape : tuple
The shape of the return array
components : [~fermipy.castro.CastroData_Base]
The components to be stacked
weights : array-like
Returns
-------
castro : `~fermipy.castro.CastroData`
### Response:
def create_from_stack(cls, shape, components, ylims, weights=None):
""" Combine the log-likelihoods from a number of components.
Parameters
----------
shape : tuple
The shape of the return array
components : [~fermipy.castro.CastroData_Base]
The components to be stacked
weights : array-like
Returns
-------
castro : `~fermipy.castro.CastroData`
"""
if len(components) == 0:
return None
norm_vals, nll_vals, nll_offsets = CastroData_Base.stack_nll(shape,
components,
ylims,
weights)
return cls(norm_vals, nll_vals,
components[0].refSpec,
components[0].norm_type) |
def __runTaskMainLoop(self, numIters, learningOffAt=None):
""" Main loop of the OPF Model Runner.
Parameters:
-----------------------------------------------------------------------
recordIterator: Iterator for counting number of records (see _runTask)
learningOffAt: If not None, learning is turned off when we reach this
iteration number
"""
## Reset sequence states in the model, so it starts looking for a new
## sequence
self._model.resetSequenceStates()
self._currentRecordIndex = -1
while True:
# If killed by a terminator, stop running
if self._isKilled:
break
# If job stops or hypersearch ends, stop running
if self._isCanceled:
break
# If the process is about to be killed, set as orphaned
if self._isInterrupted.isSet():
self.__setAsOrphaned()
break
# If model is mature, stop running ONLY IF we are not the best model
# for the job. Otherwise, keep running so we can keep returning
# predictions to the user
if self._isMature:
if not self._isBestModel:
self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED
break
else:
self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
# Turn off learning?
if learningOffAt is not None \
and self._currentRecordIndex == learningOffAt:
self._model.disableLearning()
# Read input record. Note that any failure here is a critical JOB failure
# and results in the job being immediately canceled and marked as
# failed. The runModelXXX code in hypesearch.utils, if it sees an
# exception of type utils.JobFailException, will cancel the job and
# copy the error message into the job record.
try:
inputRecord = self._inputSource.getNextRecordDict()
if self._currentRecordIndex < 0:
self._inputSource.setTimeout(10)
except Exception, e:
raise utils.JobFailException(ErrorCodes.streamReading, str(e.args),
traceback.format_exc())
if inputRecord is None:
# EOF
self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
break
if inputRecord:
# Process input record
self._currentRecordIndex += 1
result = self._model.run(inputRecord=inputRecord)
# Compute metrics.
result.metrics = self.__metricMgr.update(result)
# If there are None, use defaults. see MetricsManager.getMetrics()
# TODO remove this when JAVA API server is gone
if not result.metrics:
result.metrics = self.__metricMgr.getMetrics()
# Write the result to the output cache. Don't write encodings, if they
# were computed
if InferenceElement.encodings in result.inferences:
result.inferences.pop(InferenceElement.encodings)
result.sensorInput.dataEncodings = None
self._writePrediction(result)
# Run periodic activities
self._periodic.tick()
if numIters >= 0 and self._currentRecordIndex >= numIters-1:
break
else:
# Input source returned an empty record.
#
# NOTE: This is okay with Stream-based Source (when it times out
# waiting for next record), but not okay with FileSource, which should
# always return either with a valid record or None for EOF.
raise ValueError("Got an empty record from FileSource: %r" %
inputRecord) | Main loop of the OPF Model Runner.
Parameters:
-----------------------------------------------------------------------
recordIterator: Iterator for counting number of records (see _runTask)
learningOffAt: If not None, learning is turned off when we reach this
                    iteration number | Below is the instruction that describes the task:
### Input:
Main loop of the OPF Model Runner.
Parameters:
-----------------------------------------------------------------------
recordIterator: Iterator for counting number of records (see _runTask)
learningOffAt: If not None, learning is turned off when we reach this
iteration number
### Response:
def __runTaskMainLoop(self, numIters, learningOffAt=None):
""" Main loop of the OPF Model Runner.
Parameters:
-----------------------------------------------------------------------
recordIterator: Iterator for counting number of records (see _runTask)
learningOffAt: If not None, learning is turned off when we reach this
iteration number
"""
## Reset sequence states in the model, so it starts looking for a new
## sequence
self._model.resetSequenceStates()
self._currentRecordIndex = -1
while True:
# If killed by a terminator, stop running
if self._isKilled:
break
# If job stops or hypersearch ends, stop running
if self._isCanceled:
break
# If the process is about to be killed, set as orphaned
if self._isInterrupted.isSet():
self.__setAsOrphaned()
break
# If model is mature, stop running ONLY IF we are not the best model
# for the job. Otherwise, keep running so we can keep returning
# predictions to the user
if self._isMature:
if not self._isBestModel:
self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED
break
else:
self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
# Turn off learning?
if learningOffAt is not None \
and self._currentRecordIndex == learningOffAt:
self._model.disableLearning()
# Read input record. Note that any failure here is a critical JOB failure
# and results in the job being immediately canceled and marked as
# failed. The runModelXXX code in hypesearch.utils, if it sees an
# exception of type utils.JobFailException, will cancel the job and
# copy the error message into the job record.
try:
inputRecord = self._inputSource.getNextRecordDict()
if self._currentRecordIndex < 0:
self._inputSource.setTimeout(10)
except Exception, e:
raise utils.JobFailException(ErrorCodes.streamReading, str(e.args),
traceback.format_exc())
if inputRecord is None:
# EOF
self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
break
if inputRecord:
# Process input record
self._currentRecordIndex += 1
result = self._model.run(inputRecord=inputRecord)
# Compute metrics.
result.metrics = self.__metricMgr.update(result)
# If there are None, use defaults. see MetricsManager.getMetrics()
# TODO remove this when JAVA API server is gone
if not result.metrics:
result.metrics = self.__metricMgr.getMetrics()
# Write the result to the output cache. Don't write encodings, if they
# were computed
if InferenceElement.encodings in result.inferences:
result.inferences.pop(InferenceElement.encodings)
result.sensorInput.dataEncodings = None
self._writePrediction(result)
# Run periodic activities
self._periodic.tick()
if numIters >= 0 and self._currentRecordIndex >= numIters-1:
break
else:
# Input source returned an empty record.
#
# NOTE: This is okay with Stream-based Source (when it times out
# waiting for next record), but not okay with FileSource, which should
# always return either with a valid record or None for EOF.
raise ValueError("Got an empty record from FileSource: %r" %
inputRecord) |
def timestamp_with_timezone(dt=None):
"""
Return a timestamp with a timezone for the configured locale. If all else
fails, consider localtime to be UTC.
"""
dt = dt or datetime.now()
if timezone is None:
return dt.strftime('%Y-%m-%d %H:%M%z')
if not dt.tzinfo:
tz = timezone.get_current_timezone()
if not tz:
tz = timezone.utc
dt = dt.replace(tzinfo=timezone.get_current_timezone())
return dt.strftime("%Y-%m-%d %H:%M%z") | Return a timestamp with a timezone for the configured locale. If all else
fails, consider localtime to be UTC. | Below is the instruction that describes the task:
### Input:
Return a timestamp with a timezone for the configured locale. If all else
fails, consider localtime to be UTC.
### Response:
def timestamp_with_timezone(dt=None):
"""
Return a timestamp with a timezone for the configured locale. If all else
fails, consider localtime to be UTC.
"""
dt = dt or datetime.now()
if timezone is None:
return dt.strftime('%Y-%m-%d %H:%M%z')
if not dt.tzinfo:
tz = timezone.get_current_timezone()
if not tz:
tz = timezone.utc
dt = dt.replace(tzinfo=timezone.get_current_timezone())
return dt.strftime("%Y-%m-%d %H:%M%z") |
def dump(self) -> dict:
"""
Dumps data from the ConfigKey into a dict.
:return: The keys and values from the ConfigKey encapsulated in a dict.
"""
d = {}
for item in self.__dict__:
if item in ['parsed', 'dump', 'parse_data', 'iter_list', 'safe_load']:
continue
if isinstance(self.__dict__[item], ConfigKey):
d[item] = self.__dict__[item].dump()
elif isinstance(self.__dict__[item], list):
d[item] = self.iter_list_dump(self.__dict__[item])
else:
d[item] = self.__dict__[item]
return d | Dumps data from the ConfigKey into a dict.
:return: The keys and values from the ConfigKey encapsulated in a dict. | Below is the instruction that describes the task:
### Input:
Dumps data from the ConfigKey into a dict.
:return: The keys and values from the ConfigKey encapsulated in a dict.
### Response:
def dump(self) -> dict:
"""
Dumps data from the ConfigKey into a dict.
:return: The keys and values from the ConfigKey encapsulated in a dict.
"""
d = {}
for item in self.__dict__:
if item in ['parsed', 'dump', 'parse_data', 'iter_list', 'safe_load']:
continue
if isinstance(self.__dict__[item], ConfigKey):
d[item] = self.__dict__[item].dump()
elif isinstance(self.__dict__[item], list):
d[item] = self.iter_list_dump(self.__dict__[item])
else:
d[item] = self.__dict__[item]
return d |
def endpoint_show(endpoint_id):
"""
Executor for `globus endpoint show`
"""
client = get_client()
res = client.get_endpoint(endpoint_id)
formatted_print(
res,
text_format=FORMAT_TEXT_RECORD,
fields=GCP_FIELDS if res["is_globus_connect"] else STANDARD_FIELDS,
) | Executor for `globus endpoint show` | Below is the instruction that describes the task:
### Input:
Executor for `globus endpoint show`
### Response:
def endpoint_show(endpoint_id):
"""
Executor for `globus endpoint show`
"""
client = get_client()
res = client.get_endpoint(endpoint_id)
formatted_print(
res,
text_format=FORMAT_TEXT_RECORD,
fields=GCP_FIELDS if res["is_globus_connect"] else STANDARD_FIELDS,
) |
def simple_profile(self, sex=None):
"""
Generates a basic profile with personal informations
"""
SEX = ["F", "M"]
if sex not in SEX:
sex = self.random_element(SEX)
if sex == 'F':
name = self.generator.name_female()
elif sex == 'M':
name = self.generator.name_male()
return {
"username": self.generator.user_name(),
"name": name,
"sex": sex,
"address": self.generator.address(),
"mail": self.generator.free_email(),
"birthdate": self.generator.date_of_birth(),
} | Generates a basic profile with personal informations | Below is the instruction that describes the task:
### Input:
Generates a basic profile with personal informations
### Response:
def simple_profile(self, sex=None):
"""
Generates a basic profile with personal informations
"""
SEX = ["F", "M"]
if sex not in SEX:
sex = self.random_element(SEX)
if sex == 'F':
name = self.generator.name_female()
elif sex == 'M':
name = self.generator.name_male()
return {
"username": self.generator.user_name(),
"name": name,
"sex": sex,
"address": self.generator.address(),
"mail": self.generator.free_email(),
"birthdate": self.generator.date_of_birth(),
} |
def handle(self):
"""Request handler for a single Pailgun request."""
# Parse the Nailgun request portion.
_, _, arguments, environment = NailgunProtocol.parse_request(self.request)
# N.B. the first and second nailgun request arguments (working_dir and command) are currently
# ignored in favor of a get_buildroot() call within LocalPantsRunner.run() and an assumption
# that anyone connecting to this nailgun server always intends to run pants itself.
# Prepend the command to our arguments so it aligns with the expected sys.argv format of python
# (e.g. [list', '::'] -> ['./pants', 'list', '::']).
arguments.insert(0, './pants')
self.logger.info('handling pailgun request: `{}`'.format(' '.join(arguments)))
self.logger.debug('pailgun request environment: %s', environment)
# Execute the requested command with optional daemon-side profiling.
with maybe_profiled(environment.get('PANTSD_PROFILE')):
self._run_pants(self.request, arguments, environment)
# NB: This represents the end of pantsd's involvement in the request, but the request will
# continue to run post-fork.
self.logger.info('pailgun request completed: `{}`'.format(' '.join(arguments))) | Request handler for a single Pailgun request. | Below is the instruction that describes the task:
### Input:
Request handler for a single Pailgun request.
### Response:
def handle(self):
"""Request handler for a single Pailgun request."""
# Parse the Nailgun request portion.
_, _, arguments, environment = NailgunProtocol.parse_request(self.request)
# N.B. the first and second nailgun request arguments (working_dir and command) are currently
# ignored in favor of a get_buildroot() call within LocalPantsRunner.run() and an assumption
# that anyone connecting to this nailgun server always intends to run pants itself.
# Prepend the command to our arguments so it aligns with the expected sys.argv format of python
# (e.g. [list', '::'] -> ['./pants', 'list', '::']).
arguments.insert(0, './pants')
self.logger.info('handling pailgun request: `{}`'.format(' '.join(arguments)))
self.logger.debug('pailgun request environment: %s', environment)
# Execute the requested command with optional daemon-side profiling.
with maybe_profiled(environment.get('PANTSD_PROFILE')):
self._run_pants(self.request, arguments, environment)
# NB: This represents the end of pantsd's involvement in the request, but the request will
# continue to run post-fork.
self.logger.info('pailgun request completed: `{}`'.format(' '.join(arguments))) |
def connect(self):
"""
Connects to Scratch.
"""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.socket.connect((self.host, self.port))
except socket.error as (err, msg):
self.connected = False
raise ScratchError("[Errno %d] %s" % (err, msg))
self.connected = True | Connects to Scratch. | Below is the instruction that describes the task:
### Input:
Connects to Scratch.
### Response:
def connect(self):
"""
Connects to Scratch.
"""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.socket.connect((self.host, self.port))
except socket.error as (err, msg):
self.connected = False
raise ScratchError("[Errno %d] %s" % (err, msg))
self.connected = True |
def load_names(self):
# type: () -> None
""" Loads a name database from package data
Uses data files sourced from
http://www.quietaffiliate.com/free-first-name-and-last-name-databases-csv-and-sql/
"""
self.all_male_first_names = load_csv_data('male-first-names.csv')
self.all_female_first_names = load_csv_data('female-first-names.csv')
self.all_last_names = load_csv_data('CSV_Database_of_Last_Names.csv') | Loads a name database from package data
Uses data files sourced from
http://www.quietaffiliate.com/free-first-name-and-last-name-databases-csv-and-sql/ | Below is the instruction that describes the task:
### Input:
Loads a name database from package data
Uses data files sourced from
http://www.quietaffiliate.com/free-first-name-and-last-name-databases-csv-and-sql/
### Response:
def load_names(self):
# type: () -> None
""" Loads a name database from package data
Uses data files sourced from
http://www.quietaffiliate.com/free-first-name-and-last-name-databases-csv-and-sql/
"""
self.all_male_first_names = load_csv_data('male-first-names.csv')
self.all_female_first_names = load_csv_data('female-first-names.csv')
self.all_last_names = load_csv_data('CSV_Database_of_Last_Names.csv') |
def fetch(self):
"""
Fetch a CountryInstance
:returns: Fetched CountryInstance
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return CountryInstance(self._version, payload, iso_code=self._solution['iso_code'], ) | Fetch a CountryInstance
:returns: Fetched CountryInstance
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryInstance | Below is the instruction that describes the task:
### Input:
Fetch a CountryInstance
:returns: Fetched CountryInstance
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryInstance
### Response:
def fetch(self):
"""
Fetch a CountryInstance
:returns: Fetched CountryInstance
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return CountryInstance(self._version, payload, iso_code=self._solution['iso_code'], ) |
def get_size_at_time(self, timestamp):
"""
Get the size of the object at a specific time (snapshot).
If the object was not alive/sized at that instant, return 0.
"""
size = 0
for (t, s) in self.snapshots:
if t == timestamp:
size = s.size
return size | Get the size of the object at a specific time (snapshot).
If the object was not alive/sized at that instant, return 0. | Below is the instruction that describes the task:
### Input:
Get the size of the object at a specific time (snapshot).
If the object was not alive/sized at that instant, return 0.
### Response:
def get_size_at_time(self, timestamp):
"""
Get the size of the object at a specific time (snapshot).
If the object was not alive/sized at that instant, return 0.
"""
size = 0
for (t, s) in self.snapshots:
if t == timestamp:
size = s.size
return size |
def __create_file(self):
"""
Open text file. Write one section at a time. Close text file. Move completed file to dir_root/noaa/
:return none:
"""
logger_lpd_noaa.info("enter create_file")
self.__get_output_filenames()
for idx, filename in enumerate(self.output_filenames):
try:
# self.noaa_txt = open(os.path.join(self.path, filename), "w+")
# self.noaa_file_output[filename] = ""
# self.noaa_txt = self.noaa_file_output[filename]
self.noaa_txt = ""
print("writing: {}".format(filename))
logger_lpd_noaa.info("write_file: opened output txt file")
except Exception as e:
logger_lpd_noaa.error("write_file: failed to open output txt file, {}".format(e))
return
self.__get_max_min_time_1(self.noaa_data_sorted["Data"][idx]["paleo"])
self.__check_time_values()
self.__write_top(filename)
self.__write_generic('Contribution_Date')
self.__write_generic('File_Last_Modified_Date')
self.__write_generic('Title')
self.__write_generic('Investigators')
self.__write_generic('Description_Notes_and_Keywords')
self.__write_pub()
self.__write_funding()
self.__write_geo()
self.__write_generic('Data_Collection')
self.__write_generic('Species')
self.__write_data(idx)
self.noaa_file_output[filename] = self.noaa_txt
# logger_lpd_noaa.info("closed output text file")
# reset the max min time unit to none
self.max_min_time = {"min": "", "max": "", "time": ""}
# shutil.copy(os.path.join(os.getcwd(), filename), self.dir_root)
logger_lpd_noaa.info("exit create_file")
return | Open text file. Write one section at a time. Close text file. Move completed file to dir_root/noaa/
:return none: | Below is the instruction that describes the task:
### Input:
Open text file. Write one section at a time. Close text file. Move completed file to dir_root/noaa/
:return none:
### Response:
def __create_file(self):
"""
Open text file. Write one section at a time. Close text file. Move completed file to dir_root/noaa/
:return none:
"""
logger_lpd_noaa.info("enter create_file")
self.__get_output_filenames()
for idx, filename in enumerate(self.output_filenames):
try:
# self.noaa_txt = open(os.path.join(self.path, filename), "w+")
# self.noaa_file_output[filename] = ""
# self.noaa_txt = self.noaa_file_output[filename]
self.noaa_txt = ""
print("writing: {}".format(filename))
logger_lpd_noaa.info("write_file: opened output txt file")
except Exception as e:
logger_lpd_noaa.error("write_file: failed to open output txt file, {}".format(e))
return
self.__get_max_min_time_1(self.noaa_data_sorted["Data"][idx]["paleo"])
self.__check_time_values()
self.__write_top(filename)
self.__write_generic('Contribution_Date')
self.__write_generic('File_Last_Modified_Date')
self.__write_generic('Title')
self.__write_generic('Investigators')
self.__write_generic('Description_Notes_and_Keywords')
self.__write_pub()
self.__write_funding()
self.__write_geo()
self.__write_generic('Data_Collection')
self.__write_generic('Species')
self.__write_data(idx)
self.noaa_file_output[filename] = self.noaa_txt
# logger_lpd_noaa.info("closed output text file")
# reset the max min time unit to none
self.max_min_time = {"min": "", "max": "", "time": ""}
# shutil.copy(os.path.join(os.getcwd(), filename), self.dir_root)
logger_lpd_noaa.info("exit create_file")
return |
def get_domain_class_terminal_attribute_iterator(ent):
"""
Returns an iterator over all terminal attributes in the given registered
resource.
"""
for attr in itervalues_(ent.__everest_attributes__):
if attr.kind == RESOURCE_ATTRIBUTE_KINDS.TERMINAL:
yield attr | Returns an iterator over all terminal attributes in the given registered
resource. | Below is the instruction that describes the task:
### Input:
Returns an iterator over all terminal attributes in the given registered
resource.
### Response:
def get_domain_class_terminal_attribute_iterator(ent):
"""
Returns an iterator over all terminal attributes in the given registered
resource.
"""
for attr in itervalues_(ent.__everest_attributes__):
if attr.kind == RESOURCE_ATTRIBUTE_KINDS.TERMINAL:
yield attr |
def generate(env):
"""Add Builders and construction variables for dvips to an Environment."""
global PSAction
if PSAction is None:
PSAction = SCons.Action.Action('$PSCOM', '$PSCOMSTR')
global DVIPSAction
if DVIPSAction is None:
DVIPSAction = SCons.Action.Action(DviPsFunction, strfunction = DviPsStrFunction)
global PSBuilder
if PSBuilder is None:
PSBuilder = SCons.Builder.Builder(action = PSAction,
prefix = '$PSPREFIX',
suffix = '$PSSUFFIX',
src_suffix = '.dvi',
src_builder = 'DVI',
single_source=True)
env['BUILDERS']['PostScript'] = PSBuilder
env['DVIPS'] = 'dvips'
env['DVIPSFLAGS'] = SCons.Util.CLVar('')
# I'm not quite sure I got the directories and filenames right for variant_dir
# We need to be in the correct directory for the sake of latex \includegraphics eps included files.
env['PSCOM'] = 'cd ${TARGET.dir} && $DVIPS $DVIPSFLAGS -o ${TARGET.file} ${SOURCE.file}'
env['PSPREFIX'] = ''
env['PSSUFFIX'] = '.ps' | Add Builders and construction variables for dvips to an Environment. | Below is the instruction that describes the task:
### Input:
Add Builders and construction variables for dvips to an Environment.
### Response:
def generate(env):
"""Add Builders and construction variables for dvips to an Environment."""
global PSAction
if PSAction is None:
PSAction = SCons.Action.Action('$PSCOM', '$PSCOMSTR')
global DVIPSAction
if DVIPSAction is None:
DVIPSAction = SCons.Action.Action(DviPsFunction, strfunction = DviPsStrFunction)
global PSBuilder
if PSBuilder is None:
PSBuilder = SCons.Builder.Builder(action = PSAction,
prefix = '$PSPREFIX',
suffix = '$PSSUFFIX',
src_suffix = '.dvi',
src_builder = 'DVI',
single_source=True)
env['BUILDERS']['PostScript'] = PSBuilder
env['DVIPS'] = 'dvips'
env['DVIPSFLAGS'] = SCons.Util.CLVar('')
# I'm not quite sure I got the directories and filenames right for variant_dir
# We need to be in the correct directory for the sake of latex \includegraphics eps included files.
env['PSCOM'] = 'cd ${TARGET.dir} && $DVIPS $DVIPSFLAGS -o ${TARGET.file} ${SOURCE.file}'
env['PSPREFIX'] = ''
env['PSSUFFIX'] = '.ps' |
def addPendingResult( self, ps, jobid ):
"""Add a "pending" result that we expect to get results for.
:param ps: the parameters for the result
:param jobid: an identifier for the pending result"""
k = self._parametersAsIndex(ps)
# retrieve or create the result list
if k in self._results.keys():
rs = self._results[k]
else:
rs = []
self._results[k] = rs
# append the pending result's jobid
rs.insert(0, jobid)
# map job id to parameters to which it refers
self._pending[jobid] = k | Add a "pending" result that we expect to get results for.
:param ps: the parameters for the result
:param jobid: an identifier for the pending result | Below is the instruction that describes the task:
### Input:
Add a "pending" result that we expect to get results for.
:param ps: the parameters for the result
:param jobid: an identifier for the pending result
### Response:
def addPendingResult( self, ps, jobid ):
"""Add a "pending" result that we expect to get results for.
:param ps: the parameters for the result
:param jobid: an identifier for the pending result"""
k = self._parametersAsIndex(ps)
# retrieve or create the result list
if k in self._results.keys():
rs = self._results[k]
else:
rs = []
self._results[k] = rs
# append the pending result's jobid
rs.insert(0, jobid)
# map job id to parameters to which it refers
self._pending[jobid] = k |
def supports_py3(project_name):
"""Check with PyPI if a project supports Python 3."""
log = logging.getLogger("ciu")
log.info("Checking {} ...".format(project_name))
request = requests.get("https://pypi.org/pypi/{}/json".format(project_name))
if request.status_code >= 400:
log = logging.getLogger("ciu")
log.warning("problem fetching {}, assuming ported ({})".format(
project_name, request.status_code))
return True
response = request.json()
return any(c.startswith("Programming Language :: Python :: 3")
for c in response["info"]["classifiers"]) | Check with PyPI if a project supports Python 3. | Below is the instruction that describes the task:
### Input:
Check with PyPI if a project supports Python 3.
### Response:
def supports_py3(project_name):
"""Check with PyPI if a project supports Python 3."""
log = logging.getLogger("ciu")
log.info("Checking {} ...".format(project_name))
request = requests.get("https://pypi.org/pypi/{}/json".format(project_name))
if request.status_code >= 400:
log = logging.getLogger("ciu")
log.warning("problem fetching {}, assuming ported ({})".format(
project_name, request.status_code))
return True
response = request.json()
return any(c.startswith("Programming Language :: Python :: 3")
for c in response["info"]["classifiers"]) |
def writePlistToString(rootObject):
'''Return 'rootObject' as a plist-formatted string.'''
plistData, error = (
NSPropertyListSerialization.
dataFromPropertyList_format_errorDescription_(
rootObject, NSPropertyListXMLFormat_v1_0, None))
if plistData is None:
if error:
error = error.encode('ascii', 'ignore')
else:
error = "Unknown error"
raise NSPropertyListSerializationException(error)
else:
return str(plistData) | Return 'rootObject' as a plist-formatted string. | Below is the instruction that describes the task:
### Input:
Return 'rootObject' as a plist-formatted string.
### Response:
def writePlistToString(rootObject):
'''Return 'rootObject' as a plist-formatted string.'''
plistData, error = (
NSPropertyListSerialization.
dataFromPropertyList_format_errorDescription_(
rootObject, NSPropertyListXMLFormat_v1_0, None))
if plistData is None:
if error:
error = error.encode('ascii', 'ignore')
else:
error = "Unknown error"
raise NSPropertyListSerializationException(error)
else:
return str(plistData) |
def search(self, search_phrase, limit=None):
""" Finds partitions by search phrase.
Args:
search_phrase (str or unicode):
limit (int, optional): how many results to generate. None means without limit.
Yields:
PartitionSearchResult instances.
"""
query_string = self._make_query_from_terms(search_phrase)
self._parsed_query = query_string
schema = self._get_generic_schema()
parser = QueryParser('doc', schema=schema)
query = parser.parse(query_string)
logger.debug('Searching partitions using `{}` query.'.format(query))
with self.index.searcher() as searcher:
results = searcher.search(query, limit=limit)
for hit in results:
yield PartitionSearchResult(
vid=hit['vid'], dataset_vid=hit['dataset_vid'], score=hit.score) | Finds partitions by search phrase.
Args:
search_phrase (str or unicode):
limit (int, optional): how many results to generate. None means without limit.
Yields:
PartitionSearchResult instances. | Below is the instruction that describes the task:
### Input:
Finds partitions by search phrase.
Args:
search_phrase (str or unicode):
limit (int, optional): how many results to generate. None means without limit.
Yields:
PartitionSearchResult instances.
### Response:
def search(self, search_phrase, limit=None):
""" Finds partitions by search phrase.
Args:
search_phrase (str or unicode):
limit (int, optional): how many results to generate. None means without limit.
Yields:
PartitionSearchResult instances.
"""
query_string = self._make_query_from_terms(search_phrase)
self._parsed_query = query_string
schema = self._get_generic_schema()
parser = QueryParser('doc', schema=schema)
query = parser.parse(query_string)
logger.debug('Searching partitions using `{}` query.'.format(query))
with self.index.searcher() as searcher:
results = searcher.search(query, limit=limit)
for hit in results:
yield PartitionSearchResult(
vid=hit['vid'], dataset_vid=hit['dataset_vid'], score=hit.score) |
def save_photon_hdf5(self, identity=None, overwrite=True, path=None):
"""Create a smFRET Photon-HDF5 file with current timestamps."""
filepath = self.filepath
if path is not None:
filepath = Path(path, filepath.name)
self.merge_da()
data = self._make_photon_hdf5(identity=identity)
phc.hdf5.save_photon_hdf5(data, h5_fname=str(filepath),
overwrite=overwrite) | Create a smFRET Photon-HDF5 file with current timestamps. | Below is the instruction that describes the task:
### Input:
Create a smFRET Photon-HDF5 file with current timestamps.
### Response:
def save_photon_hdf5(self, identity=None, overwrite=True, path=None):
"""Create a smFRET Photon-HDF5 file with current timestamps."""
filepath = self.filepath
if path is not None:
filepath = Path(path, filepath.name)
self.merge_da()
data = self._make_photon_hdf5(identity=identity)
phc.hdf5.save_photon_hdf5(data, h5_fname=str(filepath),
overwrite=overwrite) |
def dskp02(handle, dladsc, start, room):
"""
Fetch triangular plates from a type 2 DSK segment.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskp02_c.html
:param handle: DSK file handle.
:type handle: int
:param dladsc: DLA descriptor.
:type dladsc: spiceypy.utils.support_types.SpiceDLADescr
:param start: Start index.
:type start: int
:param room: Amount of room in output array.
:type room: int
:return: Array containing plates.
"""
handle = ctypes.c_int(handle)
start = ctypes.c_int(start)
room = ctypes.c_int(room)
n = ctypes.c_int(0)
plates = stypes.emptyIntMatrix(3, room)
libspice.dskp02_c(handle, dladsc, start, room, ctypes.byref(n), plates)
return stypes.cMatrixToNumpy(plates) | Fetch triangular plates from a type 2 DSK segment.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskp02_c.html
:param handle: DSK file handle.
:type handle: int
:param dladsc: DLA descriptor.
:type dladsc: spiceypy.utils.support_types.SpiceDLADescr
:param start: Start index.
:type start: int
:param room: Amount of room in output array.
:type room: int
:return: Array containing plates. | Below is the instruction that describes the task:
### Input:
Fetch triangular plates from a type 2 DSK segment.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskp02_c.html
:param handle: DSK file handle.
:type handle: int
:param dladsc: DLA descriptor.
:type dladsc: spiceypy.utils.support_types.SpiceDLADescr
:param start: Start index.
:type start: int
:param room: Amount of room in output array.
:type room: int
:return: Array containing plates.
### Response:
def dskp02(handle, dladsc, start, room):
"""
Fetch triangular plates from a type 2 DSK segment.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskp02_c.html
:param handle: DSK file handle.
:type handle: int
:param dladsc: DLA descriptor.
:type dladsc: spiceypy.utils.support_types.SpiceDLADescr
:param start: Start index.
:type start: int
:param room: Amount of room in output array.
:type room: int
:return: Array containing plates.
"""
handle = ctypes.c_int(handle)
start = ctypes.c_int(start)
room = ctypes.c_int(room)
n = ctypes.c_int(0)
plates = stypes.emptyIntMatrix(3, room)
libspice.dskp02_c(handle, dladsc, start, room, ctypes.byref(n), plates)
return stypes.cMatrixToNumpy(plates) |
def step(self, actions):
"""Takes a step in all environments.
Subclasses should override _step to do the actual reset if something other
than the default implementation is desired.
Args:
actions: Batch of actions.
Returns:
(preprocessed_observations, processed_rewards, dones, infos).
"""
observations, raw_rewards, dones, infos = self._step(actions)
# Process rewards.
raw_rewards = raw_rewards.astype(np.float32)
processed_rewards = self.process_rewards(raw_rewards)
# Process observations.
processed_observations = self.process_observations(observations)
# Record history.
self.trajectories.step(processed_observations, raw_rewards,
processed_rewards, dones, actions)
return processed_observations, processed_rewards, dones, infos | Takes a step in all environments.
Subclasses should override _step to do the actual reset if something other
than the default implementation is desired.
Args:
actions: Batch of actions.
Returns:
(preprocessed_observations, processed_rewards, dones, infos). | Below is the instruction that describes the task:
### Input:
Takes a step in all environments.
Subclasses should override _step to do the actual reset if something other
than the default implementation is desired.
Args:
actions: Batch of actions.
Returns:
(preprocessed_observations, processed_rewards, dones, infos).
### Response:
def step(self, actions):
"""Takes a step in all environments.
Subclasses should override _step to do the actual reset if something other
than the default implementation is desired.
Args:
actions: Batch of actions.
Returns:
(preprocessed_observations, processed_rewards, dones, infos).
"""
observations, raw_rewards, dones, infos = self._step(actions)
# Process rewards.
raw_rewards = raw_rewards.astype(np.float32)
processed_rewards = self.process_rewards(raw_rewards)
# Process observations.
processed_observations = self.process_observations(observations)
# Record history.
self.trajectories.step(processed_observations, raw_rewards,
processed_rewards, dones, actions)
return processed_observations, processed_rewards, dones, infos |
def _interpret_hits(self, results_list, base_list):
'''Sort reads that hit multiple HMMs to the databases to which they had
the highest bit score. Return a dictionary containing HMMs as keys, and
number of hits as the values.
This function is set up so that the read names could easily be returned
instead of numbers, for future development of GraftM
Parameters
----------
results_list: list
Iterable if SequenceSearchResult objects. e.g.
[SequenceSearchResult_1, SequenceSearchResult_2, ...]
base_list: list
Iterable of the basenames for each sequence file provided to graftM
e.g.
[sample_1, sample_2, ...]
Returns
-------
dictionary:
Contains samples as entries. The value for each sample is another
dictionary with HMM as the key, and number of hits as values:
{"sample_1":{HMM_1: 12
HMM_2: 35
HMM_3: 1258
...}
...
}
'''
logging.debug("Sorting reads into HMMs by bit score")
run_results = {}
########################################################################
################## - Sort reads to best hit db - #######################
for base, results in zip(base_list, results_list): # For each sample
search_results = {}
for search in results():
search_list = list(
search.each([SequenceSearchResult.QUERY_ID_FIELD,
SequenceSearchResult.ALIGNMENT_BIT_SCORE,
SequenceSearchResult.HMM_NAME_FIELD])
)
for hit in search_list:
if hit[0] in search_results:
if float(hit[1]) > search_results[hit[0]][1]:
search_results[hit[0]] = [float(hit[1]), hit[2]]
else:
search_results[hit[0]] = [float(hit[1]), hit[2]]
run_results[base] = search_results
########################################################################
################## - Gather counts for each db - #######################
db_count = {}
for run in run_results.keys():
run_count = {}
for entry in run_results[run].values():
key = entry[1]
if key in run_count:
run_count[key] += 1
else:
run_count[key] = 1
db_count[run] = run_count
return db_count | Sort reads that hit multiple HMMs to the databases to which they had
the highest bit score. Return a dictionary containing HMMs as keys, and
number of hits as the values.
This function is set up so that the read names could easily be returned
instead of numbers, for future development of GraftM
Parameters
----------
results_list: list
Iterable if SequenceSearchResult objects. e.g.
[SequenceSearchResult_1, SequenceSearchResult_2, ...]
base_list: list
Iterable of the basenames for each sequence file provided to graftM
e.g.
[sample_1, sample_2, ...]
Returns
-------
dictionary:
Contains samples as entries. The value for each sample is another
dictionary with HMM as the key, and number of hits as values:
{"sample_1":{HMM_1: 12
HMM_2: 35
HMM_3: 1258
...}
...
} | Below is the instruction that describes the task:
### Input:
Sort reads that hit multiple HMMs to the databases to which they had
the highest bit score. Return a dictionary containing HMMs as keys, and
number of hits as the values.
This function is set up so that the read names could easily be returned
instead of numbers, for future development of GraftM
Parameters
----------
results_list: list
Iterable if SequenceSearchResult objects. e.g.
[SequenceSearchResult_1, SequenceSearchResult_2, ...]
base_list: list
Iterable of the basenames for each sequence file provided to graftM
e.g.
[sample_1, sample_2, ...]
Returns
-------
dictionary:
Contains samples as entries. The value for each sample is another
dictionary with HMM as the key, and number of hits as values:
{"sample_1":{HMM_1: 12
HMM_2: 35
HMM_3: 1258
...}
...
}
### Response:
def _interpret_hits(self, results_list, base_list):
'''Sort reads that hit multiple HMMs to the databases to which they had
the highest bit score. Return a dictionary containing HMMs as keys, and
number of hits as the values.
This function is set up so that the read names could easily be returned
instead of numbers, for future development of GraftM
Parameters
----------
results_list: list
Iterable if SequenceSearchResult objects. e.g.
[SequenceSearchResult_1, SequenceSearchResult_2, ...]
base_list: list
Iterable of the basenames for each sequence file provided to graftM
e.g.
[sample_1, sample_2, ...]
Returns
-------
dictionary:
Contains samples as entries. The value for each sample is another
dictionary with HMM as the key, and number of hits as values:
{"sample_1":{HMM_1: 12
HMM_2: 35
HMM_3: 1258
...}
...
}
'''
logging.debug("Sorting reads into HMMs by bit score")
run_results = {}
########################################################################
################## - Sort reads to best hit db - #######################
for base, results in zip(base_list, results_list): # For each sample
search_results = {}
for search in results():
search_list = list(
search.each([SequenceSearchResult.QUERY_ID_FIELD,
SequenceSearchResult.ALIGNMENT_BIT_SCORE,
SequenceSearchResult.HMM_NAME_FIELD])
)
for hit in search_list:
if hit[0] in search_results:
if float(hit[1]) > search_results[hit[0]][1]:
search_results[hit[0]] = [float(hit[1]), hit[2]]
else:
search_results[hit[0]] = [float(hit[1]), hit[2]]
run_results[base] = search_results
########################################################################
################## - Gather counts for each db - #######################
db_count = {}
for run in run_results.keys():
run_count = {}
for entry in run_results[run].values():
key = entry[1]
if key in run_count:
run_count[key] += 1
else:
run_count[key] = 1
db_count[run] = run_count
return db_count |
def dP_cone_meter(D, Dc, P1, P2):
r'''Calculates the non-recoverable pressure drop of a cone meter
based on the measured pressures before and at the cone end, and the
geometry of the cone meter according to [1]_.
.. math::
\Delta \bar \omega = (1.09 - 0.813\beta)\Delta P
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
Dc : float
Diameter of the largest end of the cone meter, [m]
P1 : float
Static pressure of fluid upstream of cone meter at the cross-section of
the pressure tap, [Pa]
P2 : float
Static pressure of fluid at the end of the center of the cone pressure
tap, [Pa]
Returns
-------
dP : float
Non-recoverable pressure drop of the orifice plate, [Pa]
Notes
-----
The recoverable pressure drop should be recovered by 6 pipe diameters
downstream of the cone meter.
Examples
--------
>>> dP_cone_meter(1, .7, 1E6, 9.5E5)
25470.093437973323
References
----------
.. [1] ISO 5167-5:2016 - Measurement of Fluid Flow by Means of Pressure
Differential Devices Inserted in Circular Cross-Section Conduits Running
Full -- Part 5: Cone meters.
'''
dP = P1 - P2
beta = diameter_ratio_cone_meter(D, Dc)
return (1.09 - 0.813*beta)*dP | r'''Calculates the non-recoverable pressure drop of a cone meter
based on the measured pressures before and at the cone end, and the
geometry of the cone meter according to [1]_.
.. math::
\Delta \bar \omega = (1.09 - 0.813\beta)\Delta P
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
Dc : float
Diameter of the largest end of the cone meter, [m]
P1 : float
Static pressure of fluid upstream of cone meter at the cross-section of
the pressure tap, [Pa]
P2 : float
Static pressure of fluid at the end of the center of the cone pressure
tap, [Pa]
Returns
-------
dP : float
Non-recoverable pressure drop of the orifice plate, [Pa]
Notes
-----
The recoverable pressure drop should be recovered by 6 pipe diameters
downstream of the cone meter.
Examples
--------
>>> dP_cone_meter(1, .7, 1E6, 9.5E5)
25470.093437973323
References
----------
.. [1] ISO 5167-5:2016 - Measurement of Fluid Flow by Means of Pressure
Differential Devices Inserted in Circular Cross-Section Conduits Running
Full -- Part 5: Cone meters. | Below is the the instruction that describes the task:
### Input:
r'''Calculates the non-recoverable pressure drop of a cone meter
based on the measured pressures before and at the cone end, and the
geometry of the cone meter according to [1]_.
.. math::
\Delta \bar \omega = (1.09 - 0.813\beta)\Delta P
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
Dc : float
Diameter of the largest end of the cone meter, [m]
P1 : float
Static pressure of fluid upstream of cone meter at the cross-section of
the pressure tap, [Pa]
P2 : float
Static pressure of fluid at the end of the center of the cone pressure
tap, [Pa]
Returns
-------
dP : float
Non-recoverable pressure drop of the orifice plate, [Pa]
Notes
-----
The recoverable pressure drop should be recovered by 6 pipe diameters
downstream of the cone meter.
Examples
--------
>>> dP_cone_meter(1, .7, 1E6, 9.5E5)
25470.093437973323
References
----------
.. [1] ISO 5167-5:2016 - Measurement of Fluid Flow by Means of Pressure
Differential Devices Inserted in Circular Cross-Section Conduits Running
Full -- Part 5: Cone meters.
### Response:
def dP_cone_meter(D, Dc, P1, P2):
r'''Calculates the non-recoverable pressure drop of a cone meter
based on the measured pressures before and at the cone end, and the
geometry of the cone meter according to [1]_.
.. math::
\Delta \bar \omega = (1.09 - 0.813\beta)\Delta P
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
Dc : float
Diameter of the largest end of the cone meter, [m]
P1 : float
Static pressure of fluid upstream of cone meter at the cross-section of
the pressure tap, [Pa]
P2 : float
Static pressure of fluid at the end of the center of the cone pressure
tap, [Pa]
Returns
-------
dP : float
Non-recoverable pressure drop of the orifice plate, [Pa]
Notes
-----
The recoverable pressure drop should be recovered by 6 pipe diameters
downstream of the cone meter.
Examples
--------
>>> dP_cone_meter(1, .7, 1E6, 9.5E5)
25470.093437973323
References
----------
.. [1] ISO 5167-5:2016 - Measurement of Fluid Flow by Means of Pressure
Differential Devices Inserted in Circular Cross-Section Conduits Running
Full -- Part 5: Cone meters.
'''
dP = P1 - P2
beta = diameter_ratio_cone_meter(D, Dc)
return (1.09 - 0.813*beta)*dP |
def fill(self, value=b'\xff'):
"""Fill all empty space between segments with given value `value`.
"""
previous_segment_maximum_address = None
fill_segments = []
for address, data in self._segments:
maximum_address = address + len(data)
if previous_segment_maximum_address is not None:
fill_size = address - previous_segment_maximum_address
fill_size_words = fill_size // self.word_size_bytes
fill_segments.append(_Segment(
previous_segment_maximum_address,
previous_segment_maximum_address + fill_size,
value * fill_size_words,
self.word_size_bytes))
previous_segment_maximum_address = maximum_address
for segment in fill_segments:
self._segments.add(segment) | Fill all empty space between segments with given value `value`. | Below is the the instruction that describes the task:
### Input:
Fill all empty space between segments with given value `value`.
### Response:
def fill(self, value=b'\xff'):
"""Fill all empty space between segments with given value `value`.
"""
previous_segment_maximum_address = None
fill_segments = []
for address, data in self._segments:
maximum_address = address + len(data)
if previous_segment_maximum_address is not None:
fill_size = address - previous_segment_maximum_address
fill_size_words = fill_size // self.word_size_bytes
fill_segments.append(_Segment(
previous_segment_maximum_address,
previous_segment_maximum_address + fill_size,
value * fill_size_words,
self.word_size_bytes))
previous_segment_maximum_address = maximum_address
for segment in fill_segments:
self._segments.add(segment) |
def _read_opt_rpl(self, code, *, desc):
"""Read HOPOPT RPL option.
Structure of HOPOPT RPL option [RFC 6553]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Option Type | Opt Data Len |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|O|R|F|0|0|0|0|0| RPLInstanceID | SenderRank |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| (sub-TLVs) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 hopopt.rpl.type Option Type
0 0 hopopt.rpl.type.value Option Number
0 0 hopopt.rpl.type.action Action (01)
0 2 hopopt.rpl.type.change Change Flag (1)
1 8 hopopt.rpl.length Length of Option Data
2 16 hopopt.rpl.flags RPL Option Flags
2 16 hopopt.rpl.flags.down Down Flag
2 17 hopopt.rpl.flags.rank_error Rank-Error Flag
2 18 hopopt.rpl.flags.fwd_error Forwarding-Error Flag
3 24 hopopt.rpl.id RPLInstanceID
4 32 hopopt.rpl.rank SenderRank
6 48 hopopt.rpl.data Sub-TLVs
"""
_type = self._read_opt_type(code)
_size = self._read_unpack(1)
if _size < 4:
raise ProtocolError(f'{self.alias}: [Optno {code}] invalid format')
_flag = self._read_binary(1)
_rpld = self._read_unpack(1)
_rank = self._read_unpack(2)
opt = dict(
desc=desc,
type=_type,
length=_size + 2,
flags=dict(
down=True if int(_flag[0], base=2) else False,
rank_error=True if int(_flag[1], base=2) else False,
fwd_error=True if int(_flag[2], base=2) else False,
),
id=_rpld,
rank=_rank,
)
if _size > 4:
opt['data'] = self._read_fileng(_size-4)
return opt | Read HOPOPT RPL option.
Structure of HOPOPT RPL option [RFC 6553]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Option Type | Opt Data Len |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|O|R|F|0|0|0|0|0| RPLInstanceID | SenderRank |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| (sub-TLVs) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 hopopt.rpl.type Option Type
0 0 hopopt.rpl.type.value Option Number
0 0 hopopt.rpl.type.action Action (01)
0 2 hopopt.rpl.type.change Change Flag (1)
1 8 hopopt.rpl.length Length of Option Data
2 16 hopopt.rpl.flags RPL Option Flags
2 16 hopopt.rpl.flags.down Down Flag
2 17 hopopt.rpl.flags.rank_error Rank-Error Flag
2 18 hopopt.rpl.flags.fwd_error Forwarding-Error Flag
3 24 hopopt.rpl.id RPLInstanceID
4 32 hopopt.rpl.rank SenderRank
6 48 hopopt.rpl.data Sub-TLVs | Below is the the instruction that describes the task:
### Input:
Read HOPOPT RPL option.
Structure of HOPOPT RPL option [RFC 6553]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Option Type | Opt Data Len |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|O|R|F|0|0|0|0|0| RPLInstanceID | SenderRank |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| (sub-TLVs) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 hopopt.rpl.type Option Type
0 0 hopopt.rpl.type.value Option Number
0 0 hopopt.rpl.type.action Action (01)
0 2 hopopt.rpl.type.change Change Flag (1)
1 8 hopopt.rpl.length Length of Option Data
2 16 hopopt.rpl.flags RPL Option Flags
2 16 hopopt.rpl.flags.down Down Flag
2 17 hopopt.rpl.flags.rank_error Rank-Error Flag
2 18 hopopt.rpl.flags.fwd_error Forwarding-Error Flag
3 24 hopopt.rpl.id RPLInstanceID
4 32 hopopt.rpl.rank SenderRank
6 48 hopopt.rpl.data Sub-TLVs
### Response:
def _read_opt_rpl(self, code, *, desc):
"""Read HOPOPT RPL option.
Structure of HOPOPT RPL option [RFC 6553]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Option Type | Opt Data Len |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|O|R|F|0|0|0|0|0| RPLInstanceID | SenderRank |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| (sub-TLVs) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 hopopt.rpl.type Option Type
0 0 hopopt.rpl.type.value Option Number
0 0 hopopt.rpl.type.action Action (01)
0 2 hopopt.rpl.type.change Change Flag (1)
1 8 hopopt.rpl.length Length of Option Data
2 16 hopopt.rpl.flags RPL Option Flags
2 16 hopopt.rpl.flags.down Down Flag
2 17 hopopt.rpl.flags.rank_error Rank-Error Flag
2 18 hopopt.rpl.flags.fwd_error Forwarding-Error Flag
3 24 hopopt.rpl.id RPLInstanceID
4 32 hopopt.rpl.rank SenderRank
6 48 hopopt.rpl.data Sub-TLVs
"""
_type = self._read_opt_type(code)
_size = self._read_unpack(1)
if _size < 4:
raise ProtocolError(f'{self.alias}: [Optno {code}] invalid format')
_flag = self._read_binary(1)
_rpld = self._read_unpack(1)
_rank = self._read_unpack(2)
opt = dict(
desc=desc,
type=_type,
length=_size + 2,
flags=dict(
down=True if int(_flag[0], base=2) else False,
rank_error=True if int(_flag[1], base=2) else False,
fwd_error=True if int(_flag[2], base=2) else False,
),
id=_rpld,
rank=_rank,
)
if _size > 4:
opt['data'] = self._read_fileng(_size-4)
return opt |
def replace(self, year=None, month=None, day=None):
"""Return a new date with new values for the specified fields."""
if year is None:
year = self._year
if month is None:
month = self._month
if day is None:
day = self._day
_check_date_fields(year, month, day)
return date(year, month, day) | Return a new date with new values for the specified fields. | Below is the the instruction that describes the task:
### Input:
Return a new date with new values for the specified fields.
### Response:
def replace(self, year=None, month=None, day=None):
"""Return a new date with new values for the specified fields."""
if year is None:
year = self._year
if month is None:
month = self._month
if day is None:
day = self._day
_check_date_fields(year, month, day)
return date(year, month, day) |
def merged_cell_ranges(self):
"""Generates the sequence of merged cell ranges in the format:
((col_low, row_low), (col_hi, row_hi))
"""
for rlo, rhi, clo, chi in self.raw_sheet.merged_cells:
yield ((clo, rlo), (chi, rhi)) | Generates the sequence of merged cell ranges in the format:
((col_low, row_low), (col_hi, row_hi)) | Below is the the instruction that describes the task:
### Input:
Generates the sequence of merged cell ranges in the format:
((col_low, row_low), (col_hi, row_hi))
### Response:
def merged_cell_ranges(self):
"""Generates the sequence of merged cell ranges in the format:
((col_low, row_low), (col_hi, row_hi))
"""
for rlo, rhi, clo, chi in self.raw_sheet.merged_cells:
yield ((clo, rlo), (chi, rhi)) |
def pprint(self, stream=None, indent=1, width=80, depth=None):
"""
Pretty print the underlying literal Python object
"""
pp.pprint(to_literal(self), stream, indent, width, depth) | Pretty print the underlying literal Python object | Below is the the instruction that describes the task:
### Input:
Pretty print the underlying literal Python object
### Response:
def pprint(self, stream=None, indent=1, width=80, depth=None):
"""
Pretty print the underlying literal Python object
"""
pp.pprint(to_literal(self), stream, indent, width, depth) |
def transform(self, fseries, **kwargs):
"""Compute the time-frequency plane at fixed Q with the most
significant tile
Parameters
----------
fseries : `~gwpy.timeseries.FrequencySeries`
the complex FFT of a time-series data set
**kwargs
other keyword arguments to pass to `QPlane.transform`
Returns
-------
out : `QGram`
signal energies over the time-frequency plane containing the most
significant tile
N : `int`
estimated number of statistically independent tiles
See Also
--------
QPlane.transform
compute the Q-transform over a single time-frequency plane
"""
weight = 1 + numpy.log10(self.qrange[1]/self.qrange[0]) / numpy.sqrt(2)
nind, nplanes, peak, result = (0, 0, 0, None)
# identify the plane with the loudest tile
for plane in self:
nplanes += 1
nind += sum([1 + row.ntiles * row.deltam for row in plane])
result = plane.transform(fseries, **kwargs)
if result.peak['energy'] > peak:
out = result
peak = out.peak['energy']
return (out, nind * weight / nplanes) | Compute the time-frequency plane at fixed Q with the most
significant tile
Parameters
----------
fseries : `~gwpy.timeseries.FrequencySeries`
the complex FFT of a time-series data set
**kwargs
other keyword arguments to pass to `QPlane.transform`
Returns
-------
out : `QGram`
signal energies over the time-frequency plane containing the most
significant tile
N : `int`
estimated number of statistically independent tiles
See Also
--------
QPlane.transform
compute the Q-transform over a single time-frequency plane | Below is the the instruction that describes the task:
### Input:
Compute the time-frequency plane at fixed Q with the most
significant tile
Parameters
----------
fseries : `~gwpy.timeseries.FrequencySeries`
the complex FFT of a time-series data set
**kwargs
other keyword arguments to pass to `QPlane.transform`
Returns
-------
out : `QGram`
signal energies over the time-frequency plane containing the most
significant tile
N : `int`
estimated number of statistically independent tiles
See Also
--------
QPlane.transform
compute the Q-transform over a single time-frequency plane
### Response:
def transform(self, fseries, **kwargs):
"""Compute the time-frequency plane at fixed Q with the most
significant tile
Parameters
----------
fseries : `~gwpy.timeseries.FrequencySeries`
the complex FFT of a time-series data set
**kwargs
other keyword arguments to pass to `QPlane.transform`
Returns
-------
out : `QGram`
signal energies over the time-frequency plane containing the most
significant tile
N : `int`
estimated number of statistically independent tiles
See Also
--------
QPlane.transform
compute the Q-transform over a single time-frequency plane
"""
weight = 1 + numpy.log10(self.qrange[1]/self.qrange[0]) / numpy.sqrt(2)
nind, nplanes, peak, result = (0, 0, 0, None)
# identify the plane with the loudest tile
for plane in self:
nplanes += 1
nind += sum([1 + row.ntiles * row.deltam for row in plane])
result = plane.transform(fseries, **kwargs)
if result.peak['energy'] > peak:
out = result
peak = out.peak['energy']
return (out, nind * weight / nplanes) |
def _renamer(self, tre):
""" renames newick from numbers to sample names"""
## get the tre with numbered tree tip labels
names = tre.get_leaves()
## replace numbered names with snames
for name in names:
name.name = self.samples[int(name.name)]
## return with only topology and leaf labels
return tre.write(format=9) | renames newick from numbers to sample names | Below is the the instruction that describes the task:
### Input:
renames newick from numbers to sample names
### Response:
def _renamer(self, tre):
""" renames newick from numbers to sample names"""
## get the tre with numbered tree tip labels
names = tre.get_leaves()
## replace numbered names with snames
for name in names:
name.name = self.samples[int(name.name)]
## return with only topology and leaf labels
return tre.write(format=9) |
async def run(self, command, timeout=None):
"""Run command on this unit.
:param str command: The command to run
:param int timeout: Time, in seconds, to wait before command is
considered failed
:returns: A :class:`juju.action.Action` instance.
"""
action = client.ActionFacade.from_connection(self.connection)
log.debug(
'Running `%s` on %s', command, self.name)
if timeout:
# Convert seconds to nanoseconds
timeout = int(timeout * 1000000000)
res = await action.Run(
[],
command,
[],
timeout,
[self.name],
)
return await self.model.wait_for_action(res.results[0].action.tag) | Run command on this unit.
:param str command: The command to run
:param int timeout: Time, in seconds, to wait before command is
considered failed
:returns: A :class:`juju.action.Action` instance. | Below is the the instruction that describes the task:
### Input:
Run command on this unit.
:param str command: The command to run
:param int timeout: Time, in seconds, to wait before command is
considered failed
:returns: A :class:`juju.action.Action` instance.
### Response:
async def run(self, command, timeout=None):
"""Run command on this unit.
:param str command: The command to run
:param int timeout: Time, in seconds, to wait before command is
considered failed
:returns: A :class:`juju.action.Action` instance.
"""
action = client.ActionFacade.from_connection(self.connection)
log.debug(
'Running `%s` on %s', command, self.name)
if timeout:
# Convert seconds to nanoseconds
timeout = int(timeout * 1000000000)
res = await action.Run(
[],
command,
[],
timeout,
[self.name],
)
return await self.model.wait_for_action(res.results[0].action.tag) |
def _get_site_term(self, C, vs30):
"""
Returns only a linear site amplification term
"""
dg1, dg2 = self._get_regional_site_term(C)
return (C["g1"] + dg1) + (C["g2"] + dg2) * np.log(vs30) | Returns only a linear site amplification term | Below is the the instruction that describes the task:
### Input:
Returns only a linear site amplification term
### Response:
def _get_site_term(self, C, vs30):
"""
Returns only a linear site amplification term
"""
dg1, dg2 = self._get_regional_site_term(C)
return (C["g1"] + dg1) + (C["g2"] + dg2) * np.log(vs30) |
def close_poll(
self,
chat_id: Union[int, str],
message_id: id
) -> bool:
"""Use this method to close (stop) a poll.
Closed polls can't be reopened and nobody will be able to vote in it anymore.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
message_id (``int``):
Unique poll message identifier inside this chat.
Returns:
On success, True is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
poll = self.get_messages(chat_id, message_id).poll
self.send(
functions.messages.EditMessage(
peer=self.resolve_peer(chat_id),
id=message_id,
media=types.InputMediaPoll(
poll=types.Poll(
id=poll.id,
closed=True,
question="",
answers=[]
)
)
)
)
return True | Use this method to close (stop) a poll.
Closed polls can't be reopened and nobody will be able to vote in it anymore.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
message_id (``int``):
Unique poll message identifier inside this chat.
Returns:
On success, True is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. | Below is the the instruction that describes the task:
### Input:
Use this method to close (stop) a poll.
Closed polls can't be reopened and nobody will be able to vote in it anymore.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
message_id (``int``):
Unique poll message identifier inside this chat.
Returns:
On success, True is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
### Response:
def close_poll(
self,
chat_id: Union[int, str],
message_id: id
) -> bool:
"""Use this method to close (stop) a poll.
Closed polls can't be reopened and nobody will be able to vote in it anymore.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
message_id (``int``):
Unique poll message identifier inside this chat.
Returns:
On success, True is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
poll = self.get_messages(chat_id, message_id).poll
self.send(
functions.messages.EditMessage(
peer=self.resolve_peer(chat_id),
id=message_id,
media=types.InputMediaPoll(
poll=types.Poll(
id=poll.id,
closed=True,
question="",
answers=[]
)
)
)
)
return True |
def _get_credentials(username=None, password=None, dbhost=None):
"""Obtain user credentials by arguments or asking the user"""
# Database salt
system_config = dbhost.objectmodels['systemconfig'].find_one({
'active': True
})
try:
salt = system_config.salt.encode('ascii')
except (KeyError, AttributeError):
log('No systemconfig or it is without a salt! '
'Reinstall the system provisioning with'
'hfos_manage.py install provisions -p system')
sys.exit(3)
if username is None:
username = _ask("Please enter username: ")
else:
username = username
if password is None:
password = _ask_password()
else:
password = password
try:
password = password.encode('utf-8')
except UnicodeDecodeError:
password = password
passhash = hashlib.sha512(password)
passhash.update(salt)
return username, passhash.hexdigest() | Obtain user credentials by arguments or asking the user | Below is the the instruction that describes the task:
### Input:
Obtain user credentials by arguments or asking the user
### Response:
def _get_credentials(username=None, password=None, dbhost=None):
"""Obtain user credentials by arguments or asking the user"""
# Database salt
system_config = dbhost.objectmodels['systemconfig'].find_one({
'active': True
})
try:
salt = system_config.salt.encode('ascii')
except (KeyError, AttributeError):
log('No systemconfig or it is without a salt! '
'Reinstall the system provisioning with'
'hfos_manage.py install provisions -p system')
sys.exit(3)
if username is None:
username = _ask("Please enter username: ")
else:
username = username
if password is None:
password = _ask_password()
else:
password = password
try:
password = password.encode('utf-8')
except UnicodeDecodeError:
password = password
passhash = hashlib.sha512(password)
passhash.update(salt)
return username, passhash.hexdigest() |
def has_default_file(cls):
"""Check if a configuration file exists."""
for filename in cls.config_files:
for searchpath in cls.config_searchpath:
path = os.path.join(searchpath, filename)
if os.path.exists(path):
return True
return False | Check if a configuration file exists. | Below is the the instruction that describes the task:
### Input:
Check if a configuration file exists.
### Response:
def has_default_file(cls):
"""Check if a configuration file exists."""
for filename in cls.config_files:
for searchpath in cls.config_searchpath:
path = os.path.join(searchpath, filename)
if os.path.exists(path):
return True
return False |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.