code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def update(self, calendar_item_update_operation_type=u'SendToAllAndSaveCopy', **kwargs):
"""
Updates an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.location = u'New location'
event.update()
If no changes to the event have been made, this method does nothing.
Notification of the change event is sent to all users. If you wish to just notify people who were
added, specify ``send_only_to_changed_attendees=True``.
"""
if not self.id:
raise TypeError(u"You can't update an event that hasn't been created yet.")
if 'send_only_to_changed_attendees' in kwargs:
warnings.warn(
"The argument send_only_to_changed_attendees is deprecated. Use calendar_item_update_operation_type instead.",
DeprecationWarning,
) # 20140502
if kwargs['send_only_to_changed_attendees']:
calendar_item_update_operation_type = u'SendToChangedAndSaveCopy'
VALID_UPDATE_OPERATION_TYPES = (
u'SendToNone', u'SendOnlyToAll', u'SendOnlyToChanged',
u'SendToAllAndSaveCopy', u'SendToChangedAndSaveCopy',
)
if calendar_item_update_operation_type not in VALID_UPDATE_OPERATION_TYPES:
raise ValueError('calendar_item_update_operation_type has unknown value')
self.validate()
if self._dirty_attributes:
log.debug(u"Updating these attributes: %r" % self._dirty_attributes)
self.refresh_change_key()
body = soap_request.update_item(self, self._dirty_attributes, calendar_item_update_operation_type=calendar_item_update_operation_type)
self.service.send(body)
self._reset_dirty_attributes()
else:
log.info(u"Update was called, but there's nothing to update. Doing nothing.")
return self | Updates an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.location = u'New location'
event.update()
If no changes to the event have been made, this method does nothing.
Notification of the change event is sent to all users. If you wish to just notify people who were
added, specify ``send_only_to_changed_attendees=True``. | Below is the instruction that describes the task:
### Input:
Updates an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.location = u'New location'
event.update()
If no changes to the event have been made, this method does nothing.
Notification of the change event is sent to all users. If you wish to just notify people who were
added, specify ``send_only_to_changed_attendees=True``.
### Response:
def update(self, calendar_item_update_operation_type=u'SendToAllAndSaveCopy', **kwargs):
"""
Updates an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.location = u'New location'
event.update()
If no changes to the event have been made, this method does nothing.
Notification of the change event is sent to all users. If you wish to just notify people who were
added, specify ``send_only_to_changed_attendees=True``.
"""
if not self.id:
raise TypeError(u"You can't update an event that hasn't been created yet.")
if 'send_only_to_changed_attendees' in kwargs:
warnings.warn(
"The argument send_only_to_changed_attendees is deprecated. Use calendar_item_update_operation_type instead.",
DeprecationWarning,
) # 20140502
if kwargs['send_only_to_changed_attendees']:
calendar_item_update_operation_type = u'SendToChangedAndSaveCopy'
VALID_UPDATE_OPERATION_TYPES = (
u'SendToNone', u'SendOnlyToAll', u'SendOnlyToChanged',
u'SendToAllAndSaveCopy', u'SendToChangedAndSaveCopy',
)
if calendar_item_update_operation_type not in VALID_UPDATE_OPERATION_TYPES:
raise ValueError('calendar_item_update_operation_type has unknown value')
self.validate()
if self._dirty_attributes:
log.debug(u"Updating these attributes: %r" % self._dirty_attributes)
self.refresh_change_key()
body = soap_request.update_item(self, self._dirty_attributes, calendar_item_update_operation_type=calendar_item_update_operation_type)
self.service.send(body)
self._reset_dirty_attributes()
else:
log.info(u"Update was called, but there's nothing to update. Doing nothing.")
return self |
def remove_child_families(self, family_id):
"""Removes all children from a family.
arg: family_id (osid.id.Id): the ``Id`` of a family
raise: NotFound - ``family_id`` not in hierarchy
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_child_catalogs(catalog_id=family_id)
return self._hierarchy_session.remove_children(id_=family_id) | Removes all children from a family.
arg: family_id (osid.id.Id): the ``Id`` of a family
raise: NotFound - ``family_id`` not in hierarchy
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Removes all children from a family.
arg: family_id (osid.id.Id): the ``Id`` of a family
raise: NotFound - ``family_id`` not in hierarchy
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def remove_child_families(self, family_id):
"""Removes all children from a family.
arg: family_id (osid.id.Id): the ``Id`` of a family
raise: NotFound - ``family_id`` not in hierarchy
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_child_catalogs(catalog_id=family_id)
return self._hierarchy_session.remove_children(id_=family_id) |
def _get(self, key, identity='image'):
"""
Deserializing, prefix wrapper for _get_raw
"""
value = self._get_raw(add_prefix(key, identity))
if not value:
return None
if identity == 'image':
return deserialize_image_file(value)
return deserialize(value) | Deserializing, prefix wrapper for _get_raw | Below is the instruction that describes the task:
### Input:
Deserializing, prefix wrapper for _get_raw
### Response:
def _get(self, key, identity='image'):
"""
Deserializing, prefix wrapper for _get_raw
"""
value = self._get_raw(add_prefix(key, identity))
if not value:
return None
if identity == 'image':
return deserialize_image_file(value)
return deserialize(value) |
def woodbury_vector(self):
"""
Woodbury vector in the gaussian likelihood case only is defined as
$$
(K_{xx} + \Sigma)^{-1}Y
\Sigma := \texttt{Likelihood.variance / Approximate likelihood covariance}
$$
"""
if self._woodbury_vector is None:
self._woodbury_vector, _ = dpotrs(self.K_chol, self.mean - self._prior_mean)
return self._woodbury_vector | Woodbury vector in the gaussian likelihood case only is defined as
$$
(K_{xx} + \Sigma)^{-1}Y
\Sigma := \texttt{Likelihood.variance / Approximate likelihood covariance}
$$ | Below is the instruction that describes the task:
### Input:
Woodbury vector in the gaussian likelihood case only is defined as
$$
(K_{xx} + \Sigma)^{-1}Y
\Sigma := \texttt{Likelihood.variance / Approximate likelihood covariance}
$$
### Response:
def woodbury_vector(self):
"""
Woodbury vector in the gaussian likelihood case only is defined as
$$
(K_{xx} + \Sigma)^{-1}Y
\Sigma := \texttt{Likelihood.variance / Approximate likelihood covariance}
$$
"""
if self._woodbury_vector is None:
self._woodbury_vector, _ = dpotrs(self.K_chol, self.mean - self._prior_mean)
return self._woodbury_vector |
def set_score(submission_uuid, points_earned, points_possible,
annotation_creator=None, annotation_type=None, annotation_reason=None):
"""Set a score for a particular submission.
Sets the score for a particular submission. This score is calculated
externally to the API.
Args:
submission_uuid (str): UUID for the submission (must exist).
points_earned (int): The earned points for this submission.
points_possible (int): The total points possible for this particular student item.
annotation_creator (str): An optional field for recording who gave this particular score
annotation_type (str): An optional field for recording what type of annotation should be created,
e.g. "staff_override".
annotation_reason (str): An optional field for recording why this score was set to its value.
Returns:
None
Raises:
SubmissionInternalError: Thrown if there was an internal error while
attempting to save the score.
SubmissionRequestError: Thrown if the given student item or submission
are not found.
Examples:
>>> set_score("a778b933-9fb3-11e3-9c0f-040ccee02800", 11, 12)
{
'student_item': 2,
'submission': 1,
'points_earned': 11,
'points_possible': 12,
'created_at': datetime.datetime(2014, 2, 7, 20, 6, 42, 331156, tzinfo=<UTC>)
}
"""
try:
submission_model = _get_submission_model(submission_uuid)
except Submission.DoesNotExist:
raise SubmissionNotFoundError(
u"No submission matching uuid {}".format(submission_uuid)
)
except DatabaseError:
error_msg = u"Could not retrieve submission {}.".format(
submission_uuid
)
logger.exception(error_msg)
raise SubmissionRequestError(msg=error_msg)
score = ScoreSerializer(
data={
"student_item": submission_model.student_item.pk,
"submission": submission_model.pk,
"points_earned": points_earned,
"points_possible": points_possible,
}
)
if not score.is_valid():
logger.exception(score.errors)
raise SubmissionInternalError(score.errors)
# When we save the score, a score summary will be created if
# it does not already exist.
# When the database's isolation level is set to repeatable-read,
# it's possible for a score summary to exist for this student item,
# even though we cannot retrieve it.
# In this case, we assume that someone else has already created
# a score summary and ignore the error.
# TODO: once we're using Django 1.8, use transactions to ensure that these
# two models are saved at the same time.
try:
score_model = score.save()
_log_score(score_model)
if annotation_creator is not None:
score_annotation = ScoreAnnotation(
score=score_model,
creator=annotation_creator,
annotation_type=annotation_type,
reason=annotation_reason
)
score_annotation.save()
# Send a signal out to any listeners who are waiting for scoring events.
score_set.send(
sender=None,
points_possible=points_possible,
points_earned=points_earned,
anonymous_user_id=submission_model.student_item.student_id,
course_id=submission_model.student_item.course_id,
item_id=submission_model.student_item.item_id,
created_at=score_model.created_at,
)
except IntegrityError:
pass | Set a score for a particular submission.
Sets the score for a particular submission. This score is calculated
externally to the API.
Args:
submission_uuid (str): UUID for the submission (must exist).
points_earned (int): The earned points for this submission.
points_possible (int): The total points possible for this particular student item.
annotation_creator (str): An optional field for recording who gave this particular score
annotation_type (str): An optional field for recording what type of annotation should be created,
e.g. "staff_override".
annotation_reason (str): An optional field for recording why this score was set to its value.
Returns:
None
Raises:
SubmissionInternalError: Thrown if there was an internal error while
attempting to save the score.
SubmissionRequestError: Thrown if the given student item or submission
are not found.
Examples:
>>> set_score("a778b933-9fb3-11e3-9c0f-040ccee02800", 11, 12)
{
'student_item': 2,
'submission': 1,
'points_earned': 11,
'points_possible': 12,
'created_at': datetime.datetime(2014, 2, 7, 20, 6, 42, 331156, tzinfo=<UTC>)
} | Below is the instruction that describes the task:
### Input:
Set a score for a particular submission.
Sets the score for a particular submission. This score is calculated
externally to the API.
Args:
submission_uuid (str): UUID for the submission (must exist).
points_earned (int): The earned points for this submission.
points_possible (int): The total points possible for this particular student item.
annotation_creator (str): An optional field for recording who gave this particular score
annotation_type (str): An optional field for recording what type of annotation should be created,
e.g. "staff_override".
annotation_reason (str): An optional field for recording why this score was set to its value.
Returns:
None
Raises:
SubmissionInternalError: Thrown if there was an internal error while
attempting to save the score.
SubmissionRequestError: Thrown if the given student item or submission
are not found.
Examples:
>>> set_score("a778b933-9fb3-11e3-9c0f-040ccee02800", 11, 12)
{
'student_item': 2,
'submission': 1,
'points_earned': 11,
'points_possible': 12,
'created_at': datetime.datetime(2014, 2, 7, 20, 6, 42, 331156, tzinfo=<UTC>)
}
### Response:
def set_score(submission_uuid, points_earned, points_possible,
annotation_creator=None, annotation_type=None, annotation_reason=None):
"""Set a score for a particular submission.
Sets the score for a particular submission. This score is calculated
externally to the API.
Args:
submission_uuid (str): UUID for the submission (must exist).
points_earned (int): The earned points for this submission.
points_possible (int): The total points possible for this particular student item.
annotation_creator (str): An optional field for recording who gave this particular score
annotation_type (str): An optional field for recording what type of annotation should be created,
e.g. "staff_override".
annotation_reason (str): An optional field for recording why this score was set to its value.
Returns:
None
Raises:
SubmissionInternalError: Thrown if there was an internal error while
attempting to save the score.
SubmissionRequestError: Thrown if the given student item or submission
are not found.
Examples:
>>> set_score("a778b933-9fb3-11e3-9c0f-040ccee02800", 11, 12)
{
'student_item': 2,
'submission': 1,
'points_earned': 11,
'points_possible': 12,
'created_at': datetime.datetime(2014, 2, 7, 20, 6, 42, 331156, tzinfo=<UTC>)
}
"""
try:
submission_model = _get_submission_model(submission_uuid)
except Submission.DoesNotExist:
raise SubmissionNotFoundError(
u"No submission matching uuid {}".format(submission_uuid)
)
except DatabaseError:
error_msg = u"Could not retrieve submission {}.".format(
submission_uuid
)
logger.exception(error_msg)
raise SubmissionRequestError(msg=error_msg)
score = ScoreSerializer(
data={
"student_item": submission_model.student_item.pk,
"submission": submission_model.pk,
"points_earned": points_earned,
"points_possible": points_possible,
}
)
if not score.is_valid():
logger.exception(score.errors)
raise SubmissionInternalError(score.errors)
# When we save the score, a score summary will be created if
# it does not already exist.
# When the database's isolation level is set to repeatable-read,
# it's possible for a score summary to exist for this student item,
# even though we cannot retrieve it.
# In this case, we assume that someone else has already created
# a score summary and ignore the error.
# TODO: once we're using Django 1.8, use transactions to ensure that these
# two models are saved at the same time.
try:
score_model = score.save()
_log_score(score_model)
if annotation_creator is not None:
score_annotation = ScoreAnnotation(
score=score_model,
creator=annotation_creator,
annotation_type=annotation_type,
reason=annotation_reason
)
score_annotation.save()
# Send a signal out to any listeners who are waiting for scoring events.
score_set.send(
sender=None,
points_possible=points_possible,
points_earned=points_earned,
anonymous_user_id=submission_model.student_item.student_id,
course_id=submission_model.student_item.course_id,
item_id=submission_model.student_item.item_id,
created_at=score_model.created_at,
)
except IntegrityError:
pass |
def check_connection (self):
"""
Open a telnet connection and try to login. Expected login
label is "login: ", expected password label is "Password: ".
"""
self.url_connection = telnetlib.Telnet(timeout=self.aggregate.config["timeout"])
if log.is_debug(LOG_CHECK):
self.url_connection.set_debuglevel(1)
self.url_connection.open(self.host, self.port)
if self.user:
self.url_connection.read_until("login: ", 10)
self.url_connection.write(encode(self.user)+"\n")
if self.password:
self.url_connection.read_until("Password: ", 10)
self.url_connection.write(encode(self.password)+"\n")
# XXX how to tell if we are logged in??
self.url_connection.write("exit\n") | Open a telnet connection and try to login. Expected login
label is "login: ", expected password label is "Password: ". | Below is the instruction that describes the task:
### Input:
Open a telnet connection and try to login. Expected login
label is "login: ", expected password label is "Password: ".
### Response:
def check_connection (self):
"""
Open a telnet connection and try to login. Expected login
label is "login: ", expected password label is "Password: ".
"""
self.url_connection = telnetlib.Telnet(timeout=self.aggregate.config["timeout"])
if log.is_debug(LOG_CHECK):
self.url_connection.set_debuglevel(1)
self.url_connection.open(self.host, self.port)
if self.user:
self.url_connection.read_until("login: ", 10)
self.url_connection.write(encode(self.user)+"\n")
if self.password:
self.url_connection.read_until("Password: ", 10)
self.url_connection.write(encode(self.password)+"\n")
# XXX how to tell if we are logged in??
self.url_connection.write("exit\n") |
def _find_statements(self):
"""Find the statements in `self.code`.
Produce a sequence of line numbers that start statements. Recurses
into all code objects reachable from `self.code`.
"""
for bp in self.child_parsers():
# Get all of the lineno information from this code.
for _, l in bp._bytes_lines():
yield l | Find the statements in `self.code`.
Produce a sequence of line numbers that start statements. Recurses
into all code objects reachable from `self.code`. | Below is the instruction that describes the task:
### Input:
Find the statements in `self.code`.
Produce a sequence of line numbers that start statements. Recurses
into all code objects reachable from `self.code`.
### Response:
def _find_statements(self):
"""Find the statements in `self.code`.
Produce a sequence of line numbers that start statements. Recurses
into all code objects reachable from `self.code`.
"""
for bp in self.child_parsers():
# Get all of the lineno information from this code.
for _, l in bp._bytes_lines():
yield l |
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title) | Set window title. | Below is the instruction that describes the task:
### Input:
Set window title.
### Response:
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title) |
def delete(self, file_path, branch, commit_message, **kwargs):
"""Delete a file on the server.
Args:
file_path (str): Path of the file to remove
branch (str): Branch from which the file will be removed
commit_message (str): Commit message for the deletion
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server cannot perform the request
"""
path = '%s/%s' % (self.path, file_path.replace('/', '%2F'))
data = {'branch': branch, 'commit_message': commit_message}
self.gitlab.http_delete(path, query_data=data, **kwargs) | Delete a file on the server.
Args:
file_path (str): Path of the file to remove
branch (str): Branch from which the file will be removed
commit_message (str): Commit message for the deletion
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server cannot perform the request | Below is the instruction that describes the task:
### Input:
Delete a file on the server.
Args:
file_path (str): Path of the file to remove
branch (str): Branch from which the file will be removed
commit_message (str): Commit message for the deletion
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server cannot perform the request
### Response:
def delete(self, file_path, branch, commit_message, **kwargs):
"""Delete a file on the server.
Args:
file_path (str): Path of the file to remove
branch (str): Branch from which the file will be removed
commit_message (str): Commit message for the deletion
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server cannot perform the request
"""
path = '%s/%s' % (self.path, file_path.replace('/', '%2F'))
data = {'branch': branch, 'commit_message': commit_message}
self.gitlab.http_delete(path, query_data=data, **kwargs) |
def get(self,id):
'''Return all the semantic tag related to the given tag id
:returns: a semantic tag or None
:rtype: list of ckan.model.semantictag.SemanticTag object
'''
query = meta.Session.query(TagSemanticTag).filter(TagSemanticTag.id==id)
return query.first() | Return all the semantic tag related to the given tag id
:returns: a semantic tag or None
:rtype: list of ckan.model.semantictag.SemanticTag object | Below is the instruction that describes the task:
### Input:
Return all the semantic tag related to the given tag id
:returns: a semantic tag or None
:rtype: list of ckan.model.semantictag.SemanticTag object
### Response:
def get(self,id):
'''Return all the semantic tag related to the given tag id
:returns: a semantic tag or None
:rtype: list of ckan.model.semantictag.SemanticTag object
'''
query = meta.Session.query(TagSemanticTag).filter(TagSemanticTag.id==id)
return query.first() |
def serialize(
self,
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> ET.Element
"""
Serialize the value into a new element object and return the element.
If the omit_empty option was specified and the value is falsey, then this will return None.
"""
# For primitive values, this is only called when the value is part of an array,
# in which case we do not need to check for missing or omitted values.
start_element, end_element = _element_path_create_new(self.element_path)
self._serialize(end_element, value, state)
return start_element | Serialize the value into a new element object and return the element.
If the omit_empty option was specified and the value is falsey, then this will return None. | Below is the instruction that describes the task:
### Input:
Serialize the value into a new element object and return the element.
If the omit_empty option was specified and the value is falsey, then this will return None.
### Response:
def serialize(
self,
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> ET.Element
"""
Serialize the value into a new element object and return the element.
If the omit_empty option was specified and the value is falsey, then this will return None.
"""
# For primitive values, this is only called when the value is part of an array,
# in which case we do not need to check for missing or omitted values.
start_element, end_element = _element_path_create_new(self.element_path)
self._serialize(end_element, value, state)
return start_element |
def install_pkgs(self, offline=False):
"""
TODO: maybe use conda as python package
"""
if not offline and self.pkgs:
self.logger.info("Installing conda packages ...")
cmd = [join(self.anaconda_home, 'bin', 'conda')]
cmd.append('install')
# if offline:
# cmd.append('--offline')
# self.logger.info("... offline mode ...")
if not self.newest:
cmd.append('--no-update-deps')
self.logger.info("... no update dependencies ...")
if self.env:
self.logger.info("... in conda environment %s ...", self.env)
cmd.extend(['-n', self.env])
cmd.append('--yes')
if self.no_pin:
cmd.append('--no-pin')
self.logger.info("... no pin ...")
if self.channel_priority:
self.logger.info("... channel priority ...")
cmd.append('--channel-priority')
if self.channels:
if self.override_channels:
self.logger.info('... override channels ...')
cmd.append('--override-channels')
self.logger.info("... with conda channels: %s ...",
', '.join(self.channels))
for channel in self.channels:
cmd.append('-c')
cmd.append(channel)
cmd.extend(self.pkgs)
try:
self.logger.debug("install_pkgs cmd: %s", cmd)
check_call(cmd)
except CalledProcessError as err:
self.logger.error("Conda exited with errors: %s", err.output)
return self.pkgs | TODO: maybe use conda as python package | Below is the instruction that describes the task:
### Input:
TODO: maybe use conda as python package
### Response:
def install_pkgs(self, offline=False):
"""
TODO: maybe use conda as python package
"""
if not offline and self.pkgs:
self.logger.info("Installing conda packages ...")
cmd = [join(self.anaconda_home, 'bin', 'conda')]
cmd.append('install')
# if offline:
# cmd.append('--offline')
# self.logger.info("... offline mode ...")
if not self.newest:
cmd.append('--no-update-deps')
self.logger.info("... no update dependencies ...")
if self.env:
self.logger.info("... in conda environment %s ...", self.env)
cmd.extend(['-n', self.env])
cmd.append('--yes')
if self.no_pin:
cmd.append('--no-pin')
self.logger.info("... no pin ...")
if self.channel_priority:
self.logger.info("... channel priority ...")
cmd.append('--channel-priority')
if self.channels:
if self.override_channels:
self.logger.info('... override channels ...')
cmd.append('--override-channels')
self.logger.info("... with conda channels: %s ...",
', '.join(self.channels))
for channel in self.channels:
cmd.append('-c')
cmd.append(channel)
cmd.extend(self.pkgs)
try:
self.logger.debug("install_pkgs cmd: %s", cmd)
check_call(cmd)
except CalledProcessError as err:
self.logger.error("Conda exited with errors: %s", err.output)
return self.pkgs |
def load_clubs(self):
"""Fetches the MAL user clubs page and sets the current user's clubs attributes.
:rtype: :class:`.User`
:return: Current user object.
"""
user_clubs = self.session.session.get(u'http://myanimelist.net/profile/' + utilities.urlencode(self.username) + u'/clubs').text
self.set(self.parse_clubs(utilities.get_clean_dom(user_clubs)))
return self | Fetches the MAL user clubs page and sets the current user's clubs attributes.
:rtype: :class:`.User`
:return: Current user object. | Below is the instruction that describes the task:
### Input:
Fetches the MAL user clubs page and sets the current user's clubs attributes.
:rtype: :class:`.User`
:return: Current user object.
### Response:
def load_clubs(self):
"""Fetches the MAL user clubs page and sets the current user's clubs attributes.
:rtype: :class:`.User`
:return: Current user object.
"""
user_clubs = self.session.session.get(u'http://myanimelist.net/profile/' + utilities.urlencode(self.username) + u'/clubs').text
self.set(self.parse_clubs(utilities.get_clean_dom(user_clubs)))
return self |
def object_info(**kw):
"""Make an object info dict with all fields present."""
infodict = dict(izip_longest(info_fields, [None]))
infodict.update(kw)
return infodict | Make an object info dict with all fields present. | Below is the instruction that describes the task:
### Input:
Make an object info dict with all fields present.
### Response:
def object_info(**kw):
"""Make an object info dict with all fields present."""
infodict = dict(izip_longest(info_fields, [None]))
infodict.update(kw)
return infodict |
def _find_instance(iname, instance_repo):
"""
Find an instance in the instance repo by iname and return the
index of that instance.
Parameters:
iname: CIMInstancename to find
instance_repo: the instance repo to search
Return (None, None if not found. Otherwise return tuple of
index, instance
Raises:
CIMError: Failed if repo invalid.
"""
rtn_inst = None
rtn_index = None
for index, inst in enumerate(instance_repo):
if iname == inst.path:
if rtn_inst is not None:
# TODO:ks Future Remove dup test since we should be
# insuring no dups on instance creation
raise CIMError(
CIM_ERR_FAILED,
_format("Invalid Repository. Multiple instances with "
"same path {0!A}.", rtn_inst.path))
rtn_inst = inst
rtn_index = index
return(rtn_index, rtn_inst) | Find an instance in the instance repo by iname and return the
index of that instance.
Parameters:
iname: CIMInstancename to find
instance_repo: the instance repo to search
Return (None, None if not found. Otherwise return tuple of
index, instance
Raises:
CIMError: Failed if repo invalid. | Below is the instruction that describes the task:
### Input:
Find an instance in the instance repo by iname and return the
index of that instance.
Parameters:
iname: CIMInstancename to find
instance_repo: the instance repo to search
Return (None, None if not found. Otherwise return tuple of
index, instance
Raises:
CIMError: Failed if repo invalid.
### Response:
def _find_instance(iname, instance_repo):
"""
Find an instance in the instance repo by iname and return the
index of that instance.
Parameters:
iname: CIMInstancename to find
instance_repo: the instance repo to search
Return (None, None if not found. Otherwise return tuple of
index, instance
Raises:
CIMError: Failed if repo invalid.
"""
rtn_inst = None
rtn_index = None
for index, inst in enumerate(instance_repo):
if iname == inst.path:
if rtn_inst is not None:
# TODO:ks Future Remove dup test since we should be
# insuring no dups on instance creation
raise CIMError(
CIM_ERR_FAILED,
_format("Invalid Repository. Multiple instances with "
"same path {0!A}.", rtn_inst.path))
rtn_inst = inst
rtn_index = index
return(rtn_index, rtn_inst) |
def newtons_method(f, x, line_search=1.0, maxiter=1000, tol=1e-16,
cg_iter=None, callback=None):
r"""Newton's method for minimizing a functional.
Notes
-----
This is a general and optimized implementation of Newton's method
for solving the problem:
.. math::
\min f(x)
for a differentiable function
:math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space
:math:`\mathcal{X}`. It does so by finding a zero of the gradient
.. math::
\nabla f: \mathcal{X} \to \mathcal{X}.
of finding a root of a function.
The algorithm is well-known and there is a vast literature about it.
Among others, the method is described in [BV2004], Sections 9.5
and 10.2 (`book available online
<http://stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf>`_),
[GNS2009], Section 2.7 for solving nonlinear equations and Section
11.3 for its use in minimization, and wikipedia on `Newton's_method
<https://en.wikipedia.org/wiki/Newton's_method>`_.
The algorithm works by iteratively solving
.. math::
\partial f(x_k)p_k = -f(x_k)
and then updating as
.. math::
x_{k+1} = x_k + \alpha x_k,
where :math:`\alpha` is a suitable step length (see the
references). In this implementation the system of equations are
solved using the conjugate gradient method.
Parameters
----------
f : `Functional`
Goal functional. Needs to have ``f.gradient`` and
``f.gradient.derivative``.
x : ``op.domain`` element
Starting point of the iteration
line_search : float or `LineSearch`, optional
Strategy to choose the step length. If a float is given, uses it as a
fixed step length.
maxiter : int, optional
Maximum number of iterations.
tol : float, optional
Tolerance that should be used for terminating the iteration.
cg_iter : int, optional
Number of iterations in the the conjugate gradient solver,
for computing the search direction.
callback : callable, optional
Object executing code per iteration, e.g. plotting each iterate
References
----------
[BV2004] Boyd, S, and Vandenberghe, L. *Convex optimization*.
Cambridge university press, 2004.
[GNS2009] Griva, I, Nash, S G, and Sofer, A. *Linear and nonlinear
optimization*. Siam, 2009.
"""
# TODO: update doc
grad = f.gradient
if x not in grad.domain:
raise TypeError('`x` {!r} is not in the domain of `f` {!r}'
''.format(x, grad.domain))
if not callable(line_search):
line_search = ConstantLineSearch(line_search)
if cg_iter is None:
# Motivated by that if it is Ax = b, x and b in Rn, it takes at most n
# iterations to solve with cg
cg_iter = grad.domain.size
# TODO: optimize by using lincomb and avoiding to create copies
for _ in range(maxiter):
# Initialize the search direction to 0
search_direction = x.space.zero()
# Compute hessian (as operator) and gradient in the current point
hessian = grad.derivative(x)
deriv_in_point = grad(x)
# Solving A*x = b for x, in this case f''(x)*p = -f'(x)
# TODO: Let the user provide/choose method for how to solve this?
try:
hessian_inverse = hessian.inverse
except NotImplementedError:
conjugate_gradient(hessian, search_direction,
-deriv_in_point, cg_iter)
else:
hessian_inverse(-deriv_in_point, out=search_direction)
# Computing step length
dir_deriv = search_direction.inner(deriv_in_point)
if np.abs(dir_deriv) <= tol:
return
step_length = line_search(x, search_direction, dir_deriv)
# Updating
x += step_length * search_direction
if callback is not None:
callback(x) | r"""Newton's method for minimizing a functional.
Notes
-----
This is a general and optimized implementation of Newton's method
for solving the problem:
.. math::
\min f(x)
for a differentiable function
:math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space
:math:`\mathcal{X}`. It does so by finding a zero of the gradient
.. math::
\nabla f: \mathcal{X} \to \mathcal{X}.
of finding a root of a function.
The algorithm is well-known and there is a vast literature about it.
Among others, the method is described in [BV2004], Sections 9.5
and 10.2 (`book available online
<http://stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf>`_),
[GNS2009], Section 2.7 for solving nonlinear equations and Section
11.3 for its use in minimization, and wikipedia on `Newton's_method
<https://en.wikipedia.org/wiki/Newton's_method>`_.
The algorithm works by iteratively solving
.. math::
\partial f(x_k)p_k = -f(x_k)
and then updating as
.. math::
x_{k+1} = x_k + \alpha x_k,
where :math:`\alpha` is a suitable step length (see the
references). In this implementation the system of equations are
solved using the conjugate gradient method.
Parameters
----------
f : `Functional`
Goal functional. Needs to have ``f.gradient`` and
``f.gradient.derivative``.
x : ``op.domain`` element
Starting point of the iteration
line_search : float or `LineSearch`, optional
Strategy to choose the step length. If a float is given, uses it as a
fixed step length.
maxiter : int, optional
Maximum number of iterations.
tol : float, optional
Tolerance that should be used for terminating the iteration.
cg_iter : int, optional
Number of iterations in the the conjugate gradient solver,
for computing the search direction.
callback : callable, optional
Object executing code per iteration, e.g. plotting each iterate
References
----------
[BV2004] Boyd, S, and Vandenberghe, L. *Convex optimization*.
Cambridge university press, 2004.
[GNS2009] Griva, I, Nash, S G, and Sofer, A. *Linear and nonlinear
optimization*. Siam, 2009. | Below is the the instruction that describes the task:
### Input:
r"""Newton's method for minimizing a functional.
Notes
-----
This is a general and optimized implementation of Newton's method
for solving the problem:
.. math::
\min f(x)
for a differentiable function
:math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space
:math:`\mathcal{X}`. It does so by finding a zero of the gradient
.. math::
\nabla f: \mathcal{X} \to \mathcal{X}.
of finding a root of a function.
The algorithm is well-known and there is a vast literature about it.
Among others, the method is described in [BV2004], Sections 9.5
and 10.2 (`book available online
<http://stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf>`_),
[GNS2009], Section 2.7 for solving nonlinear equations and Section
11.3 for its use in minimization, and wikipedia on `Newton's_method
<https://en.wikipedia.org/wiki/Newton's_method>`_.
The algorithm works by iteratively solving
.. math::
\partial f(x_k)p_k = -f(x_k)
and then updating as
.. math::
x_{k+1} = x_k + \alpha x_k,
where :math:`\alpha` is a suitable step length (see the
references). In this implementation the system of equations are
solved using the conjugate gradient method.
Parameters
----------
f : `Functional`
Goal functional. Needs to have ``f.gradient`` and
``f.gradient.derivative``.
x : ``op.domain`` element
Starting point of the iteration
line_search : float or `LineSearch`, optional
Strategy to choose the step length. If a float is given, uses it as a
fixed step length.
maxiter : int, optional
Maximum number of iterations.
tol : float, optional
Tolerance that should be used for terminating the iteration.
cg_iter : int, optional
Number of iterations in the the conjugate gradient solver,
for computing the search direction.
callback : callable, optional
Object executing code per iteration, e.g. plotting each iterate
References
----------
[BV2004] Boyd, S, and Vandenberghe, L. *Convex optimization*.
Cambridge university press, 2004.
[GNS2009] Griva, I, Nash, S G, and Sofer, A. *Linear and nonlinear
optimization*. Siam, 2009.
### Response:
def newtons_method(f, x, line_search=1.0, maxiter=1000, tol=1e-16,
cg_iter=None, callback=None):
r"""Newton's method for minimizing a functional.
Notes
-----
This is a general and optimized implementation of Newton's method
for solving the problem:
.. math::
\min f(x)
for a differentiable function
:math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space
:math:`\mathcal{X}`. It does so by finding a zero of the gradient
.. math::
\nabla f: \mathcal{X} \to \mathcal{X}.
of finding a root of a function.
The algorithm is well-known and there is a vast literature about it.
Among others, the method is described in [BV2004], Sections 9.5
and 10.2 (`book available online
<http://stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf>`_),
[GNS2009], Section 2.7 for solving nonlinear equations and Section
11.3 for its use in minimization, and wikipedia on `Newton's_method
<https://en.wikipedia.org/wiki/Newton's_method>`_.
The algorithm works by iteratively solving
.. math::
\partial f(x_k)p_k = -f(x_k)
and then updating as
.. math::
x_{k+1} = x_k + \alpha x_k,
where :math:`\alpha` is a suitable step length (see the
references). In this implementation the system of equations are
solved using the conjugate gradient method.
Parameters
----------
f : `Functional`
Goal functional. Needs to have ``f.gradient`` and
``f.gradient.derivative``.
x : ``op.domain`` element
Starting point of the iteration
line_search : float or `LineSearch`, optional
Strategy to choose the step length. If a float is given, uses it as a
fixed step length.
maxiter : int, optional
Maximum number of iterations.
tol : float, optional
Tolerance that should be used for terminating the iteration.
cg_iter : int, optional
Number of iterations in the the conjugate gradient solver,
for computing the search direction.
callback : callable, optional
Object executing code per iteration, e.g. plotting each iterate
References
----------
[BV2004] Boyd, S, and Vandenberghe, L. *Convex optimization*.
Cambridge university press, 2004.
[GNS2009] Griva, I, Nash, S G, and Sofer, A. *Linear and nonlinear
optimization*. Siam, 2009.
"""
# TODO: update doc
grad = f.gradient
if x not in grad.domain:
raise TypeError('`x` {!r} is not in the domain of `f` {!r}'
''.format(x, grad.domain))
if not callable(line_search):
line_search = ConstantLineSearch(line_search)
if cg_iter is None:
# Motivated by that if it is Ax = b, x and b in Rn, it takes at most n
# iterations to solve with cg
cg_iter = grad.domain.size
# TODO: optimize by using lincomb and avoiding to create copies
for _ in range(maxiter):
# Initialize the search direction to 0
search_direction = x.space.zero()
# Compute hessian (as operator) and gradient in the current point
hessian = grad.derivative(x)
deriv_in_point = grad(x)
# Solving A*x = b for x, in this case f''(x)*p = -f'(x)
# TODO: Let the user provide/choose method for how to solve this?
try:
hessian_inverse = hessian.inverse
except NotImplementedError:
conjugate_gradient(hessian, search_direction,
-deriv_in_point, cg_iter)
else:
hessian_inverse(-deriv_in_point, out=search_direction)
# Computing step length
dir_deriv = search_direction.inner(deriv_in_point)
if np.abs(dir_deriv) <= tol:
return
step_length = line_search(x, search_direction, dir_deriv)
# Updating
x += step_length * search_direction
if callback is not None:
callback(x) |
def refresh_content(self, order=None, name=None):
"""
Re-download comments and reset the page index
"""
order = order or self.content.order
url = name or self.content.name
# Hack to allow an order specified in the name by prompt_subreddit() to
# override the current default
if order == 'ignore':
order = None
with self.term.loader('Refreshing page'):
self.content = SubmissionContent.from_url(
self.reddit, url, self.term.loader, order=order,
max_comment_cols=self.config['max_comment_cols'])
if not self.term.loader.exception:
self.nav = Navigator(self.content.get, page_index=-1) | Re-download comments and reset the page index | Below is the the instruction that describes the task:
### Input:
Re-download comments and reset the page index
### Response:
def refresh_content(self, order=None, name=None):
"""
Re-download comments and reset the page index
"""
order = order or self.content.order
url = name or self.content.name
# Hack to allow an order specified in the name by prompt_subreddit() to
# override the current default
if order == 'ignore':
order = None
with self.term.loader('Refreshing page'):
self.content = SubmissionContent.from_url(
self.reddit, url, self.term.loader, order=order,
max_comment_cols=self.config['max_comment_cols'])
if not self.term.loader.exception:
self.nav = Navigator(self.content.get, page_index=-1) |
def cmp_traj(traj_a, traj_b):
"""
Parameters
----------
traj_a, traj_b: mdtraj.Trajectory
"""
if traj_a is None and traj_b is None:
return True
if traj_a is None and traj_b is not None:
return False
if traj_a is not None and traj_b is None:
return False
equal_top = traj_a.top == traj_b.top
xyz_close = np.allclose(traj_a.xyz, traj_b.xyz)
equal_time = np.all(traj_a.time == traj_b.time)
equal_unitcell_angles = np.array_equal(traj_a.unitcell_angles, traj_b.unitcell_angles)
equal_unitcell_lengths = np.array_equal(traj_a.unitcell_lengths, traj_b.unitcell_lengths)
return np.all([equal_top, equal_time, xyz_close, equal_time, equal_unitcell_angles, equal_unitcell_lengths]) | Parameters
----------
traj_a, traj_b: mdtraj.Trajectory | Below is the the instruction that describes the task:
### Input:
Parameters
----------
traj_a, traj_b: mdtraj.Trajectory
### Response:
def cmp_traj(traj_a, traj_b):
"""
Parameters
----------
traj_a, traj_b: mdtraj.Trajectory
"""
if traj_a is None and traj_b is None:
return True
if traj_a is None and traj_b is not None:
return False
if traj_a is not None and traj_b is None:
return False
equal_top = traj_a.top == traj_b.top
xyz_close = np.allclose(traj_a.xyz, traj_b.xyz)
equal_time = np.all(traj_a.time == traj_b.time)
equal_unitcell_angles = np.array_equal(traj_a.unitcell_angles, traj_b.unitcell_angles)
equal_unitcell_lengths = np.array_equal(traj_a.unitcell_lengths, traj_b.unitcell_lengths)
return np.all([equal_top, equal_time, xyz_close, equal_time, equal_unitcell_angles, equal_unitcell_lengths]) |
async def asgi_send(self, message: dict) -> None:
"""Called by the ASGI instance to send a message."""
if message["type"] == "websocket.accept" and self.state == ASGIWebsocketState.HANDSHAKE:
headers = build_and_validate_headers(message.get("headers", []))
raise_if_subprotocol_present(headers)
headers.extend(self.response_headers())
await self.asend(
AcceptConnection(
extensions=[PerMessageDeflate()],
extra_headers=headers,
subprotocol=message.get("subprotocol"),
)
)
self.state = ASGIWebsocketState.CONNECTED
self.config.access_logger.access(
self.scope, {"status": 101, "headers": []}, time() - self.start_time
)
elif (
message["type"] == "websocket.http.response.start"
and self.state == ASGIWebsocketState.HANDSHAKE
):
self.response = message
self.config.access_logger.access(self.scope, self.response, time() - self.start_time)
elif message["type"] == "websocket.http.response.body" and self.state in {
ASGIWebsocketState.HANDSHAKE,
ASGIWebsocketState.RESPONSE,
}:
await self._asgi_send_rejection(message)
elif message["type"] == "websocket.send" and self.state == ASGIWebsocketState.CONNECTED:
data: Union[bytes, str]
if message.get("bytes") is not None:
await self.asend(BytesMessage(data=bytes(message["bytes"])))
elif not isinstance(message["text"], str):
raise TypeError(f"{message['text']} should be a str")
else:
await self.asend(TextMessage(data=message["text"]))
elif message["type"] == "websocket.close" and self.state == ASGIWebsocketState.HANDSHAKE:
await self.send_http_error(403)
self.state = ASGIWebsocketState.HTTPCLOSED
elif message["type"] == "websocket.close":
await self.asend(CloseConnection(code=int(message["code"])))
self.state = ASGIWebsocketState.CLOSED
else:
raise UnexpectedMessage(self.state, message["type"]) | Called by the ASGI instance to send a message. | Below is the the instruction that describes the task:
### Input:
Called by the ASGI instance to send a message.
### Response:
async def asgi_send(self, message: dict) -> None:
"""Called by the ASGI instance to send a message."""
if message["type"] == "websocket.accept" and self.state == ASGIWebsocketState.HANDSHAKE:
headers = build_and_validate_headers(message.get("headers", []))
raise_if_subprotocol_present(headers)
headers.extend(self.response_headers())
await self.asend(
AcceptConnection(
extensions=[PerMessageDeflate()],
extra_headers=headers,
subprotocol=message.get("subprotocol"),
)
)
self.state = ASGIWebsocketState.CONNECTED
self.config.access_logger.access(
self.scope, {"status": 101, "headers": []}, time() - self.start_time
)
elif (
message["type"] == "websocket.http.response.start"
and self.state == ASGIWebsocketState.HANDSHAKE
):
self.response = message
self.config.access_logger.access(self.scope, self.response, time() - self.start_time)
elif message["type"] == "websocket.http.response.body" and self.state in {
ASGIWebsocketState.HANDSHAKE,
ASGIWebsocketState.RESPONSE,
}:
await self._asgi_send_rejection(message)
elif message["type"] == "websocket.send" and self.state == ASGIWebsocketState.CONNECTED:
data: Union[bytes, str]
if message.get("bytes") is not None:
await self.asend(BytesMessage(data=bytes(message["bytes"])))
elif not isinstance(message["text"], str):
raise TypeError(f"{message['text']} should be a str")
else:
await self.asend(TextMessage(data=message["text"]))
elif message["type"] == "websocket.close" and self.state == ASGIWebsocketState.HANDSHAKE:
await self.send_http_error(403)
self.state = ASGIWebsocketState.HTTPCLOSED
elif message["type"] == "websocket.close":
await self.asend(CloseConnection(code=int(message["code"])))
self.state = ASGIWebsocketState.CLOSED
else:
raise UnexpectedMessage(self.state, message["type"]) |
def stop_processing(self, warning=True):
"""
Registers the end of a processing operation.
:param warning: Emit warning message.
:type warning: int
:return: Method success.
:rtype: bool
"""
if not self.__is_processing:
warning and LOGGER.warning(
"!> {0} | Engine is not processing, 'stop_processing' request has been ignored!".format(
self.__class__.__name__))
return False
LOGGER.debug("> Stopping processing operation!")
self.__is_processing = False
self.Application_Progress_Status_processing.Processing_label.setText(QString())
self.Application_Progress_Status_processing.Processing_progressBar.setRange(0, 100)
self.Application_Progress_Status_processing.Processing_progressBar.setValue(0)
self.Application_Progress_Status_processing.hide()
return True | Registers the end of a processing operation.
:param warning: Emit warning message.
:type warning: int
:return: Method success.
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Registers the end of a processing operation.
:param warning: Emit warning message.
:type warning: int
:return: Method success.
:rtype: bool
### Response:
def stop_processing(self, warning=True):
"""
Registers the end of a processing operation.
:param warning: Emit warning message.
:type warning: int
:return: Method success.
:rtype: bool
"""
if not self.__is_processing:
warning and LOGGER.warning(
"!> {0} | Engine is not processing, 'stop_processing' request has been ignored!".format(
self.__class__.__name__))
return False
LOGGER.debug("> Stopping processing operation!")
self.__is_processing = False
self.Application_Progress_Status_processing.Processing_label.setText(QString())
self.Application_Progress_Status_processing.Processing_progressBar.setRange(0, 100)
self.Application_Progress_Status_processing.Processing_progressBar.setValue(0)
self.Application_Progress_Status_processing.hide()
return True |
def clean_prefix(self):
"""The cleaned up invoke prefix. i.e. mentions are ``@name`` instead of ``<@id>``."""
user = self.context.guild.me if self.context.guild else self.context.bot.user
# this breaks if the prefix mention is not the bot itself but I
# consider this to be an *incredibly* strange use case. I'd rather go
# for this common use case rather than waste performance for the
# odd one.
return self.context.prefix.replace(user.mention, '@' + user.display_name) | The cleaned up invoke prefix. i.e. mentions are ``@name`` instead of ``<@id>``. | Below is the the instruction that describes the task:
### Input:
The cleaned up invoke prefix. i.e. mentions are ``@name`` instead of ``<@id>``.
### Response:
def clean_prefix(self):
"""The cleaned up invoke prefix. i.e. mentions are ``@name`` instead of ``<@id>``."""
user = self.context.guild.me if self.context.guild else self.context.bot.user
# this breaks if the prefix mention is not the bot itself but I
# consider this to be an *incredibly* strange use case. I'd rather go
# for this common use case rather than waste performance for the
# odd one.
return self.context.prefix.replace(user.mention, '@' + user.display_name) |
def detect_opensearch(err, package, listed=False):
'Detect, parse, and validate an OpenSearch provider'
# Parse the file.
try:
# Check if it is a file object.
if hasattr(package, 'read'):
srch_prov = parse(package)
else:
# It's not a file object; open it (the XML parser is bad at this).
with open(package, 'rb') as package_file:
srch_prov = parse(package_file)
except DefusedXmlException:
url = 'https://pypi.python.org/pypi/defusedxml/0.3#attack-vectors'
err.error(
err_id=('opensearch', 'security_error'),
error='OpenSearch: XML Security Error',
description='The OpenSearch extension could not be parsed due to '
'a security error in the XML. See {url} for more '
'info.'.format(url=url))
return err
except ExpatError:
err.error(
err_id=('opensearch', 'parse_error'),
error='OpenSearch: XML Parse Error',
description='The OpenSearch extension could not be parsed due to '
'a syntax error in the XML.')
return err
# Make sure that the root element is OpenSearchDescription.
if srch_prov.documentElement.tagName != 'OpenSearchDescription':
err.error(
err_id=('opensearch', 'invalid_document_root'),
error='OpenSearch: Invalid Document Root',
description='The root element of the OpenSearch provider is not '
"'OpenSearchDescription'.")
# Per bug 617822
if not srch_prov.documentElement.hasAttribute('xmlns'):
err.error(
err_id=('opensearch', 'no_xmlns'),
error='OpenSearch: Missing XMLNS attribute',
description='The XML namespace attribute is missing from the '
'OpenSearch document.')
if ('xmlns' not in srch_prov.documentElement.attributes.keys() or
srch_prov.documentElement.attributes['xmlns'].value not in (
'http://a9.com/-/spec/opensearch/1.0/',
'http://a9.com/-/spec/opensearch/1.1/',
'http://a9.com/-/spec/opensearchdescription/1.1/',
'http://a9.com/-/spec/opensearchdescription/1.0/')):
err.error(
err_id=('opensearch', 'invalid_xmlns'),
error='OpenSearch: Bad XMLNS attribute',
description='The XML namespace attribute contains an '
'value.')
# Make sure that there is exactly one ShortName.
sn = srch_prov.documentElement.getElementsByTagName('ShortName')
if not sn:
err.error(
err_id=('opensearch', 'missing_shortname'),
error='OpenSearch: Missing <ShortName> elements',
description='ShortName elements are mandatory OpenSearch provider '
'elements.')
elif len(sn) > 1:
err.error(
err_id=('opensearch', 'extra_shortnames'),
error='OpenSearch: Too many <ShortName> elements',
description='Too many ShortName elements exist in the OpenSearch '
'provider.')
else:
sn_children = sn[0].childNodes
short_name = 0
for node in sn_children:
if node.nodeType == node.TEXT_NODE:
short_name += len(node.data)
if short_name > 16:
err.error(
err_id=('opensearch', 'big_shortname'),
error='OpenSearch: <ShortName> element too long',
description='The ShortName element must contains less than '
'seventeen characters.')
# Make sure that there is exactly one Description.
if len(srch_prov.documentElement.getElementsByTagName('Description')) != 1:
err.error(
err_id=('opensearch', 'missing_description'),
error='OpenSearch: Invalid number of <Description> elements',
description='There are too many or too few Description elements '
'in the OpenSearch provider.')
# Grab the URLs and make sure that there is at least one.
urls = srch_prov.documentElement.getElementsByTagName('Url')
if not urls:
err.error(
err_id=('opensearch', 'missing_url'),
error='OpenSearch: Missing <Url> elements',
description='The OpenSearch provider is missing a Url element.')
if listed and any(url.hasAttribute('rel') and
url.attributes['rel'].value == 'self' for
url in urls):
err.error(
err_id=('opensearch', 'rel_self'),
error='OpenSearch: <Url> elements may not be rel=self',
description='Per AMO guidelines, OpenSearch providers cannot '
"contain <Url /> elements with a 'rel' attribute "
"pointing to the URL's current location. It must be "
'removed before posting this provider to AMO.')
acceptable_mimes = ('text/html', 'application/xhtml+xml')
acceptable_urls = [url for url in urls if url.hasAttribute('type') and
url.attributes['type'].value in acceptable_mimes]
# At least one Url must be text/html
if not acceptable_urls:
err.error(
err_id=('opensearch', 'missing_url_texthtml'),
error="OpenSearch: Missing <Url> element with 'text/html' type",
description='OpenSearch providers must have at least one Url '
"element with a type attribute set to 'text/html'.")
# Make sure that each Url has the require attributes.
for url in acceptable_urls:
if url.hasAttribute('rel') and url.attributes['rel'].value == 'self':
continue
if url.hasAttribute('method') and \
url.attributes['method'].value.upper() not in ('GET', 'POST'):
err.error(
err_id=('opensearch', 'missing_method'),
error="OpenSearch: <Url> element with invalid 'method'",
description='A Url element in the OpenSearch provider lists a '
'method attribute, but the value is not GET or '
'POST.')
# Test for attribute presence.
if not url.hasAttribute('template'):
err.error(
err_id=('opensearch', 'missing_template'),
error='OpenSearch: <Url> element missing template attribute',
description='<Url> elements of OpenSearch providers must '
'include a template attribute.')
else:
url_template = url.attributes['template'].value
if url_template[:4] != 'http':
err.error(
err_id=('opensearch', 'invalid_template'),
error='OpenSearch: `<Url>` element with invalid '
'`template`',
description='A `<Url>` element in the OpenSearch '
'provider lists a template attribute, but '
'the value is not a valid HTTP URL.')
# Make sure that there is a {searchTerms} placeholder in the
# URL template.
found_template = url_template.count('{searchTerms}') > 0
# If we didn't find it in a simple parse of the template=""
# attribute, look deeper at the <Param /> elements.
if not found_template:
for param in url.getElementsByTagName('Param'):
# As long as we're in here and dependent on the
# attributes, we'd might as well validate them.
attribute_keys = param.attributes.keys()
if 'name' not in attribute_keys or \
'value' not in attribute_keys:
err.error(
err_id=('opensearch', 'param_missing_attrs'),
error='OpenSearch: `<Param>` element missing '
'name/value',
description='Param elements in the OpenSearch '
'provider must include a name and a '
'value attribute.')
param_value = (param.attributes['value'].value if
'value' in param.attributes.keys() else
'')
if param_value.count('{searchTerms}'):
found_template = True
# Since we're in a validating spirit, continue
# looking for more errors and don't break
# If the template still hasn't been found...
if not found_template:
tpl = url.attributes['template'].value
err.error(
err_id=('opensearch', 'template_not_found'),
error='OpenSearch: <Url> element missing template '
'placeholder',
description=('`<Url>` elements of OpenSearch providers '
'must include a template attribute or '
'specify a placeholder with '
'`{searchTerms}`.',
'Missing template: %s' % tpl))
# Make sure there are no updateURL elements
if srch_prov.getElementsByTagName('updateURL'):
err.error(
err_id=('opensearch', 'banned_updateurl'),
error='OpenSearch: <updateURL> elements are banned in OpenSearch '
'providers.',
description='OpenSearch providers may not contain <updateURL> '
'elements.')
# The OpenSearch provider is valid!
return err | Detect, parse, and validate an OpenSearch provider | Below is the the instruction that describes the task:
### Input:
Detect, parse, and validate an OpenSearch provider
### Response:
def detect_opensearch(err, package, listed=False):
'Detect, parse, and validate an OpenSearch provider'
# Parse the file.
try:
# Check if it is a file object.
if hasattr(package, 'read'):
srch_prov = parse(package)
else:
# It's not a file object; open it (the XML parser is bad at this).
with open(package, 'rb') as package_file:
srch_prov = parse(package_file)
except DefusedXmlException:
url = 'https://pypi.python.org/pypi/defusedxml/0.3#attack-vectors'
err.error(
err_id=('opensearch', 'security_error'),
error='OpenSearch: XML Security Error',
description='The OpenSearch extension could not be parsed due to '
'a security error in the XML. See {url} for more '
'info.'.format(url=url))
return err
except ExpatError:
err.error(
err_id=('opensearch', 'parse_error'),
error='OpenSearch: XML Parse Error',
description='The OpenSearch extension could not be parsed due to '
'a syntax error in the XML.')
return err
# Make sure that the root element is OpenSearchDescription.
if srch_prov.documentElement.tagName != 'OpenSearchDescription':
err.error(
err_id=('opensearch', 'invalid_document_root'),
error='OpenSearch: Invalid Document Root',
description='The root element of the OpenSearch provider is not '
"'OpenSearchDescription'.")
# Per bug 617822
if not srch_prov.documentElement.hasAttribute('xmlns'):
err.error(
err_id=('opensearch', 'no_xmlns'),
error='OpenSearch: Missing XMLNS attribute',
description='The XML namespace attribute is missing from the '
'OpenSearch document.')
if ('xmlns' not in srch_prov.documentElement.attributes.keys() or
srch_prov.documentElement.attributes['xmlns'].value not in (
'http://a9.com/-/spec/opensearch/1.0/',
'http://a9.com/-/spec/opensearch/1.1/',
'http://a9.com/-/spec/opensearchdescription/1.1/',
'http://a9.com/-/spec/opensearchdescription/1.0/')):
err.error(
err_id=('opensearch', 'invalid_xmlns'),
error='OpenSearch: Bad XMLNS attribute',
description='The XML namespace attribute contains an '
'value.')
# Make sure that there is exactly one ShortName.
sn = srch_prov.documentElement.getElementsByTagName('ShortName')
if not sn:
err.error(
err_id=('opensearch', 'missing_shortname'),
error='OpenSearch: Missing <ShortName> elements',
description='ShortName elements are mandatory OpenSearch provider '
'elements.')
elif len(sn) > 1:
err.error(
err_id=('opensearch', 'extra_shortnames'),
error='OpenSearch: Too many <ShortName> elements',
description='Too many ShortName elements exist in the OpenSearch '
'provider.')
else:
sn_children = sn[0].childNodes
short_name = 0
for node in sn_children:
if node.nodeType == node.TEXT_NODE:
short_name += len(node.data)
if short_name > 16:
err.error(
err_id=('opensearch', 'big_shortname'),
error='OpenSearch: <ShortName> element too long',
description='The ShortName element must contains less than '
'seventeen characters.')
# Make sure that there is exactly one Description.
if len(srch_prov.documentElement.getElementsByTagName('Description')) != 1:
err.error(
err_id=('opensearch', 'missing_description'),
error='OpenSearch: Invalid number of <Description> elements',
description='There are too many or too few Description elements '
'in the OpenSearch provider.')
# Grab the URLs and make sure that there is at least one.
urls = srch_prov.documentElement.getElementsByTagName('Url')
if not urls:
err.error(
err_id=('opensearch', 'missing_url'),
error='OpenSearch: Missing <Url> elements',
description='The OpenSearch provider is missing a Url element.')
if listed and any(url.hasAttribute('rel') and
url.attributes['rel'].value == 'self' for
url in urls):
err.error(
err_id=('opensearch', 'rel_self'),
error='OpenSearch: <Url> elements may not be rel=self',
description='Per AMO guidelines, OpenSearch providers cannot '
"contain <Url /> elements with a 'rel' attribute "
"pointing to the URL's current location. It must be "
'removed before posting this provider to AMO.')
acceptable_mimes = ('text/html', 'application/xhtml+xml')
acceptable_urls = [url for url in urls if url.hasAttribute('type') and
url.attributes['type'].value in acceptable_mimes]
# At least one Url must be text/html
if not acceptable_urls:
err.error(
err_id=('opensearch', 'missing_url_texthtml'),
error="OpenSearch: Missing <Url> element with 'text/html' type",
description='OpenSearch providers must have at least one Url '
"element with a type attribute set to 'text/html'.")
# Make sure that each Url has the require attributes.
for url in acceptable_urls:
if url.hasAttribute('rel') and url.attributes['rel'].value == 'self':
continue
if url.hasAttribute('method') and \
url.attributes['method'].value.upper() not in ('GET', 'POST'):
err.error(
err_id=('opensearch', 'missing_method'),
error="OpenSearch: <Url> element with invalid 'method'",
description='A Url element in the OpenSearch provider lists a '
'method attribute, but the value is not GET or '
'POST.')
# Test for attribute presence.
if not url.hasAttribute('template'):
err.error(
err_id=('opensearch', 'missing_template'),
error='OpenSearch: <Url> element missing template attribute',
description='<Url> elements of OpenSearch providers must '
'include a template attribute.')
else:
url_template = url.attributes['template'].value
if url_template[:4] != 'http':
err.error(
err_id=('opensearch', 'invalid_template'),
error='OpenSearch: `<Url>` element with invalid '
'`template`',
description='A `<Url>` element in the OpenSearch '
'provider lists a template attribute, but '
'the value is not a valid HTTP URL.')
# Make sure that there is a {searchTerms} placeholder in the
# URL template.
found_template = url_template.count('{searchTerms}') > 0
# If we didn't find it in a simple parse of the template=""
# attribute, look deeper at the <Param /> elements.
if not found_template:
for param in url.getElementsByTagName('Param'):
# As long as we're in here and dependent on the
# attributes, we'd might as well validate them.
attribute_keys = param.attributes.keys()
if 'name' not in attribute_keys or \
'value' not in attribute_keys:
err.error(
err_id=('opensearch', 'param_missing_attrs'),
error='OpenSearch: `<Param>` element missing '
'name/value',
description='Param elements in the OpenSearch '
'provider must include a name and a '
'value attribute.')
param_value = (param.attributes['value'].value if
'value' in param.attributes.keys() else
'')
if param_value.count('{searchTerms}'):
found_template = True
# Since we're in a validating spirit, continue
# looking for more errors and don't break
# If the template still hasn't been found...
if not found_template:
tpl = url.attributes['template'].value
err.error(
err_id=('opensearch', 'template_not_found'),
error='OpenSearch: <Url> element missing template '
'placeholder',
description=('`<Url>` elements of OpenSearch providers '
'must include a template attribute or '
'specify a placeholder with '
'`{searchTerms}`.',
'Missing template: %s' % tpl))
# Make sure there are no updateURL elements
if srch_prov.getElementsByTagName('updateURL'):
err.error(
err_id=('opensearch', 'banned_updateurl'),
error='OpenSearch: <updateURL> elements are banned in OpenSearch '
'providers.',
description='OpenSearch providers may not contain <updateURL> '
'elements.')
# The OpenSearch provider is valid!
return err |
def play(state):
""" Play sound for a given state.
:param state: a State value.
"""
filename = None
if state == SoundService.State.welcome:
filename = "pad_glow_welcome1.wav"
elif state == SoundService.State.goodbye:
filename = "pad_glow_power_off.wav"
elif state == SoundService.State.hotword_detected:
filename = "pad_soft_on.wav"
elif state == SoundService.State.asr_text_captured:
filename = "pad_soft_off.wav"
elif state == SoundService.State.error:
filename = "music_marimba_error_chord_2x.wav"
if filename is not None:
AudioPlayer.play_async("{}/{}".format(ABS_SOUND_DIR, filename)) | Play sound for a given state.
    :param state: a State value. | Below is the instruction that describes the task:
### Input:
Play sound for a given state.
:param state: a State value.
### Response:
def play(state):
""" Play sound for a given state.
:param state: a State value.
"""
filename = None
if state == SoundService.State.welcome:
filename = "pad_glow_welcome1.wav"
elif state == SoundService.State.goodbye:
filename = "pad_glow_power_off.wav"
elif state == SoundService.State.hotword_detected:
filename = "pad_soft_on.wav"
elif state == SoundService.State.asr_text_captured:
filename = "pad_soft_off.wav"
elif state == SoundService.State.error:
filename = "music_marimba_error_chord_2x.wav"
if filename is not None:
AudioPlayer.play_async("{}/{}".format(ABS_SOUND_DIR, filename)) |
def put(self, task, args, kwargs):
""" Add a new item to the queue. An item is a task and the
arguments needed to call it.
Do not call this directly, use Task.queue/Task.run instead.
"""
if self.num_threads == 0:
return task(*args, **kwargs)
if self.queue is None:
self._spawn()
self.queue.put((task, args, kwargs)) | Add a new item to the queue. An item is a task and the
arguments needed to call it.
    Do not call this directly, use Task.queue/Task.run instead. | Below is the instruction that describes the task:
### Input:
Add a new item to the queue. An item is a task and the
arguments needed to call it.
Do not call this directly, use Task.queue/Task.run instead.
### Response:
def put(self, task, args, kwargs):
""" Add a new item to the queue. An item is a task and the
arguments needed to call it.
Do not call this directly, use Task.queue/Task.run instead.
"""
if self.num_threads == 0:
return task(*args, **kwargs)
if self.queue is None:
self._spawn()
self.queue.put((task, args, kwargs)) |
def send_file(self, file_name, remote_destination=None, **kwargs):
"""Send a file to a remote host with rsync.
Args:
file_name (str): The relative location of the file on the local
host.
remote_destination (str): The destination for the file on the remote
host. If `None`, will be assumed to be the same as
**file_name**. Default `None`.
**kwargs: Passed to ``SubprocessTask``'s init method.
Return:
``pyrem.task.SubprocessTask``: The resulting task.
"""
if not remote_destination:
remote_destination = file_name
return SubprocessTask(
self._rsync_cmd() +
['-ut', file_name, '%s:%s' % (self.hostname, remote_destination)],
**kwargs) | Send a file to a remote host with rsync.
Args:
file_name (str): The relative location of the file on the local
host.
remote_destination (str): The destination for the file on the remote
host. If `None`, will be assumed to be the same as
**file_name**. Default `None`.
**kwargs: Passed to ``SubprocessTask``'s init method.
Return:
        ``pyrem.task.SubprocessTask``: The resulting task. | Below is the instruction that describes the task:
### Input:
Send a file to a remote host with rsync.
Args:
file_name (str): The relative location of the file on the local
host.
remote_destination (str): The destination for the file on the remote
host. If `None`, will be assumed to be the same as
**file_name**. Default `None`.
**kwargs: Passed to ``SubprocessTask``'s init method.
Return:
``pyrem.task.SubprocessTask``: The resulting task.
### Response:
def send_file(self, file_name, remote_destination=None, **kwargs):
"""Send a file to a remote host with rsync.
Args:
file_name (str): The relative location of the file on the local
host.
remote_destination (str): The destination for the file on the remote
host. If `None`, will be assumed to be the same as
**file_name**. Default `None`.
**kwargs: Passed to ``SubprocessTask``'s init method.
Return:
``pyrem.task.SubprocessTask``: The resulting task.
"""
if not remote_destination:
remote_destination = file_name
return SubprocessTask(
self._rsync_cmd() +
['-ut', file_name, '%s:%s' % (self.hostname, remote_destination)],
**kwargs) |
def Hvap(counts):
r'''Estimates the enthalpy of vaporization of an organic compound at
its normal boiling point using the Joback method as a function of
chemical structure only.
.. math::
\Delta H_{vap} = 15.30 + \sum_i H_{vap,i}
In the above equation, enthalpy of fusion is calculated in
kJ/mol; it is converted to J/mol here.
For 368 compounds tested by Joback, the absolute average error was
303.5 cal/mol and standard deviation was 429 cal/mol; the average
relative error was 3.88%.
Parameters
----------
counts : dict
Dictionary of Joback groups present (numerically indexed) and their
counts, [-]
Returns
-------
Hvap : float
Estimated enthalpy of vaporization of the compound at its normal
boiling point, [J/mol]
Examples
--------
>>> Joback.Hvap({1: 2, 24: 1})
29018.0
'''
tot = 0.0
for group, count in counts.items():
tot += joback_groups_id_dict[group].Hvap*count
Hvap = 15.3 + tot
return Hvap*1000 | r'''Estimates the enthalpy of vaporization of an organic compound at
its normal boiling point using the Joback method as a function of
chemical structure only.
.. math::
\Delta H_{vap} = 15.30 + \sum_i H_{vap,i}
In the above equation, enthalpy of fusion is calculated in
kJ/mol; it is converted to J/mol here.
For 368 compounds tested by Joback, the absolute average error was
303.5 cal/mol and standard deviation was 429 cal/mol; the average
relative error was 3.88%.
Parameters
----------
counts : dict
Dictionary of Joback groups present (numerically indexed) and their
counts, [-]
Returns
-------
Hvap : float
Estimated enthalpy of vaporization of the compound at its normal
boiling point, [J/mol]
Examples
--------
>>> Joback.Hvap({1: 2, 24: 1})
    29018.0 | Below is the instruction that describes the task:
### Input:
r'''Estimates the enthalpy of vaporization of an organic compound at
its normal boiling point using the Joback method as a function of
chemical structure only.
.. math::
\Delta H_{vap} = 15.30 + \sum_i H_{vap,i}
In the above equation, enthalpy of fusion is calculated in
kJ/mol; it is converted to J/mol here.
For 368 compounds tested by Joback, the absolute average error was
303.5 cal/mol and standard deviation was 429 cal/mol; the average
relative error was 3.88%.
Parameters
----------
counts : dict
Dictionary of Joback groups present (numerically indexed) and their
counts, [-]
Returns
-------
Hvap : float
Estimated enthalpy of vaporization of the compound at its normal
boiling point, [J/mol]
Examples
--------
>>> Joback.Hvap({1: 2, 24: 1})
29018.0
### Response:
def Hvap(counts):
r'''Estimates the enthalpy of vaporization of an organic compound at
its normal boiling point using the Joback method as a function of
chemical structure only.
.. math::
\Delta H_{vap} = 15.30 + \sum_i H_{vap,i}
In the above equation, enthalpy of fusion is calculated in
kJ/mol; it is converted to J/mol here.
For 368 compounds tested by Joback, the absolute average error was
303.5 cal/mol and standard deviation was 429 cal/mol; the average
relative error was 3.88%.
Parameters
----------
counts : dict
Dictionary of Joback groups present (numerically indexed) and their
counts, [-]
Returns
-------
Hvap : float
Estimated enthalpy of vaporization of the compound at its normal
boiling point, [J/mol]
Examples
--------
>>> Joback.Hvap({1: 2, 24: 1})
29018.0
'''
tot = 0.0
for group, count in counts.items():
tot += joback_groups_id_dict[group].Hvap*count
Hvap = 15.3 + tot
return Hvap*1000 |
def read_crl(crl):
'''
Returns a dict containing details of a certificate revocation list.
Input can be a PEM string or file path.
:depends: - OpenSSL command line tool
csl:
A path or PEM encoded string containing the CSL to read.
CLI Example:
.. code-block:: bash
salt '*' x509.read_crl /etc/pki/mycrl.crl
'''
text = _text_or_file(crl)
text = get_pem_entry(text, pem_type='X509 CRL')
crltempfile = tempfile.NamedTemporaryFile()
crltempfile.write(salt.utils.stringutils.to_str(text))
crltempfile.flush()
crlparsed = _parse_openssl_crl(crltempfile.name)
crltempfile.close()
return crlparsed | Returns a dict containing details of a certificate revocation list.
Input can be a PEM string or file path.
:depends: - OpenSSL command line tool
csl:
A path or PEM encoded string containing the CSL to read.
CLI Example:
.. code-block:: bash
        salt '*' x509.read_crl /etc/pki/mycrl.crl | Below is the instruction that describes the task:
### Input:
Returns a dict containing details of a certificate revocation list.
Input can be a PEM string or file path.
:depends: - OpenSSL command line tool
csl:
A path or PEM encoded string containing the CSL to read.
CLI Example:
.. code-block:: bash
salt '*' x509.read_crl /etc/pki/mycrl.crl
### Response:
def read_crl(crl):
'''
Returns a dict containing details of a certificate revocation list.
Input can be a PEM string or file path.
:depends: - OpenSSL command line tool
csl:
A path or PEM encoded string containing the CSL to read.
CLI Example:
.. code-block:: bash
salt '*' x509.read_crl /etc/pki/mycrl.crl
'''
text = _text_or_file(crl)
text = get_pem_entry(text, pem_type='X509 CRL')
crltempfile = tempfile.NamedTemporaryFile()
crltempfile.write(salt.utils.stringutils.to_str(text))
crltempfile.flush()
crlparsed = _parse_openssl_crl(crltempfile.name)
crltempfile.close()
return crlparsed |
def parse_arguments(argv):
"""
Parse command line arguments and override local configuration
:params args: Array of command line arguments
"""
parser = argparse.ArgumentParser(description="GNS3 server version {}".format(__version__))
parser.add_argument("-v", "--version", help="show the version", action="version", version=__version__)
parser.add_argument("--host", help="run on the given host/IP address")
parser.add_argument("--port", help="run on the given port", type=int)
parser.add_argument("--ssl", action="store_true", help="run in SSL mode")
parser.add_argument("--no-ubridge", action="store_false", help="do not use ubridge to handle node connections")
parser.add_argument("--config", help="Configuration file")
parser.add_argument("--certfile", help="SSL cert file")
parser.add_argument("--certkey", help="SSL key file")
parser.add_argument("--record", help="save curl requests into a file (for developers)")
parser.add_argument("-L", "--local", action="store_true", help="local mode (allows some insecure operations)")
parser.add_argument("-A", "--allow", action="store_true", help="allow remote connections to local console ports")
parser.add_argument("-q", "--quiet", action="store_true", help="do not show logs on stdout")
parser.add_argument("-d", "--debug", action="store_true", help="show debug logs")
parser.add_argument("--shell", action="store_true", help="start a shell inside the server (debugging purpose only you need to install ptpython before)")
parser.add_argument("--log", help="send output to logfile instead of console")
parser.add_argument("--daemon", action="store_true", help="start as a daemon")
parser.add_argument("--pid", help="store process pid")
parser.add_argument("--profile", help="Settings profile (blank will use default settings files)")
args = parser.parse_args(argv)
if args.config:
Config.instance(files=[args.config], profile=args.profile)
else:
Config.instance(profile=args.profile)
config = Config.instance().get_section_config("Server")
defaults = {
"host": config.get("host", "0.0.0.0"),
"port": config.get("port", 3080),
"ssl": config.getboolean("ssl", False),
"certfile": config.get("certfile", ""),
"certkey": config.get("certkey", ""),
"record": config.get("record", ""),
"local": config.getboolean("local", False),
"allow": config.getboolean("allow_remote_console", False),
"quiet": config.getboolean("quiet", False),
"debug": config.getboolean("debug", False),
"logfile": config.getboolean("logfile", "")
}
parser.set_defaults(**defaults)
return parser.parse_args(argv) | Parse command line arguments and override local configuration
    :params args: Array of command line arguments | Below is the instruction that describes the task:
### Input:
Parse command line arguments and override local configuration
:params args: Array of command line arguments
### Response:
def parse_arguments(argv):
"""
Parse command line arguments and override local configuration
:params args: Array of command line arguments
"""
parser = argparse.ArgumentParser(description="GNS3 server version {}".format(__version__))
parser.add_argument("-v", "--version", help="show the version", action="version", version=__version__)
parser.add_argument("--host", help="run on the given host/IP address")
parser.add_argument("--port", help="run on the given port", type=int)
parser.add_argument("--ssl", action="store_true", help="run in SSL mode")
parser.add_argument("--no-ubridge", action="store_false", help="do not use ubridge to handle node connections")
parser.add_argument("--config", help="Configuration file")
parser.add_argument("--certfile", help="SSL cert file")
parser.add_argument("--certkey", help="SSL key file")
parser.add_argument("--record", help="save curl requests into a file (for developers)")
parser.add_argument("-L", "--local", action="store_true", help="local mode (allows some insecure operations)")
parser.add_argument("-A", "--allow", action="store_true", help="allow remote connections to local console ports")
parser.add_argument("-q", "--quiet", action="store_true", help="do not show logs on stdout")
parser.add_argument("-d", "--debug", action="store_true", help="show debug logs")
parser.add_argument("--shell", action="store_true", help="start a shell inside the server (debugging purpose only you need to install ptpython before)")
parser.add_argument("--log", help="send output to logfile instead of console")
parser.add_argument("--daemon", action="store_true", help="start as a daemon")
parser.add_argument("--pid", help="store process pid")
parser.add_argument("--profile", help="Settings profile (blank will use default settings files)")
args = parser.parse_args(argv)
if args.config:
Config.instance(files=[args.config], profile=args.profile)
else:
Config.instance(profile=args.profile)
config = Config.instance().get_section_config("Server")
defaults = {
"host": config.get("host", "0.0.0.0"),
"port": config.get("port", 3080),
"ssl": config.getboolean("ssl", False),
"certfile": config.get("certfile", ""),
"certkey": config.get("certkey", ""),
"record": config.get("record", ""),
"local": config.getboolean("local", False),
"allow": config.getboolean("allow_remote_console", False),
"quiet": config.getboolean("quiet", False),
"debug": config.getboolean("debug", False),
"logfile": config.getboolean("logfile", "")
}
parser.set_defaults(**defaults)
return parser.parse_args(argv) |
def _validate_calibration_params(strategy='accuracy', min_rate=None,
beta=1.):
"""Ensure that calibration parameters have allowed values"""
if strategy not in ('accuracy', 'f_beta', 'max_tpr',
'max_tnr'):
raise ValueError('Strategy can either be "accuracy", "f_beta" or '
'"max_tpr" or "max_tnr". Got "{}" instead.'
.format(strategy))
if strategy == 'max_tpr' or strategy == 'max_tnr':
if (min_rate is None or not isinstance(min_rate, (int, float)) or
not min_rate >= 0 or not min_rate <= 1):
raise ValueError('Parameter min_rate must be a number in'
'[0, 1]. '
'Got {} instead.'.format(min_rate))
if strategy == 'f_beta':
if beta is None or not isinstance(beta, (int, float)):
raise ValueError('Parameter beta must be a real number. '
                             'Got {} instead.'.format(type(beta))) | Ensure that calibration parameters have allowed values | Below is the instruction that describes the task:
### Input:
Ensure that calibration parameters have allowed values
### Response:
def _validate_calibration_params(strategy='accuracy', min_rate=None,
beta=1.):
"""Ensure that calibration parameters have allowed values"""
if strategy not in ('accuracy', 'f_beta', 'max_tpr',
'max_tnr'):
raise ValueError('Strategy can either be "accuracy", "f_beta" or '
'"max_tpr" or "max_tnr". Got "{}" instead.'
.format(strategy))
if strategy == 'max_tpr' or strategy == 'max_tnr':
if (min_rate is None or not isinstance(min_rate, (int, float)) or
not min_rate >= 0 or not min_rate <= 1):
raise ValueError('Parameter min_rate must be a number in'
'[0, 1]. '
'Got {} instead.'.format(min_rate))
if strategy == 'f_beta':
if beta is None or not isinstance(beta, (int, float)):
raise ValueError('Parameter beta must be a real number. '
'Got {} instead.'.format(type(beta))) |
def run(self, *args):
"""Merge unique identities using a matching algorithm."""
params = self.parser.parse_args(args)
code = self.unify(params.matching, params.sources,
params.fast_matching, params.no_strict,
params.interactive, params.recovery)
        return code | Merge unique identities using a matching algorithm. | Below is the instruction that describes the task:
### Input:
Merge unique identities using a matching algorithm.
### Response:
def run(self, *args):
"""Merge unique identities using a matching algorithm."""
params = self.parser.parse_args(args)
code = self.unify(params.matching, params.sources,
params.fast_matching, params.no_strict,
params.interactive, params.recovery)
return code |
def access_time(self):
"""dfdatetime.DateTimeValues: access time or None if not available."""
timestamp = self._fsapfs_file_entry.get_access_time_as_integer()
    return dfdatetime_apfs_time.APFSTime(timestamp=timestamp) | dfdatetime.DateTimeValues: access time or None if not available. | Below is the instruction that describes the task:
### Input:
dfdatetime.DateTimeValues: access time or None if not available.
### Response:
def access_time(self):
"""dfdatetime.DateTimeValues: access time or None if not available."""
timestamp = self._fsapfs_file_entry.get_access_time_as_integer()
return dfdatetime_apfs_time.APFSTime(timestamp=timestamp) |
def p_expression_0(self, program):
"""
expression : expression '*' expression
| expression '/' expression
| expression '+' expression
| expression '-' expression
| expression '^' expression
"""
program[0] = node.BinaryOp([node.BinaryOperator(program[2]),
program[1], program[3]]) | expression : expression '*' expression
| expression '/' expression
| expression '+' expression
| expression '-' expression
                   | expression '^' expression | Below is the instruction that describes the task:
### Input:
expression : expression '*' expression
| expression '/' expression
| expression '+' expression
| expression '-' expression
| expression '^' expression
### Response:
def p_expression_0(self, program):
"""
expression : expression '*' expression
| expression '/' expression
| expression '+' expression
| expression '-' expression
| expression '^' expression
"""
program[0] = node.BinaryOp([node.BinaryOperator(program[2]),
program[1], program[3]]) |
def match(self, selector, index):
"""Match the selector."""
pseudo = None
m = self.re_pseudo_name.match(selector, index)
if m:
name = util.lower(css_unescape(m.group('name')))
pattern = self.patterns.get(name)
if pattern:
pseudo = pattern.match(selector, index)
if pseudo:
self.matched_name = pattern
        return pseudo | Match the selector. | Below is the instruction that describes the task:
### Input:
Match the selector.
### Response:
def match(self, selector, index):
"""Match the selector."""
pseudo = None
m = self.re_pseudo_name.match(selector, index)
if m:
name = util.lower(css_unescape(m.group('name')))
pattern = self.patterns.get(name)
if pattern:
pseudo = pattern.match(selector, index)
if pseudo:
self.matched_name = pattern
return pseudo |
def trun_emph(trun):
"""Print essential info on"""
if trun["conf"]["VERBOSE"] > 1: # Print environment variables
cij.emph("rnr:CONF {")
for cvar in sorted(trun["conf"].keys()):
cij.emph(" % 16s: %r" % (cvar, trun["conf"][cvar]))
cij.emph("}")
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:INFO {")
cij.emph(" OUTPUT: %r" % trun["conf"]["OUTPUT"])
cij.emph(" yml_fpath: %r" % yml_fpath(trun["conf"]["OUTPUT"]))
        cij.emph("}") | Print essential info on | Below is the instruction that describes the task:
### Input:
Print essential info on
### Response:
def trun_emph(trun):
"""Print essential info on"""
if trun["conf"]["VERBOSE"] > 1: # Print environment variables
cij.emph("rnr:CONF {")
for cvar in sorted(trun["conf"].keys()):
cij.emph(" % 16s: %r" % (cvar, trun["conf"][cvar]))
cij.emph("}")
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:INFO {")
cij.emph(" OUTPUT: %r" % trun["conf"]["OUTPUT"])
cij.emph(" yml_fpath: %r" % yml_fpath(trun["conf"]["OUTPUT"]))
cij.emph("}") |
def search(self, term=None, category=None, pages=1, url=search_url,
sort=None, order=None):
"""Search a given URL for torrent results."""
if not self.current_url:
self.current_url = url
if self.current_url == Search.base_url:
# Searching home page so no formatting
results = self._get_results(self.current_url)
self._add_results(results)
else:
search = self._format_search(term, category)
sorting = self._format_sort(sort, order)
# Now get the results.
for i in range(pages):
results = self._get_results(search + "/" + str(self._current_page) +
"/" + sorting)
self._add_results(results)
self._current_page += 1
        self._current_page -= 1 | Search a given URL for torrent results. | Below is the instruction that describes the task:
### Input:
Search a given URL for torrent results.
### Response:
def search(self, term=None, category=None, pages=1, url=search_url,
sort=None, order=None):
"""Search a given URL for torrent results."""
if not self.current_url:
self.current_url = url
if self.current_url == Search.base_url:
# Searching home page so no formatting
results = self._get_results(self.current_url)
self._add_results(results)
else:
search = self._format_search(term, category)
sorting = self._format_sort(sort, order)
# Now get the results.
for i in range(pages):
results = self._get_results(search + "/" + str(self._current_page) +
"/" + sorting)
self._add_results(results)
self._current_page += 1
self._current_page -= 1 |
def _set_rightMargin(self, value):
"""
value will be an int or float.
Subclasses may override this method.
"""
bounds = self.bounds
if bounds is None:
self.width = value
else:
xMin, yMin, xMax, yMax = bounds
self.width = xMax + value | value will be an int or float.
        Subclasses may override this method. | Below is the instruction that describes the task:
### Input:
value will be an int or float.
Subclasses may override this method.
### Response:
def _set_rightMargin(self, value):
"""
value will be an int or float.
Subclasses may override this method.
"""
bounds = self.bounds
if bounds is None:
self.width = value
else:
xMin, yMin, xMax, yMax = bounds
self.width = xMax + value |
def validate(self, value):
"""Validate value."""
len_ = len(value)
if self.minimum_value is not None and len_ < self.minimum_value:
tpl = "Value '{val}' length is lower than allowed minimum '{min}'."
raise ValidationError(tpl.format(
val=value, min=self.minimum_value
))
if self.maximum_value is not None and len_ > self.maximum_value:
raise ValidationError(
"Value '{val}' length is bigger than "
"allowed maximum '{max}'.".format(
val=value,
max=self.maximum_value,
                )) | Validate value. | Below is the instruction that describes the task:
### Input:
Validate value.
### Response:
def validate(self, value):
"""Validate value."""
len_ = len(value)
if self.minimum_value is not None and len_ < self.minimum_value:
tpl = "Value '{val}' length is lower than allowed minimum '{min}'."
raise ValidationError(tpl.format(
val=value, min=self.minimum_value
))
if self.maximum_value is not None and len_ > self.maximum_value:
raise ValidationError(
"Value '{val}' length is bigger than "
"allowed maximum '{max}'.".format(
val=value,
max=self.maximum_value,
)) |
def to_script(self, wf_name='wf'):
"""Generated and print the scriptcwl script for the currunt workflow.
Args:
wf_name (str): string used for the WorkflowGenerator object in the
generated script (default: ``wf``).
"""
self._closed()
script = []
# Workflow documentation
# if self.documentation:
# if is_multiline(self.documentation):
# print('doc = """')
# print(self.documentation)
# print('"""')
# print('{}.set_documentation(doc)'.format(wf_name))
# else:
# print('{}.set_documentation(\'{}\')'.format(wf_name,
# self.documentation))
# Workflow inputs
params = []
returns = []
for name, typ in self.wf_inputs.items():
params.append('{}=\'{}\''.format(name, typ))
returns.append(name)
script.append('{} = {}.add_inputs({})'.format(
', '.join(returns), wf_name, ', '.join(params)))
# Workflow steps
returns = []
for name, step in self.wf_steps.items():
pyname = step.python_name
returns = ['{}_{}'.format(pyname, o) for o in step['out']]
params = ['{}={}'.format(name, python_name(param))
for name, param in step['in'].items()]
script.append('{} = {}.{}({})'.format(
', '.join(returns), wf_name, pyname, ', '.join(params)))
# Workflow outputs
params = []
for name, details in self.wf_outputs.items():
params.append('{}={}'.format(
name, python_name(details['outputSource'])))
script.append('{}.add_outputs({})'.format(wf_name, ', '.join(params)))
    return '\n'.join(script) | Generate and print the scriptcwl script for the current workflow.
Args:
wf_name (str): string used for the WorkflowGenerator object in the
            generated script (default: ``wf``). | Below is the instruction that describes the task:
### Input:
Generate and print the scriptcwl script for the current workflow.
Args:
wf_name (str): string used for the WorkflowGenerator object in the
generated script (default: ``wf``).
### Response:
def to_script(self, wf_name='wf'):
"""Generated and print the scriptcwl script for the currunt workflow.
Args:
wf_name (str): string used for the WorkflowGenerator object in the
generated script (default: ``wf``).
"""
self._closed()
script = []
# Workflow documentation
# if self.documentation:
# if is_multiline(self.documentation):
# print('doc = """')
# print(self.documentation)
# print('"""')
# print('{}.set_documentation(doc)'.format(wf_name))
# else:
# print('{}.set_documentation(\'{}\')'.format(wf_name,
# self.documentation))
# Workflow inputs
params = []
returns = []
for name, typ in self.wf_inputs.items():
params.append('{}=\'{}\''.format(name, typ))
returns.append(name)
script.append('{} = {}.add_inputs({})'.format(
', '.join(returns), wf_name, ', '.join(params)))
# Workflow steps
returns = []
for name, step in self.wf_steps.items():
pyname = step.python_name
returns = ['{}_{}'.format(pyname, o) for o in step['out']]
params = ['{}={}'.format(name, python_name(param))
for name, param in step['in'].items()]
script.append('{} = {}.{}({})'.format(
', '.join(returns), wf_name, pyname, ', '.join(params)))
# Workflow outputs
params = []
for name, details in self.wf_outputs.items():
params.append('{}={}'.format(
name, python_name(details['outputSource'])))
script.append('{}.add_outputs({})'.format(wf_name, ', '.join(params)))
return '\n'.join(script) |
def _db_to_python(db_data: dict, table: LdapObjectClass, dn: str) -> LdapObject:
""" Convert a DbDate object to a LdapObject. """
fields = table.get_fields()
python_data = table({
name: field.to_python(db_data[name])
for name, field in fields.items()
if field.db_field
})
python_data = python_data.merge({
'dn': dn,
})
    return python_data | Convert a DbDate object to a LdapObject. | Below is the instruction that describes the task:
### Input:
Convert a DbDate object to a LdapObject.
### Response:
def _db_to_python(db_data: dict, table: LdapObjectClass, dn: str) -> LdapObject:
""" Convert a DbDate object to a LdapObject. """
fields = table.get_fields()
python_data = table({
name: field.to_python(db_data[name])
for name, field in fields.items()
if field.db_field
})
python_data = python_data.merge({
'dn': dn,
})
return python_data |
def _classifyState(self, state):
"""
Reclassifies given state.
"""
# Record is before wait period do not classifiy
if state.ROWID < self.getParameter('trainRecords'):
if not state.setByUser:
state.anomalyLabel = []
self._deleteRecordsFromKNN([state])
return
label = KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL
autoLabel = label + KNNAnomalyClassifierRegion.AUTO_TAG
# Update the label based on classifications
newCategory = self._recomputeRecordFromKNN(state)
labelList = self._categoryToLabelList(newCategory)
if state.setByUser:
if label in state.anomalyLabel:
state.anomalyLabel.remove(label)
if autoLabel in state.anomalyLabel:
state.anomalyLabel.remove(autoLabel)
labelList.extend(state.anomalyLabel)
# Add threshold classification label if above threshold, else if
# classified to add the auto threshold classification.
if state.anomalyScore >= self.getParameter('anomalyThreshold'):
labelList.append(label)
elif label in labelList:
ind = labelList.index(label)
labelList[ind] = autoLabel
# Make all entries unique
labelList = list(set(labelList))
# If both above threshold and auto classified above - remove auto label
if label in labelList and autoLabel in labelList:
labelList.remove(autoLabel)
if state.anomalyLabel == labelList:
return
# Update state's labeling
state.anomalyLabel = labelList
# Update KNN Classifier with new labeling
if state.anomalyLabel == []:
self._deleteRecordsFromKNN([state])
else:
      self._addRecordToKNN(state) | Reclassifies given state. | Below is the instruction that describes the task:
### Input:
Reclassifies given state.
### Response:
def _classifyState(self, state):
"""
Reclassifies given state.
"""
# Record is before wait period do not classifiy
if state.ROWID < self.getParameter('trainRecords'):
if not state.setByUser:
state.anomalyLabel = []
self._deleteRecordsFromKNN([state])
return
label = KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL
autoLabel = label + KNNAnomalyClassifierRegion.AUTO_TAG
# Update the label based on classifications
newCategory = self._recomputeRecordFromKNN(state)
labelList = self._categoryToLabelList(newCategory)
if state.setByUser:
if label in state.anomalyLabel:
state.anomalyLabel.remove(label)
if autoLabel in state.anomalyLabel:
state.anomalyLabel.remove(autoLabel)
labelList.extend(state.anomalyLabel)
# Add threshold classification label if above threshold, else if
# classified to add the auto threshold classification.
if state.anomalyScore >= self.getParameter('anomalyThreshold'):
labelList.append(label)
elif label in labelList:
ind = labelList.index(label)
labelList[ind] = autoLabel
# Make all entries unique
labelList = list(set(labelList))
# If both above threshold and auto classified above - remove auto label
if label in labelList and autoLabel in labelList:
labelList.remove(autoLabel)
if state.anomalyLabel == labelList:
return
# Update state's labeling
state.anomalyLabel = labelList
# Update KNN Classifier with new labeling
if state.anomalyLabel == []:
self._deleteRecordsFromKNN([state])
else:
self._addRecordToKNN(state) |
def protected_branches():
# type: () -> list[str]
""" Return branches protected by deletion.
By default those are master and devel branches as configured in pelconf.
Returns:
list[str]: Names of important branches that should not be deleted.
"""
master = conf.get('git.master_branch', 'master')
develop = conf.get('git.devel_branch', 'develop')
return conf.get('git.protected_branches', (master, develop)) | Return branches protected by deletion.
By default those are master and devel branches as configured in pelconf.
Returns:
        list[str]: Names of important branches that should not be deleted. | Below is the instruction that describes the task:
### Input:
Return branches protected by deletion.
By default those are master and devel branches as configured in pelconf.
Returns:
list[str]: Names of important branches that should not be deleted.
### Response:
def protected_branches():
# type: () -> list[str]
""" Return branches protected by deletion.
By default those are master and devel branches as configured in pelconf.
Returns:
list[str]: Names of important branches that should not be deleted.
"""
master = conf.get('git.master_branch', 'master')
develop = conf.get('git.devel_branch', 'develop')
return conf.get('git.protected_branches', (master, develop)) |
def saddr(address):
"""Return a string representation for an address.
The *address* paramater can be a pipe name, an IP address tuple, or a
socket address.
The return value is always a ``str`` instance.
"""
if isinstance(address, six.string_types):
return address
elif isinstance(address, tuple) and len(address) >= 2 and ':' in address[0]:
return '[{}]:{}'.format(address[0], address[1])
elif isinstance(address, tuple) and len(address) >= 2:
return '{}:{}'.format(*address)
else:
raise TypeError('illegal address type: {!s}'.format(type(address))) | Return a string representation for an address.
The *address* paramater can be a pipe name, an IP address tuple, or a
socket address.
The return value is always a ``str`` instance. | Below is the the instruction that describes the task:
### Input:
Return a string representation for an address.
The *address* paramater can be a pipe name, an IP address tuple, or a
socket address.
The return value is always a ``str`` instance.
### Response:
def saddr(address):
"""Return a string representation for an address.
The *address* paramater can be a pipe name, an IP address tuple, or a
socket address.
The return value is always a ``str`` instance.
"""
if isinstance(address, six.string_types):
return address
elif isinstance(address, tuple) and len(address) >= 2 and ':' in address[0]:
return '[{}]:{}'.format(address[0], address[1])
elif isinstance(address, tuple) and len(address) >= 2:
return '{}:{}'.format(*address)
else:
raise TypeError('illegal address type: {!s}'.format(type(address))) |
def clear_lock(self, remote=None, lock_type='update'):
'''
Clear update.lk for all remotes
'''
cleared = []
errors = []
for repo in self.remotes:
if remote:
# Specific remote URL/pattern was passed, ensure that the URL
# matches or else skip this one
try:
if not fnmatch.fnmatch(repo.url, remote):
continue
except TypeError:
# remote was non-string, try again
if not fnmatch.fnmatch(repo.url, six.text_type(remote)):
continue
success, failed = repo.clear_lock(lock_type=lock_type)
cleared.extend(success)
errors.extend(failed)
return cleared, errors | Clear update.lk for all remotes | Below is the the instruction that describes the task:
### Input:
Clear update.lk for all remotes
### Response:
def clear_lock(self, remote=None, lock_type='update'):
'''
Clear update.lk for all remotes
'''
cleared = []
errors = []
for repo in self.remotes:
if remote:
# Specific remote URL/pattern was passed, ensure that the URL
# matches or else skip this one
try:
if not fnmatch.fnmatch(repo.url, remote):
continue
except TypeError:
# remote was non-string, try again
if not fnmatch.fnmatch(repo.url, six.text_type(remote)):
continue
success, failed = repo.clear_lock(lock_type=lock_type)
cleared.extend(success)
errors.extend(failed)
return cleared, errors |
def _initial_proposal_distribution(self, parameters, theta, size,
default_std=1e-4):
"""
Generate an initial proposal distribution around the point theta.
"""
missing_parameters = set(parameters).difference(theta)
if missing_parameters:
raise ValueError("cannot create initial proposal distribution "\
"because the following parameters are missing: {}".format(
", ".join(missing_parameters)))
std = np.ones(len(parameters), dtype=float)
initial_proposal_stds \
= self._configuration.get("initial_proposal_stds", {})
p0 = np.array([theta[p] for p in parameters])
std = np.array(map(float, [initial_proposal_stds.get(p, default_std) \
for p in parameters]))
return np.vstack([p0 + std * np.random.normal(size=len(p0)) \
for i in range(size)]) | Generate an initial proposal distribution around the point theta. | Below is the the instruction that describes the task:
### Input:
Generate an initial proposal distribution around the point theta.
### Response:
def _initial_proposal_distribution(self, parameters, theta, size,
default_std=1e-4):
"""
Generate an initial proposal distribution around the point theta.
"""
missing_parameters = set(parameters).difference(theta)
if missing_parameters:
raise ValueError("cannot create initial proposal distribution "\
"because the following parameters are missing: {}".format(
", ".join(missing_parameters)))
std = np.ones(len(parameters), dtype=float)
initial_proposal_stds \
= self._configuration.get("initial_proposal_stds", {})
p0 = np.array([theta[p] for p in parameters])
std = np.array(map(float, [initial_proposal_stds.get(p, default_std) \
for p in parameters]))
return np.vstack([p0 + std * np.random.normal(size=len(p0)) \
for i in range(size)]) |
def _trj_check_version(self, version, python, force):
"""Checks for version mismatch
Raises a VersionMismatchError if version of loaded trajectory and current pypet version
do not match. In case of `force=True` error is not raised only a warning is emitted.
"""
curr_python = pypetconstants.python_version_string
if (version != VERSION or curr_python != python) and not force:
raise pex.VersionMismatchError('Current pypet version is %s used under python %s '
' but your trajectory'
' was created with version %s and python %s.'
' Use >>force=True<< to perform your load regardless'
' of version mismatch.' %
(VERSION, curr_python, version, python))
elif version != VERSION or curr_python != python:
self._logger.warning('Current pypet version is %s with python %s but your trajectory'
' was created with version %s under python %s.'
' Yet, you enforced the load, so I will'
' handle the trajectory despite the'
' version mismatch.' %
(VERSION, curr_python, version, python)) | Checks for version mismatch
Raises a VersionMismatchError if version of loaded trajectory and current pypet version
do not match. In case of `force=True` error is not raised only a warning is emitted. | Below is the the instruction that describes the task:
### Input:
Checks for version mismatch
Raises a VersionMismatchError if version of loaded trajectory and current pypet version
do not match. In case of `force=True` error is not raised only a warning is emitted.
### Response:
def _trj_check_version(self, version, python, force):
"""Checks for version mismatch
Raises a VersionMismatchError if version of loaded trajectory and current pypet version
do not match. In case of `force=True` error is not raised only a warning is emitted.
"""
curr_python = pypetconstants.python_version_string
if (version != VERSION or curr_python != python) and not force:
raise pex.VersionMismatchError('Current pypet version is %s used under python %s '
' but your trajectory'
' was created with version %s and python %s.'
' Use >>force=True<< to perform your load regardless'
' of version mismatch.' %
(VERSION, curr_python, version, python))
elif version != VERSION or curr_python != python:
self._logger.warning('Current pypet version is %s with python %s but your trajectory'
' was created with version %s under python %s.'
' Yet, you enforced the load, so I will'
' handle the trajectory despite the'
' version mismatch.' %
(VERSION, curr_python, version, python)) |
def getWindows(): #https://sjohannes.wordpress.com/2012/03/23/win32-python-getting-all-window-titles/
"""Return dict: {'window title' : window handle} for all visible windows"""
titles = {}
def foreach_window(hwnd, lparam):
if IsWindowVisible(hwnd):
length = GetWindowTextLength(hwnd)
buff = ctypes.create_unicode_buffer(length + 1)
GetWindowText(hwnd, buff, length + 1)
titles[buff.value] = hwnd
return True
EnumWindows(EnumWindowsProc(foreach_window), 0)
return titles | Return dict: {'window title' : window handle} for all visible windows | Below is the the instruction that describes the task:
### Input:
Return dict: {'window title' : window handle} for all visible windows
### Response:
def getWindows(): #https://sjohannes.wordpress.com/2012/03/23/win32-python-getting-all-window-titles/
"""Return dict: {'window title' : window handle} for all visible windows"""
titles = {}
def foreach_window(hwnd, lparam):
if IsWindowVisible(hwnd):
length = GetWindowTextLength(hwnd)
buff = ctypes.create_unicode_buffer(length + 1)
GetWindowText(hwnd, buff, length + 1)
titles[buff.value] = hwnd
return True
EnumWindows(EnumWindowsProc(foreach_window), 0)
return titles |
def get_project_version_by_name(self, project, version_name):
"""Get a version Resource by its name present on a project.
:param project: ID or key of the project to get versions from
:type project: str
:param version_name: name of the version to search for
:type version_name: str
:rtype: Optional[Version]
"""
versions = self.project_versions(project)
for version in versions:
if version.name == version_name:
return version | Get a version Resource by its name present on a project.
:param project: ID or key of the project to get versions from
:type project: str
:param version_name: name of the version to search for
:type version_name: str
:rtype: Optional[Version] | Below is the the instruction that describes the task:
### Input:
Get a version Resource by its name present on a project.
:param project: ID or key of the project to get versions from
:type project: str
:param version_name: name of the version to search for
:type version_name: str
:rtype: Optional[Version]
### Response:
def get_project_version_by_name(self, project, version_name):
"""Get a version Resource by its name present on a project.
:param project: ID or key of the project to get versions from
:type project: str
:param version_name: name of the version to search for
:type version_name: str
:rtype: Optional[Version]
"""
versions = self.project_versions(project)
for version in versions:
if version.name == version_name:
return version |
def update_partition_static_route(self, org_name, part_name,
static_ip_list, vrf_prof=None,
service_node_ip=None):
"""Send static route update requests to DCNM.
:param org_name: name of organization
:param part_name: name of partition
:static_ip_list: List of static IP addresses
:vrf_prof: VRF Profile
:service_node_ip: Service Node IP address
"""
if part_name is None:
part_name = self._part_name
if vrf_prof is None:
vrf_prof = self.default_vrf_profile
operation = 'PUT'
url = (self._update_part_url % (org_name, part_name))
ip_str = ''
ip_cnt = 0
for ip in static_ip_list:
ip_sub = "$n0" + str(ip_cnt) + "=" + str(ip) + ";"
ip_str = ip_str + ip_sub
ip_cnt = ip_cnt + 1
cfg_args = {
"$vrfName=" + org_name + ':' + part_name + ";"
"$include_serviceNodeIpAddress=" + service_node_ip + ";" + ip_str
}
cfg_args = ';'.join(cfg_args)
payload = {
"partitionName": part_name,
"organizationName": org_name,
"dciExtensionStatus": "Not configured",
"vrfProfileName": vrf_prof,
"vrfName": ':'.join((org_name, part_name)),
"configArg": cfg_args}
res = self._send_request(operation, url, payload, 'partition')
return (res is not None and res.status_code in self._resp_ok) | Send static route update requests to DCNM.
:param org_name: name of organization
:param part_name: name of partition
:static_ip_list: List of static IP addresses
:vrf_prof: VRF Profile
:service_node_ip: Service Node IP address | Below is the the instruction that describes the task:
### Input:
Send static route update requests to DCNM.
:param org_name: name of organization
:param part_name: name of partition
:static_ip_list: List of static IP addresses
:vrf_prof: VRF Profile
:service_node_ip: Service Node IP address
### Response:
def update_partition_static_route(self, org_name, part_name,
static_ip_list, vrf_prof=None,
service_node_ip=None):
"""Send static route update requests to DCNM.
:param org_name: name of organization
:param part_name: name of partition
:static_ip_list: List of static IP addresses
:vrf_prof: VRF Profile
:service_node_ip: Service Node IP address
"""
if part_name is None:
part_name = self._part_name
if vrf_prof is None:
vrf_prof = self.default_vrf_profile
operation = 'PUT'
url = (self._update_part_url % (org_name, part_name))
ip_str = ''
ip_cnt = 0
for ip in static_ip_list:
ip_sub = "$n0" + str(ip_cnt) + "=" + str(ip) + ";"
ip_str = ip_str + ip_sub
ip_cnt = ip_cnt + 1
cfg_args = {
"$vrfName=" + org_name + ':' + part_name + ";"
"$include_serviceNodeIpAddress=" + service_node_ip + ";" + ip_str
}
cfg_args = ';'.join(cfg_args)
payload = {
"partitionName": part_name,
"organizationName": org_name,
"dciExtensionStatus": "Not configured",
"vrfProfileName": vrf_prof,
"vrfName": ':'.join((org_name, part_name)),
"configArg": cfg_args}
res = self._send_request(operation, url, payload, 'partition')
return (res is not None and res.status_code in self._resp_ok) |
def to_ubyte_array(barray):
"""Returns a c_ubyte_array filled with the given data of a bytearray or bytes"""
bs = (ctypes.c_ubyte * len(barray))()
pack_into('%ds' % len(barray), bs, 0, barray)
return bs | Returns a c_ubyte_array filled with the given data of a bytearray or bytes | Below is the the instruction that describes the task:
### Input:
Returns a c_ubyte_array filled with the given data of a bytearray or bytes
### Response:
def to_ubyte_array(barray):
"""Returns a c_ubyte_array filled with the given data of a bytearray or bytes"""
bs = (ctypes.c_ubyte * len(barray))()
pack_into('%ds' % len(barray), bs, 0, barray)
return bs |
def confirm_subscription(self, topic, token,
authenticate_on_unsubscribe=False):
"""
Get properties of a Topic
:type topic: string
:param topic: The ARN of the new topic.
:type token: string
:param token: Short-lived token sent to and endpoint during
the Subscribe operation.
:type authenticate_on_unsubscribe: bool
:param authenticate_on_unsubscribe: Optional parameter indicating
that you wish to disable
unauthenticated unsubscription
of the subscription.
"""
params = {'ContentType' : 'JSON',
'TopicArn' : topic,
'Token' : token}
if authenticate_on_unsubscribe:
params['AuthenticateOnUnsubscribe'] = 'true'
response = self.make_request('ConfirmSubscription', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body) | Get properties of a Topic
:type topic: string
:param topic: The ARN of the new topic.
:type token: string
:param token: Short-lived token sent to and endpoint during
the Subscribe operation.
:type authenticate_on_unsubscribe: bool
:param authenticate_on_unsubscribe: Optional parameter indicating
that you wish to disable
unauthenticated unsubscription
of the subscription. | Below is the the instruction that describes the task:
### Input:
Get properties of a Topic
:type topic: string
:param topic: The ARN of the new topic.
:type token: string
:param token: Short-lived token sent to and endpoint during
the Subscribe operation.
:type authenticate_on_unsubscribe: bool
:param authenticate_on_unsubscribe: Optional parameter indicating
that you wish to disable
unauthenticated unsubscription
of the subscription.
### Response:
def confirm_subscription(self, topic, token,
authenticate_on_unsubscribe=False):
"""
Get properties of a Topic
:type topic: string
:param topic: The ARN of the new topic.
:type token: string
:param token: Short-lived token sent to and endpoint during
the Subscribe operation.
:type authenticate_on_unsubscribe: bool
:param authenticate_on_unsubscribe: Optional parameter indicating
that you wish to disable
unauthenticated unsubscription
of the subscription.
"""
params = {'ContentType' : 'JSON',
'TopicArn' : topic,
'Token' : token}
if authenticate_on_unsubscribe:
params['AuthenticateOnUnsubscribe'] = 'true'
response = self.make_request('ConfirmSubscription', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body) |
async def modify_otr_status(self, modify_otr_status_request):
"""Enable or disable message history in a conversation."""
response = hangouts_pb2.ModifyOTRStatusResponse()
await self._pb_request('conversations/modifyotrstatus',
modify_otr_status_request, response)
return response | Enable or disable message history in a conversation. | Below is the the instruction that describes the task:
### Input:
Enable or disable message history in a conversation.
### Response:
async def modify_otr_status(self, modify_otr_status_request):
"""Enable or disable message history in a conversation."""
response = hangouts_pb2.ModifyOTRStatusResponse()
await self._pb_request('conversations/modifyotrstatus',
modify_otr_status_request, response)
return response |
def same_col(c):
"""Return True if all Mentions in the given candidate are from the same Col.
:param c: The candidate whose Mentions are being compared
:rtype: boolean
"""
return same_table(c) and all(
is_col_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence)
for i in range(len(c))
) | Return True if all Mentions in the given candidate are from the same Col.
:param c: The candidate whose Mentions are being compared
:rtype: boolean | Below is the the instruction that describes the task:
### Input:
Return True if all Mentions in the given candidate are from the same Col.
:param c: The candidate whose Mentions are being compared
:rtype: boolean
### Response:
def same_col(c):
"""Return True if all Mentions in the given candidate are from the same Col.
:param c: The candidate whose Mentions are being compared
:rtype: boolean
"""
return same_table(c) and all(
is_col_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence)
for i in range(len(c))
) |
def sendEvents(self, events):
"""Send a Tensor Event to Riemann"""
self.pressure += 1
self.sendString(self.encodeMessage(events)) | Send a Tensor Event to Riemann | Below is the the instruction that describes the task:
### Input:
Send a Tensor Event to Riemann
### Response:
def sendEvents(self, events):
"""Send a Tensor Event to Riemann"""
self.pressure += 1
self.sendString(self.encodeMessage(events)) |
def distb(self, tb=None, file=None):
"""Disassemble a traceback (default: last traceback)."""
if tb is None:
try:
tb = sys.last_traceback
except AttributeError:
raise RuntimeError("no last traceback to disassemble")
while tb.tb_next: tb = tb.tb_next
self.disassemble(tb.tb_frame.f_code, tb.tb_lasti, file=file) | Disassemble a traceback (default: last traceback). | Below is the the instruction that describes the task:
### Input:
Disassemble a traceback (default: last traceback).
### Response:
def distb(self, tb=None, file=None):
"""Disassemble a traceback (default: last traceback)."""
if tb is None:
try:
tb = sys.last_traceback
except AttributeError:
raise RuntimeError("no last traceback to disassemble")
while tb.tb_next: tb = tb.tb_next
self.disassemble(tb.tb_frame.f_code, tb.tb_lasti, file=file) |
def _baseattrs(self):
"""A dict of members expressed in literals"""
result = super()._baseattrs
result["params"] = ", ".join(self.parameters)
return result | A dict of members expressed in literals | Below is the the instruction that describes the task:
### Input:
A dict of members expressed in literals
### Response:
def _baseattrs(self):
"""A dict of members expressed in literals"""
result = super()._baseattrs
result["params"] = ", ".join(self.parameters)
return result |
def get_next_image(img_dir):
"""Get the next image in a dir."""
images, current_wall = get_image_dir(img_dir)
images.sort(key=lambda img: [int(x) if x.isdigit() else x
for x in re.split('([0-9]+)', img)])
try:
next_index = images.index(current_wall) + 1
except ValueError:
next_index = 0
try:
image = images[next_index]
except IndexError:
image = images[0]
return os.path.join(img_dir, image) | Get the next image in a dir. | Below is the the instruction that describes the task:
### Input:
Get the next image in a dir.
### Response:
def get_next_image(img_dir):
"""Get the next image in a dir."""
images, current_wall = get_image_dir(img_dir)
images.sort(key=lambda img: [int(x) if x.isdigit() else x
for x in re.split('([0-9]+)', img)])
try:
next_index = images.index(current_wall) + 1
except ValueError:
next_index = 0
try:
image = images[next_index]
except IndexError:
image = images[0]
return os.path.join(img_dir, image) |
def now_date(str=False):
"""Get the current date."""
if str:
return datetime.datetime.now().strftime("%Y-%m-%d")
return datetime.date.today() | Get the current date. | Below is the the instruction that describes the task:
### Input:
Get the current date.
### Response:
def now_date(str=False):
"""Get the current date."""
if str:
return datetime.datetime.now().strftime("%Y-%m-%d")
return datetime.date.today() |
def migrate_database(adapter):
"""Migrate an old loqusdb instance to 1.0
Args:
adapter
Returns:
nr_updated(int): Number of variants that where updated
"""
all_variants = adapter.get_variants()
nr_variants = all_variants.count()
nr_updated = 0
with progressbar(all_variants, label="Updating variants", length=nr_variants) as bar:
for variant in bar:
# Do not update if the variants have the correct format
if 'chrom' in variant:
continue
nr_updated += 1
splitted_id = variant['_id'].split('_')
chrom = splitted_id[0]
start = int(splitted_id[1])
ref = splitted_id[2]
alt = splitted_id[3]
# Calculate end
end = start + (max(len(ref), len(alt)) - 1)
adapter.db.variant.find_one_and_update(
{'_id': variant['_id']},
{
'$set': {
'chrom': chrom,
'start': start,
'end': end
}
}
)
return nr_updated | Migrate an old loqusdb instance to 1.0
Args:
adapter
Returns:
nr_updated(int): Number of variants that where updated | Below is the the instruction that describes the task:
### Input:
Migrate an old loqusdb instance to 1.0
Args:
adapter
Returns:
nr_updated(int): Number of variants that where updated
### Response:
def migrate_database(adapter):
"""Migrate an old loqusdb instance to 1.0
Args:
adapter
Returns:
nr_updated(int): Number of variants that where updated
"""
all_variants = adapter.get_variants()
nr_variants = all_variants.count()
nr_updated = 0
with progressbar(all_variants, label="Updating variants", length=nr_variants) as bar:
for variant in bar:
# Do not update if the variants have the correct format
if 'chrom' in variant:
continue
nr_updated += 1
splitted_id = variant['_id'].split('_')
chrom = splitted_id[0]
start = int(splitted_id[1])
ref = splitted_id[2]
alt = splitted_id[3]
# Calculate end
end = start + (max(len(ref), len(alt)) - 1)
adapter.db.variant.find_one_and_update(
{'_id': variant['_id']},
{
'$set': {
'chrom': chrom,
'start': start,
'end': end
}
}
)
return nr_updated |
def _create_content_element(self, content, data_property_value):
"""
Create a element to show the content.
:param content: The text content of element.
:type content: str
:param data_property_value: The value of custom attribute used to
identify the fix.
:type data_property_value: str
:return: The element to show the content.
:rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
"""
content_element = self.html_parser.create_element('span')
content_element.set_attribute(
AccessibleCSSImplementation.DATA_ISOLATOR_ELEMENT,
'true'
)
content_element.set_attribute(
AccessibleCSSImplementation.DATA_SPEAK_AS,
data_property_value
)
content_element.append_text(content)
return content_element | Create a element to show the content.
:param content: The text content of element.
:type content: str
:param data_property_value: The value of custom attribute used to
identify the fix.
:type data_property_value: str
:return: The element to show the content.
:rtype: hatemile.util.html.htmldomelement.HTMLDOMElement | Below is the the instruction that describes the task:
### Input:
Create a element to show the content.
:param content: The text content of element.
:type content: str
:param data_property_value: The value of custom attribute used to
identify the fix.
:type data_property_value: str
:return: The element to show the content.
:rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
### Response:
def _create_content_element(self, content, data_property_value):
"""
Create a element to show the content.
:param content: The text content of element.
:type content: str
:param data_property_value: The value of custom attribute used to
identify the fix.
:type data_property_value: str
:return: The element to show the content.
:rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
"""
content_element = self.html_parser.create_element('span')
content_element.set_attribute(
AccessibleCSSImplementation.DATA_ISOLATOR_ELEMENT,
'true'
)
content_element.set_attribute(
AccessibleCSSImplementation.DATA_SPEAK_AS,
data_property_value
)
content_element.append_text(content)
return content_element |
def stationary_distributions(self, max_iter=200, tol=1e-5):
r"""
Compute the moments of the stationary distributions of :math:`x_t` and
:math:`y_t` if possible. Computation is by iteration, starting from
the initial conditions self.mu_0 and self.Sigma_0
Parameters
----------
max_iter : scalar(int), optional(default=200)
The maximum number of iterations allowed
tol : scalar(float), optional(default=1e-5)
The tolerance level that one wishes to achieve
Returns
-------
mu_x_star : array_like(float)
An n x 1 array representing the stationary mean of :math:`x_t`
mu_y_star : array_like(float)
An k x 1 array representing the stationary mean of :math:`y_t`
Sigma_x_star : array_like(float)
An n x n array representing the stationary var-cov matrix
of :math:`x_t`
Sigma_y_star : array_like(float)
An k x k array representing the stationary var-cov matrix
of :math:`y_t`
"""
# == Initialize iteration == #
m = self.moment_sequence()
mu_x, mu_y, Sigma_x, Sigma_y = next(m)
i = 0
error = tol + 1
# == Loop until convergence or failure == #
while error > tol:
if i > max_iter:
fail_message = 'Convergence failed after {} iterations'
raise ValueError(fail_message.format(max_iter))
else:
i += 1
mu_x1, mu_y1, Sigma_x1, Sigma_y1 = next(m)
error_mu = np.max(np.abs(mu_x1 - mu_x))
error_Sigma = np.max(np.abs(Sigma_x1 - Sigma_x))
error = max(error_mu, error_Sigma)
mu_x, Sigma_x = mu_x1, Sigma_x1
# == Prepare return values == #
mu_x_star, Sigma_x_star = mu_x, Sigma_x
mu_y_star, Sigma_y_star = mu_y1, Sigma_y1
return mu_x_star, mu_y_star, Sigma_x_star, Sigma_y_star | r"""
Compute the moments of the stationary distributions of :math:`x_t` and
:math:`y_t` if possible. Computation is by iteration, starting from
the initial conditions self.mu_0 and self.Sigma_0
Parameters
----------
max_iter : scalar(int), optional(default=200)
The maximum number of iterations allowed
tol : scalar(float), optional(default=1e-5)
The tolerance level that one wishes to achieve
Returns
-------
mu_x_star : array_like(float)
An n x 1 array representing the stationary mean of :math:`x_t`
mu_y_star : array_like(float)
An k x 1 array representing the stationary mean of :math:`y_t`
Sigma_x_star : array_like(float)
An n x n array representing the stationary var-cov matrix
of :math:`x_t`
Sigma_y_star : array_like(float)
An k x k array representing the stationary var-cov matrix
of :math:`y_t` | Below is the the instruction that describes the task:
### Input:
r"""
Compute the moments of the stationary distributions of :math:`x_t` and
:math:`y_t` if possible. Computation is by iteration, starting from
the initial conditions self.mu_0 and self.Sigma_0
Parameters
----------
max_iter : scalar(int), optional(default=200)
The maximum number of iterations allowed
tol : scalar(float), optional(default=1e-5)
The tolerance level that one wishes to achieve
Returns
-------
mu_x_star : array_like(float)
An n x 1 array representing the stationary mean of :math:`x_t`
mu_y_star : array_like(float)
An k x 1 array representing the stationary mean of :math:`y_t`
Sigma_x_star : array_like(float)
An n x n array representing the stationary var-cov matrix
of :math:`x_t`
Sigma_y_star : array_like(float)
An k x k array representing the stationary var-cov matrix
of :math:`y_t`
### Response:
def stationary_distributions(self, max_iter=200, tol=1e-5):
r"""
Compute the moments of the stationary distributions of :math:`x_t` and
:math:`y_t` if possible. Computation is by iteration, starting from
the initial conditions self.mu_0 and self.Sigma_0
Parameters
----------
max_iter : scalar(int), optional(default=200)
The maximum number of iterations allowed
tol : scalar(float), optional(default=1e-5)
The tolerance level that one wishes to achieve
Returns
-------
mu_x_star : array_like(float)
An n x 1 array representing the stationary mean of :math:`x_t`
mu_y_star : array_like(float)
An k x 1 array representing the stationary mean of :math:`y_t`
Sigma_x_star : array_like(float)
An n x n array representing the stationary var-cov matrix
of :math:`x_t`
Sigma_y_star : array_like(float)
An k x k array representing the stationary var-cov matrix
of :math:`y_t`
"""
# == Initialize iteration == #
m = self.moment_sequence()
mu_x, mu_y, Sigma_x, Sigma_y = next(m)
i = 0
error = tol + 1
# == Loop until convergence or failure == #
while error > tol:
if i > max_iter:
fail_message = 'Convergence failed after {} iterations'
raise ValueError(fail_message.format(max_iter))
else:
i += 1
mu_x1, mu_y1, Sigma_x1, Sigma_y1 = next(m)
error_mu = np.max(np.abs(mu_x1 - mu_x))
error_Sigma = np.max(np.abs(Sigma_x1 - Sigma_x))
error = max(error_mu, error_Sigma)
mu_x, Sigma_x = mu_x1, Sigma_x1
# == Prepare return values == #
mu_x_star, Sigma_x_star = mu_x, Sigma_x
mu_y_star, Sigma_y_star = mu_y1, Sigma_y1
return mu_x_star, mu_y_star, Sigma_x_star, Sigma_y_star |
def is_fw_present(self, fw_id):
"""Returns if firewall index by ID is present in dictionary. """
if self.fw_id is None or self.fw_id != fw_id:
return False
else:
return True | Returns if firewall index by ID is present in dictionary. | Below is the the instruction that describes the task:
### Input:
Returns if firewall index by ID is present in dictionary.
### Response:
def is_fw_present(self, fw_id):
"""Returns if firewall index by ID is present in dictionary. """
if self.fw_id is None or self.fw_id != fw_id:
return False
else:
return True |
def _validate_pos(df):
"""Validates the returned positional object
"""
assert isinstance(df, pd.DataFrame)
assert ["seqname", "position", "strand"] == df.columns.tolist()
assert df.position.dtype == np.dtype("int64")
assert df.strand.dtype == np.dtype("O")
assert df.seqname.dtype == np.dtype("O")
return df | Validates the returned positional object | Below is the the instruction that describes the task:
### Input:
Validates the returned positional object
### Response:
def _validate_pos(df):
"""Validates the returned positional object
"""
assert isinstance(df, pd.DataFrame)
assert ["seqname", "position", "strand"] == df.columns.tolist()
assert df.position.dtype == np.dtype("int64")
assert df.strand.dtype == np.dtype("O")
assert df.seqname.dtype == np.dtype("O")
return df |
def main():
"""
Computational Genomics Lab, Genomics Institute, UC Santa Cruz
Toil BWA pipeline
Alignment of fastq reads via BWA-kit
General usage:
1. Type "toil-bwa generate" to create an editable manifest and config in the current working directory.
2. Parameterize the pipeline by editing the config.
3. Fill in the manifest with information pertaining to your samples.
4. Type "toil-bwa run [jobStore]" to execute the pipeline.
Please read the README.md located in the source directory or at:
https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/bwa_alignment
Structure of the BWA pipeline (per sample)
0 --> 1
0 = Download sample
1 = Run BWA-kit
===================================================================
:Dependencies:
cURL: apt-get install curl
Toil: pip install toil
Docker: wget -qO- https://get.docker.com/ | sh
Optional:
S3AM: pip install --s3am (requires ~/.boto config file)
Boto: pip install boto
"""
# Define Parser object and add to Toil
parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
# Generate subparsers
subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.')
subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.')
subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.')
# Run subparser
parser_run = subparsers.add_parser('run', help='Runs the BWA alignment pipeline')
group = parser_run.add_mutually_exclusive_group()
parser_run.add_argument('--config', default='config-toil-bwa.yaml', type=str,
help='Path to the (filled in) config file, generated with "generate-config".')
group.add_argument('--manifest', default='manifest-toil-bwa.tsv', type=str,
help='Path to the (filled in) manifest file, generated with "generate-manifest". '
'\nDefault value: "%(default)s".')
group.add_argument('--sample', nargs='+', action=required_length(2, 3),
help='Space delimited sample UUID and fastq files in the format: uuid url1 [url2].')
# Print docstring help if no arguments provided
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
Job.Runner.addToilOptions(parser_run)
args = parser.parse_args()
# Parse subparsers related to generation of config and manifest
cwd = os.getcwd()
if args.command == 'generate-config' or args.command == 'generate':
generate_file(os.path.join(cwd, 'config-toil-bwa.yaml'), generate_config)
if args.command == 'generate-manifest' or args.command == 'generate':
generate_file(os.path.join(cwd, 'manifest-toil-bwa.tsv'), generate_manifest)
# Pipeline execution
elif args.command == 'run':
require(os.path.exists(args.config), '{} not found. Please run generate-config'.format(args.config))
if not args.sample:
args.sample = None
require(os.path.exists(args.manifest), '{} not found and no sample provided. '
'Please run "generate-manifest"'.format(args.manifest))
# Parse config
parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()}
config = argparse.Namespace(**parsed_config)
config.maxCores = int(args.maxCores) if args.maxCores else sys.maxint
samples = [args.sample[0], args.sample[1:]] if args.sample else parse_manifest(args.manifest)
# Sanity checks
require(config.ref, 'Missing URL for reference file: {}'.format(config.ref))
require(config.output_dir, 'No output location specified: {}'.format(config.output_dir))
# Launch Pipeline
Job.Runner.startToil(Job.wrapJobFn(download_reference_files, config, samples), args) | Computational Genomics Lab, Genomics Institute, UC Santa Cruz
Toil BWA pipeline
Alignment of fastq reads via BWA-kit
General usage:
1. Type "toil-bwa generate" to create an editable manifest and config in the current working directory.
2. Parameterize the pipeline by editing the config.
3. Fill in the manifest with information pertaining to your samples.
4. Type "toil-bwa run [jobStore]" to execute the pipeline.
Please read the README.md located in the source directory or at:
https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/bwa_alignment
Structure of the BWA pipeline (per sample)
0 --> 1
0 = Download sample
1 = Run BWA-kit
===================================================================
:Dependencies:
cURL: apt-get install curl
Toil: pip install toil
Docker: wget -qO- https://get.docker.com/ | sh
Optional:
S3AM: pip install --s3am (requires ~/.boto config file)
Boto: pip install boto | Below is the instruction that describes the task:
### Input:
Computational Genomics Lab, Genomics Institute, UC Santa Cruz
Toil BWA pipeline
Alignment of fastq reads via BWA-kit
General usage:
1. Type "toil-bwa generate" to create an editable manifest and config in the current working directory.
2. Parameterize the pipeline by editing the config.
3. Fill in the manifest with information pertaining to your samples.
4. Type "toil-bwa run [jobStore]" to execute the pipeline.
Please read the README.md located in the source directory or at:
https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/bwa_alignment
Structure of the BWA pipeline (per sample)
0 --> 1
0 = Download sample
1 = Run BWA-kit
===================================================================
:Dependencies:
cURL: apt-get install curl
Toil: pip install toil
Docker: wget -qO- https://get.docker.com/ | sh
Optional:
S3AM: pip install --s3am (requires ~/.boto config file)
Boto: pip install boto
### Response:
def main():
"""
Computational Genomics Lab, Genomics Institute, UC Santa Cruz
Toil BWA pipeline
Alignment of fastq reads via BWA-kit
General usage:
1. Type "toil-bwa generate" to create an editable manifest and config in the current working directory.
2. Parameterize the pipeline by editing the config.
3. Fill in the manifest with information pertaining to your samples.
4. Type "toil-bwa run [jobStore]" to execute the pipeline.
Please read the README.md located in the source directory or at:
https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/bwa_alignment
Structure of the BWA pipeline (per sample)
0 --> 1
0 = Download sample
1 = Run BWA-kit
===================================================================
:Dependencies:
cURL: apt-get install curl
Toil: pip install toil
Docker: wget -qO- https://get.docker.com/ | sh
Optional:
S3AM: pip install --s3am (requires ~/.boto config file)
Boto: pip install boto
"""
# Define Parser object and add to Toil
parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
# Generate subparsers
subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.')
subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.')
subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.')
# Run subparser
parser_run = subparsers.add_parser('run', help='Runs the BWA alignment pipeline')
group = parser_run.add_mutually_exclusive_group()
parser_run.add_argument('--config', default='config-toil-bwa.yaml', type=str,
help='Path to the (filled in) config file, generated with "generate-config".')
group.add_argument('--manifest', default='manifest-toil-bwa.tsv', type=str,
help='Path to the (filled in) manifest file, generated with "generate-manifest". '
'\nDefault value: "%(default)s".')
group.add_argument('--sample', nargs='+', action=required_length(2, 3),
help='Space delimited sample UUID and fastq files in the format: uuid url1 [url2].')
# Print docstring help if no arguments provided
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
Job.Runner.addToilOptions(parser_run)
args = parser.parse_args()
# Parse subparsers related to generation of config and manifest
cwd = os.getcwd()
if args.command == 'generate-config' or args.command == 'generate':
generate_file(os.path.join(cwd, 'config-toil-bwa.yaml'), generate_config)
if args.command == 'generate-manifest' or args.command == 'generate':
generate_file(os.path.join(cwd, 'manifest-toil-bwa.tsv'), generate_manifest)
# Pipeline execution
elif args.command == 'run':
require(os.path.exists(args.config), '{} not found. Please run generate-config'.format(args.config))
if not args.sample:
args.sample = None
require(os.path.exists(args.manifest), '{} not found and no sample provided. '
'Please run "generate-manifest"'.format(args.manifest))
# Parse config
parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()}
config = argparse.Namespace(**parsed_config)
config.maxCores = int(args.maxCores) if args.maxCores else sys.maxint
samples = [args.sample[0], args.sample[1:]] if args.sample else parse_manifest(args.manifest)
# Sanity checks
require(config.ref, 'Missing URL for reference file: {}'.format(config.ref))
require(config.output_dir, 'No output location specified: {}'.format(config.output_dir))
# Launch Pipeline
Job.Runner.startToil(Job.wrapJobFn(download_reference_files, config, samples), args) |
def _configure_nodes(self, nodes):
"""Parse and set up the given nodes.
:param nodes: nodes used to create the continuum (see doc for format).
"""
if isinstance(nodes, str):
nodes = [nodes]
elif not isinstance(nodes, (dict, list)):
raise ValueError(
'nodes configuration should be a list or a dict,'
' got {}'.format(type(nodes)))
conf_changed = False
for node in nodes:
conf = {
'hostname': node,
'instance': None,
'nodename': node,
'port': None,
'vnodes': self._default_vnodes,
'weight': 1
}
current_conf = self.runtime._nodes.get(node, {})
nodename = node
# new node, trigger a ring update
if not current_conf:
conf_changed = True
# complex config
if isinstance(nodes, dict):
node_conf = nodes[node]
if isinstance(node_conf, int):
conf['weight'] = node_conf
elif isinstance(node_conf, dict):
for k, v in node_conf.items():
if k in conf:
conf[k] = v
# changing those config trigger a ring update
if k in ['nodename', 'vnodes', 'weight']:
if current_conf.get(k) != v:
conf_changed = True
else:
raise ValueError(
'node configuration should be a dict or an int,'
' got {}'.format(type(node_conf)))
if self._weight_fn:
conf['weight'] = self._weight_fn(**conf)
# changing the weight of a node trigger a ring update
if current_conf.get('weight') != conf['weight']:
conf_changed = True
self.runtime._nodes[nodename] = conf
return conf_changed | Parse and set up the given nodes.
:param nodes: nodes used to create the continuum (see doc for format). | Below is the instruction that describes the task:
### Input:
Parse and set up the given nodes.
:param nodes: nodes used to create the continuum (see doc for format).
### Response:
def _configure_nodes(self, nodes):
"""Parse and set up the given nodes.
:param nodes: nodes used to create the continuum (see doc for format).
"""
if isinstance(nodes, str):
nodes = [nodes]
elif not isinstance(nodes, (dict, list)):
raise ValueError(
'nodes configuration should be a list or a dict,'
' got {}'.format(type(nodes)))
conf_changed = False
for node in nodes:
conf = {
'hostname': node,
'instance': None,
'nodename': node,
'port': None,
'vnodes': self._default_vnodes,
'weight': 1
}
current_conf = self.runtime._nodes.get(node, {})
nodename = node
# new node, trigger a ring update
if not current_conf:
conf_changed = True
# complex config
if isinstance(nodes, dict):
node_conf = nodes[node]
if isinstance(node_conf, int):
conf['weight'] = node_conf
elif isinstance(node_conf, dict):
for k, v in node_conf.items():
if k in conf:
conf[k] = v
# changing those config trigger a ring update
if k in ['nodename', 'vnodes', 'weight']:
if current_conf.get(k) != v:
conf_changed = True
else:
raise ValueError(
'node configuration should be a dict or an int,'
' got {}'.format(type(node_conf)))
if self._weight_fn:
conf['weight'] = self._weight_fn(**conf)
# changing the weight of a node trigger a ring update
if current_conf.get('weight') != conf['weight']:
conf_changed = True
self.runtime._nodes[nodename] = conf
return conf_changed |
def fromvars(cls, dataset, batch_size, train=None, **kwargs):
"""Create a Batch directly from a number of Variables."""
batch = cls()
batch.batch_size = batch_size
batch.dataset = dataset
batch.fields = dataset.fields.keys()
for k, v in kwargs.items():
setattr(batch, k, v)
return batch | Create a Batch directly from a number of Variables. | Below is the instruction that describes the task:
### Input:
Create a Batch directly from a number of Variables.
### Response:
def fromvars(cls, dataset, batch_size, train=None, **kwargs):
"""Create a Batch directly from a number of Variables."""
batch = cls()
batch.batch_size = batch_size
batch.dataset = dataset
batch.fields = dataset.fields.keys()
for k, v in kwargs.items():
setattr(batch, k, v)
return batch |
def per_distro_data(self):
"""
Return download data by distro name and version.
:return: dict of cache data; keys are datetime objects, values are
dict of distro name/version (str) to count (int).
:rtype: dict
"""
ret = {}
for cache_date in self.cache_dates:
data = self._cache_get(cache_date)
ret[cache_date] = {}
for distro_name, distro_data in data['by_distro'].items():
if distro_name.lower() == 'red hat enterprise linux server':
distro_name = 'RHEL'
for distro_ver, count in distro_data.items():
ver = self._shorten_version(distro_ver, num_components=1)
if distro_name.lower() == 'os x':
ver = self._shorten_version(distro_ver,
num_components=2)
k = self._compound_column_value(distro_name, ver)
ret[cache_date][k] = count
if len(ret[cache_date]) == 0:
ret[cache_date]['unknown'] = 0
return ret | Return download data by distro name and version.
:return: dict of cache data; keys are datetime objects, values are
dict of distro name/version (str) to count (int).
:rtype: dict | Below is the instruction that describes the task:
### Input:
Return download data by distro name and version.
:return: dict of cache data; keys are datetime objects, values are
dict of distro name/version (str) to count (int).
:rtype: dict
### Response:
def per_distro_data(self):
"""
Return download data by distro name and version.
:return: dict of cache data; keys are datetime objects, values are
dict of distro name/version (str) to count (int).
:rtype: dict
"""
ret = {}
for cache_date in self.cache_dates:
data = self._cache_get(cache_date)
ret[cache_date] = {}
for distro_name, distro_data in data['by_distro'].items():
if distro_name.lower() == 'red hat enterprise linux server':
distro_name = 'RHEL'
for distro_ver, count in distro_data.items():
ver = self._shorten_version(distro_ver, num_components=1)
if distro_name.lower() == 'os x':
ver = self._shorten_version(distro_ver,
num_components=2)
k = self._compound_column_value(distro_name, ver)
ret[cache_date][k] = count
if len(ret[cache_date]) == 0:
ret[cache_date]['unknown'] = 0
return ret |
def _init_nodes(self, config):
""" Gathers dependency sets onto _nodes """
if not isinstance(config, dict):
raise TypeError('"config" must be a dictionary')
for (name, conf) in six.iteritems(config):
args = [] if 'args' not in conf else conf['args']
kwargs = {} if 'kwargs' not in conf else conf['kwargs']
dependencies = set()
arg_deps = self._get_dependencies_from_args(args)
kwarg_deps = self._get_dependencies_from_kwargs(kwargs)
dependencies.update(arg_deps)
dependencies.update(kwarg_deps)
self._nodes[name] = dependencies | Gathers dependency sets onto _nodes | Below is the instruction that describes the task:
### Input:
Gathers dependency sets onto _nodes
### Response:
def _init_nodes(self, config):
""" Gathers dependency sets onto _nodes """
if not isinstance(config, dict):
raise TypeError('"config" must be a dictionary')
for (name, conf) in six.iteritems(config):
args = [] if 'args' not in conf else conf['args']
kwargs = {} if 'kwargs' not in conf else conf['kwargs']
dependencies = set()
arg_deps = self._get_dependencies_from_args(args)
kwarg_deps = self._get_dependencies_from_kwargs(kwargs)
dependencies.update(arg_deps)
dependencies.update(kwarg_deps)
self._nodes[name] = dependencies |
def guest_start(self, userid):
"""Power on a virtual machine.
:param str userid: the id of the virtual machine to be power on
:returns: None
"""
action = "start guest '%s'" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._vmops.guest_start(userid) | Power on a virtual machine.
:param str userid: the id of the virtual machine to be power on
:returns: None | Below is the instruction that describes the task:
### Input:
Power on a virtual machine.
:param str userid: the id of the virtual machine to be power on
:returns: None
### Response:
def guest_start(self, userid):
"""Power on a virtual machine.
:param str userid: the id of the virtual machine to be power on
:returns: None
"""
action = "start guest '%s'" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._vmops.guest_start(userid) |
def submit_form_id(self, id_):
"""
Submit the form with given id (used to disambiguate between multiple
forms).
"""
form = ElementSelector(
world.browser,
str('id("{id}")'.format(id=id_)),
)
assert form, "Cannot find a form with ID '{}' on the page.".format(id_)
form.submit() | Submit the form with given id (used to disambiguate between multiple
forms). | Below is the instruction that describes the task:
### Input:
Submit the form with given id (used to disambiguate between multiple
forms).
### Response:
def submit_form_id(self, id_):
"""
Submit the form with given id (used to disambiguate between multiple
forms).
"""
form = ElementSelector(
world.browser,
str('id("{id}")'.format(id=id_)),
)
assert form, "Cannot find a form with ID '{}' on the page.".format(id_)
form.submit() |
def move_detections(label, dy, dx):
"""
Move detections in direction dx, dy.
:param label: The label dict containing all detection lists.
:param dy: The delta in y direction as a number.
:param dx: The delta in x direction as a number.
:return:
"""
for k in label.keys():
if k.startswith("detection"):
detections = label[k]
for detection in detections:
detection.move_image(-dx, -dy) | Move detections in direction dx, dy.
:param label: The label dict containing all detection lists.
:param dy: The delta in y direction as a number.
:param dx: The delta in x direction as a number.
:return: | Below is the instruction that describes the task:
### Input:
Move detections in direction dx, dy.
:param label: The label dict containing all detection lists.
:param dy: The delta in y direction as a number.
:param dx: The delta in x direction as a number.
:return:
### Response:
def move_detections(label, dy, dx):
"""
Move detections in direction dx, dy.
:param label: The label dict containing all detection lists.
:param dy: The delta in y direction as a number.
:param dx: The delta in x direction as a number.
:return:
"""
for k in label.keys():
if k.startswith("detection"):
detections = label[k]
for detection in detections:
detection.move_image(-dx, -dy) |
def console_progress_callback(current, maximum, message=None):
"""Simple console based callback implementation for tests.
:param current: Current progress.
:type current: int
:param maximum: Maximum range (point at which task is complete.
:type maximum: int
:param message: Optional message dictionary to containing content
we can display to the user. See safe.definitions.analysis_steps
for an example of the expected format
:type message: dict
"""
# noinspection PyChainedComparisons
if maximum > 1000 and current % 1000 != 0 and current != maximum:
return
if message is not None:
LOGGER.info(message['description'])
LOGGER.info('Task progress: %i of %i' % (current, maximum)) | Simple console based callback implementation for tests.
:param current: Current progress.
:type current: int
:param maximum: Maximum range (point at which task is complete.
:type maximum: int
:param message: Optional message dictionary to containing content
we can display to the user. See safe.definitions.analysis_steps
for an example of the expected format
:type message: dict | Below is the instruction that describes the task:
### Input:
Simple console based callback implementation for tests.
:param current: Current progress.
:type current: int
:param maximum: Maximum range (point at which task is complete.
:type maximum: int
:param message: Optional message dictionary to containing content
we can display to the user. See safe.definitions.analysis_steps
for an example of the expected format
:type message: dict
### Response:
def console_progress_callback(current, maximum, message=None):
"""Simple console based callback implementation for tests.
:param current: Current progress.
:type current: int
:param maximum: Maximum range (point at which task is complete.
:type maximum: int
:param message: Optional message dictionary to containing content
we can display to the user. See safe.definitions.analysis_steps
for an example of the expected format
:type message: dict
"""
# noinspection PyChainedComparisons
if maximum > 1000 and current % 1000 != 0 and current != maximum:
return
if message is not None:
LOGGER.info(message['description'])
LOGGER.info('Task progress: %i of %i' % (current, maximum)) |
def __fetch_issue_messages(self, issue_id):
"""Get messages of an issue"""
for messages_raw in self.client.issue_collection(issue_id, "messages"):
messages = json.loads(messages_raw)
for msg in messages['entries']:
msg['owner_data'] = self.__fetch_user_data('{OWNER}', msg['owner_link'])
yield msg | Get messages of an issue | Below is the instruction that describes the task:
### Input:
Get messages of an issue
### Response:
def __fetch_issue_messages(self, issue_id):
"""Get messages of an issue"""
for messages_raw in self.client.issue_collection(issue_id, "messages"):
messages = json.loads(messages_raw)
for msg in messages['entries']:
msg['owner_data'] = self.__fetch_user_data('{OWNER}', msg['owner_link'])
yield msg |
def approve(self, template_address, account):
"""
Approve a template already proposed. The account needs to be owner of the templateManager
contract to be able of approve the template.
:param template_address: Address of the template contract, str
:param account: account approving the template, Account
:return: bool
"""
try:
approved = self._keeper.template_manager.approve_template(template_address, account)
return approved
except ValueError as err:
template_values = self._keeper.template_manager.get_template(template_address)
if not template_values:
logger.warning(f'Approve template failed: {err}')
return False
if template_values.state == 1:
logger.warning(f'Approve template failed, this template is '
f'currently in "proposed" state.')
return False
if template_values.state == 3:
logger.warning(f'Approve template failed, this template appears to be '
f'revoked.')
return False
if template_values.state == 2:
return True
return False | Approve a template already proposed. The account needs to be owner of the templateManager
contract to be able of approve the template.
:param template_address: Address of the template contract, str
:param account: account approving the template, Account
:return: bool | Below is the instruction that describes the task:
### Input:
Approve a template already proposed. The account needs to be owner of the templateManager
contract to be able of approve the template.
:param template_address: Address of the template contract, str
:param account: account approving the template, Account
:return: bool
### Response:
def approve(self, template_address, account):
"""
Approve a template already proposed. The account needs to be owner of the templateManager
contract to be able of approve the template.
:param template_address: Address of the template contract, str
:param account: account approving the template, Account
:return: bool
"""
try:
approved = self._keeper.template_manager.approve_template(template_address, account)
return approved
except ValueError as err:
template_values = self._keeper.template_manager.get_template(template_address)
if not template_values:
logger.warning(f'Approve template failed: {err}')
return False
if template_values.state == 1:
logger.warning(f'Approve template failed, this template is '
f'currently in "proposed" state.')
return False
if template_values.state == 3:
logger.warning(f'Approve template failed, this template appears to be '
f'revoked.')
return False
if template_values.state == 2:
return True
return False |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncMapItemContext for this SyncMapItemInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemContext
"""
if self._context is None:
self._context = SyncMapItemContext(
self._version,
service_sid=self._solution['service_sid'],
map_sid=self._solution['map_sid'],
key=self._solution['key'],
)
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncMapItemContext for this SyncMapItemInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemContext | Below is the instruction that describes the task:
### Input:
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncMapItemContext for this SyncMapItemInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemContext
### Response:
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncMapItemContext for this SyncMapItemInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemContext
"""
if self._context is None:
self._context = SyncMapItemContext(
self._version,
service_sid=self._solution['service_sid'],
map_sid=self._solution['map_sid'],
key=self._solution['key'],
)
return self._context |
def get_pan(self, coord='data'):
"""Get pan positions.
Parameters
----------
coord : {'data', 'wcs'}
Indicates whether the pan positions are returned in
data or WCS space.
Returns
-------
positions : tuple
X and Y positions, in that order.
"""
pan_x, pan_y = self.t_['pan'][:2]
if coord == 'wcs':
if self.t_['pan_coord'] == 'data':
image = self.get_image()
if image is not None:
try:
return image.pixtoradec(pan_x, pan_y)
except Exception as e:
pass
# <-- data already in coordinates form
return (pan_x, pan_y)
# <-- requesting data coords
if self.t_['pan_coord'] == 'data':
return (pan_x, pan_y)
image = self.get_image()
if image is not None:
try:
return image.radectopix(pan_x, pan_y)
except Exception as e:
pass
return (pan_x, pan_y) | Get pan positions.
Parameters
----------
coord : {'data', 'wcs'}
Indicates whether the pan positions are returned in
data or WCS space.
Returns
-------
positions : tuple
X and Y positions, in that order. | Below is the instruction that describes the task:
### Input:
Get pan positions.
Parameters
----------
coord : {'data', 'wcs'}
Indicates whether the pan positions are returned in
data or WCS space.
Returns
-------
positions : tuple
X and Y positions, in that order.
### Response:
def get_pan(self, coord='data'):
"""Get pan positions.
Parameters
----------
coord : {'data', 'wcs'}
Indicates whether the pan positions are returned in
data or WCS space.
Returns
-------
positions : tuple
X and Y positions, in that order.
"""
pan_x, pan_y = self.t_['pan'][:2]
if coord == 'wcs':
if self.t_['pan_coord'] == 'data':
image = self.get_image()
if image is not None:
try:
return image.pixtoradec(pan_x, pan_y)
except Exception as e:
pass
# <-- data already in coordinates form
return (pan_x, pan_y)
# <-- requesting data coords
if self.t_['pan_coord'] == 'data':
return (pan_x, pan_y)
image = self.get_image()
if image is not None:
try:
return image.radectopix(pan_x, pan_y)
except Exception as e:
pass
return (pan_x, pan_y) |
def get_footerReference(self, type_):
"""Return footerReference element of *type_* or None if not present."""
path = "./w:footerReference[@w:type='%s']" % WD_HEADER_FOOTER.to_xml(type_)
footerReferences = self.xpath(path)
if not footerReferences:
return None
return footerReferences[0] | Return footerReference element of *type_* or None if not present. | Below is the instruction that describes the task:
### Input:
Return footerReference element of *type_* or None if not present.
### Response:
def get_footerReference(self, type_):
"""Return footerReference element of *type_* or None if not present."""
path = "./w:footerReference[@w:type='%s']" % WD_HEADER_FOOTER.to_xml(type_)
footerReferences = self.xpath(path)
if not footerReferences:
return None
return footerReferences[0] |
def netmiko_args(optional_args):
"""Check for Netmiko arguments that were passed in as NAPALM optional arguments.
Return a dictionary of these optional args that will be passed into the Netmiko
ConnectHandler call.
"""
fields = py23_compat.argspec(BaseConnection.__init__)
args = fields[0]
defaults = fields[3]
check_self = args.pop(0)
if check_self != "self":
raise ValueError("Error processing Netmiko arguments")
netmiko_argument_map = dict(zip(args, defaults))
# Netmiko arguments that are integrated into NAPALM already
netmiko_filter = ["ip", "host", "username", "password", "device_type", "timeout"]
# Filter out all of the arguments that are integrated into NAPALM
for k in netmiko_filter:
netmiko_argument_map.pop(k)
# Check if any of these arguments were passed in as NAPALM optional_args
netmiko_optional_args = {}
for k, v in netmiko_argument_map.items():
try:
netmiko_optional_args[k] = optional_args[k]
except KeyError:
pass
# Return these arguments for use with establishing Netmiko SSH connection
return netmiko_optional_args | Check for Netmiko arguments that were passed in as NAPALM optional arguments.
Return a dictionary of these optional args that will be passed into the Netmiko
ConnectHandler call. | Below is the instruction that describes the task:
### Input:
Check for Netmiko arguments that were passed in as NAPALM optional arguments.
Return a dictionary of these optional args that will be passed into the Netmiko
ConnectHandler call.
### Response:
def netmiko_args(optional_args):
"""Check for Netmiko arguments that were passed in as NAPALM optional arguments.
Return a dictionary of these optional args that will be passed into the Netmiko
ConnectHandler call.
"""
fields = py23_compat.argspec(BaseConnection.__init__)
args = fields[0]
defaults = fields[3]
check_self = args.pop(0)
if check_self != "self":
raise ValueError("Error processing Netmiko arguments")
netmiko_argument_map = dict(zip(args, defaults))
# Netmiko arguments that are integrated into NAPALM already
netmiko_filter = ["ip", "host", "username", "password", "device_type", "timeout"]
# Filter out all of the arguments that are integrated into NAPALM
for k in netmiko_filter:
netmiko_argument_map.pop(k)
# Check if any of these arguments were passed in as NAPALM optional_args
netmiko_optional_args = {}
for k, v in netmiko_argument_map.items():
try:
netmiko_optional_args[k] = optional_args[k]
except KeyError:
pass
# Return these arguments for use with establishing Netmiko SSH connection
return netmiko_optional_args |
def triples(self, (s, p, o), context=None):
"""
Returns all triples in the current store.
"""
named_graph = _get_named_graph(context)
query_sets = _get_query_sets_for_object(o)
filter_parameters = dict()
if named_graph is not None:
filter_parameters['context_id'] = named_graph.id
if s:
filter_parameters['subject'] = s
if p:
filter_parameters['predicate'] = p
if o:
filter_parameters['object'] = o
query_sets = [qs.filter(**filter_parameters) for qs in query_sets] # pylint: disable=W0142
for qs in query_sets:
for statement in qs:
triple = statement.as_triple()
yield triple, context | Returns all triples in the current store. | Below is the instruction that describes the task:
### Input:
Returns all triples in the current store.
### Response:
def triples(self, (s, p, o), context=None):
"""
Returns all triples in the current store.
"""
named_graph = _get_named_graph(context)
query_sets = _get_query_sets_for_object(o)
filter_parameters = dict()
if named_graph is not None:
filter_parameters['context_id'] = named_graph.id
if s:
filter_parameters['subject'] = s
if p:
filter_parameters['predicate'] = p
if o:
filter_parameters['object'] = o
query_sets = [qs.filter(**filter_parameters) for qs in query_sets] # pylint: disable=W0142
for qs in query_sets:
for statement in qs:
triple = statement.as_triple()
yield triple, context |
def getargnames(argspecs, with_unbox=False):
"""Resembles list of arg-names as would be seen in a function signature, including
var-args, var-keywords and keyword-only args.
"""
# todo: We can maybe make use of inspect.formatargspec
args = argspecs.args
vargs = argspecs.varargs
try:
kw = argspecs.keywords
except AttributeError:
kw = argspecs.varkw
try:
kwonly = argspecs.kwonlyargs
except AttributeError:
kwonly = None
res = []
if not args is None:
res.extend(args)
if not vargs is None:
res.append('*'+vargs if with_unbox else vargs)
if not kwonly is None:
res.extend(kwonly)
if not kw is None:
res.append('**'+kw if with_unbox else kw)
return res | Resembles list of arg-names as would be seen in a function signature, including
var-args, var-keywords and keyword-only args. | Below is the the instruction that describes the task:
### Input:
Resembles list of arg-names as would be seen in a function signature, including
var-args, var-keywords and keyword-only args.
### Response:
def getargnames(argspecs, with_unbox=False):
"""Resembles list of arg-names as would be seen in a function signature, including
var-args, var-keywords and keyword-only args.
"""
# todo: We can maybe make use of inspect.formatargspec
args = argspecs.args
vargs = argspecs.varargs
try:
kw = argspecs.keywords
except AttributeError:
kw = argspecs.varkw
try:
kwonly = argspecs.kwonlyargs
except AttributeError:
kwonly = None
res = []
if not args is None:
res.extend(args)
if not vargs is None:
res.append('*'+vargs if with_unbox else vargs)
if not kwonly is None:
res.extend(kwonly)
if not kw is None:
res.append('**'+kw if with_unbox else kw)
return res |
def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None,
notfound_ok=None, head_only=False):
"""
Fetches an object.
"""
raise NotImplementedError | Fetches an object. | Below is the the instruction that describes the task:
### Input:
Fetches an object.
### Response:
def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None,
notfound_ok=None, head_only=False):
"""
Fetches an object.
"""
raise NotImplementedError |
def _validate(self, data):
"""Helper to run validators on the field data."""
errors = {}
# if the validator is not enabled, return the empty error dict
if not self._enabled:
return errors
for field in self.validators:
field_errors = []
for validator in self.validators[field]:
try:
validator(data.get(field, None))
except ValidationError as e:
field_errors += e.messages
# if there were errors, cast to ErrorList for output convenience
if field_errors:
errors[field] = ErrorList(field_errors)
return errors | Helper to run validators on the field data. | Below is the the instruction that describes the task:
### Input:
Helper to run validators on the field data.
### Response:
def _validate(self, data):
"""Helper to run validators on the field data."""
errors = {}
# if the validator is not enabled, return the empty error dict
if not self._enabled:
return errors
for field in self.validators:
field_errors = []
for validator in self.validators[field]:
try:
validator(data.get(field, None))
except ValidationError as e:
field_errors += e.messages
# if there were errors, cast to ErrorList for output convenience
if field_errors:
errors[field] = ErrorList(field_errors)
return errors |
def count_documents(self, filter, session=None, **kwargs):
"""Count the number of documents in this collection.
The :meth:`count_documents` method is supported in a transaction.
All optional parameters should be passed as keyword arguments
to this method. Valid options include:
- `skip` (int): The number of matching documents to skip before
returning results.
- `limit` (int): The maximum number of documents to count. Must be
a positive integer. If not provided, no limit is imposed.
- `maxTimeMS` (int): The maximum amount of time to allow this
operation to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `hint` (string or list of tuples): The index to use. Specify either
the index name as a string or the index specification as a list of
tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
This option is only supported on MongoDB 3.6 and above.
The :meth:`count_documents` method obeys the :attr:`read_preference` of
this :class:`Collection`.
.. note:: When migrating from :meth:`count` to :meth:`count_documents`
the following query operators must be replaced:
+-------------+-------------------------------------+
| Operator | Replacement |
+=============+=====================================+
| $where | `$expr`_ |
+-------------+-------------------------------------+
| $near | `$geoWithin`_ with `$center`_ |
+-------------+-------------------------------------+
| $nearSphere | `$geoWithin`_ with `$centerSphere`_ |
+-------------+-------------------------------------+
$expr requires MongoDB 3.6+
:Parameters:
- `filter` (required): A query document that selects which documents
to count in the collection. Can be an empty document to count all
documents.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
.. versionadded:: 3.7
.. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/
.. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
"""
pipeline = [{'$match': filter}]
if 'skip' in kwargs:
pipeline.append({'$skip': kwargs.pop('skip')})
if 'limit' in kwargs:
pipeline.append({'$limit': kwargs.pop('limit')})
pipeline.append({'$group': {'_id': 1, 'n': {'$sum': 1}}})
cmd = SON([('aggregate', self.__name),
('pipeline', pipeline),
('cursor', {})])
if "hint" in kwargs and not isinstance(kwargs["hint"], string_type):
kwargs["hint"] = helpers._index_document(kwargs["hint"])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
def _cmd(session, server, sock_info, slave_ok):
result = self._aggregate_one_result(
sock_info, slave_ok, cmd, collation, session)
if not result:
return 0
return result['n']
return self.__database.client._retryable_read(
_cmd, self._read_preference_for(session), session) | Count the number of documents in this collection.
The :meth:`count_documents` method is supported in a transaction.
All optional parameters should be passed as keyword arguments
to this method. Valid options include:
- `skip` (int): The number of matching documents to skip before
returning results.
- `limit` (int): The maximum number of documents to count. Must be
a positive integer. If not provided, no limit is imposed.
- `maxTimeMS` (int): The maximum amount of time to allow this
operation to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `hint` (string or list of tuples): The index to use. Specify either
the index name as a string or the index specification as a list of
tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
This option is only supported on MongoDB 3.6 and above.
The :meth:`count_documents` method obeys the :attr:`read_preference` of
this :class:`Collection`.
.. note:: When migrating from :meth:`count` to :meth:`count_documents`
the following query operators must be replaced:
+-------------+-------------------------------------+
| Operator | Replacement |
+=============+=====================================+
| $where | `$expr`_ |
+-------------+-------------------------------------+
| $near | `$geoWithin`_ with `$center`_ |
+-------------+-------------------------------------+
| $nearSphere | `$geoWithin`_ with `$centerSphere`_ |
+-------------+-------------------------------------+
$expr requires MongoDB 3.6+
:Parameters:
- `filter` (required): A query document that selects which documents
to count in the collection. Can be an empty document to count all
documents.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
.. versionadded:: 3.7
.. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/
.. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere | Below is the the instruction that describes the task:
### Input:
Count the number of documents in this collection.
The :meth:`count_documents` method is supported in a transaction.
All optional parameters should be passed as keyword arguments
to this method. Valid options include:
- `skip` (int): The number of matching documents to skip before
returning results.
- `limit` (int): The maximum number of documents to count. Must be
a positive integer. If not provided, no limit is imposed.
- `maxTimeMS` (int): The maximum amount of time to allow this
operation to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `hint` (string or list of tuples): The index to use. Specify either
the index name as a string or the index specification as a list of
tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
This option is only supported on MongoDB 3.6 and above.
The :meth:`count_documents` method obeys the :attr:`read_preference` of
this :class:`Collection`.
.. note:: When migrating from :meth:`count` to :meth:`count_documents`
the following query operators must be replaced:
+-------------+-------------------------------------+
| Operator | Replacement |
+=============+=====================================+
| $where | `$expr`_ |
+-------------+-------------------------------------+
| $near | `$geoWithin`_ with `$center`_ |
+-------------+-------------------------------------+
| $nearSphere | `$geoWithin`_ with `$centerSphere`_ |
+-------------+-------------------------------------+
$expr requires MongoDB 3.6+
:Parameters:
- `filter` (required): A query document that selects which documents
to count in the collection. Can be an empty document to count all
documents.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
.. versionadded:: 3.7
.. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/
.. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
### Response:
def count_documents(self, filter, session=None, **kwargs):
"""Count the number of documents in this collection.
The :meth:`count_documents` method is supported in a transaction.
All optional parameters should be passed as keyword arguments
to this method. Valid options include:
- `skip` (int): The number of matching documents to skip before
returning results.
- `limit` (int): The maximum number of documents to count. Must be
a positive integer. If not provided, no limit is imposed.
- `maxTimeMS` (int): The maximum amount of time to allow this
operation to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `hint` (string or list of tuples): The index to use. Specify either
the index name as a string or the index specification as a list of
tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
This option is only supported on MongoDB 3.6 and above.
The :meth:`count_documents` method obeys the :attr:`read_preference` of
this :class:`Collection`.
.. note:: When migrating from :meth:`count` to :meth:`count_documents`
the following query operators must be replaced:
+-------------+-------------------------------------+
| Operator | Replacement |
+=============+=====================================+
| $where | `$expr`_ |
+-------------+-------------------------------------+
| $near | `$geoWithin`_ with `$center`_ |
+-------------+-------------------------------------+
| $nearSphere | `$geoWithin`_ with `$centerSphere`_ |
+-------------+-------------------------------------+
$expr requires MongoDB 3.6+
:Parameters:
- `filter` (required): A query document that selects which documents
to count in the collection. Can be an empty document to count all
documents.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
.. versionadded:: 3.7
.. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/
.. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
"""
pipeline = [{'$match': filter}]
if 'skip' in kwargs:
pipeline.append({'$skip': kwargs.pop('skip')})
if 'limit' in kwargs:
pipeline.append({'$limit': kwargs.pop('limit')})
pipeline.append({'$group': {'_id': 1, 'n': {'$sum': 1}}})
cmd = SON([('aggregate', self.__name),
('pipeline', pipeline),
('cursor', {})])
if "hint" in kwargs and not isinstance(kwargs["hint"], string_type):
kwargs["hint"] = helpers._index_document(kwargs["hint"])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
def _cmd(session, server, sock_info, slave_ok):
result = self._aggregate_one_result(
sock_info, slave_ok, cmd, collation, session)
if not result:
return 0
return result['n']
return self.__database.client._retryable_read(
_cmd, self._read_preference_for(session), session) |
def delete_mountpoints(self, paths=None, folder_ids=None):
"""
:param folder_ids: list of ids
:param path: list of folder's paths
"""
self.delete_folders(paths=paths, folder_ids=folder_ids, f_type='link') | :param folder_ids: list of ids
:param path: list of folder's paths | Below is the the instruction that describes the task:
### Input:
:param folder_ids: list of ids
:param path: list of folder's paths
### Response:
def delete_mountpoints(self, paths=None, folder_ids=None):
"""
:param folder_ids: list of ids
:param path: list of folder's paths
"""
self.delete_folders(paths=paths, folder_ids=folder_ids, f_type='link') |
async def listen(host, port, onlink, ssl=None):
'''
Listen on the given host/port and fire onlink(Link).
Returns a server object that contains the listening sockets
'''
async def onconn(reader, writer):
link = await Link.anit(reader, writer)
link.schedCoro(onlink(link))
server = await asyncio.start_server(onconn, host=host, port=port, ssl=ssl)
return server | Listen on the given host/port and fire onlink(Link).
Returns a server object that contains the listening sockets | Below is the the instruction that describes the task:
### Input:
Listen on the given host/port and fire onlink(Link).
Returns a server object that contains the listening sockets
### Response:
async def listen(host, port, onlink, ssl=None):
'''
Listen on the given host/port and fire onlink(Link).
Returns a server object that contains the listening sockets
'''
async def onconn(reader, writer):
link = await Link.anit(reader, writer)
link.schedCoro(onlink(link))
server = await asyncio.start_server(onconn, host=host, port=port, ssl=ssl)
return server |
def get_providing_power_source_type(self):
"""
Looks through all power supplies in POWER_SUPPLY_PATH.
If there is an AC adapter online returns POWER_TYPE_AC.
If there is a discharging battery, returns POWER_TYPE_BATTERY.
Since the order of supplies is arbitrary, whatever found first is returned.
"""
for supply in os.listdir(POWER_SUPPLY_PATH):
supply_path = os.path.join(POWER_SUPPLY_PATH, supply)
try:
type = self.power_source_type(supply_path)
if type == common.POWER_TYPE_AC:
if self.is_ac_online(supply_path):
return common.POWER_TYPE_AC
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present(supply_path) and self.is_battery_discharging(supply_path):
return common.POWER_TYPE_BATTERY
else:
warnings.warn("UPS is not supported.")
except (RuntimeError, IOError) as e:
warnings.warn("Unable to read properties of {0}: {1}".format(supply_path, e), category=RuntimeWarning)
return common.POWER_TYPE_AC | Looks through all power supplies in POWER_SUPPLY_PATH.
If there is an AC adapter online returns POWER_TYPE_AC.
If there is a discharging battery, returns POWER_TYPE_BATTERY.
Since the order of supplies is arbitrary, whatever found first is returned. | Below is the the instruction that describes the task:
### Input:
Looks through all power supplies in POWER_SUPPLY_PATH.
If there is an AC adapter online returns POWER_TYPE_AC.
If there is a discharging battery, returns POWER_TYPE_BATTERY.
Since the order of supplies is arbitrary, whatever found first is returned.
### Response:
def get_providing_power_source_type(self):
"""
Looks through all power supplies in POWER_SUPPLY_PATH.
If there is an AC adapter online returns POWER_TYPE_AC.
If there is a discharging battery, returns POWER_TYPE_BATTERY.
Since the order of supplies is arbitrary, whatever found first is returned.
"""
for supply in os.listdir(POWER_SUPPLY_PATH):
supply_path = os.path.join(POWER_SUPPLY_PATH, supply)
try:
type = self.power_source_type(supply_path)
if type == common.POWER_TYPE_AC:
if self.is_ac_online(supply_path):
return common.POWER_TYPE_AC
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present(supply_path) and self.is_battery_discharging(supply_path):
return common.POWER_TYPE_BATTERY
else:
warnings.warn("UPS is not supported.")
except (RuntimeError, IOError) as e:
warnings.warn("Unable to read properties of {0}: {1}".format(supply_path, e), category=RuntimeWarning)
return common.POWER_TYPE_AC |
def classify_coincident(st_vals, coincident):
r"""Determine if coincident parameters are "unused".
.. note::
This is a helper for :func:`surface_intersections`.
In the case that ``coincident`` is :data:`True`, then we'll have two
sets of parameters :math:`(s_1, t_1)` and :math:`(s_2, t_2)`.
If one of :math:`s1 < s2` or :math:`t1 < t2` is not satisfied, the
coincident segments will be moving in opposite directions, hence don't
define an interior of an intersection.
.. warning::
In the "coincident" case, this assumes, but doesn't check, that
``st_vals`` is ``2 x 2``.
Args:
st_vals (numpy.ndarray): ``2 X N`` array of intersection parameters.
coincident (bool): Flag indicating if the intersections are the
endpoints of coincident segments of two curves.
Returns:
Optional[.IntersectionClassification]: The classification of the
intersections.
"""
if not coincident:
return None
if st_vals[0, 0] >= st_vals[0, 1] or st_vals[1, 0] >= st_vals[1, 1]:
return UNUSED_T
else:
return CLASSIFICATION_T.COINCIDENT | r"""Determine if coincident parameters are "unused".
.. note::
This is a helper for :func:`surface_intersections`.
In the case that ``coincident`` is :data:`True`, then we'll have two
sets of parameters :math:`(s_1, t_1)` and :math:`(s_2, t_2)`.
If one of :math:`s1 < s2` or :math:`t1 < t2` is not satisfied, the
coincident segments will be moving in opposite directions, hence don't
define an interior of an intersection.
.. warning::
In the "coincident" case, this assumes, but doesn't check, that
``st_vals`` is ``2 x 2``.
Args:
st_vals (numpy.ndarray): ``2 X N`` array of intersection parameters.
coincident (bool): Flag indicating if the intersections are the
endpoints of coincident segments of two curves.
Returns:
Optional[.IntersectionClassification]: The classification of the
intersections. | Below is the the instruction that describes the task:
### Input:
r"""Determine if coincident parameters are "unused".
.. note::
This is a helper for :func:`surface_intersections`.
In the case that ``coincident`` is :data:`True`, then we'll have two
sets of parameters :math:`(s_1, t_1)` and :math:`(s_2, t_2)`.
If one of :math:`s1 < s2` or :math:`t1 < t2` is not satisfied, the
coincident segments will be moving in opposite directions, hence don't
define an interior of an intersection.
.. warning::
In the "coincident" case, this assumes, but doesn't check, that
``st_vals`` is ``2 x 2``.
Args:
st_vals (numpy.ndarray): ``2 X N`` array of intersection parameters.
coincident (bool): Flag indicating if the intersections are the
endpoints of coincident segments of two curves.
Returns:
Optional[.IntersectionClassification]: The classification of the
intersections.
### Response:
def classify_coincident(st_vals, coincident):
r"""Determine if coincident parameters are "unused".
.. note::
This is a helper for :func:`surface_intersections`.
In the case that ``coincident`` is :data:`True`, then we'll have two
sets of parameters :math:`(s_1, t_1)` and :math:`(s_2, t_2)`.
If one of :math:`s1 < s2` or :math:`t1 < t2` is not satisfied, the
coincident segments will be moving in opposite directions, hence don't
define an interior of an intersection.
.. warning::
In the "coincident" case, this assumes, but doesn't check, that
``st_vals`` is ``2 x 2``.
Args:
st_vals (numpy.ndarray): ``2 X N`` array of intersection parameters.
coincident (bool): Flag indicating if the intersections are the
endpoints of coincident segments of two curves.
Returns:
Optional[.IntersectionClassification]: The classification of the
intersections.
"""
if not coincident:
return None
if st_vals[0, 0] >= st_vals[0, 1] or st_vals[1, 0] >= st_vals[1, 1]:
return UNUSED_T
else:
return CLASSIFICATION_T.COINCIDENT |
def main():
"""
NAME
agm_magic.py
DESCRIPTION
converts Micromag agm files to magic format
SYNTAX
agm_magic.py [-h] [command line options]
OPTIONS
-usr USER: identify user, default is "" - put in quotation marks!
-bak: this is a IRM backfield curve
-f FILE, specify input file, required
-fsa SAMPFILE, specify er_samples.txt file relating samples, site and locations names,default is none
-F MFILE, specify magic measurements formatted output file, default is agm_measurements.txt
-spn SPEC, specimen name, default is base of input file name, e.g. SPECNAME.agm
-spc NUM, specify number of characters to designate a specimen, default = 0
-Fsp SPECFILE : name of er_specimens.txt file for appending data to
[default: er_specimens.txt]
-ncn NCON,: specify naming convention: default is #1 below
-syn SYN, synthetic specimen name
-loc LOCNAME : specify location/study name,
should have either LOCNAME or SAMPFILE (unless synthetic)
-ins INST : specify which instrument was used (e.g, SIO-Maud), default is ""
-u units: [cgs,SI], default is cgs
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
[8] specimen is a synthetic - it has no sample, site, location information
NB: all others you will have to customize your self
or e-mail ltauxe@ucsd.edu for help.
OUTPUT
MagIC format files: magic_measurements, er_specimens, er_sample, er_site
"""
citation='This study'
MeasRecs=[]
units='cgs'
meth="LP-HYS"
version_num=pmag.get_version()
args=sys.argv
fmt='old'
er_sample_name,er_site_name,er_location_name="","",""
inst=""
er_location_name="unknown"
er_synthetic_name=""
user=""
er_site_name=""
dir_path='.'
dm=3
if "-WD" in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if "-ID" in args:
ind = args.index("-ID")
input_dir_path = args[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
specfile = output_dir_path+'/er_specimens.txt'
output = output_dir_path+"/agm_measurements.txt"
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-bak" in args:
meth="LP-IRM-DCD"
output = output_dir_path+"/irm_measurements.txt"
if "-new" in args: fmt='new'
if "-usr" in args:
ind=args.index("-usr")
user=args[ind+1]
if '-F' in args:
ind=args.index("-F")
output = output_dir_path+'/'+args[ind+1]
if '-f' in args:
ind=args.index("-f")
agm_file= input_dir_path+'/'+args[ind+1]
er_specimen_name=args[ind+1].split('.')[0]
else:
print("agm_file field is required option")
print(main.__doc__)
sys.exit()
if '-Fsp' in args:
ind=args.index("-Fsp")
specfile= output_dir_path+'/'+args[ind+1]
specnum,samp_con,Z=0,'1',1
if "-spc" in args:
ind=args.index("-spc")
specnum=int(args[ind+1])
if specnum!=0:specnum=-specnum
if "-spn" in args:
ind=args.index("-spn")
er_specimen_name=args[ind+1]
#elif "-syn" not in args:
# print "you must specify a specimen name"
# sys.exit()
if "-syn" in args:
ind=args.index("-syn")
er_synthetic_name=args[ind+1]
er_specimen_name=""
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
if "-fsa" in args:
ind=args.index("-fsa")
sampfile = input_dir_path+'/'+args[ind+1]
Samps,file_type=pmag.magic_read(sampfile)
print('sample_file successfully read in')
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
sys.exit()
else:
Z=samp_con.split("-")[1]
samp_con="4"
if "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
sys.exit()
else:
Z=samp_con.split("-")[1]
samp_con="7"
if "-ins" in args:
ind=args.index("-ins")
inst=args[ind+1]
if "-u" in args:
ind=args.index("-u")
units=args[ind+1]
dm = pmag.get_named_arg("-DM", 2)
ErSpecRecs,filetype=pmag.magic_read(specfile)
ErSpecRec,MeasRec={},{}
ErSpecRec['er_citation_names']="This study"
ErSpecRec['er_specimen_name']=er_specimen_name
ErSpecRec['er_synthetic_name']=er_synthetic_name
if specnum!=0:
ErSpecRec["er_sample_name"]=er_specimen_name[:specnum]
else:
ErSpecRec["er_sample_name"]=er_specimen_name
if "-fsa" in args and er_synthetic_name=="":
for samp in Samps:
if samp["er_sample_name"] == ErSpecRec["er_sample_name"]:
ErSpecRec["er_location_name"]=samp["er_location_name"]
ErSpecRec["er_site_name"]=samp["er_site_name"]
break
elif int(samp_con)!=6 and int(samp_con)!=8:
site=pmag.parse_site(ErSpecRec['er_sample_name'],samp_con,Z)
ErSpecRec["er_site_name"]=site
ErSpecRec["er_location_name"]=er_location_name
ErSpecRec['er_scientist_mail_names']=user.strip()
insert=1
for rec in ErSpecRecs:
if rec['er_specimen_name']==er_specimen_name:
insert=0
break
if insert==1:
ErSpecRecs.append(ErSpecRec)
ErSpecRecs,keylist=pmag.fillkeys(ErSpecRecs)
pmag.magic_write(specfile,ErSpecRecs,'er_specimens')
print("specimen name put in ",specfile)
f=open(agm_file,'r')
Data=f.readlines()
if "ASCII" not in Data[0]:fmt='new'
measnum,start=1,""
if fmt=='new': # new Micromag formatted file
end=2
for skip in range(len(Data)):
line=Data[skip]
rec=line.split()
if 'Units' in line:units=rec[-1]
if "Raw" in rec:
start=skip+2
if "Field" in rec and "Moment" in rec and start=="":
start=skip+2
break
else:
start = 2
end=1
for i in range(start,len(Data)-end): # skip header stuff
MeasRec={}
for key in list(ErSpecRec.keys()):
MeasRec[key]=ErSpecRec[key]
MeasRec['magic_instrument_codes']=inst
MeasRec['magic_method_codes']=meth
if 'er_synthetic_name' in list(MeasRec.keys()) and MeasRec['er_synthetic_name']!="":
MeasRec['magic_experiment_name']=er_synthetic_name+':'+meth
else:
MeasRec['magic_experiment_name']=er_specimen_name+':'+meth
line=Data[i]
rec=line.split(',') # data comma delimited
if rec[0]!='\n':
if units=='cgs':
field =float(rec[0])*1e-4 # convert from oe to tesla
else:
field =float(rec[0]) # field in tesla
if meth=="LP-HYS":
MeasRec['measurement_lab_field_dc']='%10.3e'%(field)
MeasRec['treatment_dc_field']=''
else:
MeasRec['measurement_lab_field_dc']=''
MeasRec['treatment_dc_field']='%10.3e'%(field)
if units=='cgs':
MeasRec['measurement_magn_moment']='%10.3e'%(float(rec[1])*1e-3) # convert from emu to Am^2
else:
MeasRec['measurement_magn_moment']='%10.3e'%(float(rec[1])) # Am^2
MeasRec['treatment_temp']='273' # temp in kelvin
MeasRec['measurement_temp']='273' # temp in kelvin
MeasRec['measurement_flag']='g'
MeasRec['measurement_standard']='u'
MeasRec['measurement_number']='%i'%(measnum)
measnum+=1
MeasRec['magic_software_packages']=version_num
MeasRecs.append(MeasRec)
# now we have to relabel LP-HYS method codes. initial loop is LP-IMT, minor loops are LP-M - do this in measurements_methods function
if meth=='LP-HYS':
recnum=0
while float(MeasRecs[recnum]['measurement_lab_field_dc'])<float(MeasRecs[recnum+1]['measurement_lab_field_dc']) and recnum+1<len(MeasRecs): # this is LP-IMAG
MeasRecs[recnum]['magic_method_codes']='LP-IMAG'
MeasRecs[recnum]['magic_experiment_name']=MeasRecs[recnum]['er_specimen_name']+":"+'LP-IMAG'
recnum+=1
#
if int(dm)==2:
pmag.magic_write(output,MeasRecs,'magic_measurements')
else:
print ('MagIC 3 is not supported yet')
sys.exit()
pmag.magic_write(output,MeasRecs,'measurements')
print("results put in ", output) | NAME
agm_magic.py
DESCRIPTION
converts Micromag agm files to magic format
SYNTAX
agm_magic.py [-h] [command line options]
OPTIONS
-usr USER: identify user, default is "" - put in quotation marks!
-bak: this is a IRM backfield curve
-f FILE, specify input file, required
-fsa SAMPFILE, specify er_samples.txt file relating samples, site and locations names,default is none
-F MFILE, specify magic measurements formatted output file, default is agm_measurements.txt
-spn SPEC, specimen name, default is base of input file name, e.g. SPECNAME.agm
-spc NUM, specify number of characters to designate a specimen, default = 0
-Fsp SPECFILE : name of er_specimens.txt file for appending data to
[default: er_specimens.txt]
-ncn NCON,: specify naming convention: default is #1 below
-syn SYN, synthetic specimen name
-loc LOCNAME : specify location/study name,
should have either LOCNAME or SAMPFILE (unless synthetic)
-ins INST : specify which instrument was used (e.g, SIO-Maud), default is ""
-u units: [cgs,SI], default is cgs
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
[8] specimen is a synthetic - it has no sample, site, location information
NB: all others you will have to customize your self
or e-mail ltauxe@ucsd.edu for help.
OUTPUT
MagIC format files: magic_measurements, er_specimens, er_sample, er_site | Below is the the instruction that describes the task:
### Input:
NAME
agm_magic.py
DESCRIPTION
converts Micromag agm files to magic format
SYNTAX
agm_magic.py [-h] [command line options]
OPTIONS
-usr USER: identify user, default is "" - put in quotation marks!
-bak: this is a IRM backfield curve
-f FILE, specify input file, required
-fsa SAMPFILE, specify er_samples.txt file relating samples, site and locations names,default is none
-F MFILE, specify magic measurements formatted output file, default is agm_measurements.txt
-spn SPEC, specimen name, default is base of input file name, e.g. SPECNAME.agm
-spc NUM, specify number of characters to designate a specimen, default = 0
-Fsp SPECFILE : name of er_specimens.txt file for appending data to
[default: er_specimens.txt]
-ncn NCON,: specify naming convention: default is #1 below
-syn SYN, synthetic specimen name
-loc LOCNAME : specify location/study name,
should have either LOCNAME or SAMPFILE (unless synthetic)
-ins INST : specify which instrument was used (e.g, SIO-Maud), default is ""
-u units: [cgs,SI], default is cgs
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
[8] specimen is a synthetic - it has no sample, site, location information
NB: all others you will have to customize your self
or e-mail ltauxe@ucsd.edu for help.
OUTPUT
MagIC format files: magic_measurements, er_specimens, er_sample, er_site
### Response:
def main():
"""
NAME
agm_magic.py
DESCRIPTION
converts Micromag agm files to magic format
SYNTAX
agm_magic.py [-h] [command line options]
OPTIONS
-usr USER: identify user, default is "" - put in quotation marks!
-bak: this is a IRM backfield curve
-f FILE, specify input file, required
-fsa SAMPFILE, specify er_samples.txt file relating samples, site and locations names,default is none
-F MFILE, specify magic measurements formatted output file, default is agm_measurements.txt
-spn SPEC, specimen name, default is base of input file name, e.g. SPECNAME.agm
-spc NUM, specify number of characters to designate a specimen, default = 0
-Fsp SPECFILE : name of er_specimens.txt file for appending data to
[default: er_specimens.txt]
-ncn NCON,: specify naming convention: default is #1 below
-syn SYN, synthetic specimen name
-loc LOCNAME : specify location/study name,
should have either LOCNAME or SAMPFILE (unless synthetic)
-ins INST : specify which instrument was used (e.g, SIO-Maud), default is ""
-u units: [cgs,SI], default is cgs
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
[8] specimen is a synthetic - it has no sample, site, location information
NB: all others you will have to customize your self
or e-mail ltauxe@ucsd.edu for help.
OUTPUT
MagIC format files: magic_measurements, er_specimens, er_sample, er_site
"""
citation='This study'
MeasRecs=[]
units='cgs'
meth="LP-HYS"
version_num=pmag.get_version()
args=sys.argv
fmt='old'
er_sample_name,er_site_name,er_location_name="","",""
inst=""
er_location_name="unknown"
er_synthetic_name=""
user=""
er_site_name=""
dir_path='.'
dm=3
if "-WD" in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if "-ID" in args:
ind = args.index("-ID")
input_dir_path = args[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
specfile = output_dir_path+'/er_specimens.txt'
output = output_dir_path+"/agm_measurements.txt"
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-bak" in args:
meth="LP-IRM-DCD"
output = output_dir_path+"/irm_measurements.txt"
if "-new" in args: fmt='new'
if "-usr" in args:
ind=args.index("-usr")
user=args[ind+1]
if '-F' in args:
ind=args.index("-F")
output = output_dir_path+'/'+args[ind+1]
if '-f' in args:
ind=args.index("-f")
agm_file= input_dir_path+'/'+args[ind+1]
er_specimen_name=args[ind+1].split('.')[0]
else:
print("agm_file field is required option")
print(main.__doc__)
sys.exit()
if '-Fsp' in args:
ind=args.index("-Fsp")
specfile= output_dir_path+'/'+args[ind+1]
specnum,samp_con,Z=0,'1',1
if "-spc" in args:
ind=args.index("-spc")
specnum=int(args[ind+1])
if specnum!=0:specnum=-specnum
if "-spn" in args:
ind=args.index("-spn")
er_specimen_name=args[ind+1]
#elif "-syn" not in args:
# print "you must specify a specimen name"
# sys.exit()
if "-syn" in args:
ind=args.index("-syn")
er_synthetic_name=args[ind+1]
er_specimen_name=""
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
if "-fsa" in args:
ind=args.index("-fsa")
sampfile = input_dir_path+'/'+args[ind+1]
Samps,file_type=pmag.magic_read(sampfile)
print('sample_file successfully read in')
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
sys.exit()
else:
Z=samp_con.split("-")[1]
samp_con="4"
if "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
sys.exit()
else:
Z=samp_con.split("-")[1]
samp_con="7"
if "-ins" in args:
ind=args.index("-ins")
inst=args[ind+1]
if "-u" in args:
ind=args.index("-u")
units=args[ind+1]
dm = pmag.get_named_arg("-DM", 2)
ErSpecRecs,filetype=pmag.magic_read(specfile)
ErSpecRec,MeasRec={},{}
ErSpecRec['er_citation_names']="This study"
ErSpecRec['er_specimen_name']=er_specimen_name
ErSpecRec['er_synthetic_name']=er_synthetic_name
if specnum!=0:
ErSpecRec["er_sample_name"]=er_specimen_name[:specnum]
else:
ErSpecRec["er_sample_name"]=er_specimen_name
if "-fsa" in args and er_synthetic_name=="":
for samp in Samps:
if samp["er_sample_name"] == ErSpecRec["er_sample_name"]:
ErSpecRec["er_location_name"]=samp["er_location_name"]
ErSpecRec["er_site_name"]=samp["er_site_name"]
break
elif int(samp_con)!=6 and int(samp_con)!=8:
site=pmag.parse_site(ErSpecRec['er_sample_name'],samp_con,Z)
ErSpecRec["er_site_name"]=site
ErSpecRec["er_location_name"]=er_location_name
ErSpecRec['er_scientist_mail_names']=user.strip()
insert=1
for rec in ErSpecRecs:
if rec['er_specimen_name']==er_specimen_name:
insert=0
break
if insert==1:
ErSpecRecs.append(ErSpecRec)
ErSpecRecs,keylist=pmag.fillkeys(ErSpecRecs)
pmag.magic_write(specfile,ErSpecRecs,'er_specimens')
print("specimen name put in ",specfile)
f=open(agm_file,'r')
Data=f.readlines()
if "ASCII" not in Data[0]:fmt='new'
measnum,start=1,""
if fmt=='new': # new Micromag formatted file
end=2
for skip in range(len(Data)):
line=Data[skip]
rec=line.split()
if 'Units' in line:units=rec[-1]
if "Raw" in rec:
start=skip+2
if "Field" in rec and "Moment" in rec and start=="":
start=skip+2
break
else:
start = 2
end=1
for i in range(start,len(Data)-end): # skip header stuff
MeasRec={}
for key in list(ErSpecRec.keys()):
MeasRec[key]=ErSpecRec[key]
MeasRec['magic_instrument_codes']=inst
MeasRec['magic_method_codes']=meth
if 'er_synthetic_name' in list(MeasRec.keys()) and MeasRec['er_synthetic_name']!="":
MeasRec['magic_experiment_name']=er_synthetic_name+':'+meth
else:
MeasRec['magic_experiment_name']=er_specimen_name+':'+meth
line=Data[i]
rec=line.split(',') # data comma delimited
if rec[0]!='\n':
if units=='cgs':
field =float(rec[0])*1e-4 # convert from oe to tesla
else:
field =float(rec[0]) # field in tesla
if meth=="LP-HYS":
MeasRec['measurement_lab_field_dc']='%10.3e'%(field)
MeasRec['treatment_dc_field']=''
else:
MeasRec['measurement_lab_field_dc']=''
MeasRec['treatment_dc_field']='%10.3e'%(field)
if units=='cgs':
MeasRec['measurement_magn_moment']='%10.3e'%(float(rec[1])*1e-3) # convert from emu to Am^2
else:
MeasRec['measurement_magn_moment']='%10.3e'%(float(rec[1])) # Am^2
MeasRec['treatment_temp']='273' # temp in kelvin
MeasRec['measurement_temp']='273' # temp in kelvin
MeasRec['measurement_flag']='g'
MeasRec['measurement_standard']='u'
MeasRec['measurement_number']='%i'%(measnum)
measnum+=1
MeasRec['magic_software_packages']=version_num
MeasRecs.append(MeasRec)
# now we have to relabel LP-HYS method codes. initial loop is LP-IMT, minor loops are LP-M - do this in measurements_methods function
if meth=='LP-HYS':
recnum=0
while float(MeasRecs[recnum]['measurement_lab_field_dc'])<float(MeasRecs[recnum+1]['measurement_lab_field_dc']) and recnum+1<len(MeasRecs): # this is LP-IMAG
MeasRecs[recnum]['magic_method_codes']='LP-IMAG'
MeasRecs[recnum]['magic_experiment_name']=MeasRecs[recnum]['er_specimen_name']+":"+'LP-IMAG'
recnum+=1
#
if int(dm)==2:
pmag.magic_write(output,MeasRecs,'magic_measurements')
else:
print ('MagIC 3 is not supported yet')
sys.exit()
pmag.magic_write(output,MeasRecs,'measurements')
print("results put in ", output) |
def add_missing_price_information_message(request, item):
"""
Add a message to the Django messages store indicating that we failed to retrieve price information about an item.
:param request: The current request.
:param item: The item for which price information is missing. Example: a program title, or a course.
"""
messages.warning(
request,
_(
'{strong_start}We could not gather price information for {em_start}{item}{em_end}.{strong_end} '
'{span_start}If you continue to have these issues, please contact '
'{link_start}{platform_name} support{link_end}.{span_end}'
).format(
item=item,
em_start='<em>',
em_end='</em>',
link_start='<a href="{support_link}" target="_blank">'.format(
support_link=get_configuration_value('ENTERPRISE_SUPPORT_URL', settings.ENTERPRISE_SUPPORT_URL),
),
platform_name=get_configuration_value('PLATFORM_NAME', settings.PLATFORM_NAME),
link_end='</a>',
span_start='<span>',
span_end='</span>',
strong_start='<strong>',
strong_end='</strong>',
)
) | Add a message to the Django messages store indicating that we failed to retrieve price information about an item.
:param request: The current request.
:param item: The item for which price information is missing. Example: a program title, or a course. | Below is the the instruction that describes the task:
### Input:
Add a message to the Django messages store indicating that we failed to retrieve price information about an item.
:param request: The current request.
:param item: The item for which price information is missing. Example: a program title, or a course.
### Response:
def add_missing_price_information_message(request, item):
"""
Add a message to the Django messages store indicating that we failed to retrieve price information about an item.
:param request: The current request.
:param item: The item for which price information is missing. Example: a program title, or a course.
"""
messages.warning(
request,
_(
'{strong_start}We could not gather price information for {em_start}{item}{em_end}.{strong_end} '
'{span_start}If you continue to have these issues, please contact '
'{link_start}{platform_name} support{link_end}.{span_end}'
).format(
item=item,
em_start='<em>',
em_end='</em>',
link_start='<a href="{support_link}" target="_blank">'.format(
support_link=get_configuration_value('ENTERPRISE_SUPPORT_URL', settings.ENTERPRISE_SUPPORT_URL),
),
platform_name=get_configuration_value('PLATFORM_NAME', settings.PLATFORM_NAME),
link_end='</a>',
span_start='<span>',
span_end='</span>',
strong_start='<strong>',
strong_end='</strong>',
)
) |
def background_thread():
"""Example of how to send server generated events to clients."""
count = 0
while True:
socketio.sleep(10)
count += 1
socketio.emit('my_response',
{'data': 'Server generated event', 'count': count},
namespace='/test') | Example of how to send server generated events to clients. | Below is the the instruction that describes the task:
### Input:
Example of how to send server generated events to clients.
### Response:
def background_thread():
"""Example of how to send server generated events to clients."""
count = 0
while True:
socketio.sleep(10)
count += 1
socketio.emit('my_response',
{'data': 'Server generated event', 'count': count},
namespace='/test') |
def responsive_sleep(self, seconds, wait_reason=''):
"""Sleep for the specified number of seconds, logging every
'wait_log_interval' seconds with progress info."""
for x in range(int(seconds)):
if (self.config.wait_log_interval and
not x % self.config.wait_log_interval):
print('%s: %dsec of %dsec' % (wait_reason,
x,
seconds))
time.sleep(1.0) | Sleep for the specified number of seconds, logging every
'wait_log_interval' seconds with progress info. | Below is the the instruction that describes the task:
### Input:
Sleep for the specified number of seconds, logging every
'wait_log_interval' seconds with progress info.
### Response:
def responsive_sleep(self, seconds, wait_reason=''):
"""Sleep for the specified number of seconds, logging every
'wait_log_interval' seconds with progress info."""
for x in range(int(seconds)):
if (self.config.wait_log_interval and
not x % self.config.wait_log_interval):
print('%s: %dsec of %dsec' % (wait_reason,
x,
seconds))
time.sleep(1.0) |
def login_create(login, new_login_password=None, new_login_domain='', new_login_roles=None, new_login_options=None, **kwargs):
'''
Creates a new login. Does not update password of existing logins. For
Windows authentication, provide ``new_login_domain``. For SQL Server
authentication, prvide ``new_login_password``. Since hashed passwords are
*varbinary* values, if the ``new_login_password`` is 'int / long', it will
be considered to be HASHED.
new_login_roles
a list of SERVER roles
new_login_options
a list of strings
CLI Example:
.. code-block:: bash
salt minion mssql.login_create LOGIN_NAME database=DBNAME [new_login_password=PASSWORD]
'''
# One and only one of password and domain should be specifies
if bool(new_login_password) == bool(new_login_domain):
return False
if login_exists(login, new_login_domain, **kwargs):
return False
if new_login_domain:
login = '{0}\\{1}'.format(new_login_domain, login)
if not new_login_roles:
new_login_roles = []
if not new_login_options:
new_login_options = []
sql = "CREATE LOGIN [{0}] ".format(login)
if new_login_domain:
sql += " FROM WINDOWS "
elif isinstance(new_login_password, six.integer_types):
new_login_options.insert(0, "PASSWORD=0x{0:x} HASHED".format(new_login_password))
else: # Plain test password
new_login_options.insert(0, "PASSWORD=N'{0}'".format(new_login_password))
if new_login_options:
sql += ' WITH ' + ', '.join(new_login_options)
conn = None
try:
conn = _get_connection(**kwargs)
conn.autocommit(True)
# cur = conn.cursor()
# cur.execute(sql)
conn.cursor().execute(sql)
for role in new_login_roles:
conn.cursor().execute('ALTER SERVER ROLE [{0}] ADD MEMBER [{1}]'.format(role, login))
except Exception as e:
return 'Could not create the login: {0}'.format(e)
finally:
if conn:
conn.autocommit(False)
conn.close()
return True | Creates a new login. Does not update password of existing logins. For
Windows authentication, provide ``new_login_domain``. For SQL Server
authentication, prvide ``new_login_password``. Since hashed passwords are
*varbinary* values, if the ``new_login_password`` is 'int / long', it will
be considered to be HASHED.
new_login_roles
a list of SERVER roles
new_login_options
a list of strings
CLI Example:
.. code-block:: bash
salt minion mssql.login_create LOGIN_NAME database=DBNAME [new_login_password=PASSWORD] | Below is the the instruction that describes the task:
### Input:
Creates a new login. Does not update password of existing logins. For
Windows authentication, provide ``new_login_domain``. For SQL Server
authentication, prvide ``new_login_password``. Since hashed passwords are
*varbinary* values, if the ``new_login_password`` is 'int / long', it will
be considered to be HASHED.
new_login_roles
a list of SERVER roles
new_login_options
a list of strings
CLI Example:
.. code-block:: bash
salt minion mssql.login_create LOGIN_NAME database=DBNAME [new_login_password=PASSWORD]
### Response:
def login_create(login, new_login_password=None, new_login_domain='', new_login_roles=None, new_login_options=None, **kwargs):
'''
Creates a new login. Does not update password of existing logins. For
Windows authentication, provide ``new_login_domain``. For SQL Server
authentication, prvide ``new_login_password``. Since hashed passwords are
*varbinary* values, if the ``new_login_password`` is 'int / long', it will
be considered to be HASHED.
new_login_roles
a list of SERVER roles
new_login_options
a list of strings
CLI Example:
.. code-block:: bash
salt minion mssql.login_create LOGIN_NAME database=DBNAME [new_login_password=PASSWORD]
'''
# One and only one of password and domain should be specifies
if bool(new_login_password) == bool(new_login_domain):
return False
if login_exists(login, new_login_domain, **kwargs):
return False
if new_login_domain:
login = '{0}\\{1}'.format(new_login_domain, login)
if not new_login_roles:
new_login_roles = []
if not new_login_options:
new_login_options = []
sql = "CREATE LOGIN [{0}] ".format(login)
if new_login_domain:
sql += " FROM WINDOWS "
elif isinstance(new_login_password, six.integer_types):
new_login_options.insert(0, "PASSWORD=0x{0:x} HASHED".format(new_login_password))
else: # Plain test password
new_login_options.insert(0, "PASSWORD=N'{0}'".format(new_login_password))
if new_login_options:
sql += ' WITH ' + ', '.join(new_login_options)
conn = None
try:
conn = _get_connection(**kwargs)
conn.autocommit(True)
# cur = conn.cursor()
# cur.execute(sql)
conn.cursor().execute(sql)
for role in new_login_roles:
conn.cursor().execute('ALTER SERVER ROLE [{0}] ADD MEMBER [{1}]'.format(role, login))
except Exception as e:
return 'Could not create the login: {0}'.format(e)
finally:
if conn:
conn.autocommit(False)
conn.close()
return True |
def _item_keys_match(crypto_config, item1, item2):
# type: (CryptoConfig, Dict, Dict) -> Bool
"""Determines whether the values in the primary and sort keys (if they exist) are the same
:param CryptoConfig crypto_config: CryptoConfig used in encrypting the given items
:param dict item1: The first item to compare
:param dict item2: The second item to compare
:return: Bool response, True if the key attributes match
:rtype: bool
"""
partition_key_name = crypto_config.encryption_context.partition_key_name
sort_key_name = crypto_config.encryption_context.sort_key_name
partition_keys_match = item1[partition_key_name] == item2[partition_key_name]
if sort_key_name is None:
return partition_keys_match
return partition_keys_match and item1[sort_key_name] == item2[sort_key_name] | Determines whether the values in the primary and sort keys (if they exist) are the same
:param CryptoConfig crypto_config: CryptoConfig used in encrypting the given items
:param dict item1: The first item to compare
:param dict item2: The second item to compare
:return: Bool response, True if the key attributes match
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Determines whether the values in the primary and sort keys (if they exist) are the same
:param CryptoConfig crypto_config: CryptoConfig used in encrypting the given items
:param dict item1: The first item to compare
:param dict item2: The second item to compare
:return: Bool response, True if the key attributes match
:rtype: bool
### Response:
def _item_keys_match(crypto_config, item1, item2):
# type: (CryptoConfig, Dict, Dict) -> Bool
"""Determines whether the values in the primary and sort keys (if they exist) are the same
:param CryptoConfig crypto_config: CryptoConfig used in encrypting the given items
:param dict item1: The first item to compare
:param dict item2: The second item to compare
:return: Bool response, True if the key attributes match
:rtype: bool
"""
partition_key_name = crypto_config.encryption_context.partition_key_name
sort_key_name = crypto_config.encryption_context.sort_key_name
partition_keys_match = item1[partition_key_name] == item2[partition_key_name]
if sort_key_name is None:
return partition_keys_match
return partition_keys_match and item1[sort_key_name] == item2[sort_key_name] |
def get_summary(config, bugnumber):
"""return a summary for this bug/issue. If it can't be found,
return None."""
bugzilla_url_regex = re.compile(
re.escape("https://bugzilla.mozilla.org/show_bug.cgi?id=") + r"(\d+)$"
)
# The user could have pasted in a bugzilla ID or a bugzilla URL
if bugzilla_url_regex.search(bugnumber.split("#")[0]):
# that's easy then!
bugzilla_id, = bugzilla_url_regex.search(bugnumber.split("#")[0]).groups()
bugzilla_id = int(bugzilla_id)
summary, url = bugzilla.get_summary(config, bugzilla_id)
return summary, bugzilla_id, url
# The user could have pasted in a GitHub issue URL
github_url_regex = re.compile(r"https://github.com/([^/]+)/([^/]+)/issues/(\d+)")
if github_url_regex.search(bugnumber.split("#")[0]):
# that's also easy
org, repo, id_, = github_url_regex.search(bugnumber.split("#")[0]).groups()
id_ = int(id_)
title, url = github.get_title(config, org, repo, id_)
return title, id_, url
# If it's a number it can be either a github issue or a bugzilla bug
if bugnumber.isdigit():
# try both and see if one of them turns up something interesting
repo = config.repo
state = read(config.configfile)
fork_name = state.get("FORK_NAME", getpass.getuser())
if config.verbose:
info_out("Using fork name: {}".format(fork_name))
candidates = []
# Looping over the remotes, let's figure out which one
# is the one that has issues. Let's try every one that isn't
# your fork remote.
for origin in repo.remotes:
if origin.name == fork_name:
continue
url = origin.url
org, repo = parse_remote_url(origin.url)
github_title, github_url = github.get_title(
config, org, repo, int(bugnumber)
)
if github_title:
candidates.append((github_title, int(bugnumber), github_url))
bugzilla_summary, bugzilla_url = bugzilla.get_summary(config, bugnumber)
if bugzilla_summary:
candidates.append((bugzilla_summary, int(bugnumber), bugzilla_url))
if len(candidates) > 1:
info_out(
"Input is ambiguous. Multiple possibilities found. "
"Please re-run with the full URL:"
)
for title, _, url in candidates:
info_out("\t{}".format(url))
info_out("\t{}\n".format(title))
error_out("Awaiting your choice")
elif len(candidates) == 1:
return candidates[0]
else:
error_out("ID could not be found on GitHub or Bugzilla")
raise Exception(bugnumber)
return bugnumber, None, None | return a summary for this bug/issue. If it can't be found,
return None. | Below is the the instruction that describes the task:
### Input:
return a summary for this bug/issue. If it can't be found,
return None.
### Response:
def get_summary(config, bugnumber):
"""return a summary for this bug/issue. If it can't be found,
return None."""
bugzilla_url_regex = re.compile(
re.escape("https://bugzilla.mozilla.org/show_bug.cgi?id=") + r"(\d+)$"
)
# The user could have pasted in a bugzilla ID or a bugzilla URL
if bugzilla_url_regex.search(bugnumber.split("#")[0]):
# that's easy then!
bugzilla_id, = bugzilla_url_regex.search(bugnumber.split("#")[0]).groups()
bugzilla_id = int(bugzilla_id)
summary, url = bugzilla.get_summary(config, bugzilla_id)
return summary, bugzilla_id, url
# The user could have pasted in a GitHub issue URL
github_url_regex = re.compile(r"https://github.com/([^/]+)/([^/]+)/issues/(\d+)")
if github_url_regex.search(bugnumber.split("#")[0]):
# that's also easy
org, repo, id_, = github_url_regex.search(bugnumber.split("#")[0]).groups()
id_ = int(id_)
title, url = github.get_title(config, org, repo, id_)
return title, id_, url
# If it's a number it can be either a github issue or a bugzilla bug
if bugnumber.isdigit():
# try both and see if one of them turns up something interesting
repo = config.repo
state = read(config.configfile)
fork_name = state.get("FORK_NAME", getpass.getuser())
if config.verbose:
info_out("Using fork name: {}".format(fork_name))
candidates = []
# Looping over the remotes, let's figure out which one
# is the one that has issues. Let's try every one that isn't
# your fork remote.
for origin in repo.remotes:
if origin.name == fork_name:
continue
url = origin.url
org, repo = parse_remote_url(origin.url)
github_title, github_url = github.get_title(
config, org, repo, int(bugnumber)
)
if github_title:
candidates.append((github_title, int(bugnumber), github_url))
bugzilla_summary, bugzilla_url = bugzilla.get_summary(config, bugnumber)
if bugzilla_summary:
candidates.append((bugzilla_summary, int(bugnumber), bugzilla_url))
if len(candidates) > 1:
info_out(
"Input is ambiguous. Multiple possibilities found. "
"Please re-run with the full URL:"
)
for title, _, url in candidates:
info_out("\t{}".format(url))
info_out("\t{}\n".format(title))
error_out("Awaiting your choice")
elif len(candidates) == 1:
return candidates[0]
else:
error_out("ID could not be found on GitHub or Bugzilla")
raise Exception(bugnumber)
return bugnumber, None, None |
def dehydrate_point(value):
""" Dehydrator for Point data.
:param value:
:type value: Point
:return:
"""
dim = len(value)
if dim == 2:
return Structure(b"X", value.srid, *value)
elif dim == 3:
return Structure(b"Y", value.srid, *value)
else:
raise ValueError("Cannot dehydrate Point with %d dimensions" % dim) | Dehydrator for Point data.
:param value:
:type value: Point
:return: | Below is the the instruction that describes the task:
### Input:
Dehydrator for Point data.
:param value:
:type value: Point
:return:
### Response:
def dehydrate_point(value):
""" Dehydrator for Point data.
:param value:
:type value: Point
:return:
"""
dim = len(value)
if dim == 2:
return Structure(b"X", value.srid, *value)
elif dim == 3:
return Structure(b"Y", value.srid, *value)
else:
raise ValueError("Cannot dehydrate Point with %d dimensions" % dim) |
def p_InDecrement(p):
'''
InDecrement : INDECREMENT Expression
| Expression INDECREMENT
'''
from .helper import isString
if isString(p[1]):
p[0] = InDecrement(p[1], p[2], False)
else:
p[0] = InDecrement(p[2], p[1], True) | InDecrement : INDECREMENT Expression
| Expression INDECREMENT | Below is the the instruction that describes the task:
### Input:
InDecrement : INDECREMENT Expression
| Expression INDECREMENT
### Response:
def p_InDecrement(p):
'''
InDecrement : INDECREMENT Expression
| Expression INDECREMENT
'''
from .helper import isString
if isString(p[1]):
p[0] = InDecrement(p[1], p[2], False)
else:
p[0] = InDecrement(p[2], p[1], True) |
def _browse(c):
"""
Open build target's index.html in a browser (using 'open').
"""
index = join(c.sphinx.target, c.sphinx.target_file)
c.run("open {0}".format(index)) | Open build target's index.html in a browser (using 'open'). | Below is the the instruction that describes the task:
### Input:
Open build target's index.html in a browser (using 'open').
### Response:
def _browse(c):
"""
Open build target's index.html in a browser (using 'open').
"""
index = join(c.sphinx.target, c.sphinx.target_file)
c.run("open {0}".format(index)) |
def summary(self, title=None, complexity=False):
"""
Generates a summary.
:param title: optional title
:type title: str
:param complexity: whether to print the complexity information as well
:type complexity: bool
:return: the summary
:rtype: str
"""
if title is None:
return javabridge.call(
self.jobject, "toSummaryString", "()Ljava/lang/String;")
else:
return javabridge.call(
self.jobject, "toSummaryString", "(Ljava/lang/String;Z)Ljava/lang/String;", title, complexity) | Generates a summary.
:param title: optional title
:type title: str
:param complexity: whether to print the complexity information as well
:type complexity: bool
:return: the summary
:rtype: str | Below is the the instruction that describes the task:
### Input:
Generates a summary.
:param title: optional title
:type title: str
:param complexity: whether to print the complexity information as well
:type complexity: bool
:return: the summary
:rtype: str
### Response:
def summary(self, title=None, complexity=False):
"""
Generates a summary.
:param title: optional title
:type title: str
:param complexity: whether to print the complexity information as well
:type complexity: bool
:return: the summary
:rtype: str
"""
if title is None:
return javabridge.call(
self.jobject, "toSummaryString", "()Ljava/lang/String;")
else:
return javabridge.call(
self.jobject, "toSummaryString", "(Ljava/lang/String;Z)Ljava/lang/String;", title, complexity) |
def extract_iface_name_from_path(path, name):
"""
Extract the 'real' interface name from the path name. Basically this
puts the '@' back in the name in place of the underscore, where the name
contains a '.' or contains 'macvtap' or 'macvlan'.
Examples:
+------------------+-----------------+
| real name | path name |
+==================+=================+
| bond0.104\@bond0 | bond0.104_bond0 |
+------------------+-----------------+
| __tmp1111 | __tmp1111 |
+------------------+-----------------+
| macvtap\@bond0 | macvlan_bond0 |
+------------------+-----------------+
| prod_bond | prod_bond |
+------------------+-----------------+
"""
if name in path:
ifname = os.path.basename(path).split("_", 2)[-1].strip()
if "." in ifname or "macvtap" in ifname or "macvlan" in ifname:
ifname = ifname.replace("_", "@")
return ifname | Extract the 'real' interface name from the path name. Basically this
puts the '@' back in the name in place of the underscore, where the name
contains a '.' or contains 'macvtap' or 'macvlan'.
Examples:
+------------------+-----------------+
| real name | path name |
+==================+=================+
| bond0.104\@bond0 | bond0.104_bond0 |
+------------------+-----------------+
| __tmp1111 | __tmp1111 |
+------------------+-----------------+
| macvtap\@bond0 | macvlan_bond0 |
+------------------+-----------------+
| prod_bond | prod_bond |
+------------------+-----------------+ | Below is the the instruction that describes the task:
### Input:
Extract the 'real' interface name from the path name. Basically this
puts the '@' back in the name in place of the underscore, where the name
contains a '.' or contains 'macvtap' or 'macvlan'.
Examples:
+------------------+-----------------+
| real name | path name |
+==================+=================+
| bond0.104\@bond0 | bond0.104_bond0 |
+------------------+-----------------+
| __tmp1111 | __tmp1111 |
+------------------+-----------------+
| macvtap\@bond0 | macvlan_bond0 |
+------------------+-----------------+
| prod_bond | prod_bond |
+------------------+-----------------+
### Response:
def extract_iface_name_from_path(path, name):
"""
Extract the 'real' interface name from the path name. Basically this
puts the '@' back in the name in place of the underscore, where the name
contains a '.' or contains 'macvtap' or 'macvlan'.
Examples:
+------------------+-----------------+
| real name | path name |
+==================+=================+
| bond0.104\@bond0 | bond0.104_bond0 |
+------------------+-----------------+
| __tmp1111 | __tmp1111 |
+------------------+-----------------+
| macvtap\@bond0 | macvlan_bond0 |
+------------------+-----------------+
| prod_bond | prod_bond |
+------------------+-----------------+
"""
if name in path:
ifname = os.path.basename(path).split("_", 2)[-1].strip()
if "." in ifname or "macvtap" in ifname or "macvlan" in ifname:
ifname = ifname.replace("_", "@")
return ifname |
def start(self):
"""Start the subscriber.
"""
def _get_addr_loop(service, timeout):
"""Try to get the address of *service* until for *timeout* seconds.
"""
then = datetime.now() + timedelta(seconds=timeout)
while datetime.now() < then:
addrs = get_pub_address(service, nameserver=self._nameserver)
if addrs:
return [addr["URI"] for addr in addrs]
time.sleep(1)
return []
# Subscribe to those services and topics.
LOGGER.debug("Subscribing to topics %s", str(self._topics))
self._subscriber = Subscriber(self._addresses,
self._topics,
translate=self._translate)
if self._addr_listener:
self._addr_listener = _AddressListener(self._subscriber,
self._services,
nameserver=self._nameserver)
# Search for addresses corresponding to service.
for service in self._services:
addresses = _get_addr_loop(service, self._timeout)
if not addresses:
LOGGER.warning("Can't get any address for %s", service)
continue
else:
LOGGER.debug("Got address for %s: %s",
str(service), str(addresses))
for addr in addresses:
self._subscriber.add(addr)
return self._subscriber | Start the subscriber. | Below is the the instruction that describes the task:
### Input:
Start the subscriber.
### Response:
def start(self):
"""Start the subscriber.
"""
def _get_addr_loop(service, timeout):
"""Try to get the address of *service* until for *timeout* seconds.
"""
then = datetime.now() + timedelta(seconds=timeout)
while datetime.now() < then:
addrs = get_pub_address(service, nameserver=self._nameserver)
if addrs:
return [addr["URI"] for addr in addrs]
time.sleep(1)
return []
# Subscribe to those services and topics.
LOGGER.debug("Subscribing to topics %s", str(self._topics))
self._subscriber = Subscriber(self._addresses,
self._topics,
translate=self._translate)
if self._addr_listener:
self._addr_listener = _AddressListener(self._subscriber,
self._services,
nameserver=self._nameserver)
# Search for addresses corresponding to service.
for service in self._services:
addresses = _get_addr_loop(service, self._timeout)
if not addresses:
LOGGER.warning("Can't get any address for %s", service)
continue
else:
LOGGER.debug("Got address for %s: %s",
str(service), str(addresses))
for addr in addresses:
self._subscriber.add(addr)
return self._subscriber |
def profile(self):
"""
Buffered result of :meth:`build_profile`
"""
if self._profile is None:
self._profile = self.build_profile()
return self._profile | Buffered result of :meth:`build_profile` | Below is the the instruction that describes the task:
### Input:
Buffered result of :meth:`build_profile`
### Response:
def profile(self):
"""
Buffered result of :meth:`build_profile`
"""
if self._profile is None:
self._profile = self.build_profile()
return self._profile |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.