code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _create_package_hierarchy(prefix=settings.TEMP_DIR, book_id=None):
"""
Create hierarchy of directories, at it is required in specification.
`root_dir` is root of the package generated using :attr:`settings.TEMP_DIR`
and :func:`_get_package_name`.
`orig_dir` is path to the directory, where the data files are stored.
`metadata_dir` is path to the directory with MODS metadata.
Args:
book_id (str, default None): UUID of the book.
prefix (str, default settings.TEMP_DIR): Where the package will be
stored. Default :attr:`settings.TEMP_DIR`.
Warning:
If the `root_dir` exists, it is REMOVED!
Returns:
list of str: root_dir, orig_dir, metadata_dir
"""
root_dir = _get_package_name(book_id=book_id, prefix=prefix)
if os.path.exists(root_dir):
shutil.rmtree(root_dir)
os.mkdir(root_dir)
original_dir = os.path.join(root_dir, "original")
metadata_dir = os.path.join(root_dir, "metadata")
os.mkdir(original_dir)
os.mkdir(metadata_dir)
return root_dir, original_dir, metadata_dir | Create hierarchy of directories, at it is required in specification.
`root_dir` is root of the package generated using :attr:`settings.TEMP_DIR`
and :func:`_get_package_name`.
`orig_dir` is path to the directory, where the data files are stored.
`metadata_dir` is path to the directory with MODS metadata.
Args:
book_id (str, default None): UUID of the book.
prefix (str, default settings.TEMP_DIR): Where the package will be
stored. Default :attr:`settings.TEMP_DIR`.
Warning:
If the `root_dir` exists, it is REMOVED!
Returns:
list of str: root_dir, orig_dir, metadata_dir | Below is the the instruction that describes the task:
### Input:
Create hierarchy of directories, at it is required in specification.
`root_dir` is root of the package generated using :attr:`settings.TEMP_DIR`
and :func:`_get_package_name`.
`orig_dir` is path to the directory, where the data files are stored.
`metadata_dir` is path to the directory with MODS metadata.
Args:
book_id (str, default None): UUID of the book.
prefix (str, default settings.TEMP_DIR): Where the package will be
stored. Default :attr:`settings.TEMP_DIR`.
Warning:
If the `root_dir` exists, it is REMOVED!
Returns:
list of str: root_dir, orig_dir, metadata_dir
### Response:
def _create_package_hierarchy(prefix=settings.TEMP_DIR, book_id=None):
"""
Create hierarchy of directories, at it is required in specification.
`root_dir` is root of the package generated using :attr:`settings.TEMP_DIR`
and :func:`_get_package_name`.
`orig_dir` is path to the directory, where the data files are stored.
`metadata_dir` is path to the directory with MODS metadata.
Args:
book_id (str, default None): UUID of the book.
prefix (str, default settings.TEMP_DIR): Where the package will be
stored. Default :attr:`settings.TEMP_DIR`.
Warning:
If the `root_dir` exists, it is REMOVED!
Returns:
list of str: root_dir, orig_dir, metadata_dir
"""
root_dir = _get_package_name(book_id=book_id, prefix=prefix)
if os.path.exists(root_dir):
shutil.rmtree(root_dir)
os.mkdir(root_dir)
original_dir = os.path.join(root_dir, "original")
metadata_dir = os.path.join(root_dir, "metadata")
os.mkdir(original_dir)
os.mkdir(metadata_dir)
return root_dir, original_dir, metadata_dir |
def filter_time_frame(start, delta):
"""Filter :class:`.Line` objects by their connection time.
:param start: a time expression (see -s argument on --help for its format)
to filter log lines that are before this time.
:type start: string
:param delta: a relative time expression (see -s argument on --help for
its format) to limit the amount of time log lines will be considered.
:type delta: string
:returns: a function that filters by the time a request is made.
:rtype: function
"""
start_value = start
delta_value = delta
end_value = None
if start_value is not '':
start_value = _date_str_to_datetime(start_value)
if delta_value is not '':
delta_value = _delta_str_to_timedelta(delta_value)
if start_value is not '' and delta_value is not '':
end_value = start_value + delta_value
def filter_func(log_line):
if start_value is '':
return True
elif start_value > log_line.accept_date:
return False
if end_value is None:
return True
elif end_value < log_line.accept_date:
return False
return True
return filter_func | Filter :class:`.Line` objects by their connection time.
:param start: a time expression (see -s argument on --help for its format)
to filter log lines that are before this time.
:type start: string
:param delta: a relative time expression (see -s argument on --help for
its format) to limit the amount of time log lines will be considered.
:type delta: string
:returns: a function that filters by the time a request is made.
:rtype: function | Below is the the instruction that describes the task:
### Input:
Filter :class:`.Line` objects by their connection time.
:param start: a time expression (see -s argument on --help for its format)
to filter log lines that are before this time.
:type start: string
:param delta: a relative time expression (see -s argument on --help for
its format) to limit the amount of time log lines will be considered.
:type delta: string
:returns: a function that filters by the time a request is made.
:rtype: function
### Response:
def filter_time_frame(start, delta):
"""Filter :class:`.Line` objects by their connection time.
:param start: a time expression (see -s argument on --help for its format)
to filter log lines that are before this time.
:type start: string
:param delta: a relative time expression (see -s argument on --help for
its format) to limit the amount of time log lines will be considered.
:type delta: string
:returns: a function that filters by the time a request is made.
:rtype: function
"""
start_value = start
delta_value = delta
end_value = None
if start_value is not '':
start_value = _date_str_to_datetime(start_value)
if delta_value is not '':
delta_value = _delta_str_to_timedelta(delta_value)
if start_value is not '' and delta_value is not '':
end_value = start_value + delta_value
def filter_func(log_line):
if start_value is '':
return True
elif start_value > log_line.accept_date:
return False
if end_value is None:
return True
elif end_value < log_line.accept_date:
return False
return True
return filter_func |
def extract_included(cls, fields, resource, resource_instance, included_resources,
included_cache):
"""
Adds related data to the top level included key when the request includes
?include=example,example_field2
"""
# this function may be called with an empty record (example: Browsable Interface)
if not resource_instance:
return
current_serializer = fields.serializer
context = current_serializer.context
included_serializers = utils.get_included_serializers(current_serializer)
included_resources = copy.copy(included_resources)
included_resources = [inflection.underscore(value) for value in included_resources]
for field_name, field in six.iteritems(fields):
# Skip URL field
if field_name == api_settings.URL_FIELD_NAME:
continue
# Skip fields without relations or serialized data
if not isinstance(
field, (relations.RelatedField, relations.ManyRelatedField, BaseSerializer)
):
continue
try:
included_resources.remove(field_name)
except ValueError:
# Skip fields not in requested included resources
# If no child field, directly continue with the next field
if field_name not in [node.split('.')[0] for node in included_resources]:
continue
relation_instance = cls.extract_relation_instance(
field_name, field, resource_instance, current_serializer
)
if isinstance(relation_instance, Manager):
relation_instance = relation_instance.all()
serializer_data = resource.get(field_name)
if isinstance(field, relations.ManyRelatedField):
serializer_class = included_serializers[field_name]
field = serializer_class(relation_instance, many=True, context=context)
serializer_data = field.data
if isinstance(field, relations.RelatedField):
if relation_instance is None or not serializer_data:
continue
many = field._kwargs.get('child_relation', None) is not None
if isinstance(field, ResourceRelatedField) and not many:
already_included = serializer_data['type'] in included_cache and \
serializer_data['id'] in included_cache[serializer_data['type']]
if already_included:
continue
serializer_class = included_serializers[field_name]
field = serializer_class(relation_instance, many=many, context=context)
serializer_data = field.data
new_included_resources = [key.replace('%s.' % field_name, '', 1)
for key in included_resources
if field_name == key.split('.')[0]]
if isinstance(field, ListSerializer):
serializer = field.child
relation_type = utils.get_resource_type_from_serializer(serializer)
relation_queryset = list(relation_instance)
if serializer_data:
for position in range(len(serializer_data)):
serializer_resource = serializer_data[position]
nested_resource_instance = relation_queryset[position]
resource_type = (
relation_type or
utils.get_resource_type_from_instance(nested_resource_instance)
)
serializer_fields = utils.get_serializer_fields(
serializer.__class__(
nested_resource_instance, context=serializer.context
)
)
new_item = cls.build_json_resource_obj(
serializer_fields,
serializer_resource,
nested_resource_instance,
resource_type,
getattr(serializer, '_poly_force_type_resolution', False)
)
included_cache[new_item['type']][new_item['id']] = \
utils._format_object(new_item)
cls.extract_included(
serializer_fields,
serializer_resource,
nested_resource_instance,
new_included_resources,
included_cache,
)
if isinstance(field, Serializer):
relation_type = utils.get_resource_type_from_serializer(field)
# Get the serializer fields
serializer_fields = utils.get_serializer_fields(field)
if serializer_data:
new_item = cls.build_json_resource_obj(
serializer_fields,
serializer_data,
relation_instance,
relation_type,
getattr(field, '_poly_force_type_resolution', False)
)
included_cache[new_item['type']][new_item['id']] = utils._format_object(
new_item
)
cls.extract_included(
serializer_fields,
serializer_data,
relation_instance,
new_included_resources,
included_cache,
) | Adds related data to the top level included key when the request includes
?include=example,example_field2 | Below is the the instruction that describes the task:
### Input:
Adds related data to the top level included key when the request includes
?include=example,example_field2
### Response:
def extract_included(cls, fields, resource, resource_instance, included_resources,
included_cache):
"""
Adds related data to the top level included key when the request includes
?include=example,example_field2
"""
# this function may be called with an empty record (example: Browsable Interface)
if not resource_instance:
return
current_serializer = fields.serializer
context = current_serializer.context
included_serializers = utils.get_included_serializers(current_serializer)
included_resources = copy.copy(included_resources)
included_resources = [inflection.underscore(value) for value in included_resources]
for field_name, field in six.iteritems(fields):
# Skip URL field
if field_name == api_settings.URL_FIELD_NAME:
continue
# Skip fields without relations or serialized data
if not isinstance(
field, (relations.RelatedField, relations.ManyRelatedField, BaseSerializer)
):
continue
try:
included_resources.remove(field_name)
except ValueError:
# Skip fields not in requested included resources
# If no child field, directly continue with the next field
if field_name not in [node.split('.')[0] for node in included_resources]:
continue
relation_instance = cls.extract_relation_instance(
field_name, field, resource_instance, current_serializer
)
if isinstance(relation_instance, Manager):
relation_instance = relation_instance.all()
serializer_data = resource.get(field_name)
if isinstance(field, relations.ManyRelatedField):
serializer_class = included_serializers[field_name]
field = serializer_class(relation_instance, many=True, context=context)
serializer_data = field.data
if isinstance(field, relations.RelatedField):
if relation_instance is None or not serializer_data:
continue
many = field._kwargs.get('child_relation', None) is not None
if isinstance(field, ResourceRelatedField) and not many:
already_included = serializer_data['type'] in included_cache and \
serializer_data['id'] in included_cache[serializer_data['type']]
if already_included:
continue
serializer_class = included_serializers[field_name]
field = serializer_class(relation_instance, many=many, context=context)
serializer_data = field.data
new_included_resources = [key.replace('%s.' % field_name, '', 1)
for key in included_resources
if field_name == key.split('.')[0]]
if isinstance(field, ListSerializer):
serializer = field.child
relation_type = utils.get_resource_type_from_serializer(serializer)
relation_queryset = list(relation_instance)
if serializer_data:
for position in range(len(serializer_data)):
serializer_resource = serializer_data[position]
nested_resource_instance = relation_queryset[position]
resource_type = (
relation_type or
utils.get_resource_type_from_instance(nested_resource_instance)
)
serializer_fields = utils.get_serializer_fields(
serializer.__class__(
nested_resource_instance, context=serializer.context
)
)
new_item = cls.build_json_resource_obj(
serializer_fields,
serializer_resource,
nested_resource_instance,
resource_type,
getattr(serializer, '_poly_force_type_resolution', False)
)
included_cache[new_item['type']][new_item['id']] = \
utils._format_object(new_item)
cls.extract_included(
serializer_fields,
serializer_resource,
nested_resource_instance,
new_included_resources,
included_cache,
)
if isinstance(field, Serializer):
relation_type = utils.get_resource_type_from_serializer(field)
# Get the serializer fields
serializer_fields = utils.get_serializer_fields(field)
if serializer_data:
new_item = cls.build_json_resource_obj(
serializer_fields,
serializer_data,
relation_instance,
relation_type,
getattr(field, '_poly_force_type_resolution', False)
)
included_cache[new_item['type']][new_item['id']] = utils._format_object(
new_item
)
cls.extract_included(
serializer_fields,
serializer_data,
relation_instance,
new_included_resources,
included_cache,
) |
def resource_collection_response(cls, offset=0, limit=20):
"""
This method is deprecated for version 1.1.0. Please use get_collection
"""
request_args = {'page[offset]': offset, 'page[limit]': limit}
return cls.get_collection(request_args) | This method is deprecated for version 1.1.0. Please use get_collection | Below is the the instruction that describes the task:
### Input:
This method is deprecated for version 1.1.0. Please use get_collection
### Response:
def resource_collection_response(cls, offset=0, limit=20):
"""
This method is deprecated for version 1.1.0. Please use get_collection
"""
request_args = {'page[offset]': offset, 'page[limit]': limit}
return cls.get_collection(request_args) |
def line_nbr_from_position(self, y_pos):
"""
Returns the line number from the y_pos.
:param y_pos: Y pos in the editor
:return: Line number (0 based), -1 if out of range
"""
editor = self._editor
height = editor.fontMetrics().height()
for top, line, block in editor.visible_blocks:
if top <= y_pos <= top + height:
return line
return -1 | Returns the line number from the y_pos.
:param y_pos: Y pos in the editor
:return: Line number (0 based), -1 if out of range | Below is the the instruction that describes the task:
### Input:
Returns the line number from the y_pos.
:param y_pos: Y pos in the editor
:return: Line number (0 based), -1 if out of range
### Response:
def line_nbr_from_position(self, y_pos):
"""
Returns the line number from the y_pos.
:param y_pos: Y pos in the editor
:return: Line number (0 based), -1 if out of range
"""
editor = self._editor
height = editor.fontMetrics().height()
for top, line, block in editor.visible_blocks:
if top <= y_pos <= top + height:
return line
return -1 |
def clustering_coefficient_weighted(user, interaction=None):
"""
The clustering coefficient of the user's weighted, undirected network.
It is defined the same way as :meth`~bandicoot.network.clustering_coefficient_unweighted`,
except that closed triplets are weighted by the number of interactions. For
each triplet (A, B, C), we compute the geometric mean of the number of
interactions, using the undirected weighted matrix:
.. math::
weight_{abc} = (m_{ab} \; m_{bc} \; m_{ac})^{1/3}
The weight is normalized, between 0 and 1, by the maximum value in the
matrix.
"""
matrix = matrix_undirected_weighted(user, interaction=interaction)
weights = [weight for g in matrix for weight in g if weight is not None]
if len(weights) == 0:
return None
max_weight = max(weights)
triplet_weight = 0
for a, b in combinations(range(len(matrix)), 2):
a_b, a_c, b_c = matrix[a][b], matrix[a][0], matrix[b][0]
if a_b is None or a_c is None or b_c is None:
continue
if a_b and a_c and b_c:
triplet_weight += (a_b * a_c * b_c) ** (1 / 3) / max_weight
d_ego = sum(1 for i in matrix[0] if i > 0)
return 2 * triplet_weight / (d_ego * (d_ego - 1)) if d_ego > 1 else 0 | The clustering coefficient of the user's weighted, undirected network.
It is defined the same way as :meth`~bandicoot.network.clustering_coefficient_unweighted`,
except that closed triplets are weighted by the number of interactions. For
each triplet (A, B, C), we compute the geometric mean of the number of
interactions, using the undirected weighted matrix:
.. math::
weight_{abc} = (m_{ab} \; m_{bc} \; m_{ac})^{1/3}
The weight is normalized, between 0 and 1, by the maximum value in the
matrix. | Below is the the instruction that describes the task:
### Input:
The clustering coefficient of the user's weighted, undirected network.
It is defined the same way as :meth`~bandicoot.network.clustering_coefficient_unweighted`,
except that closed triplets are weighted by the number of interactions. For
each triplet (A, B, C), we compute the geometric mean of the number of
interactions, using the undirected weighted matrix:
.. math::
weight_{abc} = (m_{ab} \; m_{bc} \; m_{ac})^{1/3}
The weight is normalized, between 0 and 1, by the maximum value in the
matrix.
### Response:
def clustering_coefficient_weighted(user, interaction=None):
"""
The clustering coefficient of the user's weighted, undirected network.
It is defined the same way as :meth`~bandicoot.network.clustering_coefficient_unweighted`,
except that closed triplets are weighted by the number of interactions. For
each triplet (A, B, C), we compute the geometric mean of the number of
interactions, using the undirected weighted matrix:
.. math::
weight_{abc} = (m_{ab} \; m_{bc} \; m_{ac})^{1/3}
The weight is normalized, between 0 and 1, by the maximum value in the
matrix.
"""
matrix = matrix_undirected_weighted(user, interaction=interaction)
weights = [weight for g in matrix for weight in g if weight is not None]
if len(weights) == 0:
return None
max_weight = max(weights)
triplet_weight = 0
for a, b in combinations(range(len(matrix)), 2):
a_b, a_c, b_c = matrix[a][b], matrix[a][0], matrix[b][0]
if a_b is None or a_c is None or b_c is None:
continue
if a_b and a_c and b_c:
triplet_weight += (a_b * a_c * b_c) ** (1 / 3) / max_weight
d_ego = sum(1 for i in matrix[0] if i > 0)
return 2 * triplet_weight / (d_ego * (d_ego - 1)) if d_ego > 1 else 0 |
def multiline_repr(text, special_chars=('\n', '"')):
"""Get string representation for triple quoted context.
Make string representation as normal except do not transform
"special characters" into an escaped representation to support
use of the representation in a triple quoted multi-line string
context (to avoid escaping newlines and double quotes).
Pass ``RAW_MULTILINE_CHARS`` as the ``special_chars`` when use
context is a "raw" triple quoted string (to also avoid excaping
backslashes).
:param text: string
:type text: str or unicode
:param iterable special_chars: characters to remove/restore
:returns: representation
:rtype: str
"""
try:
char = special_chars[0]
except IndexError:
text = ascii(text)[2 if PY2 else 1:-1]
else:
text = char.join(
multiline_repr(s, special_chars[1:]) for s in text.split(char))
return text | Get string representation for triple quoted context.
Make string representation as normal except do not transform
"special characters" into an escaped representation to support
use of the representation in a triple quoted multi-line string
context (to avoid escaping newlines and double quotes).
Pass ``RAW_MULTILINE_CHARS`` as the ``special_chars`` when use
context is a "raw" triple quoted string (to also avoid excaping
backslashes).
:param text: string
:type text: str or unicode
:param iterable special_chars: characters to remove/restore
:returns: representation
:rtype: str | Below is the the instruction that describes the task:
### Input:
Get string representation for triple quoted context.
Make string representation as normal except do not transform
"special characters" into an escaped representation to support
use of the representation in a triple quoted multi-line string
context (to avoid escaping newlines and double quotes).
Pass ``RAW_MULTILINE_CHARS`` as the ``special_chars`` when use
context is a "raw" triple quoted string (to also avoid excaping
backslashes).
:param text: string
:type text: str or unicode
:param iterable special_chars: characters to remove/restore
:returns: representation
:rtype: str
### Response:
def multiline_repr(text, special_chars=('\n', '"')):
"""Get string representation for triple quoted context.
Make string representation as normal except do not transform
"special characters" into an escaped representation to support
use of the representation in a triple quoted multi-line string
context (to avoid escaping newlines and double quotes).
Pass ``RAW_MULTILINE_CHARS`` as the ``special_chars`` when use
context is a "raw" triple quoted string (to also avoid excaping
backslashes).
:param text: string
:type text: str or unicode
:param iterable special_chars: characters to remove/restore
:returns: representation
:rtype: str
"""
try:
char = special_chars[0]
except IndexError:
text = ascii(text)[2 if PY2 else 1:-1]
else:
text = char.join(
multiline_repr(s, special_chars[1:]) for s in text.split(char))
return text |
def remove_directories(list_of_paths):
"""
Removes non-leafs from a list of directory paths
"""
found_dirs = set('/')
for path in list_of_paths:
dirs = path.strip().split('/')
for i in range(2,len(dirs)):
found_dirs.add( '/'.join(dirs[:i]) )
paths = [ path for path in list_of_paths if
(path.strip() not in found_dirs) and path.strip()[-1]!='/' ]
return paths | Removes non-leafs from a list of directory paths | Below is the the instruction that describes the task:
### Input:
Removes non-leafs from a list of directory paths
### Response:
def remove_directories(list_of_paths):
"""
Removes non-leafs from a list of directory paths
"""
found_dirs = set('/')
for path in list_of_paths:
dirs = path.strip().split('/')
for i in range(2,len(dirs)):
found_dirs.add( '/'.join(dirs[:i]) )
paths = [ path for path in list_of_paths if
(path.strip() not in found_dirs) and path.strip()[-1]!='/' ]
return paths |
def set_terms(self,*terms, **kw_terms):
"""
Create or set top level terms in the section. After python 3.6.0, the terms entries
should maintain the same order as the argument list. The term arguments can have any of these forms:
* For position argument, a Term object
* For kw arguments:
- 'TermName=TermValue'
- 'TermName=(TermValue, PropertyDict)
Positional arguments are processed before keyword arguments, and are passed into .add_term()
:param terms: Term arguments
:return:
"""
for t in terms:
self.add_term(t)
for k,v in kw_terms.items():
try:
value, props = v
except (ValueError, TypeError) as e:
value, props = v,{}
self.new_term(k,value,**props) | Create or set top level terms in the section. After python 3.6.0, the terms entries
should maintain the same order as the argument list. The term arguments can have any of these forms:
* For position argument, a Term object
* For kw arguments:
- 'TermName=TermValue'
- 'TermName=(TermValue, PropertyDict)
Positional arguments are processed before keyword arguments, and are passed into .add_term()
:param terms: Term arguments
:return: | Below is the the instruction that describes the task:
### Input:
Create or set top level terms in the section. After python 3.6.0, the terms entries
should maintain the same order as the argument list. The term arguments can have any of these forms:
* For position argument, a Term object
* For kw arguments:
- 'TermName=TermValue'
- 'TermName=(TermValue, PropertyDict)
Positional arguments are processed before keyword arguments, and are passed into .add_term()
:param terms: Term arguments
:return:
### Response:
def set_terms(self,*terms, **kw_terms):
"""
Create or set top level terms in the section. After python 3.6.0, the terms entries
should maintain the same order as the argument list. The term arguments can have any of these forms:
* For position argument, a Term object
* For kw arguments:
- 'TermName=TermValue'
- 'TermName=(TermValue, PropertyDict)
Positional arguments are processed before keyword arguments, and are passed into .add_term()
:param terms: Term arguments
:return:
"""
for t in terms:
self.add_term(t)
for k,v in kw_terms.items():
try:
value, props = v
except (ValueError, TypeError) as e:
value, props = v,{}
self.new_term(k,value,**props) |
def render_to_string(self, request, template, context, content_instance=None):
"""
Render a custom template with the :class:`~PluginContext` as context instance.
"""
if not content_instance:
content_instance = PluginContext(request)
content_instance.update(context)
return render_to_string(template, content_instance.flatten(), request=request) | Render a custom template with the :class:`~PluginContext` as context instance. | Below is the the instruction that describes the task:
### Input:
Render a custom template with the :class:`~PluginContext` as context instance.
### Response:
def render_to_string(self, request, template, context, content_instance=None):
"""
Render a custom template with the :class:`~PluginContext` as context instance.
"""
if not content_instance:
content_instance = PluginContext(request)
content_instance.update(context)
return render_to_string(template, content_instance.flatten(), request=request) |
def _whole_basis_types(basis):
'''
Get a list of all the types of features in this basis set.
'''
all_types = set()
for v in basis['elements'].values():
if 'electron_shells' in v:
for sh in v['electron_shells']:
all_types.add(sh['function_type'])
if 'ecp_potentials' in v:
for pot in v['ecp_potentials']:
all_types.add(pot['ecp_type'])
return sorted(list(all_types)) | Get a list of all the types of features in this basis set. | Below is the the instruction that describes the task:
### Input:
Get a list of all the types of features in this basis set.
### Response:
def _whole_basis_types(basis):
'''
Get a list of all the types of features in this basis set.
'''
all_types = set()
for v in basis['elements'].values():
if 'electron_shells' in v:
for sh in v['electron_shells']:
all_types.add(sh['function_type'])
if 'ecp_potentials' in v:
for pot in v['ecp_potentials']:
all_types.add(pot['ecp_type'])
return sorted(list(all_types)) |
def get_nodes_positions(self):
"""
Getter method for nodes positions.
:return: A dictionary with nodes as keys and positions as values
"""
nodes = self.get_nodes()
output = {}
for node in nodes:
output[node[0]] = (float(node[1][consts.Consts.x]), float(node[1][consts.Consts.y]))
return output | Getter method for nodes positions.
:return: A dictionary with nodes as keys and positions as values | Below is the the instruction that describes the task:
### Input:
Getter method for nodes positions.
:return: A dictionary with nodes as keys and positions as values
### Response:
def get_nodes_positions(self):
"""
Getter method for nodes positions.
:return: A dictionary with nodes as keys and positions as values
"""
nodes = self.get_nodes()
output = {}
for node in nodes:
output[node[0]] = (float(node[1][consts.Consts.x]), float(node[1][consts.Consts.y]))
return output |
def set_placeholder(self, key, value):
"""Placeholders are custom magic variables defined during configuration
time.
.. note:: These are accessible, like any uWSGI option, in your application code via
``.runtime.environ.uwsgi_env.config``.
:param str|unicode key:
:param str|unicode value:
"""
self._set('set-placeholder', '%s=%s' % (key, value), multi=True)
return self | Placeholders are custom magic variables defined during configuration
time.
.. note:: These are accessible, like any uWSGI option, in your application code via
``.runtime.environ.uwsgi_env.config``.
:param str|unicode key:
:param str|unicode value: | Below is the the instruction that describes the task:
### Input:
Placeholders are custom magic variables defined during configuration
time.
.. note:: These are accessible, like any uWSGI option, in your application code via
``.runtime.environ.uwsgi_env.config``.
:param str|unicode key:
:param str|unicode value:
### Response:
def set_placeholder(self, key, value):
"""Placeholders are custom magic variables defined during configuration
time.
.. note:: These are accessible, like any uWSGI option, in your application code via
``.runtime.environ.uwsgi_env.config``.
:param str|unicode key:
:param str|unicode value:
"""
self._set('set-placeholder', '%s=%s' % (key, value), multi=True)
return self |
def fso_rmtree(self, path, ignore_errors=False, onerror=None):
'overlays shutil.rmtree()'
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
try:
if self.fso_islink(path):
# symlinks to directories are forbidden, see shutil bug #1669
raise OSError('Cannot call rmtree on a symbolic link')
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = self.fso_listdir(path)
except os.error, err:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = self.fso_lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
self.fso_rmtree(fullname, ignore_errors, onerror)
else:
try:
self.fso_remove(fullname)
except OSError as err:
onerror(os.remove, fullname, sys.exc_info())
try:
self.fso_rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info()) | overlays shutil.rmtree() | Below is the the instruction that describes the task:
### Input:
overlays shutil.rmtree()
### Response:
def fso_rmtree(self, path, ignore_errors=False, onerror=None):
'overlays shutil.rmtree()'
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
try:
if self.fso_islink(path):
# symlinks to directories are forbidden, see shutil bug #1669
raise OSError('Cannot call rmtree on a symbolic link')
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = self.fso_listdir(path)
except os.error, err:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = self.fso_lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
self.fso_rmtree(fullname, ignore_errors, onerror)
else:
try:
self.fso_remove(fullname)
except OSError as err:
onerror(os.remove, fullname, sys.exc_info())
try:
self.fso_rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info()) |
def getusers(self, context, request):
"""/@@API/getusers: Return users belonging to specified roles
Required parameters:
- roles: The role of which users to return
{
runtime: Function running time.
error: true or string(message) if error. false if no error.
success: true or string(message) if success. false if no success.
users: list of dictionaries: {fullname: fullname, userid: userid}
}
>>> portal = layer['portal']
>>> portal_url = portal.absolute_url()
>>> from plone.app.testing import SITE_OWNER_NAME
>>> from plone.app.testing import SITE_OWNER_PASSWORD
>>> browser = layer['getBrowser'](portal, loggedIn=True, username=SITE_OWNER_NAME, password=SITE_OWNER_PASSWORD)
>>> browser.open(portal_url+"/@@API/getusers?roles:list=Manager&roles:list=Analyst")
>>> browser.contents
'{...test_labmanager1...}'
>>> browser.contents
'{...test_analyst1...}'
>>> browser.open(portal_url+"/@@API/getusers")
>>> browser.contents
'No roles specified'
"""
roles = request.get('roles','')
if len(roles) == 0:
raise BadRequest("No roles specified")
mtool = getToolByName(context, 'portal_membership')
users = []
for user in mtool.searchForMembers(roles=roles):
uid = user.getId()
fullname = user.getProperty('fullname')
if not fullname:
fullname = uid
users.append({'fullname':fullname, 'userid': uid})
ret = {
"url": router.url_for("remove", force_external=True),
"success": True,
"error": False,
'users': users,
}
return ret | /@@API/getusers: Return users belonging to specified roles
Required parameters:
- roles: The role of which users to return
{
runtime: Function running time.
error: true or string(message) if error. false if no error.
success: true or string(message) if success. false if no success.
users: list of dictionaries: {fullname: fullname, userid: userid}
}
>>> portal = layer['portal']
>>> portal_url = portal.absolute_url()
>>> from plone.app.testing import SITE_OWNER_NAME
>>> from plone.app.testing import SITE_OWNER_PASSWORD
>>> browser = layer['getBrowser'](portal, loggedIn=True, username=SITE_OWNER_NAME, password=SITE_OWNER_PASSWORD)
>>> browser.open(portal_url+"/@@API/getusers?roles:list=Manager&roles:list=Analyst")
>>> browser.contents
'{...test_labmanager1...}'
>>> browser.contents
'{...test_analyst1...}'
>>> browser.open(portal_url+"/@@API/getusers")
>>> browser.contents
'No roles specified' | Below is the the instruction that describes the task:
### Input:
/@@API/getusers: Return users belonging to specified roles
Required parameters:
- roles: The role of which users to return
{
runtime: Function running time.
error: true or string(message) if error. false if no error.
success: true or string(message) if success. false if no success.
users: list of dictionaries: {fullname: fullname, userid: userid}
}
>>> portal = layer['portal']
>>> portal_url = portal.absolute_url()
>>> from plone.app.testing import SITE_OWNER_NAME
>>> from plone.app.testing import SITE_OWNER_PASSWORD
>>> browser = layer['getBrowser'](portal, loggedIn=True, username=SITE_OWNER_NAME, password=SITE_OWNER_PASSWORD)
>>> browser.open(portal_url+"/@@API/getusers?roles:list=Manager&roles:list=Analyst")
>>> browser.contents
'{...test_labmanager1...}'
>>> browser.contents
'{...test_analyst1...}'
>>> browser.open(portal_url+"/@@API/getusers")
>>> browser.contents
'No roles specified'
### Response:
def getusers(self, context, request):
"""/@@API/getusers: Return users belonging to specified roles
Required parameters:
- roles: The role of which users to return
{
runtime: Function running time.
error: true or string(message) if error. false if no error.
success: true or string(message) if success. false if no success.
users: list of dictionaries: {fullname: fullname, userid: userid}
}
>>> portal = layer['portal']
>>> portal_url = portal.absolute_url()
>>> from plone.app.testing import SITE_OWNER_NAME
>>> from plone.app.testing import SITE_OWNER_PASSWORD
>>> browser = layer['getBrowser'](portal, loggedIn=True, username=SITE_OWNER_NAME, password=SITE_OWNER_PASSWORD)
>>> browser.open(portal_url+"/@@API/getusers?roles:list=Manager&roles:list=Analyst")
>>> browser.contents
'{...test_labmanager1...}'
>>> browser.contents
'{...test_analyst1...}'
>>> browser.open(portal_url+"/@@API/getusers")
>>> browser.contents
'No roles specified'
"""
roles = request.get('roles','')
if len(roles) == 0:
raise BadRequest("No roles specified")
mtool = getToolByName(context, 'portal_membership')
users = []
for user in mtool.searchForMembers(roles=roles):
uid = user.getId()
fullname = user.getProperty('fullname')
if not fullname:
fullname = uid
users.append({'fullname':fullname, 'userid': uid})
ret = {
"url": router.url_for("remove", force_external=True),
"success": True,
"error": False,
'users': users,
}
return ret |
def generate_full_symmops(symmops, tol):
"""
Recursive algorithm to permute through all possible combinations of the
initially supplied symmetry operations to arrive at a complete set of
operations mapping a single atom to all other equivalent atoms in the
point group. This assumes that the initial number already uniquely
identifies all operations.
Args:
symmops ([SymmOp]): Initial set of symmetry operations.
Returns:
Full set of symmetry operations.
"""
# Uses an algorithm described in:
# Gregory Butler. Fundamental Algorithms for Permutation Groups.
# Lecture Notes in Computer Science (Book 559). Springer, 1991. page 15
UNIT = np.eye(4)
generators = [op.affine_matrix for op in symmops
if not np.allclose(op.affine_matrix, UNIT)]
if not generators:
# C1 symmetry breaks assumptions in the algorithm afterwards
return symmops
else:
full = list(generators)
for g in full:
for s in generators:
op = np.dot(g, s)
d = np.abs(full - op) < tol
if not np.any(np.all(np.all(d, axis=2), axis=1)):
full.append(op)
d = np.abs(full - UNIT) < tol
if not np.any(np.all(np.all(d, axis=2), axis=1)):
full.append(UNIT)
return [SymmOp(op) for op in full] | Recursive algorithm to permute through all possible combinations of the
initially supplied symmetry operations to arrive at a complete set of
operations mapping a single atom to all other equivalent atoms in the
point group. This assumes that the initial number already uniquely
identifies all operations.
Args:
symmops ([SymmOp]): Initial set of symmetry operations.
Returns:
Full set of symmetry operations. | Below is the the instruction that describes the task:
### Input:
Recursive algorithm to permute through all possible combinations of the
initially supplied symmetry operations to arrive at a complete set of
operations mapping a single atom to all other equivalent atoms in the
point group. This assumes that the initial number already uniquely
identifies all operations.
Args:
symmops ([SymmOp]): Initial set of symmetry operations.
Returns:
Full set of symmetry operations.
### Response:
def generate_full_symmops(symmops, tol):
"""
Recursive algorithm to permute through all possible combinations of the
initially supplied symmetry operations to arrive at a complete set of
operations mapping a single atom to all other equivalent atoms in the
point group. This assumes that the initial number already uniquely
identifies all operations.
Args:
symmops ([SymmOp]): Initial set of symmetry operations.
Returns:
Full set of symmetry operations.
"""
# Uses an algorithm described in:
# Gregory Butler. Fundamental Algorithms for Permutation Groups.
# Lecture Notes in Computer Science (Book 559). Springer, 1991. page 15
UNIT = np.eye(4)
generators = [op.affine_matrix for op in symmops
if not np.allclose(op.affine_matrix, UNIT)]
if not generators:
# C1 symmetry breaks assumptions in the algorithm afterwards
return symmops
else:
full = list(generators)
for g in full:
for s in generators:
op = np.dot(g, s)
d = np.abs(full - op) < tol
if not np.any(np.all(np.all(d, axis=2), axis=1)):
full.append(op)
d = np.abs(full - UNIT) < tol
if not np.any(np.all(np.all(d, axis=2), axis=1)):
full.append(UNIT)
return [SymmOp(op) for op in full] |
def enqueue(self, pipeline):
""" Start a pipeline.
:param pipeline: Start this pipeline.
"""
copied = Pipeline().append(pipeline)
copied.group = self
self._queue.put(copied) | Start a pipeline.
:param pipeline: Start this pipeline. | Below is the the instruction that describes the task:
### Input:
Start a pipeline.
:param pipeline: Start this pipeline.
### Response:
def enqueue(self, pipeline):
""" Start a pipeline.
:param pipeline: Start this pipeline.
"""
copied = Pipeline().append(pipeline)
copied.group = self
self._queue.put(copied) |
def locateChild(self, context, segments):
"""
Delegate dispatch to a sharing resource if the request is for a user
subdomain, otherwise fall back to the wrapped resource's C{locateChild}
implementation.
"""
request = IRequest(context)
hostname = request.getHeader('host')
info = self.subdomain(hostname)
if info is not None:
username, domain = info
index = UserIndexPage(IRealm(self.siteStore),
self.webViewer)
resource = index.locateChild(None, [username])[0]
return resource, segments
return self.wrapped.locateChild(context, segments) | Delegate dispatch to a sharing resource if the request is for a user
subdomain, otherwise fall back to the wrapped resource's C{locateChild}
implementation. | Below is the the instruction that describes the task:
### Input:
Delegate dispatch to a sharing resource if the request is for a user
subdomain, otherwise fall back to the wrapped resource's C{locateChild}
implementation.
### Response:
def locateChild(self, context, segments):
"""
Delegate dispatch to a sharing resource if the request is for a user
subdomain, otherwise fall back to the wrapped resource's C{locateChild}
implementation.
"""
request = IRequest(context)
hostname = request.getHeader('host')
info = self.subdomain(hostname)
if info is not None:
username, domain = info
index = UserIndexPage(IRealm(self.siteStore),
self.webViewer)
resource = index.locateChild(None, [username])[0]
return resource, segments
return self.wrapped.locateChild(context, segments) |
def as_artist(self, origin=(0, 0), **kwargs):
"""
Matplotlib patch object for this region (`matplotlib.patches.Rectangle`).
Parameters:
-----------
origin : array_like, optional
The ``(x, y)`` pixel position of the origin of the displayed image.
Default is (0, 0).
kwargs : `dict`
All keywords that a `~matplotlib.patches.Rectangle` object accepts
Returns
-------
patch : `~matplotlib.patches.Rectangle`
Matplotlib circle patch
"""
from matplotlib.patches import Rectangle
xy = self._lower_left_xy()
xy = xy[0] - origin[0], xy[1] - origin[1]
width = self.width
height = self.height
# From the docstring: MPL expects "rotation in degrees (anti-clockwise)"
angle = self.angle.to('deg').value
mpl_params = self.mpl_properties_default('patch')
mpl_params.update(kwargs)
return Rectangle(xy=xy, width=width, height=height,
angle=angle, **mpl_params) | Matplotlib patch object for this region (`matplotlib.patches.Rectangle`).
Parameters:
-----------
origin : array_like, optional
The ``(x, y)`` pixel position of the origin of the displayed image.
Default is (0, 0).
kwargs : `dict`
All keywords that a `~matplotlib.patches.Rectangle` object accepts
Returns
-------
patch : `~matplotlib.patches.Rectangle`
Matplotlib circle patch | Below is the the instruction that describes the task:
### Input:
Matplotlib patch object for this region (`matplotlib.patches.Rectangle`).
Parameters:
-----------
origin : array_like, optional
The ``(x, y)`` pixel position of the origin of the displayed image.
Default is (0, 0).
kwargs : `dict`
All keywords that a `~matplotlib.patches.Rectangle` object accepts
Returns
-------
patch : `~matplotlib.patches.Rectangle`
Matplotlib circle patch
### Response:
def as_artist(self, origin=(0, 0), **kwargs):
"""
Matplotlib patch object for this region (`matplotlib.patches.Rectangle`).
Parameters:
-----------
origin : array_like, optional
The ``(x, y)`` pixel position of the origin of the displayed image.
Default is (0, 0).
kwargs : `dict`
All keywords that a `~matplotlib.patches.Rectangle` object accepts
Returns
-------
patch : `~matplotlib.patches.Rectangle`
Matplotlib circle patch
"""
from matplotlib.patches import Rectangle
xy = self._lower_left_xy()
xy = xy[0] - origin[0], xy[1] - origin[1]
width = self.width
height = self.height
# From the docstring: MPL expects "rotation in degrees (anti-clockwise)"
angle = self.angle.to('deg').value
mpl_params = self.mpl_properties_default('patch')
mpl_params.update(kwargs)
return Rectangle(xy=xy, width=width, height=height,
angle=angle, **mpl_params) |
def gtk_reload():
"""Reload GTK2 themes."""
events = gtk.gdk.Event(gtk.gdk.CLIENT_EVENT)
data = gtk.gdk.atom_intern("_GTK_READ_RCFILES", False)
events.data_format = 8
events.send_event = True
events.message_type = data
events.send_clientmessage_toall() | Reload GTK2 themes. | Below is the the instruction that describes the task:
### Input:
Reload GTK2 themes.
### Response:
def gtk_reload():
"""Reload GTK2 themes."""
events = gtk.gdk.Event(gtk.gdk.CLIENT_EVENT)
data = gtk.gdk.atom_intern("_GTK_READ_RCFILES", False)
events.data_format = 8
events.send_event = True
events.message_type = data
events.send_clientmessage_toall() |
def add(self, key, val, priority=None):
"""add a value to the queue with priority, using the key to know uniqueness
key -- str -- this is used to determine if val already exists in the queue,
if key is already in the queue, then the val will be replaced in the
queue with the new priority
val -- mixed -- the value to add to the queue
priority -- int -- the priority of val
"""
if key in self.item_finder:
self.remove(key)
else:
# keep the queue contained
if self.full():
raise OverflowError("Queue is full")
if priority is None:
priority = next(self.counter)
item = [priority, key, val]
self.item_finder[key] = item
heapq.heappush(self.pq, item) | add a value to the queue with priority, using the key to know uniqueness
key -- str -- this is used to determine if val already exists in the queue,
if key is already in the queue, then the val will be replaced in the
queue with the new priority
val -- mixed -- the value to add to the queue
priority -- int -- the priority of val | Below is the the instruction that describes the task:
### Input:
add a value to the queue with priority, using the key to know uniqueness
key -- str -- this is used to determine if val already exists in the queue,
if key is already in the queue, then the val will be replaced in the
queue with the new priority
val -- mixed -- the value to add to the queue
priority -- int -- the priority of val
### Response:
def add(self, key, val, priority=None):
"""add a value to the queue with priority, using the key to know uniqueness
key -- str -- this is used to determine if val already exists in the queue,
if key is already in the queue, then the val will be replaced in the
queue with the new priority
val -- mixed -- the value to add to the queue
priority -- int -- the priority of val
"""
if key in self.item_finder:
self.remove(key)
else:
# keep the queue contained
if self.full():
raise OverflowError("Queue is full")
if priority is None:
priority = next(self.counter)
item = [priority, key, val]
self.item_finder[key] = item
heapq.heappush(self.pq, item) |
def __applytns(self, root):
"""Make sure included schema has the same target namespace."""
TNS = "targetNamespace"
tns = root.get(TNS)
if tns is None:
tns = self.schema.tns[1]
root.set(TNS, tns)
else:
if self.schema.tns[1] != tns:
raise Exception, "%s mismatch" % TNS | Make sure included schema has the same target namespace. | Below is the the instruction that describes the task:
### Input:
Make sure included schema has the same target namespace.
### Response:
def __applytns(self, root):
"""Make sure included schema has the same target namespace."""
TNS = "targetNamespace"
tns = root.get(TNS)
if tns is None:
tns = self.schema.tns[1]
root.set(TNS, tns)
else:
if self.schema.tns[1] != tns:
raise Exception, "%s mismatch" % TNS |
def get_bounding_box(self):
"""
Compute bounding box for each surface element, and then return
the bounding box of all surface elements' bounding boxes.
:return:
A tuple of four items. These items represent western, eastern,
northern and southern borders of the bounding box respectively.
Values are floats in decimal degrees.
"""
lons = []
lats = []
for surf in self.surfaces:
west, east, north, south = surf.get_bounding_box()
lons.extend([west, east])
lats.extend([north, south])
return utils.get_spherical_bounding_box(lons, lats) | Compute bounding box for each surface element, and then return
the bounding box of all surface elements' bounding boxes.
:return:
A tuple of four items. These items represent western, eastern,
northern and southern borders of the bounding box respectively.
Values are floats in decimal degrees. | Below is the the instruction that describes the task:
### Input:
Compute bounding box for each surface element, and then return
the bounding box of all surface elements' bounding boxes.
:return:
A tuple of four items. These items represent western, eastern,
northern and southern borders of the bounding box respectively.
Values are floats in decimal degrees.
### Response:
def get_bounding_box(self):
"""
Compute bounding box for each surface element, and then return
the bounding box of all surface elements' bounding boxes.
:return:
A tuple of four items. These items represent western, eastern,
northern and southern borders of the bounding box respectively.
Values are floats in decimal degrees.
"""
lons = []
lats = []
for surf in self.surfaces:
west, east, north, south = surf.get_bounding_box()
lons.extend([west, east])
lats.extend([north, south])
return utils.get_spherical_bounding_box(lons, lats) |
def debugger(self,force=False):
"""Call up the pdb debugger if desired, always clean up the tb
reference.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
If the call_pdb flag is set, the pdb interactive debugger is
invoked. In all cases, the self.tb reference to the current traceback
is deleted to prevent lingering references which hamper memory
management.
Note that each call to pdb() does an 'import readline', so if your app
requires a special setup for the readline completers, you'll have to
fix that by hand after invoking the exception handler."""
if force or self.call_pdb:
if self.pdb is None:
self.pdb = debugger.Pdb(
self.color_scheme_table.active_scheme_name)
# the system displayhook may have changed, restore the original
# for pdb
display_trap = DisplayTrap(hook=sys.__displayhook__)
with display_trap:
self.pdb.reset()
# Find the right frame so we don't pop up inside ipython itself
if hasattr(self,'tb') and self.tb is not None:
etb = self.tb
else:
etb = self.tb = sys.last_traceback
while self.tb is not None and self.tb.tb_next is not None:
self.tb = self.tb.tb_next
if etb and etb.tb_next:
etb = etb.tb_next
self.pdb.botframe = etb.tb_frame
self.pdb.interaction(self.tb.tb_frame, self.tb)
if hasattr(self,'tb'):
del self.tb | Call up the pdb debugger if desired, always clean up the tb
reference.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
If the call_pdb flag is set, the pdb interactive debugger is
invoked. In all cases, the self.tb reference to the current traceback
is deleted to prevent lingering references which hamper memory
management.
Note that each call to pdb() does an 'import readline', so if your app
requires a special setup for the readline completers, you'll have to
fix that by hand after invoking the exception handler. | Below is the the instruction that describes the task:
### Input:
Call up the pdb debugger if desired, always clean up the tb
reference.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
If the call_pdb flag is set, the pdb interactive debugger is
invoked. In all cases, the self.tb reference to the current traceback
is deleted to prevent lingering references which hamper memory
management.
Note that each call to pdb() does an 'import readline', so if your app
requires a special setup for the readline completers, you'll have to
fix that by hand after invoking the exception handler.
### Response:
def debugger(self,force=False):
"""Call up the pdb debugger if desired, always clean up the tb
reference.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
If the call_pdb flag is set, the pdb interactive debugger is
invoked. In all cases, the self.tb reference to the current traceback
is deleted to prevent lingering references which hamper memory
management.
Note that each call to pdb() does an 'import readline', so if your app
requires a special setup for the readline completers, you'll have to
fix that by hand after invoking the exception handler."""
if force or self.call_pdb:
if self.pdb is None:
self.pdb = debugger.Pdb(
self.color_scheme_table.active_scheme_name)
# the system displayhook may have changed, restore the original
# for pdb
display_trap = DisplayTrap(hook=sys.__displayhook__)
with display_trap:
self.pdb.reset()
# Find the right frame so we don't pop up inside ipython itself
if hasattr(self,'tb') and self.tb is not None:
etb = self.tb
else:
etb = self.tb = sys.last_traceback
while self.tb is not None and self.tb.tb_next is not None:
self.tb = self.tb.tb_next
if etb and etb.tb_next:
etb = etb.tb_next
self.pdb.botframe = etb.tb_frame
self.pdb.interaction(self.tb.tb_frame, self.tb)
if hasattr(self,'tb'):
del self.tb |
def is_hidden(path):
"""Whether file or directory is hidden"""
for name in path.split(os.sep):
if name != '.' and name != '..' and name and name[0] == '.':
return True
return False | Whether file or directory is hidden | Below is the the instruction that describes the task:
### Input:
Whether file or directory is hidden
### Response:
def is_hidden(path):
"""Whether file or directory is hidden"""
for name in path.split(os.sep):
if name != '.' and name != '..' and name and name[0] == '.':
return True
return False |
def write(self, path):
'''Write assembly oligos and (if applicable) primers to csv.
:param path: path to csv file, including .csv extension.
:type path: str
'''
with open(path, 'wb') as oligo_file:
oligo_writer = csv.writer(oligo_file, delimiter=',',
quoting=csv.QUOTE_MINIMAL)
oligo_writer.writerow(['name', 'oligo', 'notes'])
for i, oligo in enumerate(self.oligos):
name = 'oligo {}'.format(i + 1)
oligo_len = len(oligo)
if i != len(self.oligos) - 1:
oligo_tm = self.overlap_tms[i]
notes = 'oligo length: {}, '.format(oligo_len) + \
'overlap Tm: {:.2f}'.format(oligo_tm)
else:
notes = 'oligo length: {}'.format(oligo_len)
oligo_writer.writerow([name, oligo, notes])
if self.primers:
for i, (primer, melt) in enumerate(self.primers):
oligo_writer.writerow(['primer {}'.format(i + 1),
primer,
'Tm: {:.2f}'.format(melt)]) | Write assembly oligos and (if applicable) primers to csv.
:param path: path to csv file, including .csv extension.
:type path: str | Below is the the instruction that describes the task:
### Input:
Write assembly oligos and (if applicable) primers to csv.
:param path: path to csv file, including .csv extension.
:type path: str
### Response:
def write(self, path):
'''Write assembly oligos and (if applicable) primers to csv.
:param path: path to csv file, including .csv extension.
:type path: str
'''
with open(path, 'wb') as oligo_file:
oligo_writer = csv.writer(oligo_file, delimiter=',',
quoting=csv.QUOTE_MINIMAL)
oligo_writer.writerow(['name', 'oligo', 'notes'])
for i, oligo in enumerate(self.oligos):
name = 'oligo {}'.format(i + 1)
oligo_len = len(oligo)
if i != len(self.oligos) - 1:
oligo_tm = self.overlap_tms[i]
notes = 'oligo length: {}, '.format(oligo_len) + \
'overlap Tm: {:.2f}'.format(oligo_tm)
else:
notes = 'oligo length: {}'.format(oligo_len)
oligo_writer.writerow([name, oligo, notes])
if self.primers:
for i, (primer, melt) in enumerate(self.primers):
oligo_writer.writerow(['primer {}'.format(i + 1),
primer,
'Tm: {:.2f}'.format(melt)]) |
def sipdir_is_finished(sipdir):
"""Return the state of modeling and inversion for a given SIP dir. The
result does not take into account sensitivities or potentials, as
optionally generated by CRMod.
Parameters
----------
sipdir: string
Directory to check
Returns
-------
crmod_is_finished: bool
True if all tomodirs of this SIP directory contain finished modeling
results.
crtomo_is_finished: bool
True if all tomodirs of this SIP directory contain finished inversion
results.
"""
if not is_sipdir(sipdir):
raise Exception('Directory is not a valid SIP directory!')
subdirs_raw = sorted(glob.glob(sipdir + os.sep + 'invmod' + os.sep + '*'))
subdirs = [x for x in subdirs_raw if os.path.isdir(x)]
crmod_finished = True
crtomo_finished = True
for subdir in subdirs:
subcrmod, subcrtomo = td_is_finished(subdir)
if not subcrmod:
crmod_finished = False
if not subcrtomo:
crtomo_finished = False
return crmod_finished, crtomo_finished | Return the state of modeling and inversion for a given SIP dir. The
result does not take into account sensitivities or potentials, as
optionally generated by CRMod.
Parameters
----------
sipdir: string
Directory to check
Returns
-------
crmod_is_finished: bool
True if all tomodirs of this SIP directory contain finished modeling
results.
crtomo_is_finished: bool
True if all tomodirs of this SIP directory contain finished inversion
results. | Below is the the instruction that describes the task:
### Input:
Return the state of modeling and inversion for a given SIP dir. The
result does not take into account sensitivities or potentials, as
optionally generated by CRMod.
Parameters
----------
sipdir: string
Directory to check
Returns
-------
crmod_is_finished: bool
True if all tomodirs of this SIP directory contain finished modeling
results.
crtomo_is_finished: bool
True if all tomodirs of this SIP directory contain finished inversion
results.
### Response:
def sipdir_is_finished(sipdir):
"""Return the state of modeling and inversion for a given SIP dir. The
result does not take into account sensitivities or potentials, as
optionally generated by CRMod.
Parameters
----------
sipdir: string
Directory to check
Returns
-------
crmod_is_finished: bool
True if all tomodirs of this SIP directory contain finished modeling
results.
crtomo_is_finished: bool
True if all tomodirs of this SIP directory contain finished inversion
results.
"""
if not is_sipdir(sipdir):
raise Exception('Directory is not a valid SIP directory!')
subdirs_raw = sorted(glob.glob(sipdir + os.sep + 'invmod' + os.sep + '*'))
subdirs = [x for x in subdirs_raw if os.path.isdir(x)]
crmod_finished = True
crtomo_finished = True
for subdir in subdirs:
subcrmod, subcrtomo = td_is_finished(subdir)
if not subcrmod:
crmod_finished = False
if not subcrtomo:
crtomo_finished = False
return crmod_finished, crtomo_finished |
def scale(requestContext, seriesList, factor):
"""
Takes one metric or a wildcard seriesList followed by a constant, and
multiplies the datapoint by the constant provided at each point.
Example::
&target=scale(Server.instance01.threads.busy,10)
&target=scale(Server.instance*.threads.busy,10)
"""
for series in seriesList:
series.name = "scale(%s,%g)" % (series.name, float(factor))
series.pathExpression = series.name
for i, value in enumerate(series):
series[i] = safeMul(value, factor)
return seriesList | Takes one metric or a wildcard seriesList followed by a constant, and
multiplies the datapoint by the constant provided at each point.
Example::
&target=scale(Server.instance01.threads.busy,10)
&target=scale(Server.instance*.threads.busy,10) | Below is the the instruction that describes the task:
### Input:
Takes one metric or a wildcard seriesList followed by a constant, and
multiplies the datapoint by the constant provided at each point.
Example::
&target=scale(Server.instance01.threads.busy,10)
&target=scale(Server.instance*.threads.busy,10)
### Response:
def scale(requestContext, seriesList, factor):
"""
Takes one metric or a wildcard seriesList followed by a constant, and
multiplies the datapoint by the constant provided at each point.
Example::
&target=scale(Server.instance01.threads.busy,10)
&target=scale(Server.instance*.threads.busy,10)
"""
for series in seriesList:
series.name = "scale(%s,%g)" % (series.name, float(factor))
series.pathExpression = series.name
for i, value in enumerate(series):
series[i] = safeMul(value, factor)
return seriesList |
def dlogprior(self, param):
"""Value of derivative of prior depends on value of `prior`."""
assert param in self.freeparams, "Invalid param: {0}".format(param)
return self._dlogprior[param] | Value of derivative of prior depends on value of `prior`. | Below is the the instruction that describes the task:
### Input:
Value of derivative of prior depends on value of `prior`.
### Response:
def dlogprior(self, param):
"""Value of derivative of prior depends on value of `prior`."""
assert param in self.freeparams, "Invalid param: {0}".format(param)
return self._dlogprior[param] |
def _build(self, memory, query, memory_mask=None):
"""Perform a differentiable read.
Args:
memory: [batch_size, memory_size, memory_word_size]-shaped Tensor of
dtype float32. This represents, for each example and memory slot, a
single embedding to attend over.
query: [batch_size, query_word_size]-shaped Tensor of dtype float32.
Represents, for each example, a single embedding representing a query.
memory_mask: None or [batch_size, memory_size]-shaped Tensor of dtype
bool. An entry of False indicates that a memory slot should not enter
the resulting weighted sum. If None, all memory is used.
Returns:
An AttentionOutput instance containing:
read: [batch_size, memory_word_size]-shaped Tensor of dtype float32.
This represents, for each example, a weighted sum of the contents of
the memory.
weights: [batch_size, memory_size]-shaped Tensor of dtype float32. This
represents, for each example and memory slot, the attention weights
used to compute the read.
weight_logits: [batch_size, memory_size]-shaped Tensor of dtype float32.
This represents, for each example and memory slot, the logits of the
attention weights, that is, `weights` is calculated by taking the
softmax of the weight logits.
Raises:
UnderspecifiedError: if memory_word_size or query_word_size can not be
inferred.
IncompatibleShapeError: if memory, query, memory_mask, or output of
attention_logit_mod do not match expected shapes.
"""
if len(memory.get_shape()) != 3:
raise base.IncompatibleShapeError(
"memory must have shape [batch_size, memory_size, memory_word_size].")
if len(query.get_shape()) != 2:
raise base.IncompatibleShapeError(
"query must have shape [batch_size, query_word_size].")
if memory_mask is not None and len(memory_mask.get_shape()) != 2:
raise base.IncompatibleShapeError(
"memory_mask must have shape [batch_size, memory_size].")
# Ensure final dimensions are defined, else the attention logit module will
# be unable to infer input size when constructing variables.
inferred_memory_word_size = memory.get_shape()[2].value
inferred_query_word_size = query.get_shape()[1].value
if inferred_memory_word_size is None or inferred_query_word_size is None:
raise base.UnderspecifiedError(
"memory_word_size and query_word_size must be known at graph "
"construction time.")
memory_shape = tf.shape(memory)
batch_size = memory_shape[0]
memory_size = memory_shape[1]
query_shape = tf.shape(query)
query_batch_size = query_shape[0]
# Transform query to have same number of words as memory.
#
# expanded_query: [batch_size, memory_size, query_word_size].
expanded_query = tf.tile(tf.expand_dims(query, dim=1), [1, memory_size, 1])
# Compute attention weights for each memory slot.
#
# attention_weight_logits: [batch_size, memory_size]
with tf.control_dependencies(
[tf.assert_equal(batch_size, query_batch_size)]):
concatenated_embeddings = tf.concat(
values=[memory, expanded_query], axis=2)
batch_apply_attention_logit = basic.BatchApply(
self._attention_logit_mod, n_dims=2, name="batch_apply_attention_logit")
attention_weight_logits = batch_apply_attention_logit(
concatenated_embeddings)
# Note: basic.BatchApply() will automatically reshape the [batch_size *
# memory_size, 1]-shaped result of self._attention_logit_mod(...) into a
# [batch_size, memory_size, 1]-shaped Tensor. If
# self._attention_logit_mod(...) returns something with more dimensions,
# then attention_weight_logits will have extra dimensions, too.
if len(attention_weight_logits.get_shape()) != 3:
raise base.IncompatibleShapeError(
"attention_weight_logits must be a rank-3 Tensor. Are you sure that "
"attention_logit_mod() returned [batch_size * memory_size, 1]-shaped"
" Tensor?")
# Remove final length-1 dimension.
attention_weight_logits = tf.squeeze(attention_weight_logits, [2])
# Mask out ignored memory slots by assigning them very small logits. Ensures
# that every example has at least one valid memory slot, else we'd end up
# averaging all memory slots equally.
if memory_mask is not None:
num_remaining_memory_slots = tf.reduce_sum(
tf.cast(memory_mask, dtype=tf.int32), axis=[1])
with tf.control_dependencies(
[tf.assert_positive(num_remaining_memory_slots)]):
finfo = np.finfo(np.float32)
kept_indices = tf.cast(memory_mask, dtype=tf.float32)
ignored_indices = tf.cast(tf.logical_not(memory_mask), dtype=tf.float32)
lower_bound = finfo.max * kept_indices + finfo.min * ignored_indices
attention_weight_logits = tf.minimum(attention_weight_logits,
lower_bound)
# attended_memory: [batch_size, memory_word_size].
attention_weight = tf.reshape(
tf.nn.softmax(attention_weight_logits),
shape=[batch_size, memory_size, 1])
# The multiplication is elementwise and relies on broadcasting the weights
# across memory_word_size. Then we sum across the memory slots.
attended_memory = tf.reduce_sum(memory * attention_weight, axis=[1])
# Infer shape of result as much as possible.
inferred_batch_size, _, inferred_memory_word_size = (
memory.get_shape().as_list())
attended_memory.set_shape([inferred_batch_size, inferred_memory_word_size])
return AttentionOutput(
read=attended_memory,
weights=tf.squeeze(attention_weight, [2]),
weight_logits=attention_weight_logits) | Perform a differentiable read.
Args:
memory: [batch_size, memory_size, memory_word_size]-shaped Tensor of
dtype float32. This represents, for each example and memory slot, a
single embedding to attend over.
query: [batch_size, query_word_size]-shaped Tensor of dtype float32.
Represents, for each example, a single embedding representing a query.
memory_mask: None or [batch_size, memory_size]-shaped Tensor of dtype
bool. An entry of False indicates that a memory slot should not enter
the resulting weighted sum. If None, all memory is used.
Returns:
An AttentionOutput instance containing:
read: [batch_size, memory_word_size]-shaped Tensor of dtype float32.
This represents, for each example, a weighted sum of the contents of
the memory.
weights: [batch_size, memory_size]-shaped Tensor of dtype float32. This
represents, for each example and memory slot, the attention weights
used to compute the read.
weight_logits: [batch_size, memory_size]-shaped Tensor of dtype float32.
This represents, for each example and memory slot, the logits of the
attention weights, that is, `weights` is calculated by taking the
softmax of the weight logits.
Raises:
UnderspecifiedError: if memory_word_size or query_word_size can not be
inferred.
IncompatibleShapeError: if memory, query, memory_mask, or output of
attention_logit_mod do not match expected shapes. | Below is the the instruction that describes the task:
### Input:
Perform a differentiable read.
Args:
memory: [batch_size, memory_size, memory_word_size]-shaped Tensor of
dtype float32. This represents, for each example and memory slot, a
single embedding to attend over.
query: [batch_size, query_word_size]-shaped Tensor of dtype float32.
Represents, for each example, a single embedding representing a query.
memory_mask: None or [batch_size, memory_size]-shaped Tensor of dtype
bool. An entry of False indicates that a memory slot should not enter
the resulting weighted sum. If None, all memory is used.
Returns:
An AttentionOutput instance containing:
read: [batch_size, memory_word_size]-shaped Tensor of dtype float32.
This represents, for each example, a weighted sum of the contents of
the memory.
weights: [batch_size, memory_size]-shaped Tensor of dtype float32. This
represents, for each example and memory slot, the attention weights
used to compute the read.
weight_logits: [batch_size, memory_size]-shaped Tensor of dtype float32.
This represents, for each example and memory slot, the logits of the
attention weights, that is, `weights` is calculated by taking the
softmax of the weight logits.
Raises:
UnderspecifiedError: if memory_word_size or query_word_size can not be
inferred.
IncompatibleShapeError: if memory, query, memory_mask, or output of
attention_logit_mod do not match expected shapes.
### Response:
def _build(self, memory, query, memory_mask=None):
"""Perform a differentiable read.
Args:
memory: [batch_size, memory_size, memory_word_size]-shaped Tensor of
dtype float32. This represents, for each example and memory slot, a
single embedding to attend over.
query: [batch_size, query_word_size]-shaped Tensor of dtype float32.
Represents, for each example, a single embedding representing a query.
memory_mask: None or [batch_size, memory_size]-shaped Tensor of dtype
bool. An entry of False indicates that a memory slot should not enter
the resulting weighted sum. If None, all memory is used.
Returns:
An AttentionOutput instance containing:
read: [batch_size, memory_word_size]-shaped Tensor of dtype float32.
This represents, for each example, a weighted sum of the contents of
the memory.
weights: [batch_size, memory_size]-shaped Tensor of dtype float32. This
represents, for each example and memory slot, the attention weights
used to compute the read.
weight_logits: [batch_size, memory_size]-shaped Tensor of dtype float32.
This represents, for each example and memory slot, the logits of the
attention weights, that is, `weights` is calculated by taking the
softmax of the weight logits.
Raises:
UnderspecifiedError: if memory_word_size or query_word_size can not be
inferred.
IncompatibleShapeError: if memory, query, memory_mask, or output of
attention_logit_mod do not match expected shapes.
"""
if len(memory.get_shape()) != 3:
raise base.IncompatibleShapeError(
"memory must have shape [batch_size, memory_size, memory_word_size].")
if len(query.get_shape()) != 2:
raise base.IncompatibleShapeError(
"query must have shape [batch_size, query_word_size].")
if memory_mask is not None and len(memory_mask.get_shape()) != 2:
raise base.IncompatibleShapeError(
"memory_mask must have shape [batch_size, memory_size].")
# Ensure final dimensions are defined, else the attention logit module will
# be unable to infer input size when constructing variables.
inferred_memory_word_size = memory.get_shape()[2].value
inferred_query_word_size = query.get_shape()[1].value
if inferred_memory_word_size is None or inferred_query_word_size is None:
raise base.UnderspecifiedError(
"memory_word_size and query_word_size must be known at graph "
"construction time.")
memory_shape = tf.shape(memory)
batch_size = memory_shape[0]
memory_size = memory_shape[1]
query_shape = tf.shape(query)
query_batch_size = query_shape[0]
# Transform query to have same number of words as memory.
#
# expanded_query: [batch_size, memory_size, query_word_size].
expanded_query = tf.tile(tf.expand_dims(query, dim=1), [1, memory_size, 1])
# Compute attention weights for each memory slot.
#
# attention_weight_logits: [batch_size, memory_size]
with tf.control_dependencies(
[tf.assert_equal(batch_size, query_batch_size)]):
concatenated_embeddings = tf.concat(
values=[memory, expanded_query], axis=2)
batch_apply_attention_logit = basic.BatchApply(
self._attention_logit_mod, n_dims=2, name="batch_apply_attention_logit")
attention_weight_logits = batch_apply_attention_logit(
concatenated_embeddings)
# Note: basic.BatchApply() will automatically reshape the [batch_size *
# memory_size, 1]-shaped result of self._attention_logit_mod(...) into a
# [batch_size, memory_size, 1]-shaped Tensor. If
# self._attention_logit_mod(...) returns something with more dimensions,
# then attention_weight_logits will have extra dimensions, too.
if len(attention_weight_logits.get_shape()) != 3:
raise base.IncompatibleShapeError(
"attention_weight_logits must be a rank-3 Tensor. Are you sure that "
"attention_logit_mod() returned [batch_size * memory_size, 1]-shaped"
" Tensor?")
# Remove final length-1 dimension.
attention_weight_logits = tf.squeeze(attention_weight_logits, [2])
# Mask out ignored memory slots by assigning them very small logits. Ensures
# that every example has at least one valid memory slot, else we'd end up
# averaging all memory slots equally.
if memory_mask is not None:
num_remaining_memory_slots = tf.reduce_sum(
tf.cast(memory_mask, dtype=tf.int32), axis=[1])
with tf.control_dependencies(
[tf.assert_positive(num_remaining_memory_slots)]):
finfo = np.finfo(np.float32)
kept_indices = tf.cast(memory_mask, dtype=tf.float32)
ignored_indices = tf.cast(tf.logical_not(memory_mask), dtype=tf.float32)
lower_bound = finfo.max * kept_indices + finfo.min * ignored_indices
attention_weight_logits = tf.minimum(attention_weight_logits,
lower_bound)
# attended_memory: [batch_size, memory_word_size].
attention_weight = tf.reshape(
tf.nn.softmax(attention_weight_logits),
shape=[batch_size, memory_size, 1])
# The multiplication is elementwise and relies on broadcasting the weights
# across memory_word_size. Then we sum across the memory slots.
attended_memory = tf.reduce_sum(memory * attention_weight, axis=[1])
# Infer shape of result as much as possible.
inferred_batch_size, _, inferred_memory_word_size = (
memory.get_shape().as_list())
attended_memory.set_shape([inferred_batch_size, inferred_memory_word_size])
return AttentionOutput(
read=attended_memory,
weights=tf.squeeze(attention_weight, [2]),
weight_logits=attention_weight_logits) |
def cross_validate(self, ax):
'''
Performs the cross-validation step.
'''
# The CDPP to beat
cdpp_opt = self.get_cdpp_arr()
# Loop over all chunks
for b, brkpt in enumerate(self.breakpoints):
log.info("Cross-validating chunk %d/%d..." %
(b + 1, len(self.breakpoints)))
# Mask for current chunk
m = self.get_masked_chunk(b)
# Mask transits and outliers
time = self.time[m]
flux = self.fraw[m]
ferr = self.fraw_err[m]
med = np.nanmedian(self.fraw)
# Setup the GP
gp = GP(self.kernel, self.kernel_params, white=False)
gp.compute(time, ferr)
# The masks
masks = list(Chunks(np.arange(0, len(time)),
len(time) // self.cdivs))
# The pre-computed matrices
pre_v = [self.cv_precompute(mask, b) for mask in masks]
# Initialize with the nPLD solution
log_lam_opt = np.log10(self.lam[b])
scatter_opt = self.validation_scatter(
log_lam_opt, b, masks, pre_v, gp, flux, time, med)
log.info("Iter 0/%d: " % (self.piter) +
"logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam_opt]),
scatter_opt))
# Do `piter` iterations
for p in range(self.piter):
# Perturb the initial condition a bit
log_lam = np.array(
np.log10(self.lam[b])) * \
(1 + self.ppert * np.random.randn(len(self.lam[b])))
scatter = self.validation_scatter(
log_lam, b, masks, pre_v, gp, flux, time, med)
log.info("Initializing at: " +
"logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam]), scatter))
# Call the minimizer
log_lam, scatter, _, _, _, _ = \
fmin_powell(self.validation_scatter, log_lam,
args=(b, masks, pre_v, gp, flux, time, med),
maxfun=self.pmaxf, disp=False,
full_output=True)
# Did it improve the CDPP?
tmp = np.array(self.lam[b])
self.lam[b] = 10 ** log_lam
self.compute()
cdpp = self.get_cdpp_arr()[b]
self.lam[b] = tmp
if cdpp < cdpp_opt[b]:
cdpp_opt[b] = cdpp
log_lam_opt = log_lam
# Log it
log.info("Iter %d/%d: " % (p + 1, self.piter) +
"logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam]), scatter))
# The best solution
log.info("Found minimum: logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam_opt]),
scatter_opt))
self.lam[b] = 10 ** log_lam_opt
# We're just going to plot lambda as a function of chunk number
bs = np.arange(len(self.breakpoints))
color = ['k', 'b', 'r', 'g', 'y']
for n in range(self.pld_order):
ax[0].plot(bs + 1, [np.log10(self.lam[b][n])
for b in bs], '.', color=color[n])
ax[0].plot(bs + 1, [np.log10(self.lam[b][n])
for b in bs], '-', color=color[n], alpha=0.25)
ax[0].set_ylabel(r'$\log\Lambda$', fontsize=5)
ax[0].margins(0.1, 0.1)
ax[0].set_xticks(np.arange(1, len(self.breakpoints) + 1))
ax[0].set_xticklabels([])
# Now plot the CDPP
cdpp_arr = self.get_cdpp_arr()
ax[1].plot(bs + 1, cdpp_arr, 'b.')
ax[1].plot(bs + 1, cdpp_arr, 'b-', alpha=0.25)
ax[1].margins(0.1, 0.1)
ax[1].set_ylabel(r'Scatter (ppm)', fontsize=5)
ax[1].set_xlabel(r'Chunk', fontsize=5)
ax[1].set_xticks(np.arange(1, len(self.breakpoints) + 1)) | Performs the cross-validation step. | Below is the the instruction that describes the task:
### Input:
Performs the cross-validation step.
### Response:
def cross_validate(self, ax):
'''
Performs the cross-validation step.
'''
# The CDPP to beat
cdpp_opt = self.get_cdpp_arr()
# Loop over all chunks
for b, brkpt in enumerate(self.breakpoints):
log.info("Cross-validating chunk %d/%d..." %
(b + 1, len(self.breakpoints)))
# Mask for current chunk
m = self.get_masked_chunk(b)
# Mask transits and outliers
time = self.time[m]
flux = self.fraw[m]
ferr = self.fraw_err[m]
med = np.nanmedian(self.fraw)
# Setup the GP
gp = GP(self.kernel, self.kernel_params, white=False)
gp.compute(time, ferr)
# The masks
masks = list(Chunks(np.arange(0, len(time)),
len(time) // self.cdivs))
# The pre-computed matrices
pre_v = [self.cv_precompute(mask, b) for mask in masks]
# Initialize with the nPLD solution
log_lam_opt = np.log10(self.lam[b])
scatter_opt = self.validation_scatter(
log_lam_opt, b, masks, pre_v, gp, flux, time, med)
log.info("Iter 0/%d: " % (self.piter) +
"logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam_opt]),
scatter_opt))
# Do `piter` iterations
for p in range(self.piter):
# Perturb the initial condition a bit
log_lam = np.array(
np.log10(self.lam[b])) * \
(1 + self.ppert * np.random.randn(len(self.lam[b])))
scatter = self.validation_scatter(
log_lam, b, masks, pre_v, gp, flux, time, med)
log.info("Initializing at: " +
"logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam]), scatter))
# Call the minimizer
log_lam, scatter, _, _, _, _ = \
fmin_powell(self.validation_scatter, log_lam,
args=(b, masks, pre_v, gp, flux, time, med),
maxfun=self.pmaxf, disp=False,
full_output=True)
# Did it improve the CDPP?
tmp = np.array(self.lam[b])
self.lam[b] = 10 ** log_lam
self.compute()
cdpp = self.get_cdpp_arr()[b]
self.lam[b] = tmp
if cdpp < cdpp_opt[b]:
cdpp_opt[b] = cdpp
log_lam_opt = log_lam
# Log it
log.info("Iter %d/%d: " % (p + 1, self.piter) +
"logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam]), scatter))
# The best solution
log.info("Found minimum: logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam_opt]),
scatter_opt))
self.lam[b] = 10 ** log_lam_opt
# We're just going to plot lambda as a function of chunk number
bs = np.arange(len(self.breakpoints))
color = ['k', 'b', 'r', 'g', 'y']
for n in range(self.pld_order):
ax[0].plot(bs + 1, [np.log10(self.lam[b][n])
for b in bs], '.', color=color[n])
ax[0].plot(bs + 1, [np.log10(self.lam[b][n])
for b in bs], '-', color=color[n], alpha=0.25)
ax[0].set_ylabel(r'$\log\Lambda$', fontsize=5)
ax[0].margins(0.1, 0.1)
ax[0].set_xticks(np.arange(1, len(self.breakpoints) + 1))
ax[0].set_xticklabels([])
# Now plot the CDPP
cdpp_arr = self.get_cdpp_arr()
ax[1].plot(bs + 1, cdpp_arr, 'b.')
ax[1].plot(bs + 1, cdpp_arr, 'b-', alpha=0.25)
ax[1].margins(0.1, 0.1)
ax[1].set_ylabel(r'Scatter (ppm)', fontsize=5)
ax[1].set_xlabel(r'Chunk', fontsize=5)
ax[1].set_xticks(np.arange(1, len(self.breakpoints) + 1)) |
def experiment_status(args):
'''Show the status of experiment'''
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
result, response = check_rest_server_quick(rest_port)
if not result:
print_normal('Restful server is not running...')
else:
print(json.dumps(json.loads(response.text), indent=4, sort_keys=True, separators=(',', ':'))) | Show the status of experiment | Below is the the instruction that describes the task:
### Input:
Show the status of experiment
### Response:
def experiment_status(args):
'''Show the status of experiment'''
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
result, response = check_rest_server_quick(rest_port)
if not result:
print_normal('Restful server is not running...')
else:
print(json.dumps(json.loads(response.text), indent=4, sort_keys=True, separators=(',', ':'))) |
def parseFeed(filename, yesterday):
"""Parse an RSS feed and filter only entries that are newer than yesterday."""
dom = xml.dom.minidom.parse(filename)
getText = lambda node, tag: node.getElementsByTagName(tag)[0].childNodes[0].data
getNode = lambda tag: dom.getElementsByTagName(tag)
content = getNode('channel')[0] # Only one channel node
feedTitle = getText(content, 'title')
feedLink = getText(content, 'link')
feedDesc = getText(content, 'description')
feed = Feed(feedTitle, feedLink, feedDesc)
for item in getNode('item'):
itemDate = time.strptime(getText(item, 'pubDate'), '%a, %d %b %Y %H:%M:%S GMT')
if (itemDate > yesterday): # If newer than yesterday
feed.addItem(getText(item, 'title'),
getText(item, 'link'),
getText(item, 'description'),
getText(item, 'pubDate'))
return feed | Parse an RSS feed and filter only entries that are newer than yesterday. | Below is the the instruction that describes the task:
### Input:
Parse an RSS feed and filter only entries that are newer than yesterday.
### Response:
def parseFeed(filename, yesterday):
"""Parse an RSS feed and filter only entries that are newer than yesterday."""
dom = xml.dom.minidom.parse(filename)
getText = lambda node, tag: node.getElementsByTagName(tag)[0].childNodes[0].data
getNode = lambda tag: dom.getElementsByTagName(tag)
content = getNode('channel')[0] # Only one channel node
feedTitle = getText(content, 'title')
feedLink = getText(content, 'link')
feedDesc = getText(content, 'description')
feed = Feed(feedTitle, feedLink, feedDesc)
for item in getNode('item'):
itemDate = time.strptime(getText(item, 'pubDate'), '%a, %d %b %Y %H:%M:%S GMT')
if (itemDate > yesterday): # If newer than yesterday
feed.addItem(getText(item, 'title'),
getText(item, 'link'),
getText(item, 'description'),
getText(item, 'pubDate'))
return feed |
def trace_memory_start(self):
""" Starts measuring memory consumption """
self.trace_memory_clean_caches()
objgraph.show_growth(limit=30)
gc.collect()
self._memory_start = self.worker.get_memory()["total"] | Starts measuring memory consumption | Below is the the instruction that describes the task:
### Input:
Starts measuring memory consumption
### Response:
def trace_memory_start(self):
""" Starts measuring memory consumption """
self.trace_memory_clean_caches()
objgraph.show_growth(limit=30)
gc.collect()
self._memory_start = self.worker.get_memory()["total"] |
def get_sys_path(rcpath, app_name, section_name=None):
"""Return a folder path if it exists.
First will check if it is an existing system path, if it is, will return it
expanded and absoluted.
If this fails will look for the rcpath variable in the app_name rcfiles or
exclusively within the given section_name, if given.
Parameters
----------
rcpath: str
Existing folder path or variable name in app_name rcfile with an
existing one.
section_name: str
Name of a section in the app_name rcfile to look exclusively there for
variable names.
app_name: str
Name of the application to look for rcfile configuration files.
Returns
-------
sys_path: str
A expanded absolute file or folder path if the path exists.
Raises
------
IOError if the proposed sys_path does not exist.
"""
# first check if it is an existing path
if op.exists(rcpath):
return op.realpath(op.expanduser(rcpath))
# look for the rcfile
try:
settings = rcfile(app_name, section_name)
except:
raise
# look for the variable within the rcfile configutarions
try:
sys_path = op.expanduser(settings[rcpath])
except KeyError:
raise IOError('Could not find an existing variable with name {0} in'
' section {1} of {2}rc config setup. Maybe it is a '
' folder that could not be found.'.format(rcpath,
section_name,
app_name))
# found the variable, now check if it is an existing path
else:
if not op.exists(sys_path):
raise IOError('Could not find the path {3} indicated by the '
'variable {0} in section {1} of {2}rc config '
'setup.'.format(rcpath, section_name, app_name,
sys_path))
# expand the path and return
return op.realpath(op.expanduser(sys_path)) | Return a folder path if it exists.
First will check if it is an existing system path, if it is, will return it
expanded and absoluted.
If this fails will look for the rcpath variable in the app_name rcfiles or
exclusively within the given section_name, if given.
Parameters
----------
rcpath: str
Existing folder path or variable name in app_name rcfile with an
existing one.
section_name: str
Name of a section in the app_name rcfile to look exclusively there for
variable names.
app_name: str
Name of the application to look for rcfile configuration files.
Returns
-------
sys_path: str
A expanded absolute file or folder path if the path exists.
Raises
------
IOError if the proposed sys_path does not exist. | Below is the the instruction that describes the task:
### Input:
Return a folder path if it exists.
First will check if it is an existing system path, if it is, will return it
expanded and absoluted.
If this fails will look for the rcpath variable in the app_name rcfiles or
exclusively within the given section_name, if given.
Parameters
----------
rcpath: str
Existing folder path or variable name in app_name rcfile with an
existing one.
section_name: str
Name of a section in the app_name rcfile to look exclusively there for
variable names.
app_name: str
Name of the application to look for rcfile configuration files.
Returns
-------
sys_path: str
A expanded absolute file or folder path if the path exists.
Raises
------
IOError if the proposed sys_path does not exist.
### Response:
def get_sys_path(rcpath, app_name, section_name=None):
"""Return a folder path if it exists.
First will check if it is an existing system path, if it is, will return it
expanded and absoluted.
If this fails will look for the rcpath variable in the app_name rcfiles or
exclusively within the given section_name, if given.
Parameters
----------
rcpath: str
Existing folder path or variable name in app_name rcfile with an
existing one.
section_name: str
Name of a section in the app_name rcfile to look exclusively there for
variable names.
app_name: str
Name of the application to look for rcfile configuration files.
Returns
-------
sys_path: str
A expanded absolute file or folder path if the path exists.
Raises
------
IOError if the proposed sys_path does not exist.
"""
# first check if it is an existing path
if op.exists(rcpath):
return op.realpath(op.expanduser(rcpath))
# look for the rcfile
try:
settings = rcfile(app_name, section_name)
except:
raise
# look for the variable within the rcfile configutarions
try:
sys_path = op.expanduser(settings[rcpath])
except KeyError:
raise IOError('Could not find an existing variable with name {0} in'
' section {1} of {2}rc config setup. Maybe it is a '
' folder that could not be found.'.format(rcpath,
section_name,
app_name))
# found the variable, now check if it is an existing path
else:
if not op.exists(sys_path):
raise IOError('Could not find the path {3} indicated by the '
'variable {0} in section {1} of {2}rc config '
'setup.'.format(rcpath, section_name, app_name,
sys_path))
# expand the path and return
return op.realpath(op.expanduser(sys_path)) |
def byte(self):
"""Return a byte representation of ControlFlags."""
flags = int(self._in_use) << 7 \
| int(self._controller) << 6 \
| int(self._bit5) << 5 \
| int(self._bit4) << 4 \
| int(self._used_before) << 1
return flags | Return a byte representation of ControlFlags. | Below is the the instruction that describes the task:
### Input:
Return a byte representation of ControlFlags.
### Response:
def byte(self):
"""Return a byte representation of ControlFlags."""
flags = int(self._in_use) << 7 \
| int(self._controller) << 6 \
| int(self._bit5) << 5 \
| int(self._bit4) << 4 \
| int(self._used_before) << 1
return flags |
def _sign_data(secret, data):
"""
Sign data.
:param data: the string to sign
:return: string base64 encoding of the HMAC-SHA1 hash of the data parameter using {@code secretKey} as cipher key.
"""
sha1_hash = hmac.new(secret.encode(), data.encode(), sha1)
return binascii.b2a_base64(sha1_hash.digest())[:-1].decode('utf8') | Sign data.
:param data: the string to sign
:return: string base64 encoding of the HMAC-SHA1 hash of the data parameter using {@code secretKey} as cipher key. | Below is the the instruction that describes the task:
### Input:
Sign data.
:param data: the string to sign
:return: string base64 encoding of the HMAC-SHA1 hash of the data parameter using {@code secretKey} as cipher key.
### Response:
def _sign_data(secret, data):
"""
Sign data.
:param data: the string to sign
:return: string base64 encoding of the HMAC-SHA1 hash of the data parameter using {@code secretKey} as cipher key.
"""
sha1_hash = hmac.new(secret.encode(), data.encode(), sha1)
return binascii.b2a_base64(sha1_hash.digest())[:-1].decode('utf8') |
def t_filelist(self, s):
r' @\w+'
self.rv.append(Token(type='FILELIST', attr=s[1:])) | r' @\w+ | Below is the the instruction that describes the task:
### Input:
r' @\w+
### Response:
def t_filelist(self, s):
r' @\w+'
self.rv.append(Token(type='FILELIST', attr=s[1:])) |
def _force_xyz(self,x,y,z,i):
"""Evaluation of the i-th force component as a function of (x,y,z)"""
return -4.*numpy.pi*self._b*self._c\
*_forceInt(x,y,z,
lambda m: self._mdens(m),
self._b2,self._c2,i,glx=self._glx,glw=self._glw) | Evaluation of the i-th force component as a function of (x,y,z) | Below is the the instruction that describes the task:
### Input:
Evaluation of the i-th force component as a function of (x,y,z)
### Response:
def _force_xyz(self,x,y,z,i):
"""Evaluation of the i-th force component as a function of (x,y,z)"""
return -4.*numpy.pi*self._b*self._c\
*_forceInt(x,y,z,
lambda m: self._mdens(m),
self._b2,self._c2,i,glx=self._glx,glw=self._glw) |
def uniq(iterable):
"""
Yield the unique items of an iterable, preserving order.
http://mail.python.org/pipermail/tutor/2002-March/012930.html
Example:
>>> x = uniq([0, 0, 2, 6, 2, 0, 5])
>>> list(x)
[0, 2, 6, 5]
"""
temp_dict = {}
for e in iterable:
if e not in temp_dict:
yield temp_dict.setdefault(e, e) | Yield the unique items of an iterable, preserving order.
http://mail.python.org/pipermail/tutor/2002-March/012930.html
Example:
>>> x = uniq([0, 0, 2, 6, 2, 0, 5])
>>> list(x)
[0, 2, 6, 5] | Below is the the instruction that describes the task:
### Input:
Yield the unique items of an iterable, preserving order.
http://mail.python.org/pipermail/tutor/2002-March/012930.html
Example:
>>> x = uniq([0, 0, 2, 6, 2, 0, 5])
>>> list(x)
[0, 2, 6, 5]
### Response:
def uniq(iterable):
"""
Yield the unique items of an iterable, preserving order.
http://mail.python.org/pipermail/tutor/2002-March/012930.html
Example:
>>> x = uniq([0, 0, 2, 6, 2, 0, 5])
>>> list(x)
[0, 2, 6, 5]
"""
temp_dict = {}
for e in iterable:
if e not in temp_dict:
yield temp_dict.setdefault(e, e) |
def send_velocity_world_setpoint(self, vx, vy, vz, yawrate):
"""
Send Velocity in the world frame of reference setpoint.
vx, vy, vz are in m/s
yawrate is in degrees/s
"""
pk = CRTPPacket()
pk.port = CRTPPort.COMMANDER_GENERIC
pk.data = struct.pack('<Bffff', TYPE_VELOCITY_WORLD,
vx, vy, vz, yawrate)
self._cf.send_packet(pk) | Send Velocity in the world frame of reference setpoint.
vx, vy, vz are in m/s
yawrate is in degrees/s | Below is the the instruction that describes the task:
### Input:
Send Velocity in the world frame of reference setpoint.
vx, vy, vz are in m/s
yawrate is in degrees/s
### Response:
def send_velocity_world_setpoint(self, vx, vy, vz, yawrate):
"""
Send Velocity in the world frame of reference setpoint.
vx, vy, vz are in m/s
yawrate is in degrees/s
"""
pk = CRTPPacket()
pk.port = CRTPPort.COMMANDER_GENERIC
pk.data = struct.pack('<Bffff', TYPE_VELOCITY_WORLD,
vx, vy, vz, yawrate)
self._cf.send_packet(pk) |
def build_logger(
name=os.getenv(
"LOG_NAME",
"client"),
config="logging.json",
log_level=logging.INFO,
log_config_path="{}/logging.json".format(
os.getenv(
"LOG_CFG",
os.path.dirname(os.path.realpath(__file__))))):
"""build_logger
:param name: name that shows in the logger
:param config: name of the config file
:param log_level: level to log
:param log_config_path: path to log config file
"""
use_config = ("./log/{}").format(
"{}".format(
config))
if not os.path.exists(use_config):
use_config = log_config_path
if not os.path.exists(use_config):
use_config = ("./antinex_client/log/{}").format(
"logging.json")
# find the log processing
setup_logging(
default_level=log_level,
default_path=use_config)
return logging.getLogger(name) | build_logger
:param name: name that shows in the logger
:param config: name of the config file
:param log_level: level to log
:param log_config_path: path to log config file | Below is the the instruction that describes the task:
### Input:
build_logger
:param name: name that shows in the logger
:param config: name of the config file
:param log_level: level to log
:param log_config_path: path to log config file
### Response:
def build_logger(
name=os.getenv(
"LOG_NAME",
"client"),
config="logging.json",
log_level=logging.INFO,
log_config_path="{}/logging.json".format(
os.getenv(
"LOG_CFG",
os.path.dirname(os.path.realpath(__file__))))):
"""build_logger
:param name: name that shows in the logger
:param config: name of the config file
:param log_level: level to log
:param log_config_path: path to log config file
"""
use_config = ("./log/{}").format(
"{}".format(
config))
if not os.path.exists(use_config):
use_config = log_config_path
if not os.path.exists(use_config):
use_config = ("./antinex_client/log/{}").format(
"logging.json")
# find the log processing
setup_logging(
default_level=log_level,
default_path=use_config)
return logging.getLogger(name) |
def deleteUnused(self):
""" Delete any old snapshots in path, if not kept. """
(count, size) = (0, 0)
for (diff, path) in self.extraKeys.items():
if path.startswith("/"):
continue
keyName = self._keyName(diff.toUUID, diff.fromUUID, path)
count += 1
size += diff.size
if self._skipDryRun(logger, 'INFO')("Trash: %s", diff):
continue
try:
self.bucket.copy_key(theTrashPrefix + keyName, self.bucket.name, keyName)
self.bucket.delete_key(keyName)
except boto.exception.S3ResponseError as error:
logger.error("%s: %s", error.code, error.message)
try:
keyName = os.path.dirname(keyName) + Store.theInfoExtension
self.bucket.copy_key(theTrashPrefix + keyName, self.bucket.name, keyName)
self.bucket.delete_key(keyName)
except boto.exception.S3ResponseError as error:
logger.debug("%s: %s", error.code, error.message)
logger.info("Trashed %d diffs (%s)", count, humanize(size)) | Delete any old snapshots in path, if not kept. | Below is the the instruction that describes the task:
### Input:
Delete any old snapshots in path, if not kept.
### Response:
def deleteUnused(self):
""" Delete any old snapshots in path, if not kept. """
(count, size) = (0, 0)
for (diff, path) in self.extraKeys.items():
if path.startswith("/"):
continue
keyName = self._keyName(diff.toUUID, diff.fromUUID, path)
count += 1
size += diff.size
if self._skipDryRun(logger, 'INFO')("Trash: %s", diff):
continue
try:
self.bucket.copy_key(theTrashPrefix + keyName, self.bucket.name, keyName)
self.bucket.delete_key(keyName)
except boto.exception.S3ResponseError as error:
logger.error("%s: %s", error.code, error.message)
try:
keyName = os.path.dirname(keyName) + Store.theInfoExtension
self.bucket.copy_key(theTrashPrefix + keyName, self.bucket.name, keyName)
self.bucket.delete_key(keyName)
except boto.exception.S3ResponseError as error:
logger.debug("%s: %s", error.code, error.message)
logger.info("Trashed %d diffs (%s)", count, humanize(size)) |
def protocol_list(self):
"""
:return: visible protocols
:rtype: list of ProtocolAnalyzer
"""
result = []
for group in self.groups:
result.extend(group.protocols)
return result | :return: visible protocols
:rtype: list of ProtocolAnalyzer | Below is the the instruction that describes the task:
### Input:
:return: visible protocols
:rtype: list of ProtocolAnalyzer
### Response:
def protocol_list(self):
"""
:return: visible protocols
:rtype: list of ProtocolAnalyzer
"""
result = []
for group in self.groups:
result.extend(group.protocols)
return result |
def _returnRequestError(fn):
''' Decorator that captures requests.exceptions.RequestException errors
and returns them as an error message. If no error occurs the reture
value of the wrapped function is returned (normally None). '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.RequestException as e:
return "server returned status %s: %s" % (e.response.status_code, e.message)
return wrapped | Decorator that captures requests.exceptions.RequestException errors
and returns them as an error message. If no error occurs the reture
value of the wrapped function is returned (normally None). | Below is the the instruction that describes the task:
### Input:
Decorator that captures requests.exceptions.RequestException errors
and returns them as an error message. If no error occurs the reture
value of the wrapped function is returned (normally None).
### Response:
def _returnRequestError(fn):
''' Decorator that captures requests.exceptions.RequestException errors
and returns them as an error message. If no error occurs the reture
value of the wrapped function is returned (normally None). '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.RequestException as e:
return "server returned status %s: %s" % (e.response.status_code, e.message)
return wrapped |
def pants_setup_py(name, description, additional_classifiers=None, **kwargs):
"""Creates the setup_py for a pants artifact.
:param str name: The name of the package.
:param str description: A brief description of what the package provides.
:param list additional_classifiers: Any additional trove classifiers that apply to the package,
see: https://pypi.org/pypi?%3Aaction=list_classifiers
:param kwargs: Any additional keyword arguments to be passed to `setuptools.setup
<https://pythonhosted.org/setuptools/setuptools.html>`_.
:returns: A setup_py suitable for building and publishing pants components.
"""
if not name.startswith('pantsbuild.pants'):
raise ValueError("Pants distribution package names must start with 'pantsbuild.pants', "
"given {}".format(name))
standard_classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
# We know for a fact these OSs work but, for example, know Windows
# does not work yet. Take the conservative approach and only list OSs
# we know pants works with for now.
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Software Development :: Build Tools']
classifiers = OrderedSet(standard_classifiers + (additional_classifiers or []))
notes = PantsReleases.global_instance().notes_for_version(PANTS_SEMVER)
return PythonArtifact(
name=name,
version=VERSION,
description=description,
long_description=(_read_contents('src/python/pants/ABOUT.rst') + notes),
url='https://github.com/pantsbuild/pants',
license='Apache License, Version 2.0',
zip_safe=True,
classifiers=list(classifiers),
**kwargs) | Creates the setup_py for a pants artifact.
:param str name: The name of the package.
:param str description: A brief description of what the package provides.
:param list additional_classifiers: Any additional trove classifiers that apply to the package,
see: https://pypi.org/pypi?%3Aaction=list_classifiers
:param kwargs: Any additional keyword arguments to be passed to `setuptools.setup
<https://pythonhosted.org/setuptools/setuptools.html>`_.
:returns: A setup_py suitable for building and publishing pants components. | Below is the the instruction that describes the task:
### Input:
Creates the setup_py for a pants artifact.
:param str name: The name of the package.
:param str description: A brief description of what the package provides.
:param list additional_classifiers: Any additional trove classifiers that apply to the package,
see: https://pypi.org/pypi?%3Aaction=list_classifiers
:param kwargs: Any additional keyword arguments to be passed to `setuptools.setup
<https://pythonhosted.org/setuptools/setuptools.html>`_.
:returns: A setup_py suitable for building and publishing pants components.
### Response:
def pants_setup_py(name, description, additional_classifiers=None, **kwargs):
"""Creates the setup_py for a pants artifact.
:param str name: The name of the package.
:param str description: A brief description of what the package provides.
:param list additional_classifiers: Any additional trove classifiers that apply to the package,
see: https://pypi.org/pypi?%3Aaction=list_classifiers
:param kwargs: Any additional keyword arguments to be passed to `setuptools.setup
<https://pythonhosted.org/setuptools/setuptools.html>`_.
:returns: A setup_py suitable for building and publishing pants components.
"""
if not name.startswith('pantsbuild.pants'):
raise ValueError("Pants distribution package names must start with 'pantsbuild.pants', "
"given {}".format(name))
standard_classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
# We know for a fact these OSs work but, for example, know Windows
# does not work yet. Take the conservative approach and only list OSs
# we know pants works with for now.
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Software Development :: Build Tools']
classifiers = OrderedSet(standard_classifiers + (additional_classifiers or []))
notes = PantsReleases.global_instance().notes_for_version(PANTS_SEMVER)
return PythonArtifact(
name=name,
version=VERSION,
description=description,
long_description=(_read_contents('src/python/pants/ABOUT.rst') + notes),
url='https://github.com/pantsbuild/pants',
license='Apache License, Version 2.0',
zip_safe=True,
classifiers=list(classifiers),
**kwargs) |
def handle_relative(self, event):
"""Get the position of the mouse on the screen."""
delta_x, delta_y, delta_z = self._get_deltas(event)
if delta_x:
self.events.append(
self.emulate_rel(0x00,
delta_x,
self.timeval))
if delta_y:
self.events.append(
self.emulate_rel(0x01,
delta_y,
self.timeval))
if delta_z:
self.events.append(
self.emulate_rel(0x02,
delta_z,
self.timeval)) | Get the position of the mouse on the screen. | Below is the the instruction that describes the task:
### Input:
Get the position of the mouse on the screen.
### Response:
def handle_relative(self, event):
"""Get the position of the mouse on the screen."""
delta_x, delta_y, delta_z = self._get_deltas(event)
if delta_x:
self.events.append(
self.emulate_rel(0x00,
delta_x,
self.timeval))
if delta_y:
self.events.append(
self.emulate_rel(0x01,
delta_y,
self.timeval))
if delta_z:
self.events.append(
self.emulate_rel(0x02,
delta_z,
self.timeval)) |
def set_storage(self, storage):
"""Set storage backend for downloader
For full list of storage backend supported, please see :mod:`storage`.
Args:
storage (dict or BaseStorage): storage backend configuration or instance
"""
if isinstance(storage, BaseStorage):
self.storage = storage
elif isinstance(storage, dict):
if 'backend' not in storage and 'root_dir' in storage:
storage['backend'] = 'FileSystem'
try:
backend_cls = getattr(storage_package, storage['backend'])
except AttributeError:
try:
backend_cls = import_module(storage['backend'])
except ImportError:
self.logger.error('cannot find backend module %s',
storage['backend'])
sys.exit()
kwargs = storage.copy()
del kwargs['backend']
self.storage = backend_cls(**kwargs)
else:
raise TypeError('"storage" must be a storage object or dict') | Set storage backend for downloader
For full list of storage backend supported, please see :mod:`storage`.
Args:
storage (dict or BaseStorage): storage backend configuration or instance | Below is the the instruction that describes the task:
### Input:
Set storage backend for downloader
For full list of storage backend supported, please see :mod:`storage`.
Args:
storage (dict or BaseStorage): storage backend configuration or instance
### Response:
def set_storage(self, storage):
"""Set storage backend for downloader
For full list of storage backend supported, please see :mod:`storage`.
Args:
storage (dict or BaseStorage): storage backend configuration or instance
"""
if isinstance(storage, BaseStorage):
self.storage = storage
elif isinstance(storage, dict):
if 'backend' not in storage and 'root_dir' in storage:
storage['backend'] = 'FileSystem'
try:
backend_cls = getattr(storage_package, storage['backend'])
except AttributeError:
try:
backend_cls = import_module(storage['backend'])
except ImportError:
self.logger.error('cannot find backend module %s',
storage['backend'])
sys.exit()
kwargs = storage.copy()
del kwargs['backend']
self.storage = backend_cls(**kwargs)
else:
raise TypeError('"storage" must be a storage object or dict') |
def load_module(name):
"""Load the named module without registering it in ``sys.modules``.
Parameters
----------
name : string
Module name
Returns
-------
mod : module
Loaded module
"""
spec = importlib.util.find_spec(name)
mod = importlib.util.module_from_spec(spec)
mod.__spec__ = spec
mod.__loader__ = spec.loader
spec.loader.exec_module(mod)
return mod | Load the named module without registering it in ``sys.modules``.
Parameters
----------
name : string
Module name
Returns
-------
mod : module
Loaded module | Below is the the instruction that describes the task:
### Input:
Load the named module without registering it in ``sys.modules``.
Parameters
----------
name : string
Module name
Returns
-------
mod : module
Loaded module
### Response:
def load_module(name):
"""Load the named module without registering it in ``sys.modules``.
Parameters
----------
name : string
Module name
Returns
-------
mod : module
Loaded module
"""
spec = importlib.util.find_spec(name)
mod = importlib.util.module_from_spec(spec)
mod.__spec__ = spec
mod.__loader__ = spec.loader
spec.loader.exec_module(mod)
return mod |
def seq_save(self):
"""Save the current sequence
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_seq:
return
desc = self.seq_desc_pte.toPlainText()
self.cur_seq.description = desc
self.cur_seq.save() | Save the current sequence
:returns: None
:rtype: None
:raises: None | Below is the the instruction that describes the task:
### Input:
Save the current sequence
:returns: None
:rtype: None
:raises: None
### Response:
def seq_save(self):
"""Save the current sequence
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_seq:
return
desc = self.seq_desc_pte.toPlainText()
self.cur_seq.description = desc
self.cur_seq.save() |
def drug_matches_criteria(drug: Drug, **criteria: Dict[str, bool]) -> bool:
"""
Determines whether a drug, passed as an instance of :class:`.Drug`, matches
the specified criteria.
Args:
drug: a :class:`.Drug` instance
criteria: ``name=value`` pairs to match against the attributes of
the :class:`Drug` class. For example, you can include keyword
arguments like ``antidepressant=True``.
"""
for attribute, value in criteria.items():
if getattr(drug, attribute) != value:
return False
return True | Determines whether a drug, passed as an instance of :class:`.Drug`, matches
the specified criteria.
Args:
drug: a :class:`.Drug` instance
criteria: ``name=value`` pairs to match against the attributes of
the :class:`Drug` class. For example, you can include keyword
arguments like ``antidepressant=True``. | Below is the the instruction that describes the task:
### Input:
Determines whether a drug, passed as an instance of :class:`.Drug`, matches
the specified criteria.
Args:
drug: a :class:`.Drug` instance
criteria: ``name=value`` pairs to match against the attributes of
the :class:`Drug` class. For example, you can include keyword
arguments like ``antidepressant=True``.
### Response:
def drug_matches_criteria(drug: Drug, **criteria: Dict[str, bool]) -> bool:
"""
Determines whether a drug, passed as an instance of :class:`.Drug`, matches
the specified criteria.
Args:
drug: a :class:`.Drug` instance
criteria: ``name=value`` pairs to match against the attributes of
the :class:`Drug` class. For example, you can include keyword
arguments like ``antidepressant=True``.
"""
for attribute, value in criteria.items():
if getattr(drug, attribute) != value:
return False
return True |
def optimize_batch(self, batchsize=10, returns='best', paralell=True):
"""
Run multiple optimizations using different starting coordinates.
Args:
batchsize (`int`): Number of optimizations to run.
returns (`str`): If ``'all'``, return results of all optimizations,
ordered by stress, ascending. If ``'best'`` return the
projection with the lowest stress.
parallel (`bool`): If ``True``, run optimizations in parallel.
Examples:
.. doctest::
>>> import pandas as pd
>>> from pymds import DistanceMatrix
>>> dist = pd.DataFrame({
... 'a': [0.0, 1.0, 2.0],
... 'b': [1.0, 0.0, 3 ** 0.5],
... 'c': [2.0, 3 ** 0.5, 0.0]} , index=['a', 'b', 'c'])
>>> dm = DistanceMatrix(dist)
>>> batch = dm.optimize_batch(batchsize=3, returns='all')
>>> len(batch)
3
>>> type(batch[0])
<class 'pymds.mds.Projection'>
Returns:
`list` or :py:class:`pymds.Projection`:
`list`: Length batchsize, containing instances of
:py:class:`pymds.Projection`. Sorted by stress, ascending.
or
:py:class:`pymds.Projection`: Projection with the lowest
stress.
"""
if returns not in ('best', 'all'):
raise ValueError('returns must be either "best" or "all"')
starts = [np.random.rand(self.m * 2) * 10 for i in range(batchsize)]
if paralell:
with Pool() as p:
results = p.map(self.optimize, starts)
else:
results = map(self.optimize, starts)
results = sorted(results, key=lambda x: x.stress)
return results if returns == 'all' else results[0] | Run multiple optimizations using different starting coordinates.
Args:
batchsize (`int`): Number of optimizations to run.
returns (`str`): If ``'all'``, return results of all optimizations,
ordered by stress, ascending. If ``'best'`` return the
projection with the lowest stress.
parallel (`bool`): If ``True``, run optimizations in parallel.
Examples:
.. doctest::
>>> import pandas as pd
>>> from pymds import DistanceMatrix
>>> dist = pd.DataFrame({
... 'a': [0.0, 1.0, 2.0],
... 'b': [1.0, 0.0, 3 ** 0.5],
... 'c': [2.0, 3 ** 0.5, 0.0]} , index=['a', 'b', 'c'])
>>> dm = DistanceMatrix(dist)
>>> batch = dm.optimize_batch(batchsize=3, returns='all')
>>> len(batch)
3
>>> type(batch[0])
<class 'pymds.mds.Projection'>
Returns:
`list` or :py:class:`pymds.Projection`:
`list`: Length batchsize, containing instances of
:py:class:`pymds.Projection`. Sorted by stress, ascending.
or
:py:class:`pymds.Projection`: Projection with the lowest
stress. | Below is the the instruction that describes the task:
### Input:
Run multiple optimizations using different starting coordinates.
Args:
batchsize (`int`): Number of optimizations to run.
returns (`str`): If ``'all'``, return results of all optimizations,
ordered by stress, ascending. If ``'best'`` return the
projection with the lowest stress.
parallel (`bool`): If ``True``, run optimizations in parallel.
Examples:
.. doctest::
>>> import pandas as pd
>>> from pymds import DistanceMatrix
>>> dist = pd.DataFrame({
... 'a': [0.0, 1.0, 2.0],
... 'b': [1.0, 0.0, 3 ** 0.5],
... 'c': [2.0, 3 ** 0.5, 0.0]} , index=['a', 'b', 'c'])
>>> dm = DistanceMatrix(dist)
>>> batch = dm.optimize_batch(batchsize=3, returns='all')
>>> len(batch)
3
>>> type(batch[0])
<class 'pymds.mds.Projection'>
Returns:
`list` or :py:class:`pymds.Projection`:
`list`: Length batchsize, containing instances of
:py:class:`pymds.Projection`. Sorted by stress, ascending.
or
:py:class:`pymds.Projection`: Projection with the lowest
stress.
### Response:
def optimize_batch(self, batchsize=10, returns='best', paralell=True):
"""
Run multiple optimizations using different starting coordinates.
Args:
batchsize (`int`): Number of optimizations to run.
returns (`str`): If ``'all'``, return results of all optimizations,
ordered by stress, ascending. If ``'best'`` return the
projection with the lowest stress.
parallel (`bool`): If ``True``, run optimizations in parallel.
Examples:
.. doctest::
>>> import pandas as pd
>>> from pymds import DistanceMatrix
>>> dist = pd.DataFrame({
... 'a': [0.0, 1.0, 2.0],
... 'b': [1.0, 0.0, 3 ** 0.5],
... 'c': [2.0, 3 ** 0.5, 0.0]} , index=['a', 'b', 'c'])
>>> dm = DistanceMatrix(dist)
>>> batch = dm.optimize_batch(batchsize=3, returns='all')
>>> len(batch)
3
>>> type(batch[0])
<class 'pymds.mds.Projection'>
Returns:
`list` or :py:class:`pymds.Projection`:
`list`: Length batchsize, containing instances of
:py:class:`pymds.Projection`. Sorted by stress, ascending.
or
:py:class:`pymds.Projection`: Projection with the lowest
stress.
"""
if returns not in ('best', 'all'):
raise ValueError('returns must be either "best" or "all"')
starts = [np.random.rand(self.m * 2) * 10 for i in range(batchsize)]
if paralell:
with Pool() as p:
results = p.map(self.optimize, starts)
else:
results = map(self.optimize, starts)
results = sorted(results, key=lambda x: x.stress)
return results if returns == 'all' else results[0] |
def x509_rsa_load(txt):
""" So I get the same output format as loads produces
:param txt:
:return:
"""
pub_key = import_rsa_key(txt)
if isinstance(pub_key, rsa.RSAPublicKey):
return [("rsa", pub_key)] | So I get the same output format as loads produces
:param txt:
:return: | Below is the the instruction that describes the task:
### Input:
So I get the same output format as loads produces
:param txt:
:return:
### Response:
def x509_rsa_load(txt):
""" So I get the same output format as loads produces
:param txt:
:return:
"""
pub_key = import_rsa_key(txt)
if isinstance(pub_key, rsa.RSAPublicKey):
return [("rsa", pub_key)] |
def list_nodes_full(call=None):
'''
List devices, with all available information.
CLI Example:
.. code-block:: bash
salt-cloud -F
salt-cloud --full-query
salt-cloud -f list_nodes_full packet-provider
..
'''
if call == 'action':
raise SaltCloudException(
'The list_nodes_full function must be called with -f or --function.'
)
ret = {}
for device in get_devices_by_token():
ret[device.hostname] = device.__dict__
return ret | List devices, with all available information.
CLI Example:
.. code-block:: bash
salt-cloud -F
salt-cloud --full-query
salt-cloud -f list_nodes_full packet-provider
.. | Below is the the instruction that describes the task:
### Input:
List devices, with all available information.
CLI Example:
.. code-block:: bash
salt-cloud -F
salt-cloud --full-query
salt-cloud -f list_nodes_full packet-provider
..
### Response:
def list_nodes_full(call=None):
'''
List devices, with all available information.
CLI Example:
.. code-block:: bash
salt-cloud -F
salt-cloud --full-query
salt-cloud -f list_nodes_full packet-provider
..
'''
if call == 'action':
raise SaltCloudException(
'The list_nodes_full function must be called with -f or --function.'
)
ret = {}
for device in get_devices_by_token():
ret[device.hostname] = device.__dict__
return ret |
def _check_error(self, response, json_response=None):
''' Check for HTTP error code from the response, raise exception if there's any
Args:
response (object): Object returned by requests' `get` and `post`
methods
json_response (dict): JSON response, if applicable
Raises:
HTTPError: If the status code of response is either 4xx or 5xx
Returns:
True if status code is not error code
'''
# If status code is 4xx or 5xx, that should be an error
if response.status_code >= 400:
json_response = json_response or self._get_json_response(response)
err_cls = self._check_http_error_code(response.status_code)
try:
raise err_cls("%s error: %s" % (response.status_code, json_response["error"]["error_msg"]), response.status_code)
# This is to catch error when we post get oauth data
except TypeError:
raise err_cls("%s error: %s" % (response.status_code, json_response["error_description"]), response.status_code)
# Return True if everything is OK
return True | Check for HTTP error code from the response, raise exception if there's any
Args:
response (object): Object returned by requests' `get` and `post`
methods
json_response (dict): JSON response, if applicable
Raises:
HTTPError: If the status code of response is either 4xx or 5xx
Returns:
True if status code is not error code | Below is the the instruction that describes the task:
### Input:
Check for HTTP error code from the response, raise exception if there's any
Args:
response (object): Object returned by requests' `get` and `post`
methods
json_response (dict): JSON response, if applicable
Raises:
HTTPError: If the status code of response is either 4xx or 5xx
Returns:
True if status code is not error code
### Response:
def _check_error(self, response, json_response=None):
''' Check for HTTP error code from the response, raise exception if there's any
Args:
response (object): Object returned by requests' `get` and `post`
methods
json_response (dict): JSON response, if applicable
Raises:
HTTPError: If the status code of response is either 4xx or 5xx
Returns:
True if status code is not error code
'''
# If status code is 4xx or 5xx, that should be an error
if response.status_code >= 400:
json_response = json_response or self._get_json_response(response)
err_cls = self._check_http_error_code(response.status_code)
try:
raise err_cls("%s error: %s" % (response.status_code, json_response["error"]["error_msg"]), response.status_code)
# This is to catch error when we post get oauth data
except TypeError:
raise err_cls("%s error: %s" % (response.status_code, json_response["error_description"]), response.status_code)
# Return True if everything is OK
return True |
def set_max_connections_per_host(self, host_distance, max_connections):
"""
Sets the maximum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for
:attr:`~HostDistance.REMOTE`.
If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this
is not supported (there is always one connection per host, unless
the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`)
and using this will result in an :exc:`~.UnsupporteOperation`.
"""
if self.protocol_version >= 3:
raise UnsupportedOperation(
"Cluster.set_max_connections_per_host() only has an effect "
"when using protocol_version 1 or 2.")
self._max_connections_per_host[host_distance] = max_connections | Sets the maximum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for
:attr:`~HostDistance.REMOTE`.
If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this
is not supported (there is always one connection per host, unless
the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`)
and using this will result in an :exc:`~.UnsupporteOperation`. | Below is the the instruction that describes the task:
### Input:
Sets the maximum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for
:attr:`~HostDistance.REMOTE`.
If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this
is not supported (there is always one connection per host, unless
the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`)
and using this will result in an :exc:`~.UnsupporteOperation`.
### Response:
def set_max_connections_per_host(self, host_distance, max_connections):
"""
Sets the maximum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for
:attr:`~HostDistance.REMOTE`.
If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this
is not supported (there is always one connection per host, unless
the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`)
and using this will result in an :exc:`~.UnsupporteOperation`.
"""
if self.protocol_version >= 3:
raise UnsupportedOperation(
"Cluster.set_max_connections_per_host() only has an effect "
"when using protocol_version 1 or 2.")
self._max_connections_per_host[host_distance] = max_connections |
def _check(self):
"""
run layer specific checks
"""
if conf.contribs['LLDP'].strict_mode():
management_address_len = len(self.management_address)
if management_address_len == 0 or management_address_len > 31:
raise LLDPInvalidLengthField(
'management address must be 1..31 characters long - '
'got string of size {}'.format(management_address_len)) | run layer specific checks | Below is the the instruction that describes the task:
### Input:
run layer specific checks
### Response:
def _check(self):
"""
run layer specific checks
"""
if conf.contribs['LLDP'].strict_mode():
management_address_len = len(self.management_address)
if management_address_len == 0 or management_address_len > 31:
raise LLDPInvalidLengthField(
'management address must be 1..31 characters long - '
'got string of size {}'.format(management_address_len)) |
def call(self, method, params, callback=None):
"""Call a method on the server
Arguments:
method - the remote server method
params - an array of commands to send to the method
Keyword Arguments:
callback - a callback function containing the return data"""
cur_id = self._next_id()
if callback:
self._callbacks[cur_id] = callback
self.send({'msg': 'method', 'id': cur_id, 'method': method, 'params': params}) | Call a method on the server
Arguments:
method - the remote server method
params - an array of commands to send to the method
Keyword Arguments:
callback - a callback function containing the return data | Below is the the instruction that describes the task:
### Input:
Call a method on the server
Arguments:
method - the remote server method
params - an array of commands to send to the method
Keyword Arguments:
callback - a callback function containing the return data
### Response:
def call(self, method, params, callback=None):
"""Call a method on the server
Arguments:
method - the remote server method
params - an array of commands to send to the method
Keyword Arguments:
callback - a callback function containing the return data"""
cur_id = self._next_id()
if callback:
self._callbacks[cur_id] = callback
self.send({'msg': 'method', 'id': cur_id, 'method': method, 'params': params}) |
def remove_files():
"""
Removes any pre-existing tracks that were not just downloaded
"""
logger.info("Removing local track files that were not downloaded...")
files = [f for f in os.listdir('.') if os.path.isfile(f)]
for f in files:
if f not in fileToKeep:
os.remove(f) | Removes any pre-existing tracks that were not just downloaded | Below is the the instruction that describes the task:
### Input:
Removes any pre-existing tracks that were not just downloaded
### Response:
def remove_files():
"""
Removes any pre-existing tracks that were not just downloaded
"""
logger.info("Removing local track files that were not downloaded...")
files = [f for f in os.listdir('.') if os.path.isfile(f)]
for f in files:
if f not in fileToKeep:
os.remove(f) |
def dirichlet_covariance(alpha):
r"""Covariance matrix for Dirichlet distribution.
Parameters
----------
alpha : (M, ) ndarray
Parameters of Dirichlet distribution
Returns
-------
cov : (M, M) ndarray
Covariance matrix
"""
alpha0 = alpha.sum()
norm = alpha0 ** 2 * (alpha0 + 1.0)
"""Non normalized covariance"""
Z = -alpha[:, np.newaxis] * alpha[np.newaxis, :]
"""Correct diagonal"""
ind = np.diag_indices(Z.shape[0])
Z[ind] += alpha0 * alpha
"""Covariance matrix"""
cov = Z / norm
return cov | r"""Covariance matrix for Dirichlet distribution.
Parameters
----------
alpha : (M, ) ndarray
Parameters of Dirichlet distribution
Returns
-------
cov : (M, M) ndarray
Covariance matrix | Below is the the instruction that describes the task:
### Input:
r"""Covariance matrix for Dirichlet distribution.
Parameters
----------
alpha : (M, ) ndarray
Parameters of Dirichlet distribution
Returns
-------
cov : (M, M) ndarray
Covariance matrix
### Response:
def dirichlet_covariance(alpha):
r"""Covariance matrix for Dirichlet distribution.
Parameters
----------
alpha : (M, ) ndarray
Parameters of Dirichlet distribution
Returns
-------
cov : (M, M) ndarray
Covariance matrix
"""
alpha0 = alpha.sum()
norm = alpha0 ** 2 * (alpha0 + 1.0)
"""Non normalized covariance"""
Z = -alpha[:, np.newaxis] * alpha[np.newaxis, :]
"""Correct diagonal"""
ind = np.diag_indices(Z.shape[0])
Z[ind] += alpha0 * alpha
"""Covariance matrix"""
cov = Z / norm
return cov |
def astuple(self, encoding=None):
"""
Return a tuple suitable for import into a database.
Attributes field and extra field jsonified into strings. The order of
fields is such that they can be supplied as arguments for the query
defined in :attr:`gffutils.constants._INSERT`.
If `encoding` is not None, then convert string fields to unicode using
the provided encoding.
Returns
-------
Tuple
"""
if not encoding:
return (
self.id, self.seqid, self.source, self.featuretype, self.start,
self.end, self.score, self.strand, self.frame,
helpers._jsonify(self.attributes),
helpers._jsonify(self.extra), self.calc_bin()
)
return (
self.id.decode(encoding), self.seqid.decode(encoding),
self.source.decode(encoding), self.featuretype.decode(encoding),
self.start, self.end, self.score.decode(encoding),
self.strand.decode(encoding), self.frame.decode(encoding),
helpers._jsonify(self.attributes).decode(encoding),
helpers._jsonify(self.extra).decode(encoding), self.calc_bin()
) | Return a tuple suitable for import into a database.
Attributes field and extra field jsonified into strings. The order of
fields is such that they can be supplied as arguments for the query
defined in :attr:`gffutils.constants._INSERT`.
If `encoding` is not None, then convert string fields to unicode using
the provided encoding.
Returns
-------
Tuple | Below is the the instruction that describes the task:
### Input:
Return a tuple suitable for import into a database.
Attributes field and extra field jsonified into strings. The order of
fields is such that they can be supplied as arguments for the query
defined in :attr:`gffutils.constants._INSERT`.
If `encoding` is not None, then convert string fields to unicode using
the provided encoding.
Returns
-------
Tuple
### Response:
def astuple(self, encoding=None):
"""
Return a tuple suitable for import into a database.
Attributes field and extra field jsonified into strings. The order of
fields is such that they can be supplied as arguments for the query
defined in :attr:`gffutils.constants._INSERT`.
If `encoding` is not None, then convert string fields to unicode using
the provided encoding.
Returns
-------
Tuple
"""
if not encoding:
return (
self.id, self.seqid, self.source, self.featuretype, self.start,
self.end, self.score, self.strand, self.frame,
helpers._jsonify(self.attributes),
helpers._jsonify(self.extra), self.calc_bin()
)
return (
self.id.decode(encoding), self.seqid.decode(encoding),
self.source.decode(encoding), self.featuretype.decode(encoding),
self.start, self.end, self.score.decode(encoding),
self.strand.decode(encoding), self.frame.decode(encoding),
helpers._jsonify(self.attributes).decode(encoding),
helpers._jsonify(self.extra).decode(encoding), self.calc_bin()
) |
def set(self, item, column=None, value=None):
"""
Query or set the value of given item.
With one argument, return a dictionary of column/value pairs for the
specified item. With two arguments, return the current value of the
specified column. With three arguments, set the value of given column
in given item to the specified value.
:param item: item's identifier
:type item: str
:param column: column's identifier
:type column: str, int or None
:param value: new value
"""
if value is not None:
self._visual_drag.set(item, ttk.Treeview.column(self, column, 'id'), value)
return ttk.Treeview.set(self, item, column, value) | Query or set the value of given item.
With one argument, return a dictionary of column/value pairs for the
specified item. With two arguments, return the current value of the
specified column. With three arguments, set the value of given column
in given item to the specified value.
:param item: item's identifier
:type item: str
:param column: column's identifier
:type column: str, int or None
:param value: new value | Below is the the instruction that describes the task:
### Input:
Query or set the value of given item.
With one argument, return a dictionary of column/value pairs for the
specified item. With two arguments, return the current value of the
specified column. With three arguments, set the value of given column
in given item to the specified value.
:param item: item's identifier
:type item: str
:param column: column's identifier
:type column: str, int or None
:param value: new value
### Response:
def set(self, item, column=None, value=None):
"""
Query or set the value of given item.
With one argument, return a dictionary of column/value pairs for the
specified item. With two arguments, return the current value of the
specified column. With three arguments, set the value of given column
in given item to the specified value.
:param item: item's identifier
:type item: str
:param column: column's identifier
:type column: str, int or None
:param value: new value
"""
if value is not None:
self._visual_drag.set(item, ttk.Treeview.column(self, column, 'id'), value)
return ttk.Treeview.set(self, item, column, value) |
def has_register(self, register):
"""
Test if this circuit has the register r.
Args:
register (Register): a quantum or classical register.
Returns:
bool: True if the register is contained in this circuit.
"""
has_reg = False
if (isinstance(register, QuantumRegister) and
register in self.qregs):
has_reg = True
elif (isinstance(register, ClassicalRegister) and
register in self.cregs):
has_reg = True
return has_reg | Test if this circuit has the register r.
Args:
register (Register): a quantum or classical register.
Returns:
bool: True if the register is contained in this circuit. | Below is the the instruction that describes the task:
### Input:
Test if this circuit has the register r.
Args:
register (Register): a quantum or classical register.
Returns:
bool: True if the register is contained in this circuit.
### Response:
def has_register(self, register):
"""
Test if this circuit has the register r.
Args:
register (Register): a quantum or classical register.
Returns:
bool: True if the register is contained in this circuit.
"""
has_reg = False
if (isinstance(register, QuantumRegister) and
register in self.qregs):
has_reg = True
elif (isinstance(register, ClassicalRegister) and
register in self.cregs):
has_reg = True
return has_reg |
def get_packagers_of_package(config, package):
""" Retrieve the list of users who have commit on a package.
:arg config: a dict containing the fedmsg config
:arg package: the package you are interested in.
:return: a set listing all the fas usernames that have some ACL on package.
"""
if not _cache.is_configured:
_cache.configure(**config['fmn.rules.cache'])
key = cache_key_generator(get_packagers_of_package, package)
creator = lambda: _get_pkgdb2_packagers_for(config, package)
return _cache.get_or_create(key, creator) | Retrieve the list of users who have commit on a package.
:arg config: a dict containing the fedmsg config
:arg package: the package you are interested in.
:return: a set listing all the fas usernames that have some ACL on package. | Below is the the instruction that describes the task:
### Input:
Retrieve the list of users who have commit on a package.
:arg config: a dict containing the fedmsg config
:arg package: the package you are interested in.
:return: a set listing all the fas usernames that have some ACL on package.
### Response:
def get_packagers_of_package(config, package):
""" Retrieve the list of users who have commit on a package.
:arg config: a dict containing the fedmsg config
:arg package: the package you are interested in.
:return: a set listing all the fas usernames that have some ACL on package.
"""
if not _cache.is_configured:
_cache.configure(**config['fmn.rules.cache'])
key = cache_key_generator(get_packagers_of_package, package)
creator = lambda: _get_pkgdb2_packagers_for(config, package)
return _cache.get_or_create(key, creator) |
def trim(self: 'Variable', lower=None, upper=None) -> None:
"""Trim the value(s) of a |Variable| instance.
Usually, users do not need to apply function |trim| directly.
Instead, some |Variable| subclasses implement their own `trim`
methods relying on function |trim|. Model developers should
implement individual `trim` methods for their |Parameter| or
|Sequence| subclasses when their boundary values depend on the
actual project configuration (one example is soil moisture;
its lowest possible value should possibly be zero in all cases,
but its highest possible value could depend on another parameter
defining the maximum storage capacity).
For the following examples, we prepare a simple (not fully
functional) |Variable| subclass, making use of function |trim|
without any modifications. Function |trim| works slightly
different for variables handling |float|, |int|, and |bool|
values. We start with the most common content type |float|:
>>> from hydpy.core.variabletools import trim, Variable
>>> class Var(Variable):
... NDIM = 0
... TYPE = float
... SPAN = 1.0, 3.0
... trim = trim
... initinfo = 2.0, False
... __hydpy__connect_variable2subgroup__ = None
First, we enable the printing of warning messages raised by function
|trim|:
>>> from hydpy import pub
>>> pub.options.warntrim = True
When not passing boundary values, function |trim| extracts them from
class attribute `SPAN` of the given |Variable| instance, if available:
>>> var = Var(None)
>>> var.value = 2.0
>>> var.trim()
>>> var
var(2.0)
>>> var.value = 0.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `0.0` and `1.0`, respectively.
>>> var
var(1.0)
>>> var.value = 4.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `4.0` and `3.0`, respectively.
>>> var
var(3.0)
In the examples above, outlier values are set to the respective
boundary value, accompanied by suitable warning messages. For very
tiny deviations, which might be due to precision problems only,
outliers are trimmed but not reported:
>>> var.value = 1.0 - 1e-15
>>> var == 1.0
False
>>> trim(var)
>>> var == 1.0
True
>>> var.value = 3.0 + 1e-15
>>> var == 3.0
False
>>> var.trim()
>>> var == 3.0
True
Use arguments `lower` and `upper` to override the (eventually)
available `SPAN` entries:
>>> var.trim(lower=4.0)
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `3.0` and `4.0`, respectively.
>>> var.trim(upper=3.0)
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `4.0` and `3.0`, respectively.
Function |trim| interprets both |None| and |numpy.nan| values as if
no boundary value exists:
>>> import numpy
>>> var.value = 0.0
>>> var.trim(lower=numpy.nan)
>>> var.value = 5.0
>>> var.trim(upper=numpy.nan)
You can disable function |trim| via option |Options.trimvariables|:
>>> with pub.options.trimvariables(False):
... var.value = 5.0
... var.trim()
>>> var
var(5.0)
Alternatively, you can omit the warning messages only:
>>> with pub.options.warntrim(False):
... var.value = 5.0
... var.trim()
>>> var
var(3.0)
If a |Variable| subclass does not have (fixed) boundaries, give it
either no `SPAN` attribute or a |tuple| containing |None| values:
>>> del Var.SPAN
>>> var.value = 5.0
>>> var.trim()
>>> var
var(5.0)
>>> Var.SPAN = (None, None)
>>> var.trim()
>>> var
var(5.0)
The above examples deal with a 0-dimensional |Variable| subclass.
The following examples repeat the most relevant examples for a
2-dimensional subclass:
>>> Var.SPAN = 1.0, 3.0
>>> Var.NDIM = 2
>>> var.shape = 1, 3
>>> var.values = 2.0
>>> var.trim()
>>> var.values = 0.0, 1.0, 2.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 1. 2.]]` and `[[ 1. 1. 2.]]`, \
respectively.
>>> var
var([[1.0, 1.0, 2.0]])
>>> var.values = 2.0, 3.0, 4.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 2. 3. 4.]]` and `[[ 2. 3. 3.]]`, \
respectively.
>>> var
var([[2.0, 3.0, 3.0]])
>>> var.values = 1.0-1e-15, 2.0, 3.0+1e-15
>>> var.values == (1.0, 2.0, 3.0)
array([[False, True, False]], dtype=bool)
>>> var.trim()
>>> var.values == (1.0, 2.0, 3.0)
array([[ True, True, True]], dtype=bool)
>>> var.values = 0.0, 2.0, 4.0
>>> var.trim(lower=numpy.nan, upper=numpy.nan)
>>> var
var([[0.0, 2.0, 4.0]])
>>> var.trim(lower=[numpy.nan, 3.0, 3.0])
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 2. 4.]]` and `[[ 0. 3. 3.]]`, \
respectively.
>>> var.values = 0.0, 2.0, 4.0
>>> var.trim(upper=[numpy.nan, 1.0, numpy.nan])
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 2. 4.]]` and `[[ 1. 1. 4.]]`, \
respectively.
For |Variable| subclasses handling |float| values, setting outliers
to the respective boundary value might often be an acceptable approach.
However, this is often not the case for subclasses handling |int|
values, which often serve as option flags (e.g. to enable/disable
a certain hydrological process for different land-use types). Hence,
function |trim| raises an exception instead of a warning and does
not modify the wrong |int| value:
>>> Var.TYPE = int
>>> Var.NDIM = 0
>>> Var.SPAN = 1, 3
>>> var.value = 2
>>> var.trim()
>>> var
var(2)
>>> var.value = 0
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `0` of parameter `var` of element `?` is not valid.
>>> var
var(0)
>>> var.value = 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `4` of parameter `var` of element `?` is not valid.
>>> var
var(4)
>>> from hydpy import INT_NAN
>>> var.value = 0
>>> var.trim(lower=0)
>>> var.trim(lower=INT_NAN)
>>> var.value = 4
>>> var.trim(upper=4)
>>> var.trim(upper=INT_NAN)
>>> Var.SPAN = 1, None
>>> var.value = 0
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `0` of parameter `var` of element `?` is not valid.
>>> var
var(0)
>>> Var.SPAN = None, 3
>>> var.value = 0
>>> var.trim()
>>> var.value = 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `4` of parameter `var` of element `?` is not valid.
>>> del Var.SPAN
>>> var.value = 0
>>> var.trim()
>>> var.value = 4
>>> var.trim()
>>> Var.SPAN = 1, 3
>>> Var.NDIM = 2
>>> var.shape = (1, 3)
>>> var.values = 2
>>> var.trim()
>>> var.values = 0, 1, 2
>>> var.trim()
Traceback (most recent call last):
...
ValueError: At least one value of parameter `var` of element `?` \
is not valid.
>>> var
var([[0, 1, 2]])
>>> var.values = 2, 3, 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: At least one value of parameter `var` of element `?` \
is not valid.
>>> var
var([[2, 3, 4]])
>>> var.values = 0, 0, 2
>>> var.trim(lower=[0, INT_NAN, 2])
>>> var.values = 2, 4, 4
>>> var.trim(upper=[2, INT_NAN, 4])
For |bool| values, defining outliers does not make much sense,
which is why function |trim| does nothing when applied on
variables handling |bool| values:
>>> Var.TYPE = bool
>>> var.trim()
If function |trim| encounters an unmanageable type, it raises an
exception like the following:
>>> Var.TYPE = str
>>> var.trim()
Traceback (most recent call last):
...
NotImplementedError: Method `trim` can only be applied on parameters \
handling floating point, integer, or boolean values, but the "value type" \
of parameter `var` is `str`.
>>> pub.options.warntrim = False
"""
if hydpy.pub.options.trimvariables:
if lower is None:
lower = self.SPAN[0]
if upper is None:
upper = self.SPAN[1]
type_ = getattr(self, 'TYPE', float)
if type_ is float:
if self.NDIM == 0:
_trim_float_0d(self, lower, upper)
else:
_trim_float_nd(self, lower, upper)
elif type_ is int:
if self.NDIM == 0:
_trim_int_0d(self, lower, upper)
else:
_trim_int_nd(self, lower, upper)
elif type_ is bool:
pass
else:
raise NotImplementedError(
f'Method `trim` can only be applied on parameters '
f'handling floating point, integer, or boolean values, '
f'but the "value type" of parameter `{self.name}` is '
f'`{objecttools.classname(self.TYPE)}`.') | Trim the value(s) of a |Variable| instance.
Usually, users do not need to apply function |trim| directly.
Instead, some |Variable| subclasses implement their own `trim`
methods relying on function |trim|. Model developers should
implement individual `trim` methods for their |Parameter| or
|Sequence| subclasses when their boundary values depend on the
actual project configuration (one example is soil moisture;
its lowest possible value should possibly be zero in all cases,
but its highest possible value could depend on another parameter
defining the maximum storage capacity).
For the following examples, we prepare a simple (not fully
functional) |Variable| subclass, making use of function |trim|
without any modifications. Function |trim| works slightly
different for variables handling |float|, |int|, and |bool|
values. We start with the most common content type |float|:
>>> from hydpy.core.variabletools import trim, Variable
>>> class Var(Variable):
... NDIM = 0
... TYPE = float
... SPAN = 1.0, 3.0
... trim = trim
... initinfo = 2.0, False
... __hydpy__connect_variable2subgroup__ = None
First, we enable the printing of warning messages raised by function
|trim|:
>>> from hydpy import pub
>>> pub.options.warntrim = True
When not passing boundary values, function |trim| extracts them from
class attribute `SPAN` of the given |Variable| instance, if available:
>>> var = Var(None)
>>> var.value = 2.0
>>> var.trim()
>>> var
var(2.0)
>>> var.value = 0.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `0.0` and `1.0`, respectively.
>>> var
var(1.0)
>>> var.value = 4.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `4.0` and `3.0`, respectively.
>>> var
var(3.0)
In the examples above, outlier values are set to the respective
boundary value, accompanied by suitable warning messages. For very
tiny deviations, which might be due to precision problems only,
outliers are trimmed but not reported:
>>> var.value = 1.0 - 1e-15
>>> var == 1.0
False
>>> trim(var)
>>> var == 1.0
True
>>> var.value = 3.0 + 1e-15
>>> var == 3.0
False
>>> var.trim()
>>> var == 3.0
True
Use arguments `lower` and `upper` to override the (eventually)
available `SPAN` entries:
>>> var.trim(lower=4.0)
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `3.0` and `4.0`, respectively.
>>> var.trim(upper=3.0)
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `4.0` and `3.0`, respectively.
Function |trim| interprets both |None| and |numpy.nan| values as if
no boundary value exists:
>>> import numpy
>>> var.value = 0.0
>>> var.trim(lower=numpy.nan)
>>> var.value = 5.0
>>> var.trim(upper=numpy.nan)
You can disable function |trim| via option |Options.trimvariables|:
>>> with pub.options.trimvariables(False):
... var.value = 5.0
... var.trim()
>>> var
var(5.0)
Alternatively, you can omit the warning messages only:
>>> with pub.options.warntrim(False):
... var.value = 5.0
... var.trim()
>>> var
var(3.0)
If a |Variable| subclass does not have (fixed) boundaries, give it
either no `SPAN` attribute or a |tuple| containing |None| values:
>>> del Var.SPAN
>>> var.value = 5.0
>>> var.trim()
>>> var
var(5.0)
>>> Var.SPAN = (None, None)
>>> var.trim()
>>> var
var(5.0)
The above examples deal with a 0-dimensional |Variable| subclass.
The following examples repeat the most relevant examples for a
2-dimensional subclass:
>>> Var.SPAN = 1.0, 3.0
>>> Var.NDIM = 2
>>> var.shape = 1, 3
>>> var.values = 2.0
>>> var.trim()
>>> var.values = 0.0, 1.0, 2.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 1. 2.]]` and `[[ 1. 1. 2.]]`, \
respectively.
>>> var
var([[1.0, 1.0, 2.0]])
>>> var.values = 2.0, 3.0, 4.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 2. 3. 4.]]` and `[[ 2. 3. 3.]]`, \
respectively.
>>> var
var([[2.0, 3.0, 3.0]])
>>> var.values = 1.0-1e-15, 2.0, 3.0+1e-15
>>> var.values == (1.0, 2.0, 3.0)
array([[False, True, False]], dtype=bool)
>>> var.trim()
>>> var.values == (1.0, 2.0, 3.0)
array([[ True, True, True]], dtype=bool)
>>> var.values = 0.0, 2.0, 4.0
>>> var.trim(lower=numpy.nan, upper=numpy.nan)
>>> var
var([[0.0, 2.0, 4.0]])
>>> var.trim(lower=[numpy.nan, 3.0, 3.0])
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 2. 4.]]` and `[[ 0. 3. 3.]]`, \
respectively.
>>> var.values = 0.0, 2.0, 4.0
>>> var.trim(upper=[numpy.nan, 1.0, numpy.nan])
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 2. 4.]]` and `[[ 1. 1. 4.]]`, \
respectively.
For |Variable| subclasses handling |float| values, setting outliers
to the respective boundary value might often be an acceptable approach.
However, this is often not the case for subclasses handling |int|
values, which often serve as option flags (e.g. to enable/disable
a certain hydrological process for different land-use types). Hence,
function |trim| raises an exception instead of a warning and does
not modify the wrong |int| value:
>>> Var.TYPE = int
>>> Var.NDIM = 0
>>> Var.SPAN = 1, 3
>>> var.value = 2
>>> var.trim()
>>> var
var(2)
>>> var.value = 0
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `0` of parameter `var` of element `?` is not valid.
>>> var
var(0)
>>> var.value = 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `4` of parameter `var` of element `?` is not valid.
>>> var
var(4)
>>> from hydpy import INT_NAN
>>> var.value = 0
>>> var.trim(lower=0)
>>> var.trim(lower=INT_NAN)
>>> var.value = 4
>>> var.trim(upper=4)
>>> var.trim(upper=INT_NAN)
>>> Var.SPAN = 1, None
>>> var.value = 0
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `0` of parameter `var` of element `?` is not valid.
>>> var
var(0)
>>> Var.SPAN = None, 3
>>> var.value = 0
>>> var.trim()
>>> var.value = 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `4` of parameter `var` of element `?` is not valid.
>>> del Var.SPAN
>>> var.value = 0
>>> var.trim()
>>> var.value = 4
>>> var.trim()
>>> Var.SPAN = 1, 3
>>> Var.NDIM = 2
>>> var.shape = (1, 3)
>>> var.values = 2
>>> var.trim()
>>> var.values = 0, 1, 2
>>> var.trim()
Traceback (most recent call last):
...
ValueError: At least one value of parameter `var` of element `?` \
is not valid.
>>> var
var([[0, 1, 2]])
>>> var.values = 2, 3, 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: At least one value of parameter `var` of element `?` \
is not valid.
>>> var
var([[2, 3, 4]])
>>> var.values = 0, 0, 2
>>> var.trim(lower=[0, INT_NAN, 2])
>>> var.values = 2, 4, 4
>>> var.trim(upper=[2, INT_NAN, 4])
For |bool| values, defining outliers does not make much sense,
which is why function |trim| does nothing when applied on
variables handling |bool| values:
>>> Var.TYPE = bool
>>> var.trim()
If function |trim| encounters an unmanageable type, it raises an
exception like the following:
>>> Var.TYPE = str
>>> var.trim()
Traceback (most recent call last):
...
NotImplementedError: Method `trim` can only be applied on parameters \
handling floating point, integer, or boolean values, but the "value type" \
of parameter `var` is `str`.
>>> pub.options.warntrim = False | Below is the the instruction that describes the task:
### Input:
Trim the value(s) of a |Variable| instance.
Usually, users do not need to apply function |trim| directly.
Instead, some |Variable| subclasses implement their own `trim`
methods relying on function |trim|. Model developers should
implement individual `trim` methods for their |Parameter| or
|Sequence| subclasses when their boundary values depend on the
actual project configuration (one example is soil moisture;
its lowest possible value should possibly be zero in all cases,
but its highest possible value could depend on another parameter
defining the maximum storage capacity).
For the following examples, we prepare a simple (not fully
functional) |Variable| subclass, making use of function |trim|
without any modifications. Function |trim| works slightly
different for variables handling |float|, |int|, and |bool|
values. We start with the most common content type |float|:
>>> from hydpy.core.variabletools import trim, Variable
>>> class Var(Variable):
... NDIM = 0
... TYPE = float
... SPAN = 1.0, 3.0
... trim = trim
... initinfo = 2.0, False
... __hydpy__connect_variable2subgroup__ = None
First, we enable the printing of warning messages raised by function
|trim|:
>>> from hydpy import pub
>>> pub.options.warntrim = True
When not passing boundary values, function |trim| extracts them from
class attribute `SPAN` of the given |Variable| instance, if available:
>>> var = Var(None)
>>> var.value = 2.0
>>> var.trim()
>>> var
var(2.0)
>>> var.value = 0.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `0.0` and `1.0`, respectively.
>>> var
var(1.0)
>>> var.value = 4.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `4.0` and `3.0`, respectively.
>>> var
var(3.0)
In the examples above, outlier values are set to the respective
boundary value, accompanied by suitable warning messages. For very
tiny deviations, which might be due to precision problems only,
outliers are trimmed but not reported:
>>> var.value = 1.0 - 1e-15
>>> var == 1.0
False
>>> trim(var)
>>> var == 1.0
True
>>> var.value = 3.0 + 1e-15
>>> var == 3.0
False
>>> var.trim()
>>> var == 3.0
True
Use arguments `lower` and `upper` to override the (eventually)
available `SPAN` entries:
>>> var.trim(lower=4.0)
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `3.0` and `4.0`, respectively.
>>> var.trim(upper=3.0)
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `4.0` and `3.0`, respectively.
Function |trim| interprets both |None| and |numpy.nan| values as if
no boundary value exists:
>>> import numpy
>>> var.value = 0.0
>>> var.trim(lower=numpy.nan)
>>> var.value = 5.0
>>> var.trim(upper=numpy.nan)
You can disable function |trim| via option |Options.trimvariables|:
>>> with pub.options.trimvariables(False):
... var.value = 5.0
... var.trim()
>>> var
var(5.0)
Alternatively, you can omit the warning messages only:
>>> with pub.options.warntrim(False):
... var.value = 5.0
... var.trim()
>>> var
var(3.0)
If a |Variable| subclass does not have (fixed) boundaries, give it
either no `SPAN` attribute or a |tuple| containing |None| values:
>>> del Var.SPAN
>>> var.value = 5.0
>>> var.trim()
>>> var
var(5.0)
>>> Var.SPAN = (None, None)
>>> var.trim()
>>> var
var(5.0)
The above examples deal with a 0-dimensional |Variable| subclass.
The following examples repeat the most relevant examples for a
2-dimensional subclass:
>>> Var.SPAN = 1.0, 3.0
>>> Var.NDIM = 2
>>> var.shape = 1, 3
>>> var.values = 2.0
>>> var.trim()
>>> var.values = 0.0, 1.0, 2.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 1. 2.]]` and `[[ 1. 1. 2.]]`, \
respectively.
>>> var
var([[1.0, 1.0, 2.0]])
>>> var.values = 2.0, 3.0, 4.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 2. 3. 4.]]` and `[[ 2. 3. 3.]]`, \
respectively.
>>> var
var([[2.0, 3.0, 3.0]])
>>> var.values = 1.0-1e-15, 2.0, 3.0+1e-15
>>> var.values == (1.0, 2.0, 3.0)
array([[False, True, False]], dtype=bool)
>>> var.trim()
>>> var.values == (1.0, 2.0, 3.0)
array([[ True, True, True]], dtype=bool)
>>> var.values = 0.0, 2.0, 4.0
>>> var.trim(lower=numpy.nan, upper=numpy.nan)
>>> var
var([[0.0, 2.0, 4.0]])
>>> var.trim(lower=[numpy.nan, 3.0, 3.0])
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 2. 4.]]` and `[[ 0. 3. 3.]]`, \
respectively.
>>> var.values = 0.0, 2.0, 4.0
>>> var.trim(upper=[numpy.nan, 1.0, numpy.nan])
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 2. 4.]]` and `[[ 1. 1. 4.]]`, \
respectively.
For |Variable| subclasses handling |float| values, setting outliers
to the respective boundary value might often be an acceptable approach.
However, this is often not the case for subclasses handling |int|
values, which often serve as option flags (e.g. to enable/disable
a certain hydrological process for different land-use types). Hence,
function |trim| raises an exception instead of a warning and does
not modify the wrong |int| value:
>>> Var.TYPE = int
>>> Var.NDIM = 0
>>> Var.SPAN = 1, 3
>>> var.value = 2
>>> var.trim()
>>> var
var(2)
>>> var.value = 0
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `0` of parameter `var` of element `?` is not valid.
>>> var
var(0)
>>> var.value = 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `4` of parameter `var` of element `?` is not valid.
>>> var
var(4)
>>> from hydpy import INT_NAN
>>> var.value = 0
>>> var.trim(lower=0)
>>> var.trim(lower=INT_NAN)
>>> var.value = 4
>>> var.trim(upper=4)
>>> var.trim(upper=INT_NAN)
>>> Var.SPAN = 1, None
>>> var.value = 0
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `0` of parameter `var` of element `?` is not valid.
>>> var
var(0)
>>> Var.SPAN = None, 3
>>> var.value = 0
>>> var.trim()
>>> var.value = 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `4` of parameter `var` of element `?` is not valid.
>>> del Var.SPAN
>>> var.value = 0
>>> var.trim()
>>> var.value = 4
>>> var.trim()
>>> Var.SPAN = 1, 3
>>> Var.NDIM = 2
>>> var.shape = (1, 3)
>>> var.values = 2
>>> var.trim()
>>> var.values = 0, 1, 2
>>> var.trim()
Traceback (most recent call last):
...
ValueError: At least one value of parameter `var` of element `?` \
is not valid.
>>> var
var([[0, 1, 2]])
>>> var.values = 2, 3, 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: At least one value of parameter `var` of element `?` \
is not valid.
>>> var
var([[2, 3, 4]])
>>> var.values = 0, 0, 2
>>> var.trim(lower=[0, INT_NAN, 2])
>>> var.values = 2, 4, 4
>>> var.trim(upper=[2, INT_NAN, 4])
For |bool| values, defining outliers does not make much sense,
which is why function |trim| does nothing when applied on
variables handling |bool| values:
>>> Var.TYPE = bool
>>> var.trim()
If function |trim| encounters an unmanageable type, it raises an
exception like the following:
>>> Var.TYPE = str
>>> var.trim()
Traceback (most recent call last):
...
NotImplementedError: Method `trim` can only be applied on parameters \
handling floating point, integer, or boolean values, but the "value type" \
of parameter `var` is `str`.
>>> pub.options.warntrim = False
### Response:
def trim(self: 'Variable', lower=None, upper=None) -> None:
"""Trim the value(s) of a |Variable| instance.
Usually, users do not need to apply function |trim| directly.
Instead, some |Variable| subclasses implement their own `trim`
methods relying on function |trim|. Model developers should
implement individual `trim` methods for their |Parameter| or
|Sequence| subclasses when their boundary values depend on the
actual project configuration (one example is soil moisture;
its lowest possible value should possibly be zero in all cases,
but its highest possible value could depend on another parameter
defining the maximum storage capacity).
For the following examples, we prepare a simple (not fully
functional) |Variable| subclass, making use of function |trim|
without any modifications. Function |trim| works slightly
different for variables handling |float|, |int|, and |bool|
values. We start with the most common content type |float|:
>>> from hydpy.core.variabletools import trim, Variable
>>> class Var(Variable):
... NDIM = 0
... TYPE = float
... SPAN = 1.0, 3.0
... trim = trim
... initinfo = 2.0, False
... __hydpy__connect_variable2subgroup__ = None
First, we enable the printing of warning messages raised by function
|trim|:
>>> from hydpy import pub
>>> pub.options.warntrim = True
When not passing boundary values, function |trim| extracts them from
class attribute `SPAN` of the given |Variable| instance, if available:
>>> var = Var(None)
>>> var.value = 2.0
>>> var.trim()
>>> var
var(2.0)
>>> var.value = 0.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `0.0` and `1.0`, respectively.
>>> var
var(1.0)
>>> var.value = 4.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `4.0` and `3.0`, respectively.
>>> var
var(3.0)
In the examples above, outlier values are set to the respective
boundary value, accompanied by suitable warning messages. For very
tiny deviations, which might be due to precision problems only,
outliers are trimmed but not reported:
>>> var.value = 1.0 - 1e-15
>>> var == 1.0
False
>>> trim(var)
>>> var == 1.0
True
>>> var.value = 3.0 + 1e-15
>>> var == 3.0
False
>>> var.trim()
>>> var == 3.0
True
Use arguments `lower` and `upper` to override the (eventually)
available `SPAN` entries:
>>> var.trim(lower=4.0)
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `3.0` and `4.0`, respectively.
>>> var.trim(upper=3.0)
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `4.0` and `3.0`, respectively.
Function |trim| interprets both |None| and |numpy.nan| values as if
no boundary value exists:
>>> import numpy
>>> var.value = 0.0
>>> var.trim(lower=numpy.nan)
>>> var.value = 5.0
>>> var.trim(upper=numpy.nan)
You can disable function |trim| via option |Options.trimvariables|:
>>> with pub.options.trimvariables(False):
... var.value = 5.0
... var.trim()
>>> var
var(5.0)
Alternatively, you can omit the warning messages only:
>>> with pub.options.warntrim(False):
... var.value = 5.0
... var.trim()
>>> var
var(3.0)
If a |Variable| subclass does not have (fixed) boundaries, give it
either no `SPAN` attribute or a |tuple| containing |None| values:
>>> del Var.SPAN
>>> var.value = 5.0
>>> var.trim()
>>> var
var(5.0)
>>> Var.SPAN = (None, None)
>>> var.trim()
>>> var
var(5.0)
The above examples deal with a 0-dimensional |Variable| subclass.
The following examples repeat the most relevant examples for a
2-dimensional subclass:
>>> Var.SPAN = 1.0, 3.0
>>> Var.NDIM = 2
>>> var.shape = 1, 3
>>> var.values = 2.0
>>> var.trim()
>>> var.values = 0.0, 1.0, 2.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 1. 2.]]` and `[[ 1. 1. 2.]]`, \
respectively.
>>> var
var([[1.0, 1.0, 2.0]])
>>> var.values = 2.0, 3.0, 4.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 2. 3. 4.]]` and `[[ 2. 3. 3.]]`, \
respectively.
>>> var
var([[2.0, 3.0, 3.0]])
>>> var.values = 1.0-1e-15, 2.0, 3.0+1e-15
>>> var.values == (1.0, 2.0, 3.0)
array([[False, True, False]], dtype=bool)
>>> var.trim()
>>> var.values == (1.0, 2.0, 3.0)
array([[ True, True, True]], dtype=bool)
>>> var.values = 0.0, 2.0, 4.0
>>> var.trim(lower=numpy.nan, upper=numpy.nan)
>>> var
var([[0.0, 2.0, 4.0]])
>>> var.trim(lower=[numpy.nan, 3.0, 3.0])
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 2. 4.]]` and `[[ 0. 3. 3.]]`, \
respectively.
>>> var.values = 0.0, 2.0, 4.0
>>> var.trim(upper=[numpy.nan, 1.0, numpy.nan])
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 2. 4.]]` and `[[ 1. 1. 4.]]`, \
respectively.
For |Variable| subclasses handling |float| values, setting outliers
to the respective boundary value might often be an acceptable approach.
However, this is often not the case for subclasses handling |int|
values, which often serve as option flags (e.g. to enable/disable
a certain hydrological process for different land-use types). Hence,
function |trim| raises an exception instead of a warning and does
not modify the wrong |int| value:
>>> Var.TYPE = int
>>> Var.NDIM = 0
>>> Var.SPAN = 1, 3
>>> var.value = 2
>>> var.trim()
>>> var
var(2)
>>> var.value = 0
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `0` of parameter `var` of element `?` is not valid.
>>> var
var(0)
>>> var.value = 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `4` of parameter `var` of element `?` is not valid.
>>> var
var(4)
>>> from hydpy import INT_NAN
>>> var.value = 0
>>> var.trim(lower=0)
>>> var.trim(lower=INT_NAN)
>>> var.value = 4
>>> var.trim(upper=4)
>>> var.trim(upper=INT_NAN)
>>> Var.SPAN = 1, None
>>> var.value = 0
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `0` of parameter `var` of element `?` is not valid.
>>> var
var(0)
>>> Var.SPAN = None, 3
>>> var.value = 0
>>> var.trim()
>>> var.value = 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `4` of parameter `var` of element `?` is not valid.
>>> del Var.SPAN
>>> var.value = 0
>>> var.trim()
>>> var.value = 4
>>> var.trim()
>>> Var.SPAN = 1, 3
>>> Var.NDIM = 2
>>> var.shape = (1, 3)
>>> var.values = 2
>>> var.trim()
>>> var.values = 0, 1, 2
>>> var.trim()
Traceback (most recent call last):
...
ValueError: At least one value of parameter `var` of element `?` \
is not valid.
>>> var
var([[0, 1, 2]])
>>> var.values = 2, 3, 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: At least one value of parameter `var` of element `?` \
is not valid.
>>> var
var([[2, 3, 4]])
>>> var.values = 0, 0, 2
>>> var.trim(lower=[0, INT_NAN, 2])
>>> var.values = 2, 4, 4
>>> var.trim(upper=[2, INT_NAN, 4])
For |bool| values, defining outliers does not make much sense,
which is why function |trim| does nothing when applied on
variables handling |bool| values:
>>> Var.TYPE = bool
>>> var.trim()
If function |trim| encounters an unmanageable type, it raises an
exception like the following:
>>> Var.TYPE = str
>>> var.trim()
Traceback (most recent call last):
...
NotImplementedError: Method `trim` can only be applied on parameters \
handling floating point, integer, or boolean values, but the "value type" \
of parameter `var` is `str`.
>>> pub.options.warntrim = False
"""
if hydpy.pub.options.trimvariables:
if lower is None:
lower = self.SPAN[0]
if upper is None:
upper = self.SPAN[1]
type_ = getattr(self, 'TYPE', float)
if type_ is float:
if self.NDIM == 0:
_trim_float_0d(self, lower, upper)
else:
_trim_float_nd(self, lower, upper)
elif type_ is int:
if self.NDIM == 0:
_trim_int_0d(self, lower, upper)
else:
_trim_int_nd(self, lower, upper)
elif type_ is bool:
pass
else:
raise NotImplementedError(
f'Method `trim` can only be applied on parameters '
f'handling floating point, integer, or boolean values, '
f'but the "value type" of parameter `{self.name}` is '
f'`{objecttools.classname(self.TYPE)}`.') |
def parse(self, pointer):
"""parse pointer into tokens"""
if isinstance(pointer, Pointer):
return pointer.tokens[:]
elif pointer == '':
return []
tokens = []
staged, _, children = pointer.partition('/')
if staged:
try:
token = StagesToken(staged)
token.last = False
tokens.append(token)
except ValueError:
raise ParseError('pointer must start with / or int', pointer)
if _:
for part in children.split('/'):
part = part.replace('~1', '/')
part = part.replace('~0', '~')
token = ChildToken(part)
token.last = False
tokens.append(token)
return tokens | parse pointer into tokens | Below is the the instruction that describes the task:
### Input:
parse pointer into tokens
### Response:
def parse(self, pointer):
"""parse pointer into tokens"""
if isinstance(pointer, Pointer):
return pointer.tokens[:]
elif pointer == '':
return []
tokens = []
staged, _, children = pointer.partition('/')
if staged:
try:
token = StagesToken(staged)
token.last = False
tokens.append(token)
except ValueError:
raise ParseError('pointer must start with / or int', pointer)
if _:
for part in children.split('/'):
part = part.replace('~1', '/')
part = part.replace('~0', '~')
token = ChildToken(part)
token.last = False
tokens.append(token)
return tokens |
def convert_to_record(func):
"""Wrap mongodb record to a dict record with default value None
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
result = func(self, *args, **kwargs)
if result is not None:
if isinstance(result, dict):
return _record(result)
return (_record(i) for i in result)
return result
return wrapper | Wrap mongodb record to a dict record with default value None | Below is the the instruction that describes the task:
### Input:
Wrap mongodb record to a dict record with default value None
### Response:
def convert_to_record(func):
"""Wrap mongodb record to a dict record with default value None
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
result = func(self, *args, **kwargs)
if result is not None:
if isinstance(result, dict):
return _record(result)
return (_record(i) for i in result)
return result
return wrapper |
def close(self):
"""
Closes this VMware VM.
"""
if not (yield from super().close()):
return False
for adapter in self._ethernet_adapters.values():
if adapter is not None:
for nio in adapter.ports.values():
if nio and isinstance(nio, NIOUDP):
self.manager.port_manager.release_udp_port(nio.lport, self._project)
try:
self.acpi_shutdown = False
yield from self.stop()
except VMwareError:
pass
if self.linked_clone:
yield from self.manager.remove_from_vmware_inventory(self._vmx_path) | Closes this VMware VM. | Below is the the instruction that describes the task:
### Input:
Closes this VMware VM.
### Response:
def close(self):
"""
Closes this VMware VM.
"""
if not (yield from super().close()):
return False
for adapter in self._ethernet_adapters.values():
if adapter is not None:
for nio in adapter.ports.values():
if nio and isinstance(nio, NIOUDP):
self.manager.port_manager.release_udp_port(nio.lport, self._project)
try:
self.acpi_shutdown = False
yield from self.stop()
except VMwareError:
pass
if self.linked_clone:
yield from self.manager.remove_from_vmware_inventory(self._vmx_path) |
def options(self, **options):
"""Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
for k in options:
self._jwrite = self._jwrite.option(k, to_str(options[k]))
return self | Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone. | Below is the the instruction that describes the task:
### Input:
Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
### Response:
def options(self, **options):
"""Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
for k in options:
self._jwrite = self._jwrite.option(k, to_str(options[k]))
return self |
def dot1x_enable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
dot1x = ET.SubElement(config, "dot1x", xmlns="urn:brocade.com:mgmt:brocade-dot1x")
enable = ET.SubElement(dot1x, "enable")
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def dot1x_enable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
dot1x = ET.SubElement(config, "dot1x", xmlns="urn:brocade.com:mgmt:brocade-dot1x")
enable = ET.SubElement(dot1x, "enable")
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def create_environment_dict(overrides):
"""
Create and return a copy of os.environ with the specified overrides
"""
result = os.environ.copy()
result.update(overrides or {})
return result | Create and return a copy of os.environ with the specified overrides | Below is the the instruction that describes the task:
### Input:
Create and return a copy of os.environ with the specified overrides
### Response:
def create_environment_dict(overrides):
"""
Create and return a copy of os.environ with the specified overrides
"""
result = os.environ.copy()
result.update(overrides or {})
return result |
async def post_entries(self, url, title='', tags='', starred=0, archive=0, content='', language='', published_at='',
authors='', public=1, original_url=''):
"""
POST /api/entries.{_format}
Create an entry
:param url: the url of the note to store
:param title: Optional, we'll get the title from the page.
:param tags: tag1,tag2,tag3 a comma-separated list of tags.
:param starred entry already starred
:param archive entry already archived
:param content additionnal html content
:param language
:param published_at
:param authors
:param public
:param original_url
:return result
"""
params = {'access_token': self.token, 'url': url, 'title': title,
'tags': tags, 'starred': starred, 'archive': archive,
'content': content, 'language': language, 'published_at': published_at,
'authors': authors, 'public': public, 'original_url': original_url}
if len(tags) > 0 and isinstance(tags, list):
params['tags'] = ', '.join(tags)
path = '/api/entries.{ext}'.format(ext=self.format)
return await self.query(path, "post", **params) | POST /api/entries.{_format}
Create an entry
:param url: the url of the note to store
:param title: Optional, we'll get the title from the page.
:param tags: tag1,tag2,tag3 a comma-separated list of tags.
:param starred entry already starred
:param archive entry already archived
:param content additionnal html content
:param language
:param published_at
:param authors
:param public
:param original_url
:return result | Below is the the instruction that describes the task:
### Input:
POST /api/entries.{_format}
Create an entry
:param url: the url of the note to store
:param title: Optional, we'll get the title from the page.
:param tags: tag1,tag2,tag3 a comma-separated list of tags.
:param starred entry already starred
:param archive entry already archived
:param content additionnal html content
:param language
:param published_at
:param authors
:param public
:param original_url
:return result
### Response:
async def post_entries(self, url, title='', tags='', starred=0, archive=0, content='', language='', published_at='',
authors='', public=1, original_url=''):
"""
POST /api/entries.{_format}
Create an entry
:param url: the url of the note to store
:param title: Optional, we'll get the title from the page.
:param tags: tag1,tag2,tag3 a comma-separated list of tags.
:param starred entry already starred
:param archive entry already archived
:param content additionnal html content
:param language
:param published_at
:param authors
:param public
:param original_url
:return result
"""
params = {'access_token': self.token, 'url': url, 'title': title,
'tags': tags, 'starred': starred, 'archive': archive,
'content': content, 'language': language, 'published_at': published_at,
'authors': authors, 'public': public, 'original_url': original_url}
if len(tags) > 0 and isinstance(tags, list):
params['tags'] = ', '.join(tags)
path = '/api/entries.{ext}'.format(ext=self.format)
return await self.query(path, "post", **params) |
def setTxPower(self, tx_power):
"""Set the transmission power for one or more antennas.
@param tx_power: index into self.tx_power_table
"""
tx_pow_validated = self.get_tx_power(tx_power)
logger.debug('tx_pow_validated: %s', tx_pow_validated)
needs_update = False
for ant, (tx_pow_idx, tx_pow_dbm) in tx_pow_validated.items():
if self.tx_power[ant] != tx_pow_idx:
self.tx_power[ant] = tx_pow_idx
needs_update = True
logger.debug('tx_power for antenna %s: %s (%s dBm)', ant,
tx_pow_idx, tx_pow_dbm)
if needs_update and self.state == LLRPClient.STATE_INVENTORYING:
logger.debug('changing tx power; will stop politely, then resume')
d = self.stopPolitely()
d.addCallback(self.startInventory, force_regen_rospec=True) | Set the transmission power for one or more antennas.
@param tx_power: index into self.tx_power_table | Below is the the instruction that describes the task:
### Input:
Set the transmission power for one or more antennas.
@param tx_power: index into self.tx_power_table
### Response:
def setTxPower(self, tx_power):
"""Set the transmission power for one or more antennas.
@param tx_power: index into self.tx_power_table
"""
tx_pow_validated = self.get_tx_power(tx_power)
logger.debug('tx_pow_validated: %s', tx_pow_validated)
needs_update = False
for ant, (tx_pow_idx, tx_pow_dbm) in tx_pow_validated.items():
if self.tx_power[ant] != tx_pow_idx:
self.tx_power[ant] = tx_pow_idx
needs_update = True
logger.debug('tx_power for antenna %s: %s (%s dBm)', ant,
tx_pow_idx, tx_pow_dbm)
if needs_update and self.state == LLRPClient.STATE_INVENTORYING:
logger.debug('changing tx power; will stop politely, then resume')
d = self.stopPolitely()
d.addCallback(self.startInventory, force_regen_rospec=True) |
def show_pricing(kwargs=None, call=None):
'''
Show pricing for a particular profile. This is only an estimate, based on
unofficial pricing sources.
.. versionadded:: 2015.8.0
CLI Examples:
.. code-block:: bash
salt-cloud -f show_pricing my-digitalocean-config profile=my-profile
'''
profile = __opts__['profiles'].get(kwargs['profile'], {})
if not profile:
return {'Error': 'The requested profile was not found'}
# Make sure the profile belongs to DigitalOcean
provider = profile.get('provider', '0:0')
comps = provider.split(':')
if len(comps) < 2 or comps[1] != 'digitalocean':
return {'Error': 'The requested profile does not belong to DigitalOcean'}
raw = {}
ret = {}
sizes = avail_sizes()
ret['per_hour'] = decimal.Decimal(sizes[profile['size']]['price_hourly'])
ret['per_day'] = ret['per_hour'] * 24
ret['per_week'] = ret['per_day'] * 7
ret['per_month'] = decimal.Decimal(sizes[profile['size']]['price_monthly'])
ret['per_year'] = ret['per_week'] * 52
if kwargs.get('raw', False):
ret['_raw'] = raw
return {profile['profile']: ret} | Show pricing for a particular profile. This is only an estimate, based on
unofficial pricing sources.
.. versionadded:: 2015.8.0
CLI Examples:
.. code-block:: bash
salt-cloud -f show_pricing my-digitalocean-config profile=my-profile | Below is the the instruction that describes the task:
### Input:
Show pricing for a particular profile. This is only an estimate, based on
unofficial pricing sources.
.. versionadded:: 2015.8.0
CLI Examples:
.. code-block:: bash
salt-cloud -f show_pricing my-digitalocean-config profile=my-profile
### Response:
def show_pricing(kwargs=None, call=None):
'''
Show pricing for a particular profile. This is only an estimate, based on
unofficial pricing sources.
.. versionadded:: 2015.8.0
CLI Examples:
.. code-block:: bash
salt-cloud -f show_pricing my-digitalocean-config profile=my-profile
'''
profile = __opts__['profiles'].get(kwargs['profile'], {})
if not profile:
return {'Error': 'The requested profile was not found'}
# Make sure the profile belongs to DigitalOcean
provider = profile.get('provider', '0:0')
comps = provider.split(':')
if len(comps) < 2 or comps[1] != 'digitalocean':
return {'Error': 'The requested profile does not belong to DigitalOcean'}
raw = {}
ret = {}
sizes = avail_sizes()
ret['per_hour'] = decimal.Decimal(sizes[profile['size']]['price_hourly'])
ret['per_day'] = ret['per_hour'] * 24
ret['per_week'] = ret['per_day'] * 7
ret['per_month'] = decimal.Decimal(sizes[profile['size']]['price_monthly'])
ret['per_year'] = ret['per_week'] * 52
if kwargs.get('raw', False):
ret['_raw'] = raw
return {profile['profile']: ret} |
def sequence_isoelectric_point(seq, granularity=0.1):
"""Calculates the isoelectric point of the sequence for ph 1-13.
Parameters
----------
seq : str
Sequence of amino acids.
granularity : float, optional
Granularity of pH values i.e. if 0.1 pH = [1.0, 1.1, 1.2...]
"""
if 'X' in seq:
warnings.warn(_nc_warning_str, NoncanonicalWarning)
ph_range, charge_at_ph = charge_series(seq, granularity)
abs_charge_at_ph = [abs(ch) for ch in charge_at_ph]
pi_index = min(enumerate(abs_charge_at_ph), key=lambda x: x[1])[0]
return ph_range[pi_index] | Calculates the isoelectric point of the sequence for ph 1-13.
Parameters
----------
seq : str
Sequence of amino acids.
granularity : float, optional
Granularity of pH values i.e. if 0.1 pH = [1.0, 1.1, 1.2...] | Below is the the instruction that describes the task:
### Input:
Calculates the isoelectric point of the sequence for ph 1-13.
Parameters
----------
seq : str
Sequence of amino acids.
granularity : float, optional
Granularity of pH values i.e. if 0.1 pH = [1.0, 1.1, 1.2...]
### Response:
def sequence_isoelectric_point(seq, granularity=0.1):
"""Calculates the isoelectric point of the sequence for ph 1-13.
Parameters
----------
seq : str
Sequence of amino acids.
granularity : float, optional
Granularity of pH values i.e. if 0.1 pH = [1.0, 1.1, 1.2...]
"""
if 'X' in seq:
warnings.warn(_nc_warning_str, NoncanonicalWarning)
ph_range, charge_at_ph = charge_series(seq, granularity)
abs_charge_at_ph = [abs(ch) for ch in charge_at_ph]
pi_index = min(enumerate(abs_charge_at_ph), key=lambda x: x[1])[0]
return ph_range[pi_index] |
def parse_alignment_summary_metrics(fn):
"""
Parse the output from Picard's CollectAlignmentSummaryMetrics and return as
pandas Dataframe.
Parameters
----------
filename : str of filename or file handle
Filename of the Picard output you want to parse.
Returns
-------
df : pandas.DataFrame
Data from output file.
"""
df = pd.read_table(fn, index_col=0, skiprows=range(6) + [10, 11]).T
return df | Parse the output from Picard's CollectAlignmentSummaryMetrics and return as
pandas Dataframe.
Parameters
----------
filename : str of filename or file handle
Filename of the Picard output you want to parse.
Returns
-------
df : pandas.DataFrame
Data from output file. | Below is the the instruction that describes the task:
### Input:
Parse the output from Picard's CollectAlignmentSummaryMetrics and return as
pandas Dataframe.
Parameters
----------
filename : str of filename or file handle
Filename of the Picard output you want to parse.
Returns
-------
df : pandas.DataFrame
Data from output file.
### Response:
def parse_alignment_summary_metrics(fn):
"""
Parse the output from Picard's CollectAlignmentSummaryMetrics and return as
pandas Dataframe.
Parameters
----------
filename : str of filename or file handle
Filename of the Picard output you want to parse.
Returns
-------
df : pandas.DataFrame
Data from output file.
"""
df = pd.read_table(fn, index_col=0, skiprows=range(6) + [10, 11]).T
return df |
def yixia_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
"""wrapper"""
hostname = urlparse(url).hostname
if 'n.miaopai.com' == hostname:
smid = match1(url, r'n\.miaopai\.com/media/([^.]+)')
miaopai_download_by_smid(smid, output_dir, merge, info_only)
return
elif 'miaopai.com' in hostname: #Miaopai
yixia_download_by_scid = yixia_miaopai_download_by_scid
site_info = "Yixia Miaopai"
scid = match1(url, r'miaopai\.com/show/channel/([^.]+)\.htm') or \
match1(url, r'miaopai\.com/show/([^.]+)\.htm') or \
match1(url, r'm\.miaopai\.com/show/channel/([^.]+)\.htm') or \
match1(url, r'm\.miaopai\.com/show/channel/([^.]+)')
elif 'xiaokaxiu.com' in hostname: #Xiaokaxiu
yixia_download_by_scid = yixia_xiaokaxiu_download_by_scid
site_info = "Yixia Xiaokaxiu"
if re.match(r'http://v.xiaokaxiu.com/v/.+\.html', url): #PC
scid = match1(url, r'http://v.xiaokaxiu.com/v/(.+)\.html')
elif re.match(r'http://m.xiaokaxiu.com/m/.+\.html', url): #Mobile
scid = match1(url, r'http://m.xiaokaxiu.com/m/(.+)\.html')
else:
pass
yixia_download_by_scid(scid, output_dir, merge, info_only) | wrapper | Below is the the instruction that describes the task:
### Input:
wrapper
### Response:
def yixia_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
"""wrapper"""
hostname = urlparse(url).hostname
if 'n.miaopai.com' == hostname:
smid = match1(url, r'n\.miaopai\.com/media/([^.]+)')
miaopai_download_by_smid(smid, output_dir, merge, info_only)
return
elif 'miaopai.com' in hostname: #Miaopai
yixia_download_by_scid = yixia_miaopai_download_by_scid
site_info = "Yixia Miaopai"
scid = match1(url, r'miaopai\.com/show/channel/([^.]+)\.htm') or \
match1(url, r'miaopai\.com/show/([^.]+)\.htm') or \
match1(url, r'm\.miaopai\.com/show/channel/([^.]+)\.htm') or \
match1(url, r'm\.miaopai\.com/show/channel/([^.]+)')
elif 'xiaokaxiu.com' in hostname: #Xiaokaxiu
yixia_download_by_scid = yixia_xiaokaxiu_download_by_scid
site_info = "Yixia Xiaokaxiu"
if re.match(r'http://v.xiaokaxiu.com/v/.+\.html', url): #PC
scid = match1(url, r'http://v.xiaokaxiu.com/v/(.+)\.html')
elif re.match(r'http://m.xiaokaxiu.com/m/.+\.html', url): #Mobile
scid = match1(url, r'http://m.xiaokaxiu.com/m/(.+)\.html')
else:
pass
yixia_download_by_scid(scid, output_dir, merge, info_only) |
def get_preparer(mixed: Union[SQLCompiler, Engine,
Dialect]) -> IdentifierPreparer:
"""
Returns the SQLAlchemy :class:`IdentifierPreparer` in use for the dialect
being used.
Args:
mixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or
:class:`Dialect` object
Returns: an :class:`IdentifierPreparer`
"""
dialect = get_dialect(mixed)
# noinspection PyUnresolvedReferences
return dialect.preparer(dialect) | Returns the SQLAlchemy :class:`IdentifierPreparer` in use for the dialect
being used.
Args:
mixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or
:class:`Dialect` object
Returns: an :class:`IdentifierPreparer` | Below is the the instruction that describes the task:
### Input:
Returns the SQLAlchemy :class:`IdentifierPreparer` in use for the dialect
being used.
Args:
mixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or
:class:`Dialect` object
Returns: an :class:`IdentifierPreparer`
### Response:
def get_preparer(mixed: Union[SQLCompiler, Engine,
Dialect]) -> IdentifierPreparer:
"""
Returns the SQLAlchemy :class:`IdentifierPreparer` in use for the dialect
being used.
Args:
mixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or
:class:`Dialect` object
Returns: an :class:`IdentifierPreparer`
"""
dialect = get_dialect(mixed)
# noinspection PyUnresolvedReferences
return dialect.preparer(dialect) |
def set_scan_parameters(self, scan_type=ScanType.ACTIVE, interval_ms=10, window_ms=10,
address_type=BluetoothAddressType.RANDOM, filter_type=ScanFilter.ALL):
""""sets the le scan parameters
Args:
scan_type: ScanType.(PASSIVE|ACTIVE)
interval: ms (as float) between scans (valid range 2.5ms - 10240ms)
..note:: when interval and window are equal, the scan
runs continuos
window: ms (as float) scan duration (valid range 2.5ms - 10240ms)
address_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM)
* PUBLIC = use device MAC address
* RANDOM = generate a random MAC address and use that
filter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will
return all fetched bluetooth packets (WHITELIST_ONLY is not supported,
because OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented)
Raises:
ValueError: A value had an unexpected format or was not in range
"""
interval_fractions = interval_ms / MS_FRACTION_DIVIDER
if interval_fractions < 0x0004 or interval_fractions > 0x4000:
raise ValueError(
"Invalid interval given {}, must be in range of 2.5ms to 10240ms!".format(
interval_fractions))
window_fractions = window_ms / MS_FRACTION_DIVIDER
if window_fractions < 0x0004 or window_fractions > 0x4000:
raise ValueError(
"Invalid window given {}, must be in range of 2.5ms to 10240ms!".format(
window_fractions))
interval_fractions, window_fractions = int(interval_fractions), int(window_fractions)
scan_parameter_pkg = struct.pack(
">BHHBB",
scan_type,
interval_fractions,
window_fractions,
address_type,
filter_type)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS,
scan_parameter_pkg) | sets the le scan parameters
Args:
scan_type: ScanType.(PASSIVE|ACTIVE)
interval: ms (as float) between scans (valid range 2.5ms - 10240ms)
..note:: when interval and window are equal, the scan
runs continuos
window: ms (as float) scan duration (valid range 2.5ms - 10240ms)
address_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM)
* PUBLIC = use device MAC address
* RANDOM = generate a random MAC address and use that
filter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will
return all fetched bluetooth packets (WHITELIST_ONLY is not supported,
because OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented)
Raises:
ValueError: A value had an unexpected format or was not in range | Below is the the instruction that describes the task:
### Input:
sets the le scan parameters
Args:
scan_type: ScanType.(PASSIVE|ACTIVE)
interval: ms (as float) between scans (valid range 2.5ms - 10240ms)
..note:: when interval and window are equal, the scan
runs continuos
window: ms (as float) scan duration (valid range 2.5ms - 10240ms)
address_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM)
* PUBLIC = use device MAC address
* RANDOM = generate a random MAC address and use that
filter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will
return all fetched bluetooth packets (WHITELIST_ONLY is not supported,
because OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented)
Raises:
ValueError: A value had an unexpected format or was not in range
### Response:
def set_scan_parameters(self, scan_type=ScanType.ACTIVE, interval_ms=10, window_ms=10,
address_type=BluetoothAddressType.RANDOM, filter_type=ScanFilter.ALL):
""""sets the le scan parameters
Args:
scan_type: ScanType.(PASSIVE|ACTIVE)
interval: ms (as float) between scans (valid range 2.5ms - 10240ms)
..note:: when interval and window are equal, the scan
runs continuos
window: ms (as float) scan duration (valid range 2.5ms - 10240ms)
address_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM)
* PUBLIC = use device MAC address
* RANDOM = generate a random MAC address and use that
filter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will
return all fetched bluetooth packets (WHITELIST_ONLY is not supported,
because OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented)
Raises:
ValueError: A value had an unexpected format or was not in range
"""
interval_fractions = interval_ms / MS_FRACTION_DIVIDER
if interval_fractions < 0x0004 or interval_fractions > 0x4000:
raise ValueError(
"Invalid interval given {}, must be in range of 2.5ms to 10240ms!".format(
interval_fractions))
window_fractions = window_ms / MS_FRACTION_DIVIDER
if window_fractions < 0x0004 or window_fractions > 0x4000:
raise ValueError(
"Invalid window given {}, must be in range of 2.5ms to 10240ms!".format(
window_fractions))
interval_fractions, window_fractions = int(interval_fractions), int(window_fractions)
scan_parameter_pkg = struct.pack(
">BHHBB",
scan_type,
interval_fractions,
window_fractions,
address_type,
filter_type)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS,
scan_parameter_pkg) |
def update(self, doc: dict, doc_id: str):
"""Partial update to a single document.
Uses the Update API with the specified partial document.
"""
body = {
'doc': doc
}
self.instance.update(self.index, self.doc_type, doc_id, body=body) | Partial update to a single document.
Uses the Update API with the specified partial document. | Below is the the instruction that describes the task:
### Input:
Partial update to a single document.
Uses the Update API with the specified partial document.
### Response:
def update(self, doc: dict, doc_id: str):
"""Partial update to a single document.
Uses the Update API with the specified partial document.
"""
body = {
'doc': doc
}
self.instance.update(self.index, self.doc_type, doc_id, body=body) |
def get_vg(self, name, mode="r"):
"""
Returns an instance of VolumeGroup. The name parameter should be an existing
volume group. By default, all volume groups are open in "read" mode::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg")
To open a volume group with write permissions set the mode parameter to "w"::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg", "w")
*Args:*
* name (str): An existing volume group name.
* mode (str): "r" or "w" for read/write respectively. Default is "r".
*Raises:*
* HandleError
"""
vg = VolumeGroup(self, name=name, mode=mode)
return vg | Returns an instance of VolumeGroup. The name parameter should be an existing
volume group. By default, all volume groups are open in "read" mode::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg")
To open a volume group with write permissions set the mode parameter to "w"::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg", "w")
*Args:*
* name (str): An existing volume group name.
* mode (str): "r" or "w" for read/write respectively. Default is "r".
*Raises:*
* HandleError | Below is the the instruction that describes the task:
### Input:
Returns an instance of VolumeGroup. The name parameter should be an existing
volume group. By default, all volume groups are open in "read" mode::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg")
To open a volume group with write permissions set the mode parameter to "w"::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg", "w")
*Args:*
* name (str): An existing volume group name.
* mode (str): "r" or "w" for read/write respectively. Default is "r".
*Raises:*
* HandleError
### Response:
def get_vg(self, name, mode="r"):
"""
Returns an instance of VolumeGroup. The name parameter should be an existing
volume group. By default, all volume groups are open in "read" mode::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg")
To open a volume group with write permissions set the mode parameter to "w"::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg", "w")
*Args:*
* name (str): An existing volume group name.
* mode (str): "r" or "w" for read/write respectively. Default is "r".
*Raises:*
* HandleError
"""
vg = VolumeGroup(self, name=name, mode=mode)
return vg |
def _fullCloneOrFallback(self):
"""Wrapper for _fullClone(). In the case of failure, if clobberOnFailure
is set to True remove the build directory and try a full clone again.
"""
res = yield self._fullClone()
if res != RC_SUCCESS:
if not self.clobberOnFailure:
raise buildstep.BuildStepFailed()
res = yield self.clobber()
return res | Wrapper for _fullClone(). In the case of failure, if clobberOnFailure
is set to True remove the build directory and try a full clone again. | Below is the the instruction that describes the task:
### Input:
Wrapper for _fullClone(). In the case of failure, if clobberOnFailure
is set to True remove the build directory and try a full clone again.
### Response:
def _fullCloneOrFallback(self):
"""Wrapper for _fullClone(). In the case of failure, if clobberOnFailure
is set to True remove the build directory and try a full clone again.
"""
res = yield self._fullClone()
if res != RC_SUCCESS:
if not self.clobberOnFailure:
raise buildstep.BuildStepFailed()
res = yield self.clobber()
return res |
def actual_age_solar(birthday, today=None):
"""See more at https://stackoverflow.com/questions/2217488/age-from-birthdate-in-python/9754466#9754466
:param birthday:
:param today:
:return:
"""
birthday = LCalendars.cast_date(birthday, date)
if today:
today = LCalendars.cast_date(today, date)
else:
today = date.today()
return today.year - birthday.year - ((today.month, today.day) < (birthday.month, birthday.day)) | See more at https://stackoverflow.com/questions/2217488/age-from-birthdate-in-python/9754466#9754466
:param birthday:
:param today:
:return: | Below is the the instruction that describes the task:
### Input:
See more at https://stackoverflow.com/questions/2217488/age-from-birthdate-in-python/9754466#9754466
:param birthday:
:param today:
:return:
### Response:
def actual_age_solar(birthday, today=None):
"""See more at https://stackoverflow.com/questions/2217488/age-from-birthdate-in-python/9754466#9754466
:param birthday:
:param today:
:return:
"""
birthday = LCalendars.cast_date(birthday, date)
if today:
today = LCalendars.cast_date(today, date)
else:
today = date.today()
return today.year - birthday.year - ((today.month, today.day) < (birthday.month, birthday.day)) |
def get_port_profile_status_output_port_profile_ppid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
output = ET.SubElement(get_port_profile_status, "output")
port_profile = ET.SubElement(output, "port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
ppid = ET.SubElement(port_profile, "ppid")
ppid.text = kwargs.pop('ppid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_port_profile_status_output_port_profile_ppid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
output = ET.SubElement(get_port_profile_status, "output")
port_profile = ET.SubElement(output, "port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
ppid = ET.SubElement(port_profile, "ppid")
ppid.text = kwargs.pop('ppid')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _parse(self):
"""
Checks if the CPE Name is valid.
:returns: None
:exception: ValueError - bad-formed CPE Name
"""
# CPE Name must not have whitespaces
if (self.cpe_str.find(" ") != -1):
errmsg = "Bad-formed CPE Name: it must not have whitespaces"
raise ValueError(errmsg)
# Partitioning of CPE Name in parts
parts_match = CPE1_1._parts_rxc.match(self.cpe_str)
# ################################
# Validation of CPE Name parts #
# ################################
if (parts_match is None):
errmsg = "Bad-formed CPE Name: not correct definition of CPE Name parts"
raise ValueError(errmsg)
CPE_PART_KEYS = (CPE.KEY_HW, CPE.KEY_OS, CPE.KEY_APP)
for pk in CPE_PART_KEYS:
# Get part content
part = parts_match.group(pk)
elements = []
if (part is not None):
# Part of CPE Name defined
# ###############################
# Validation of part elements #
# ###############################
# semicolon (;) is used to separate the part elements
for part_elem in part.split(CPE1_1.ELEMENT_SEPARATOR):
j = 1
# ####################################
# Validation of element components #
# ####################################
components = dict()
# colon (:) is used to separate the element components
for elem_comp in part_elem.split(CPEComponent1_1.SEPARATOR_COMP):
comp_att = CPEComponent.ordered_comp_parts[j]
if elem_comp == CPEComponent1_1.VALUE_EMPTY:
comp = CPEComponentEmpty()
else:
try:
comp = CPEComponent1_1(elem_comp, comp_att)
except ValueError:
errmsg = "Bad-formed CPE Name: not correct value: {0}".format(
elem_comp)
raise ValueError(errmsg)
# Identification of component name
components[comp_att] = comp
j += 1
# Adds the components of version 2.3 of CPE not defined
# in version 1.1
for idx in range(j, len(CPEComponent.ordered_comp_parts)):
comp_att = CPEComponent.ordered_comp_parts[idx]
components[comp_att] = CPEComponentUndefined()
# Get the type of system associated with CPE Name and
# store it in element as component
if (pk == CPE.KEY_HW):
components[CPEComponent.ATT_PART] = CPEComponent1_1(
CPEComponent.VALUE_PART_HW, CPEComponent.ATT_PART)
elif (pk == CPE.KEY_OS):
components[CPEComponent.ATT_PART] = CPEComponent1_1(
CPEComponent.VALUE_PART_OS, CPEComponent.ATT_PART)
elif (pk == CPE.KEY_APP):
components[CPEComponent.ATT_PART] = CPEComponent1_1(
CPEComponent.VALUE_PART_APP, CPEComponent.ATT_PART)
# Store the element identified
elements.append(components)
# Store the part identified
self[pk] = elements
self[CPE.KEY_UNDEFINED] = [] | Checks if the CPE Name is valid.
:returns: None
:exception: ValueError - bad-formed CPE Name | Below is the the instruction that describes the task:
### Input:
Checks if the CPE Name is valid.
:returns: None
:exception: ValueError - bad-formed CPE Name
### Response:
def _parse(self):
"""
Checks if the CPE Name is valid.
:returns: None
:exception: ValueError - bad-formed CPE Name
"""
# CPE Name must not have whitespaces
if (self.cpe_str.find(" ") != -1):
errmsg = "Bad-formed CPE Name: it must not have whitespaces"
raise ValueError(errmsg)
# Partitioning of CPE Name in parts
parts_match = CPE1_1._parts_rxc.match(self.cpe_str)
# ################################
# Validation of CPE Name parts #
# ################################
if (parts_match is None):
errmsg = "Bad-formed CPE Name: not correct definition of CPE Name parts"
raise ValueError(errmsg)
CPE_PART_KEYS = (CPE.KEY_HW, CPE.KEY_OS, CPE.KEY_APP)
for pk in CPE_PART_KEYS:
# Get part content
part = parts_match.group(pk)
elements = []
if (part is not None):
# Part of CPE Name defined
# ###############################
# Validation of part elements #
# ###############################
# semicolon (;) is used to separate the part elements
for part_elem in part.split(CPE1_1.ELEMENT_SEPARATOR):
j = 1
# ####################################
# Validation of element components #
# ####################################
components = dict()
# colon (:) is used to separate the element components
for elem_comp in part_elem.split(CPEComponent1_1.SEPARATOR_COMP):
comp_att = CPEComponent.ordered_comp_parts[j]
if elem_comp == CPEComponent1_1.VALUE_EMPTY:
comp = CPEComponentEmpty()
else:
try:
comp = CPEComponent1_1(elem_comp, comp_att)
except ValueError:
errmsg = "Bad-formed CPE Name: not correct value: {0}".format(
elem_comp)
raise ValueError(errmsg)
# Identification of component name
components[comp_att] = comp
j += 1
# Adds the components of version 2.3 of CPE not defined
# in version 1.1
for idx in range(j, len(CPEComponent.ordered_comp_parts)):
comp_att = CPEComponent.ordered_comp_parts[idx]
components[comp_att] = CPEComponentUndefined()
# Get the type of system associated with CPE Name and
# store it in element as component
if (pk == CPE.KEY_HW):
components[CPEComponent.ATT_PART] = CPEComponent1_1(
CPEComponent.VALUE_PART_HW, CPEComponent.ATT_PART)
elif (pk == CPE.KEY_OS):
components[CPEComponent.ATT_PART] = CPEComponent1_1(
CPEComponent.VALUE_PART_OS, CPEComponent.ATT_PART)
elif (pk == CPE.KEY_APP):
components[CPEComponent.ATT_PART] = CPEComponent1_1(
CPEComponent.VALUE_PART_APP, CPEComponent.ATT_PART)
# Store the element identified
elements.append(components)
# Store the part identified
self[pk] = elements
self[CPE.KEY_UNDEFINED] = [] |
def get_aggregate_check(self, check, age=None):
"""
Returns the list of aggregates for a given check
"""
data = {}
if age:
data['max_age'] = age
result = self._request('GET', '/aggregates/{}'.format(check),
data=json.dumps(data))
return result.json() | Returns the list of aggregates for a given check | Below is the the instruction that describes the task:
### Input:
Returns the list of aggregates for a given check
### Response:
def get_aggregate_check(self, check, age=None):
"""
Returns the list of aggregates for a given check
"""
data = {}
if age:
data['max_age'] = age
result = self._request('GET', '/aggregates/{}'.format(check),
data=json.dumps(data))
return result.json() |
def _RetryLoop(self, func, timeout=None):
"""Retries an operation until success or deadline.
Args:
func: The function to run. Must take a timeout, in seconds, as a single
parameter. If it raises grpc.RpcError and deadline has not be reached,
it will be run again.
timeout: Retries will continue until timeout seconds have passed.
"""
timeout = timeout or self.DEFAULT_TIMEOUT
deadline = time.time() + timeout
sleep = 1
while True:
try:
return func(timeout)
except grpc.RpcError:
if time.time() + sleep > deadline:
raise
time.sleep(sleep)
sleep *= 2
timeout = deadline - time.time() | Retries an operation until success or deadline.
Args:
func: The function to run. Must take a timeout, in seconds, as a single
parameter. If it raises grpc.RpcError and deadline has not be reached,
it will be run again.
timeout: Retries will continue until timeout seconds have passed. | Below is the the instruction that describes the task:
### Input:
Retries an operation until success or deadline.
Args:
func: The function to run. Must take a timeout, in seconds, as a single
parameter. If it raises grpc.RpcError and deadline has not be reached,
it will be run again.
timeout: Retries will continue until timeout seconds have passed.
### Response:
def _RetryLoop(self, func, timeout=None):
"""Retries an operation until success or deadline.
Args:
func: The function to run. Must take a timeout, in seconds, as a single
parameter. If it raises grpc.RpcError and deadline has not be reached,
it will be run again.
timeout: Retries will continue until timeout seconds have passed.
"""
timeout = timeout or self.DEFAULT_TIMEOUT
deadline = time.time() + timeout
sleep = 1
while True:
try:
return func(timeout)
except grpc.RpcError:
if time.time() + sleep > deadline:
raise
time.sleep(sleep)
sleep *= 2
timeout = deadline - time.time() |
def _compute_diff(self):
"""Shows a diff between this version and the previous version"""
history = self.env['document.page.history']
for rec in self:
domain = [
('page_id', '=', rec.page_id.id),
('state', '=', 'approved')]
if rec.approved_date:
domain.append(('approved_date', '<', rec.approved_date))
prev = history.search(domain, limit=1, order='approved_date DESC')
if prev:
rec.diff = self.getDiff(prev.id, rec.id)
else:
rec.diff = self.getDiff(False, rec.id) | Shows a diff between this version and the previous version | Below is the the instruction that describes the task:
### Input:
Shows a diff between this version and the previous version
### Response:
def _compute_diff(self):
"""Shows a diff between this version and the previous version"""
history = self.env['document.page.history']
for rec in self:
domain = [
('page_id', '=', rec.page_id.id),
('state', '=', 'approved')]
if rec.approved_date:
domain.append(('approved_date', '<', rec.approved_date))
prev = history.search(domain, limit=1, order='approved_date DESC')
if prev:
rec.diff = self.getDiff(prev.id, rec.id)
else:
rec.diff = self.getDiff(False, rec.id) |
def visit_If(self, node):
"""Eliminate dead code."""
# do not optimize ifs that have a block inside so that it doesn't
# break super().
if node.find(nodes.Block) is not None:
return self.generic_visit(node)
try:
val = self.visit(node.test).as_const()
except nodes.Impossible:
return self.generic_visit(node)
if val:
body = node.body
else:
body = node.else_
result = []
for node in body:
result.extend(self.visit_list(node))
return result | Eliminate dead code. | Below is the the instruction that describes the task:
### Input:
Eliminate dead code.
### Response:
def visit_If(self, node):
"""Eliminate dead code."""
# do not optimize ifs that have a block inside so that it doesn't
# break super().
if node.find(nodes.Block) is not None:
return self.generic_visit(node)
try:
val = self.visit(node.test).as_const()
except nodes.Impossible:
return self.generic_visit(node)
if val:
body = node.body
else:
body = node.else_
result = []
for node in body:
result.extend(self.visit_list(node))
return result |
def _restore_replace(self):
"""
Check if we need to replace ".gitignore" to ".keep".
:return: The replacement status.
:rtype: bool
"""
if PyFunceble.path.isdir(self.base + ".git"):
# The `.git` directory exist.
if "PyFunceble" not in Command("git remote show origin").execute():
# PyFunceble is not in the origin.
# We return True.
return True
# We return False.
return False
# The `.git` directory does not exist.
# We return True.
return True | Check if we need to replace ".gitignore" to ".keep".
:return: The replacement status.
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Check if we need to replace ".gitignore" to ".keep".
:return: The replacement status.
:rtype: bool
### Response:
def _restore_replace(self):
"""
Check if we need to replace ".gitignore" to ".keep".
:return: The replacement status.
:rtype: bool
"""
if PyFunceble.path.isdir(self.base + ".git"):
# The `.git` directory exist.
if "PyFunceble" not in Command("git remote show origin").execute():
# PyFunceble is not in the origin.
# We return True.
return True
# We return False.
return False
# The `.git` directory does not exist.
# We return True.
return True |
def gatk_remove_missingalt(in_file, data):
"""
GATK 4.1.0.0 outputs variants that have missing ALTs, which breaks downstream
tools, this filters those out.
"""
base = in_file.split('.vcf.gz')[0]
out_file = "%s-nomissingalt%s" % (base, '.vcf.gz')
if utils.file_exists(out_file):
return out_file
no_gzip_out = out_file.replace(".vcf.gz", ".vcf")
with file_transaction(no_gzip_out) as tx_out_file:
with utils.open_gzipsafe(in_file) as in_handle, open(tx_out_file, "w") as out_handle:
for line in in_handle:
line = remove_missingalt(line)
if line:
out_handle.write(line)
return vcfutils.bgzip_and_index(no_gzip_out, data["config"]) | GATK 4.1.0.0 outputs variants that have missing ALTs, which breaks downstream
tools, this filters those out. | Below is the the instruction that describes the task:
### Input:
GATK 4.1.0.0 outputs variants that have missing ALTs, which breaks downstream
tools, this filters those out.
### Response:
def gatk_remove_missingalt(in_file, data):
"""
GATK 4.1.0.0 outputs variants that have missing ALTs, which breaks downstream
tools, this filters those out.
"""
base = in_file.split('.vcf.gz')[0]
out_file = "%s-nomissingalt%s" % (base, '.vcf.gz')
if utils.file_exists(out_file):
return out_file
no_gzip_out = out_file.replace(".vcf.gz", ".vcf")
with file_transaction(no_gzip_out) as tx_out_file:
with utils.open_gzipsafe(in_file) as in_handle, open(tx_out_file, "w") as out_handle:
for line in in_handle:
line = remove_missingalt(line)
if line:
out_handle.write(line)
return vcfutils.bgzip_and_index(no_gzip_out, data["config"]) |
def normalizeBoolean(value):
"""
Normalizes a boolean.
* **value** must be an ``int`` with value of 0 or 1, or a ``bool``.
* Returned value will be a boolean.
"""
if isinstance(value, int) and value in (0, 1):
value = bool(value)
if not isinstance(value, bool):
raise ValueError("Boolean values must be True or False, not '%s'."
% value)
return value | Normalizes a boolean.
* **value** must be an ``int`` with value of 0 or 1, or a ``bool``.
* Returned value will be a boolean. | Below is the the instruction that describes the task:
### Input:
Normalizes a boolean.
* **value** must be an ``int`` with value of 0 or 1, or a ``bool``.
* Returned value will be a boolean.
### Response:
def normalizeBoolean(value):
"""
Normalizes a boolean.
* **value** must be an ``int`` with value of 0 or 1, or a ``bool``.
* Returned value will be a boolean.
"""
if isinstance(value, int) and value in (0, 1):
value = bool(value)
if not isinstance(value, bool):
raise ValueError("Boolean values must be True or False, not '%s'."
% value)
return value |
def get_appapi_params(self, prepay_id, timestamp=None, nonce_str=None):
"""
获取 APP 支付参数
:param prepay_id: 统一下单接口返回的 prepay_id 参数值
:param timestamp: 可选,时间戳,默认为当前时间戳
:param nonce_str: 可选,随机字符串,默认自动生成
:return: 签名
"""
data = {
'appid': self.appid,
'partnerid': self.mch_id,
'prepayid': prepay_id,
'package': 'Sign=WXPay',
'timestamp': timestamp or to_text(int(time.time())),
'noncestr': nonce_str or random_string(32)
}
sign = calculate_signature(data, self._client.api_key)
data['sign'] = sign
return data | 获取 APP 支付参数
:param prepay_id: 统一下单接口返回的 prepay_id 参数值
:param timestamp: 可选,时间戳,默认为当前时间戳
:param nonce_str: 可选,随机字符串,默认自动生成
:return: 签名 | Below is the the instruction that describes the task:
### Input:
获取 APP 支付参数
:param prepay_id: 统一下单接口返回的 prepay_id 参数值
:param timestamp: 可选,时间戳,默认为当前时间戳
:param nonce_str: 可选,随机字符串,默认自动生成
:return: 签名
### Response:
def get_appapi_params(self, prepay_id, timestamp=None, nonce_str=None):
"""
获取 APP 支付参数
:param prepay_id: 统一下单接口返回的 prepay_id 参数值
:param timestamp: 可选,时间戳,默认为当前时间戳
:param nonce_str: 可选,随机字符串,默认自动生成
:return: 签名
"""
data = {
'appid': self.appid,
'partnerid': self.mch_id,
'prepayid': prepay_id,
'package': 'Sign=WXPay',
'timestamp': timestamp or to_text(int(time.time())),
'noncestr': nonce_str or random_string(32)
}
sign = calculate_signature(data, self._client.api_key)
data['sign'] = sign
return data |
def branches(self):
"""Get basic block branches.
"""
branches = []
if self._taken_branch:
branches += [(self._taken_branch, 'taken')]
if self._not_taken_branch:
branches += [(self._not_taken_branch, 'not-taken')]
if self._direct_branch:
branches += [(self._direct_branch, 'direct')]
return branches | Get basic block branches. | Below is the the instruction that describes the task:
### Input:
Get basic block branches.
### Response:
def branches(self):
"""Get basic block branches.
"""
branches = []
if self._taken_branch:
branches += [(self._taken_branch, 'taken')]
if self._not_taken_branch:
branches += [(self._not_taken_branch, 'not-taken')]
if self._direct_branch:
branches += [(self._direct_branch, 'direct')]
return branches |
def win32_refresh_window(cls):
"""
Call win32 API to refresh the whole Window.
This is sometimes necessary when the application paints background
for completion menus. When the menu disappears, it leaves traces due
to a bug in the Windows Console. Sending a repaint request solves it.
"""
# Get console handle
handle = windll.kernel32.GetConsoleWindow()
RDW_INVALIDATE = 0x0001
windll.user32.RedrawWindow(handle, None, None, c_uint(RDW_INVALIDATE)) | Call win32 API to refresh the whole Window.
This is sometimes necessary when the application paints background
for completion menus. When the menu disappears, it leaves traces due
to a bug in the Windows Console. Sending a repaint request solves it. | Below is the the instruction that describes the task:
### Input:
Call win32 API to refresh the whole Window.
This is sometimes necessary when the application paints background
for completion menus. When the menu disappears, it leaves traces due
to a bug in the Windows Console. Sending a repaint request solves it.
### Response:
def win32_refresh_window(cls):
"""
Call win32 API to refresh the whole Window.
This is sometimes necessary when the application paints background
for completion menus. When the menu disappears, it leaves traces due
to a bug in the Windows Console. Sending a repaint request solves it.
"""
# Get console handle
handle = windll.kernel32.GetConsoleWindow()
RDW_INVALIDATE = 0x0001
windll.user32.RedrawWindow(handle, None, None, c_uint(RDW_INVALIDATE)) |
def soap_request(self, url, urn, action, params, body_elem="m"):
"""Send a SOAP request to the TV."""
soap_body = (
'<?xml version="1.0" encoding="utf-8"?>'
'<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
'<s:Body>'
'<{body_elem}:{action} xmlns:{body_elem}="urn:{urn}">'
'{params}'
'</{body_elem}:{action}>'
'</s:Body>'
'</s:Envelope>'
).format(action=action, urn=urn, params=params, body_elem=body_elem).encode('utf-8')
headers = {
'Host': '{}:{}'.format(self._host, self._port),
'Content-Length': len(soap_body),
'Content-Type': 'text/xml; charset=utf-8"',
'SOAPAction': '"urn:{}#{}"'.format(urn, action),
}
url = 'http://{}:{}/{}'.format(self._host, self._port, url)
_LOGGER.debug("Sending to %s:\n%s\n%s", url, headers, soap_body)
req = Request(url, soap_body, headers)
res = urlopen(req, timeout=5).read()
_LOGGER.debug("Response: %s", res)
return res | Send a SOAP request to the TV. | Below is the the instruction that describes the task:
### Input:
Send a SOAP request to the TV.
### Response:
def soap_request(self, url, urn, action, params, body_elem="m"):
"""Send a SOAP request to the TV."""
soap_body = (
'<?xml version="1.0" encoding="utf-8"?>'
'<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
'<s:Body>'
'<{body_elem}:{action} xmlns:{body_elem}="urn:{urn}">'
'{params}'
'</{body_elem}:{action}>'
'</s:Body>'
'</s:Envelope>'
).format(action=action, urn=urn, params=params, body_elem=body_elem).encode('utf-8')
headers = {
'Host': '{}:{}'.format(self._host, self._port),
'Content-Length': len(soap_body),
'Content-Type': 'text/xml; charset=utf-8"',
'SOAPAction': '"urn:{}#{}"'.format(urn, action),
}
url = 'http://{}:{}/{}'.format(self._host, self._port, url)
_LOGGER.debug("Sending to %s:\n%s\n%s", url, headers, soap_body)
req = Request(url, soap_body, headers)
res = urlopen(req, timeout=5).read()
_LOGGER.debug("Response: %s", res)
return res |
def wait(self, sec=0.1):
""" Wait for x seconds
each wait command is 100ms """
sec = max(sec, 0)
reps = int(floor(sec / 0.1))
commands = []
for i in range(0, reps):
commands.append(Command(0x00, wait=True))
return tuple(commands) | Wait for x seconds
each wait command is 100ms | Below is the the instruction that describes the task:
### Input:
Wait for x seconds
each wait command is 100ms
### Response:
def wait(self, sec=0.1):
""" Wait for x seconds
each wait command is 100ms """
sec = max(sec, 0)
reps = int(floor(sec / 0.1))
commands = []
for i in range(0, reps):
commands.append(Command(0x00, wait=True))
return tuple(commands) |
def _populate_local(self):
"""
Populate version data for local archives
"""
archives = glob(os.path.join(self.path, '*.zip'))
for archive in archives:
try:
version = self._read_zip(archive)
self.versions[version.vtuple] = self.meta_class(self, version, filepath=archive)
except BadZipfile as e:
self.log.warn('Unreadable zip archive in versions directory (%s): %s', e.message, archive)
if self.dev_path:
dev_archives = glob(os.path.join(self.dev_path, '*.zip'))
dev_versions = []
for dev_archive in dev_archives:
try:
dev_versions.append((self._read_zip(dev_archive), dev_archive))
except BadZipfile as e:
self.log.warn('Unreadable zip archive in versions directory (%s): %s', e.message, dev_archive)
if not dev_versions:
self.log.debug('No development releases found')
return
dev_version = sorted(dev_versions, key=lambda v: v[0].vtuple).pop()
self.dev_version = self.meta_class(self, dev_version[0], filepath=dev_version[1]) | Populate version data for local archives | Below is the the instruction that describes the task:
### Input:
Populate version data for local archives
### Response:
def _populate_local(self):
"""
Populate version data for local archives
"""
archives = glob(os.path.join(self.path, '*.zip'))
for archive in archives:
try:
version = self._read_zip(archive)
self.versions[version.vtuple] = self.meta_class(self, version, filepath=archive)
except BadZipfile as e:
self.log.warn('Unreadable zip archive in versions directory (%s): %s', e.message, archive)
if self.dev_path:
dev_archives = glob(os.path.join(self.dev_path, '*.zip'))
dev_versions = []
for dev_archive in dev_archives:
try:
dev_versions.append((self._read_zip(dev_archive), dev_archive))
except BadZipfile as e:
self.log.warn('Unreadable zip archive in versions directory (%s): %s', e.message, dev_archive)
if not dev_versions:
self.log.debug('No development releases found')
return
dev_version = sorted(dev_versions, key=lambda v: v[0].vtuple).pop()
self.dev_version = self.meta_class(self, dev_version[0], filepath=dev_version[1]) |
def _CheckAndCreateNewGroup(self, group_name, group_class):
"""Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group
"""
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self | Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group | Below is the the instruction that describes the task:
### Input:
Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group
### Response:
def _CheckAndCreateNewGroup(self, group_name, group_class):
"""Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group
"""
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self |
def _min_depth(self):
"""Finds minimum path length from the root.
Notes
-----
Internal method. Do not call directly.
Returns
-------
int
Minimum path length from the root.
"""
if "min_depth" in self.__dict__:
return self.__dict__["min_depth"]
min_depth = 0
hypernyms = self.hypernyms()
if hypernyms:
min_depth = 1 + min(h._min_depth() for h in hypernyms)
self.__dict__["min_depth"] = min_depth
return min_depth | Finds minimum path length from the root.
Notes
-----
Internal method. Do not call directly.
Returns
-------
int
Minimum path length from the root. | Below is the the instruction that describes the task:
### Input:
Finds minimum path length from the root.
Notes
-----
Internal method. Do not call directly.
Returns
-------
int
Minimum path length from the root.
### Response:
def _min_depth(self):
"""Finds minimum path length from the root.
Notes
-----
Internal method. Do not call directly.
Returns
-------
int
Minimum path length from the root.
"""
if "min_depth" in self.__dict__:
return self.__dict__["min_depth"]
min_depth = 0
hypernyms = self.hypernyms()
if hypernyms:
min_depth = 1 + min(h._min_depth() for h in hypernyms)
self.__dict__["min_depth"] = min_depth
return min_depth |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.