code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def update(self, **kwargs):
"""
Add, remove or modify a share's title.
Input:
* ``title`` The share title, if any (optional)
**NOTE**: Passing ``None`` or calling this method with an empty argument list will remove the share's title.
Output:
* None
Example::
share = client.get_share("4ddfds")
share.update(title="Example") # Set title to Example
share.update() # Remove title
"""
if 'title' in kwargs:
params = {"title": kwargs['title']}
else:
params = {"title": None}
response = GettRequest().post("/shares/%s/update?accesstoken=%s" % (self.sharename, self.user.access_token()), params)
if response.http_status == 200:
self.__init__(self.user, **response.response) | Add, remove or modify a share's title.
Input:
* ``title`` The share title, if any (optional)
**NOTE**: Passing ``None`` or calling this method with an empty argument list will remove the share's title.
Output:
* None
Example::
share = client.get_share("4ddfds")
share.update(title="Example") # Set title to Example
share.update() # Remove title | Below is the the instruction that describes the task:
### Input:
Add, remove or modify a share's title.
Input:
* ``title`` The share title, if any (optional)
**NOTE**: Passing ``None`` or calling this method with an empty argument list will remove the share's title.
Output:
* None
Example::
share = client.get_share("4ddfds")
share.update(title="Example") # Set title to Example
share.update() # Remove title
### Response:
def update(self, **kwargs):
    """
    Add, remove or modify a share's title.
    Input:
    * ``title`` The share title, if any (optional)
    **NOTE**: Passing ``None`` or calling this method with an empty argument list will remove the share's title.
    Output:
    * None
    Example::
    share = client.get_share("4ddfds")
    share.update(title="Example") # Set title to Example
    share.update() # Remove title
    """
    # An absent ``title`` keyword and an explicit ``title=None`` are
    # equivalent: both clear the title on the server.
    params = {"title": kwargs.get('title')}
    response = GettRequest().post(
        "/shares/%s/update?accesstoken=%s" % (self.sharename, self.user.access_token()),
        params)
    if response.http_status == 200:
        # Re-initialize this object from the server's fresh representation.
        self.__init__(self.user, **response.response)
def plural_fmt(name, cnt):
"""
pluralize name if necessary and combine with cnt
:param name: str name of the item type
:param cnt: int number items of this type
:return: str name and cnt joined
"""
if cnt == 1:
return '{} {}'.format(cnt, name)
else:
return '{} {}s'.format(cnt, name) | pluralize name if necessary and combine with cnt
:param name: str name of the item type
:param cnt: int number items of this type
:return: str name and cnt joined | Below is the the instruction that describes the task:
### Input:
pluralize name if necessary and combine with cnt
:param name: str name of the item type
:param cnt: int number items of this type
:return: str name and cnt joined
### Response:
def plural_fmt(name, cnt):
    """
    pluralize name if necessary and combine with cnt
    :param name: str name of the item type
    :param cnt: int number items of this type
    :return: str name and cnt joined
    """
    # Naive English pluralization: everything except exactly 1 gets an 's'.
    suffix = '' if cnt == 1 else 's'
    return '{} {}{}'.format(cnt, name, suffix)
def begin(self):
"""Start a new transaction."""
if self.in_transaction: # we're already in a transaction...
if self._auto_transaction:
self._auto_transaction = False
return
self.commit()
self.in_transaction = True
for collection, store in self.stores.items():
store.begin()
indexes = self.indexes[collection]
for index in indexes.values():
index.begin() | Start a new transaction. | Below is the the instruction that describes the task:
### Input:
Start a new transaction.
### Response:
def begin(self):
    """Start a new transaction."""
    if self.in_transaction:
        # An implicit (auto) transaction is simply adopted as the caller's
        # explicit one; an explicit previous transaction is committed first.
        if self._auto_transaction:
            self._auto_transaction = False
            return
        self.commit()
    self.in_transaction = True
    # Open the transaction on every store and on each store's indexes.
    for collection, store in self.stores.items():
        store.begin()
        for index in self.indexes[collection].values():
            index.begin()
def set_image(self):
"""This code must be in its own method since the fetch functions need
credits to be set. m2m fields are not yet set at the end of either the
save method or post_save signal."""
if not self.image:
scrape_image(self)
# If still no image then use first contributor image
if not self.image:
contributors = self.get_primary_contributors()
if contributors:
self.image = contributors[0].image
self.save(set_image=False)
# If still not image then default
if not self.image:
filename = settings.STATIC_ROOT + 'music/images/default.png'
if os.path.exists(filename):
image = File(
open(filename, 'rb')
)
image.name = 'default.png'
self.image = image
self.save(set_image=False) | This code must be in its own method since the fetch functions need
credits to be set. m2m fields are not yet set at the end of either the
save method or post_save signal. | Below is the the instruction that describes the task:
### Input:
This code must be in its own method since the fetch functions need
credits to be set. m2m fields are not yet set at the end of either the
save method or post_save signal.
### Response:
def set_image(self):
    """Choose and persist an image for this instance.

    This code must be in its own method since the fetch functions need
    credits to be set. m2m fields are not yet set at the end of either the
    save method or post_save signal.

    Resolution order: scraped image, then the first primary contributor's
    image, then the bundled default artwork. Saves with ``set_image=False``
    to avoid re-entering this method from ``save``.
    """
    if not self.image:
        scrape_image(self)
    # If still no image then use first contributor image
    if not self.image:
        contributors = self.get_primary_contributors()
        if contributors:
            self.image = contributors[0].image
            self.save(set_image=False)
    # If still no image then fall back to the bundled default artwork.
    if not self.image:
        filename = settings.STATIC_ROOT + 'music/images/default.png'
        if os.path.exists(filename):
            # NOTE(review): the file handle is never closed explicitly; the
            # File wrapper is saved before the reference is dropped — confirm
            # this is intentional.
            image = File(
                open(filename, 'rb')
            )
            image.name = 'default.png'
            self.image = image
            self.save(set_image=False)
def insert_colorpoint(self, position=0.5, color1=[1.0,1.0,0.0], color2=[1.0,1.0,0.0]):
"""
Inserts the specified color into the list.
"""
L = self._colorpoint_list
# if position = 0 or 1, push the end points inward
if position <= 0.0:
L.insert(0,[0.0,color1,color2])
elif position >= 1.0:
L.append([1.0,color1,color2])
# otherwise, find the position where it belongs
else:
# loop over all the points
for n in range(len(self._colorpoint_list)):
# check if it's less than the next one
if position <= L[n+1][0]:
# found the place to insert it
L.insert(n+1,[position,color1,color2])
break
# update the image with the new cmap
self.update_image()
return self | Inserts the specified color into the list. | Below is the the instruction that describes the task:
### Input:
Inserts the specified color into the list.
### Response:
def insert_colorpoint(self, position=0.5, color1=None, color2=None):
    """
    Inserts the specified color into the list.

    :param position: float in [0, 1]; values at or below 0 are clamped to
        the start, values at or above 1 are appended at the end.
    :param color1: first color triple (defaults to [1.0, 1.0, 0.0]).
    :param color2: second color triple (defaults to [1.0, 1.0, 0.0]).
    :return: self (fluent interface).
    """
    # Use None sentinels instead of mutable list defaults so that the
    # default color objects are not shared across calls.
    if color1 is None:
        color1 = [1.0, 1.0, 0.0]
    if color2 is None:
        color2 = [1.0, 1.0, 0.0]
    L = self._colorpoint_list
    # if position = 0 or 1, push the end points inward
    if position <= 0.0:
        L.insert(0, [0.0, color1, color2])
    elif position >= 1.0:
        L.append([1.0, color1, color2])
    # otherwise, find the position where it belongs
    else:
        # Iterate only up to len(L)-1 so L[n+1] is always valid (the
        # original range(len(L)) raised IndexError when no later point
        # existed).
        for n in range(len(L) - 1):
            if position <= L[n + 1][0]:
                # found the place to insert it
                L.insert(n + 1, [position, color1, color2])
                break
        else:
            # No later point found (short or malformed list): keep the
            # list ordered by appending at the end.
            L.append([position, color1, color2])
    # update the image with the new cmap
    self.update_image()
    return self
def content(self, value):
"""
Defines the ``Content-Type`` outgoing header value to match.
You can pass one of the following type aliases instead of the full
MIME type representation:
- ``json`` = ``application/json``
- ``xml`` = ``application/xml``
- ``html`` = ``text/html``
- ``text`` = ``text/plain``
- ``urlencoded`` = ``application/x-www-form-urlencoded``
- ``form`` = ``application/x-www-form-urlencoded``
- ``form-data`` = ``application/x-www-form-urlencoded``
Arguments:
value (str): type alias or header value to match.
Returns:
self: current Mock instance.
"""
header = {'Content-Type': TYPES.get(value, value)}
self._request.headers = header
self.add_matcher(matcher('HeadersMatcher', header)) | Defines the ``Content-Type`` outgoing header value to match.
You can pass one of the following type aliases instead of the full
MIME type representation:
- ``json`` = ``application/json``
- ``xml`` = ``application/xml``
- ``html`` = ``text/html``
- ``text`` = ``text/plain``
- ``urlencoded`` = ``application/x-www-form-urlencoded``
- ``form`` = ``application/x-www-form-urlencoded``
- ``form-data`` = ``application/x-www-form-urlencoded``
Arguments:
value (str): type alias or header value to match.
Returns:
self: current Mock instance. | Below is the the instruction that describes the task:
### Input:
Defines the ``Content-Type`` outgoing header value to match.
You can pass one of the following type aliases instead of the full
MIME type representation:
- ``json`` = ``application/json``
- ``xml`` = ``application/xml``
- ``html`` = ``text/html``
- ``text`` = ``text/plain``
- ``urlencoded`` = ``application/x-www-form-urlencoded``
- ``form`` = ``application/x-www-form-urlencoded``
- ``form-data`` = ``application/x-www-form-urlencoded``
Arguments:
value (str): type alias or header value to match.
Returns:
self: current Mock instance.
### Response:
def content(self, value):
    """
    Defines the ``Content-Type`` outgoing header value to match.
    You can pass one of the following type aliases instead of the full
    MIME type representation:
    - ``json`` = ``application/json``
    - ``xml`` = ``application/xml``
    - ``html`` = ``text/html``
    - ``text`` = ``text/plain``
    - ``urlencoded`` = ``application/x-www-form-urlencoded``
    - ``form`` = ``application/x-www-form-urlencoded``
    - ``form-data`` = ``application/x-www-form-urlencoded``
    Arguments:
        value (str): type alias or header value to match.
    Returns:
        self: current Mock instance.
    """
    # Resolve a known alias to its MIME type; unknown values pass through.
    content_type = TYPES.get(value, value)
    headers = {'Content-Type': content_type}
    self._request.headers = headers
    self.add_matcher(matcher('HeadersMatcher', headers))
def temporary_directory():
""" make a temporary directory, yeild its name, cleanup on exit """
dir_name = tempfile.mkdtemp()
yield dir_name
if os.path.exists(dir_name):
shutil.rmtree(dir_name) | make a temporary directory, yeild its name, cleanup on exit | Below is the the instruction that describes the task:
### Input:
make a temporary directory, yeild its name, cleanup on exit
### Response:
def temporary_directory():
    """Make a temporary directory, yield its name, clean up on exit.

    The try/finally guarantees the directory is removed even when the
    consumer raises (or throws into the generator) while the directory
    is in use — the original version leaked the directory in that case.
    """
    dir_name = tempfile.mkdtemp()
    try:
        yield dir_name
    finally:
        if os.path.exists(dir_name):
            shutil.rmtree(dir_name)
def _from_dict(cls, _dict):
"""Initialize a AggregationResult object from a json dictionary."""
args = {}
if 'key' in _dict:
args['key'] = _dict.get('key')
if 'matching_results' in _dict:
args['matching_results'] = _dict.get('matching_results')
if 'aggregations' in _dict:
args['aggregations'] = [
QueryAggregation._from_dict(x)
for x in (_dict.get('aggregations'))
]
return cls(**args) | Initialize a AggregationResult object from a json dictionary. | Below is the the instruction that describes the task:
### Input:
Initialize a AggregationResult object from a json dictionary.
### Response:
def _from_dict(cls, _dict):
    """Initialize a AggregationResult object from a json dictionary."""
    args = {}
    # Scalar fields are copied through verbatim when present.
    for simple_key in ('key', 'matching_results'):
        if simple_key in _dict:
            args[simple_key] = _dict.get(simple_key)
    # Nested aggregations are deserialized recursively.
    if 'aggregations' in _dict:
        args['aggregations'] = [
            QueryAggregation._from_dict(aggregation)
            for aggregation in _dict.get('aggregations')
        ]
    return cls(**args)
def create(self, article, attachment, inline=False, file_name=None, content_type=None):
"""
This function creates attachment attached to article.
:param article: Numeric article id or :class:`Article` object.
:param attachment: File object or os path to file
:param inline: If true, the attached file is shown in the dedicated admin UI
for inline attachments and its url can be referenced in the HTML body of
the article. If false, the attachment is listed in the list of attachments.
Default is `false`
:param file_name: you can set filename on file upload.
:param content_type: The content type of the file. `Example: image/png`, Zendesk can ignore it.
:return: :class:`ArticleAttachment` object
"""
return HelpdeskAttachmentRequest(self).post(self.endpoint.create,
article=article,
attachments=attachment,
inline=inline,
file_name=file_name,
content_type=content_type) | This function creates attachment attached to article.
:param article: Numeric article id or :class:`Article` object.
:param attachment: File object or os path to file
:param inline: If true, the attached file is shown in the dedicated admin UI
for inline attachments and its url can be referenced in the HTML body of
the article. If false, the attachment is listed in the list of attachments.
Default is `false`
:param file_name: you can set filename on file upload.
:param content_type: The content type of the file. `Example: image/png`, Zendesk can ignore it.
:return: :class:`ArticleAttachment` object | Below is the the instruction that describes the task:
### Input:
This function creates attachment attached to article.
:param article: Numeric article id or :class:`Article` object.
:param attachment: File object or os path to file
:param inline: If true, the attached file is shown in the dedicated admin UI
for inline attachments and its url can be referenced in the HTML body of
the article. If false, the attachment is listed in the list of attachments.
Default is `false`
:param file_name: you can set filename on file upload.
:param content_type: The content type of the file. `Example: image/png`, Zendesk can ignore it.
:return: :class:`ArticleAttachment` object
### Response:
def create(self, article, attachment, inline=False, file_name=None, content_type=None):
    """
    This function creates attachment attached to article.
    :param article: Numeric article id or :class:`Article` object.
    :param attachment: File object or os path to file
    :param inline: If true, the attached file is shown in the dedicated admin UI
        for inline attachments and its url can be referenced in the HTML body of
        the article. If false, the attachment is listed in the list of attachments.
        Default is `false`
    :param file_name: you can set filename on file upload.
    :param content_type: The content type of the file. `Example: image/png`, Zendesk can ignore it.
    :return: :class:`ArticleAttachment` object
    """
    request = HelpdeskAttachmentRequest(self)
    return request.post(
        self.endpoint.create,
        article=article,
        attachments=attachment,
        inline=inline,
        file_name=file_name,
        content_type=content_type,
    )
def _onShortcutDuplicateLine(self):
"""Duplicate selected text or current line
"""
cursor = self.textCursor()
if cursor.hasSelection(): # duplicate selection
text = cursor.selectedText()
selectionStart, selectionEnd = cursor.selectionStart(), cursor.selectionEnd()
cursor.setPosition(selectionEnd)
cursor.insertText(text)
# restore selection
cursor.setPosition(selectionStart)
cursor.setPosition(selectionEnd, QTextCursor.KeepAnchor)
self.setTextCursor(cursor)
else:
line = cursor.blockNumber()
self.lines.insert(line + 1, self.lines[line])
self.ensureCursorVisible()
self._updateExtraSelections() | Duplicate selected text or current line | Below is the the instruction that describes the task:
### Input:
Duplicate selected text or current line
### Response:
def _onShortcutDuplicateLine(self):
    """Duplicate the selected text, or the current line when nothing is selected.
    """
    cursor = self.textCursor()
    if not cursor.hasSelection():
        # No selection: clone the whole current line just below itself.
        row = cursor.blockNumber()
        self.lines.insert(row + 1, self.lines[row])
        self.ensureCursorVisible()
    else:
        # Insert a copy of the selection immediately after it.
        selected = cursor.selectedText()
        start, end = cursor.selectionStart(), cursor.selectionEnd()
        cursor.setPosition(end)
        cursor.insertText(selected)
        # Re-select the original span so repeated invocations keep working.
        cursor.setPosition(start)
        cursor.setPosition(end, QTextCursor.KeepAnchor)
        self.setTextCursor(cursor)
    self._updateExtraSelections()
def import_eit_fzj(self, filename, configfile, correction_file=None,
timestep=None, **kwargs):
"""EIT data import for FZJ Medusa systems"""
# we get not electrode positions (dummy1) and no topography data
# (dummy2)
df_emd, dummy1, dummy2 = eit_fzj.read_3p_data(
filename,
configfile,
**kwargs
)
if correction_file is not None:
eit_fzj_utils.apply_correction_factors(df_emd, correction_file)
if timestep is not None:
df_emd['timestep'] = timestep
self._add_to_container(df_emd)
print('Summary:')
self._describe_data(df_emd) | EIT data import for FZJ Medusa systems | Below is the the instruction that describes the task:
### Input:
EIT data import for FZJ Medusa systems
### Response:
def import_eit_fzj(self, filename, configfile, correction_file=None,
                   timestep=None, **kwargs):
    """EIT data import for FZJ Medusa systems

    :param filename: path of the measurement data file to import
    :param configfile: path of the measurement configuration file
    :param correction_file: optional path to a file with correction factors
        applied to the imported data
    :param timestep: optional value stored in the ``timestep`` column of the
        imported data
    """
    # we get no electrode positions (dummy1) and no topography data
    # (dummy2)
    df_emd, dummy1, dummy2 = eit_fzj.read_3p_data(
        filename,
        configfile,
        **kwargs
    )
    if correction_file is not None:
        # NOTE(review): the return value is discarded — presumably
        # apply_correction_factors modifies df_emd in place; confirm.
        eit_fzj_utils.apply_correction_factors(df_emd, correction_file)
    if timestep is not None:
        df_emd['timestep'] = timestep
    self._add_to_container(df_emd)
    print('Summary:')
    self._describe_data(df_emd)
def bottom(self, objects: Set[Object]) -> Set[Object]:
"""
Return the bottom most objects(i.e. maximum y_loc). The comparison is done separately for
each box.
"""
objects_per_box = self._separate_objects_by_boxes(objects)
return_set: Set[Object] = set()
for _, box_objects in objects_per_box.items():
max_y_loc = max([obj.y_loc for obj in box_objects])
return_set.update(set([obj for obj in box_objects if obj.y_loc == max_y_loc]))
return return_set | Return the bottom most objects(i.e. maximum y_loc). The comparison is done separately for
each box. | Below is the the instruction that describes the task:
### Input:
Return the bottom most objects(i.e. maximum y_loc). The comparison is done separately for
each box.
### Response:
def bottom(self, objects: Set[Object]) -> Set[Object]:
    """
    Return the bottom most objects(i.e. maximum y_loc). The comparison is done separately for
    each box.
    """
    result: Set[Object] = set()
    # Per box: keep every object tied for the largest y_loc in that box.
    for box_objects in self._separate_objects_by_boxes(objects).values():
        lowest_y = max(candidate.y_loc for candidate in box_objects)
        result.update(candidate for candidate in box_objects
                      if candidate.y_loc == lowest_y)
    return result
def _controlSide(cls, side, grammar):
# type: (_MetaRule, List[object], Grammar) -> None
"""
Validate one side of the rule.
:param side: Iterable side of the rule.
:param grammar: Grammar on which to validate.
:raise RuleSyntaxException: If invalid syntax is use.
:raise UselessEpsilonException: If useless epsilon is used.
:raise TerminalDoesNotExistsException: If terminal does not exists in the grammar.
:raise NonterminalDoesNotExistsException: If nonterminal does not exists in the grammar.
"""
if not isinstance(side, list):
raise RuleSyntaxException(cls, 'One side of rule is not enclose by list', side)
if len(side) == 0:
raise RuleSyntaxException(cls, 'One side of rule is not define', side)
if EPS in side and len(side) > 1:
raise UselessEpsilonException(cls)
for symb in side:
if isclass(symb) and issubclass(symb, Nonterminal):
if symb not in grammar.nonterminals:
raise NonterminalDoesNotExistsException(cls, symb, grammar)
elif symb is EPS:
continue
elif symb not in grammar.terminals:
raise TerminalDoesNotExistsException(cls, symb, grammar) | Validate one side of the rule.
:param side: Iterable side of the rule.
:param grammar: Grammar on which to validate.
:raise RuleSyntaxException: If invalid syntax is use.
:raise UselessEpsilonException: If useless epsilon is used.
:raise TerminalDoesNotExistsException: If terminal does not exists in the grammar.
:raise NonterminalDoesNotExistsException: If nonterminal does not exists in the grammar. | Below is the the instruction that describes the task:
### Input:
Validate one side of the rule.
:param side: Iterable side of the rule.
:param grammar: Grammar on which to validate.
:raise RuleSyntaxException: If invalid syntax is use.
:raise UselessEpsilonException: If useless epsilon is used.
:raise TerminalDoesNotExistsException: If terminal does not exists in the grammar.
:raise NonterminalDoesNotExistsException: If nonterminal does not exists in the grammar.
### Response:
def _controlSide(cls, side, grammar):
    # type: (_MetaRule, List[object], Grammar) -> None
    """
    Validate one side of the rule.
    :param side: Iterable side of the rule.
    :param grammar: Grammar on which to validate.
    :raise RuleSyntaxException: If invalid syntax is use.
    :raise UselessEpsilonException: If useless epsilon is used.
    :raise TerminalDoesNotExistsException: If terminal does not exists in the grammar.
    :raise NonterminalDoesNotExistsException: If nonterminal does not exists in the grammar.
    """
    if not isinstance(side, list):
        raise RuleSyntaxException(cls, 'One side of rule is not enclose by list', side)
    if not side:
        raise RuleSyntaxException(cls, 'One side of rule is not define', side)
    # Epsilon only makes sense as the sole symbol on a side.
    if len(side) > 1 and EPS in side:
        raise UselessEpsilonException(cls)
    for symbol in side:
        if isclass(symbol) and issubclass(symbol, Nonterminal):
            # Nonterminal classes must be registered with the grammar.
            if symbol not in grammar.nonterminals:
                raise NonterminalDoesNotExistsException(cls, symbol, grammar)
        elif symbol is EPS:
            continue
        elif symbol not in grammar.terminals:
            raise TerminalDoesNotExistsException(cls, symbol, grammar)
def add_arguments(self):
"""
Definition and addition of all arguments.
"""
if self.parser is None:
raise TypeError("Parser cannot be None, has create_parser been called?")
for keys, kwargs in self.args.items():
if not isinstance(keys, tuple):
keys = (keys,)
self.parser.add_argument(*keys, **kwargs) | Definition and addition of all arguments. | Below is the the instruction that describes the task:
### Input:
Definition and addition of all arguments.
### Response:
def add_arguments(self):
    """
    Register every configured argument on the parser.

    :raises TypeError: if ``create_parser`` has not been called yet, i.e.
        ``self.parser`` is still ``None``.
    """
    if self.parser is None:
        raise TypeError("Parser cannot be None, has create_parser been called?")
    for keys, kwargs in self.args.items():
        # A bare key is treated as a one-element tuple of option strings.
        names = keys if isinstance(keys, tuple) else (keys,)
        self.parser.add_argument(*names, **kwargs)
def setup_figcanvas(self):
"""Setup the FigureCanvas."""
self.figcanvas = FigureCanvas(background_color=self.background_color)
self.figcanvas.installEventFilter(self)
self.setWidget(self.figcanvas) | Setup the FigureCanvas. | Below is the the instruction that describes the task:
### Input:
Setup the FigureCanvas.
### Response:
def setup_figcanvas(self):
    """Setup the FigureCanvas."""
    self.figcanvas = FigureCanvas(background_color=self.background_color)
    # Route the canvas' events through this widget's eventFilter.
    self.figcanvas.installEventFilter(self)
    # NOTE(review): setWidget suggests this is a QScrollArea-like container
    # embedding the canvas — confirm against the class definition.
    self.setWidget(self.figcanvas)
def filter_name(funcname, package, context="decorate", explicit=False):
"""Returns True if the specified function should be filtered (i.e., included
or excluded for use in the specified context.)
Args:
funcname (str): name of the method/function being called.
package (str): name of the package that the method belongs to.
context (str): one of ['decorate', 'time', 'analyze']; specifies which
section of the configuration settings to check.
explicit (bool): when True, if a name is not explicitly specified for
inclusion, then the function returns False.
Returns:
bool: specifying whether the function should be decorated, timed or
analyzed.
"""
packfilter = _get_name_filter(package, context)
if packfilter is None:
# By default, if there are no rules specified, then we include
# everything.
return True
# First we handle the `fnmatch` filters. If something is explicitly included
# that takes precedence over the ignore filters.
matched = None
if packfilter["filters"] is not None:
from fnmatch import fnmatch
for pattern in packfilter["filters"]:
if fnmatch(funcname, pattern):
matched = True
return matched
#We don't have any use cases yet for regex filters.
if packfilter["rfilters"] is not None: # pragma: no cover
for pattern in packfilter["rfilters"]:
if pattern.match(funcname):
matched = True
return matched
if packfilter["ignores"] is not None:
from fnmatch import fnmatch
for pattern in packfilter["ignores"]:
if fnmatch(funcname, pattern):
matched = False
return matched
if packfilter["rignores"] is not None: # pragma: no cover
for pattern in packfilter["rignores"]:
if pattern.match(funcname):
matched = False
return matched
if matched is None:
matched = not explicit
return matched | Returns True if the specified function should be filtered (i.e., included
or excluded for use in the specified context.)
Args:
funcname (str): name of the method/function being called.
package (str): name of the package that the method belongs to.
context (str): one of ['decorate', 'time', 'analyze']; specifies which
section of the configuration settings to check.
explicit (bool): when True, if a name is not explicitly specified for
inclusion, then the function returns False.
Returns:
bool: specifying whether the function should be decorated, timed or
analyzed. | Below is the the instruction that describes the task:
### Input:
Returns True if the specified function should be filtered (i.e., included
or excluded for use in the specified context.)
Args:
funcname (str): name of the method/function being called.
package (str): name of the package that the method belongs to.
context (str): one of ['decorate', 'time', 'analyze']; specifies which
section of the configuration settings to check.
explicit (bool): when True, if a name is not explicitly specified for
inclusion, then the function returns False.
Returns:
bool: specifying whether the function should be decorated, timed or
analyzed.
### Response:
def filter_name(funcname, package, context="decorate", explicit=False):
    """Returns True if the specified function should be filtered (i.e., included
    or excluded for use in the specified context.)
    Args:
        funcname (str): name of the method/function being called.
        package (str): name of the package that the method belongs to.
        context (str): one of ['decorate', 'time', 'analyze']; specifies which
            section of the configuration settings to check.
        explicit (bool): when True, if a name is not explicitly specified for
            inclusion, then the function returns False.
    Returns:
        bool: specifying whether the function should be decorated, timed or
        analyzed.
    """
    packfilter = _get_name_filter(package, context)
    if packfilter is None:
        # By default, if there are no rules specified, then we include
        # everything.
        return True
    # First we handle the `fnmatch` filters. If something is explicitly included
    # that takes precedence over the ignore filters.
    matched = None
    # NOTE(review): `from fnmatch import fnmatch` appears twice below (once per
    # branch); hoisting a single import to the top would be equivalent.
    if packfilter["filters"] is not None:
        from fnmatch import fnmatch
        for pattern in packfilter["filters"]:
            if fnmatch(funcname, pattern):
                matched = True
                return matched
    #We don't have any use cases yet for regex filters.
    if packfilter["rfilters"] is not None: # pragma: no cover
        for pattern in packfilter["rfilters"]:
            if pattern.match(funcname):
                matched = True
                return matched
    if packfilter["ignores"] is not None:
        from fnmatch import fnmatch
        for pattern in packfilter["ignores"]:
            if fnmatch(funcname, pattern):
                matched = False
                return matched
    if packfilter["rignores"] is not None: # pragma: no cover
        for pattern in packfilter["rignores"]:
            if pattern.match(funcname):
                matched = False
                return matched
    # Nothing matched any pattern: include unless explicit inclusion is
    # required by the caller.
    if matched is None:
        matched = not explicit
    return matched
def get_properties(self, instance, fields):
"""
Get the feature metadata which will be used for the GeoJSON
"properties" key.
By default it returns all serializer fields excluding those used for
the ID, the geometry and the bounding box.
:param instance: The current Django model instance
:param fields: The list of fields to process (fields already processed have been removed)
:return: OrderedDict containing the properties of the current feature
:rtype: OrderedDict
"""
properties = OrderedDict()
for field in fields:
if field.write_only:
continue
value = field.get_attribute(instance)
representation = None
if value is not None:
representation = field.to_representation(value)
properties[field.field_name] = representation
return properties | Get the feature metadata which will be used for the GeoJSON
"properties" key.
By default it returns all serializer fields excluding those used for
the ID, the geometry and the bounding box.
:param instance: The current Django model instance
:param fields: The list of fields to process (fields already processed have been removed)
:return: OrderedDict containing the properties of the current feature
:rtype: OrderedDict | Below is the the instruction that describes the task:
### Input:
Get the feature metadata which will be used for the GeoJSON
"properties" key.
By default it returns all serializer fields excluding those used for
the ID, the geometry and the bounding box.
:param instance: The current Django model instance
:param fields: The list of fields to process (fields already processed have been removed)
:return: OrderedDict containing the properties of the current feature
:rtype: OrderedDict
### Response:
def get_properties(self, instance, fields):
    """
    Get the feature metadata which will be used for the GeoJSON
    "properties" key.
    By default it returns all serializer fields excluding those used for
    the ID, the geometry and the bounding box.
    :param instance: The current Django model instance
    :param fields: The list of fields to process (fields already processed have been removed)
    :return: OrderedDict containing the properties of the current feature
    :rtype: OrderedDict
    """
    properties = OrderedDict()
    for field in fields:
        # Write-only fields never appear in the serialized output.
        if field.write_only:
            continue
        attribute = field.get_attribute(instance)
        if attribute is None:
            # Preserve None as-is rather than serializing it.
            properties[field.field_name] = None
        else:
            properties[field.field_name] = field.to_representation(attribute)
    return properties
def yzy_to_zyz(xi, theta1, theta2, eps=1e-9): # pylint: disable=invalid-name
"""Express a Y.Z.Y single qubit gate as a Z.Y.Z gate.
Solve the equation
.. math::
Ry(theta1).Rz(xi).Ry(theta2) = Rz(phi).Ry(theta).Rz(lambda)
for theta, phi, and lambda.
Return a solution theta, phi, and lambda.
"""
quaternion_yzy = quaternion_from_euler([theta1, xi, theta2], 'yzy')
euler = quaternion_yzy.to_zyz()
quaternion_zyz = quaternion_from_euler(euler, 'zyz')
# output order different than rotation order
out_angles = (euler[1], euler[0], euler[2])
abs_inner = abs(quaternion_zyz.data.dot(quaternion_yzy.data))
if not np.allclose(abs_inner, 1, eps):
raise TranspilerError('YZY and ZYZ angles do not give same rotation matrix.')
out_angles = tuple(0 if np.abs(angle) < _CHOP_THRESHOLD else angle
for angle in out_angles)
return out_angles | Express a Y.Z.Y single qubit gate as a Z.Y.Z gate.
Solve the equation
.. math::
Ry(theta1).Rz(xi).Ry(theta2) = Rz(phi).Ry(theta).Rz(lambda)
for theta, phi, and lambda.
Return a solution theta, phi, and lambda. | Below is the the instruction that describes the task:
### Input:
Express a Y.Z.Y single qubit gate as a Z.Y.Z gate.
Solve the equation
.. math::
Ry(theta1).Rz(xi).Ry(theta2) = Rz(phi).Ry(theta).Rz(lambda)
for theta, phi, and lambda.
Return a solution theta, phi, and lambda.
### Response:
def yzy_to_zyz(xi, theta1, theta2, eps=1e-9): # pylint: disable=invalid-name
    """Express a Y.Z.Y single qubit gate as a Z.Y.Z gate.
    Solve the equation
    .. math::
    Ry(theta1).Rz(xi).Ry(theta2) = Rz(phi).Ry(theta).Rz(lambda)
    for theta, phi, and lambda.
    Return a solution theta, phi, and lambda.

    :param xi: angle of the middle Z rotation (yzy order)
    :param theta1: angle of the first Y rotation
    :param theta2: angle of the last Y rotation
    :param eps: tolerance used when checking the two decompositions agree
    :raises TranspilerError: if the yzy and zyz quaternions disagree
    """
    quaternion_yzy = quaternion_from_euler([theta1, xi, theta2], 'yzy')
    euler = quaternion_yzy.to_zyz()
    # Round-trip: rebuild a quaternion from the zyz angles to verify them.
    quaternion_zyz = quaternion_from_euler(euler, 'zyz')
    # output order different than rotation order
    out_angles = (euler[1], euler[0], euler[2])
    # |<q_zyz, q_yzy>| == 1 iff both quaternions describe the same rotation
    # (up to global sign).
    abs_inner = abs(quaternion_zyz.data.dot(quaternion_yzy.data))
    # NOTE(review): ``eps`` is passed positionally as np.allclose's *rtol*
    # (not atol) — confirm a relative tolerance is intended here.
    if not np.allclose(abs_inner, 1, eps):
        raise TranspilerError('YZY and ZYZ angles do not give same rotation matrix.')
    # Flush numerically-tiny angles to exactly zero.
    out_angles = tuple(0 if np.abs(angle) < _CHOP_THRESHOLD else angle
                       for angle in out_angles)
    return out_angles
def arithm_expr_target(x, target):
""" Create arithmetic expression approaching target value
:param x: allowed constants
:param target: target value
:returns: string in form 'expression=value'
:complexity: huge
"""
n = len(x)
expr = [{} for _ in range(1 << n)]
# expr[S][val]
# = string solely composed of values in set S that evaluates to val
for i in range(n):
expr[1 << i] = {x[i]: str(x[i])} # store singletons
all_ = (1 << n) - 1
for S in range(3, all_ + 1): # 3: first num that isn't a power of 2
if expr[S] != {}:
continue # in that case S is a power of 2
for L in range(1, S): # decompose set S into non-empty sets L, R
if L & S == L:
R = S ^ L
for vL in expr[L]: # combine expressions from L
for vR in expr[R]: # with expressions from R
eL = expr[L][vL]
eR = expr[R][vR]
expr[S][vL] = eL
if vL > vR: # difference cannot become negative
expr[S][vL - vR] = "(%s-%s)" % (eL, eR)
if L < R: # break symmetry
expr[S][vL + vR] = "(%s+%s)" % (eL, eR)
expr[S][vL * vR] = "(%s*%s)" % (eL, eR)
if vR != 0 and vL % vR == 0: # only integer div
expr[S][vL // vR] = "(%s/%s)" % (eL, eR)
# look for the closest expression from the target
for dist in range(target + 1):
for sign in [-1, +1]:
val = target + sign * dist
if val in expr[all_]:
return "%s=%i" % (expr[all_][val], val)
# never reaches here if x contains integers between 0 and target
pass | Create arithmetic expression approaching target value
:param x: allowed constants
:param target: target value
:returns: string in form 'expression=value'
:complexity: huge | Below is the the instruction that describes the task:
### Input:
Create arithmetic expression approaching target value
:param x: allowed constants
:param target: target value
:returns: string in form 'expression=value'
:complexity: huge
### Response:
def arithm_expr_target(x, target):
""" Create arithmetic expression approaching target value
:param x: allowed constants
:param target: target value
:returns: string in form 'expression=value'
:complexity: huge
"""
n = len(x)
expr = [{} for _ in range(1 << n)]
# expr[S][val]
# = string solely composed of values in set S that evaluates to val
for i in range(n):
expr[1 << i] = {x[i]: str(x[i])} # store singletons
all_ = (1 << n) - 1
for S in range(3, all_ + 1): # 3: first num that isn't a power of 2
if expr[S] != {}:
continue # in that case S is a power of 2
for L in range(1, S): # decompose set S into non-empty sets L, R
if L & S == L:
R = S ^ L
for vL in expr[L]: # combine expressions from L
for vR in expr[R]: # with expressions from R
eL = expr[L][vL]
eR = expr[R][vR]
expr[S][vL] = eL
if vL > vR: # difference cannot become negative
expr[S][vL - vR] = "(%s-%s)" % (eL, eR)
if L < R: # break symmetry
expr[S][vL + vR] = "(%s+%s)" % (eL, eR)
expr[S][vL * vR] = "(%s*%s)" % (eL, eR)
if vR != 0 and vL % vR == 0: # only integer div
expr[S][vL // vR] = "(%s/%s)" % (eL, eR)
# look for the closest expression from the target
for dist in range(target + 1):
for sign in [-1, +1]:
val = target + sign * dist
if val in expr[all_]:
return "%s=%i" % (expr[all_][val], val)
# never reaches here if x contains integers between 0 and target
pass |
def startDtmf():
"""START DTMF Section 9.3.24"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0x35) # 00110101
c = KeypadFacilityHdr(ieiKF=0x2C, eightBitKF=0x0)
packet = a / b / c
return packet | START DTMF Section 9.3.24 | Below is the the instruction that describes the task:
### Input:
START DTMF Section 9.3.24
### Response:
def startDtmf():
"""START DTMF Section 9.3.24"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0x35) # 00110101
c = KeypadFacilityHdr(ieiKF=0x2C, eightBitKF=0x0)
packet = a / b / c
return packet |
def init(name, storage_backend='dir', trust_password=None,
network_address=None, network_port=None, storage_create_device=None,
storage_create_loop=None, storage_pool=None,
done_file='%SALT_CONFIG_DIR%/lxd_initialized'):
'''
Initalizes the LXD Daemon, as LXD doesn't tell if its initialized
we touch the the done_file and check if it exist.
This can only be called once per host unless you remove the done_file.
name :
Ignore this. This is just here for salt.
storage_backend :
Storage backend to use (zfs or dir, default: dir)
trust_password :
Password required to add new clients
network_address : None
Address to bind LXD to (default: none)
network_port : None
Port to bind LXD to (Default: 8443)
storage_create_device : None
Setup device based storage using this DEVICE
storage_create_loop : None
Setup loop based storage with this SIZE in GB
storage_pool : None
Storage pool to use or create
done_file :
Path where we check that this method has been called,
as it can run only once and theres currently no way
to ask LXD if init has been called.
'''
ret = {
'name': name,
'storage_backend': storage_backend,
'trust_password': True if trust_password is not None else False,
'network_address': network_address,
'network_port': network_port,
'storage_create_device': storage_create_device,
'storage_create_loop': storage_create_loop,
'storage_pool': storage_pool,
'done_file': done_file,
}
# TODO: Get a better path and don't hardcode '/etc/salt'
done_file = done_file.replace('%SALT_CONFIG_DIR%', '/etc/salt')
if os.path.exists(done_file):
# Success we already did that.
return _success(ret, 'LXD is already initialized')
if __opts__['test']:
return _success(ret, 'Would initialize LXD')
# We always touch the done_file, so when LXD is already initialized
# we don't run this over and over.
__salt__['file.touch'](done_file)
try:
__salt__['lxd.init'](
storage_backend if storage_backend else None,
trust_password if trust_password else None,
network_address if network_address else None,
network_port if network_port else None,
storage_create_device if storage_create_device else None,
storage_create_loop if storage_create_loop else None,
storage_pool if storage_pool else None
)
except CommandExecutionError as e:
return _error(ret, six.text_type(e))
return _success(ret, 'Initialized the LXD Daemon') | Initalizes the LXD Daemon, as LXD doesn't tell if its initialized
we touch the the done_file and check if it exist.
This can only be called once per host unless you remove the done_file.
name :
Ignore this. This is just here for salt.
storage_backend :
Storage backend to use (zfs or dir, default: dir)
trust_password :
Password required to add new clients
network_address : None
Address to bind LXD to (default: none)
network_port : None
Port to bind LXD to (Default: 8443)
storage_create_device : None
Setup device based storage using this DEVICE
storage_create_loop : None
Setup loop based storage with this SIZE in GB
storage_pool : None
Storage pool to use or create
done_file :
Path where we check that this method has been called,
as it can run only once and theres currently no way
to ask LXD if init has been called. | Below is the the instruction that describes the task:
### Input:
Initalizes the LXD Daemon, as LXD doesn't tell if its initialized
we touch the the done_file and check if it exist.
This can only be called once per host unless you remove the done_file.
name :
Ignore this. This is just here for salt.
storage_backend :
Storage backend to use (zfs or dir, default: dir)
trust_password :
Password required to add new clients
network_address : None
Address to bind LXD to (default: none)
network_port : None
Port to bind LXD to (Default: 8443)
storage_create_device : None
Setup device based storage using this DEVICE
storage_create_loop : None
Setup loop based storage with this SIZE in GB
storage_pool : None
Storage pool to use or create
done_file :
Path where we check that this method has been called,
as it can run only once and theres currently no way
to ask LXD if init has been called.
### Response:
def init(name, storage_backend='dir', trust_password=None,
network_address=None, network_port=None, storage_create_device=None,
storage_create_loop=None, storage_pool=None,
done_file='%SALT_CONFIG_DIR%/lxd_initialized'):
'''
Initalizes the LXD Daemon, as LXD doesn't tell if its initialized
we touch the the done_file and check if it exist.
This can only be called once per host unless you remove the done_file.
name :
Ignore this. This is just here for salt.
storage_backend :
Storage backend to use (zfs or dir, default: dir)
trust_password :
Password required to add new clients
network_address : None
Address to bind LXD to (default: none)
network_port : None
Port to bind LXD to (Default: 8443)
storage_create_device : None
Setup device based storage using this DEVICE
storage_create_loop : None
Setup loop based storage with this SIZE in GB
storage_pool : None
Storage pool to use or create
done_file :
Path where we check that this method has been called,
as it can run only once and theres currently no way
to ask LXD if init has been called.
'''
ret = {
'name': name,
'storage_backend': storage_backend,
'trust_password': True if trust_password is not None else False,
'network_address': network_address,
'network_port': network_port,
'storage_create_device': storage_create_device,
'storage_create_loop': storage_create_loop,
'storage_pool': storage_pool,
'done_file': done_file,
}
# TODO: Get a better path and don't hardcode '/etc/salt'
done_file = done_file.replace('%SALT_CONFIG_DIR%', '/etc/salt')
if os.path.exists(done_file):
# Success we already did that.
return _success(ret, 'LXD is already initialized')
if __opts__['test']:
return _success(ret, 'Would initialize LXD')
# We always touch the done_file, so when LXD is already initialized
# we don't run this over and over.
__salt__['file.touch'](done_file)
try:
__salt__['lxd.init'](
storage_backend if storage_backend else None,
trust_password if trust_password else None,
network_address if network_address else None,
network_port if network_port else None,
storage_create_device if storage_create_device else None,
storage_create_loop if storage_create_loop else None,
storage_pool if storage_pool else None
)
except CommandExecutionError as e:
return _error(ret, six.text_type(e))
return _success(ret, 'Initialized the LXD Daemon') |
def _graphify(self, *args, graph=None): # defined
""" Lift phenotypeEdges to Restrictions """
if graph is None:
graph = self.out_graph
################## LABELS ARE DEFINED HERE ##################
gl = self.genLabel
ll = self.localLabel
ol = self.origLabel
graph.add((self.id_, ilxtr.genLabel, rdflib.Literal(gl)))
if ll != gl:
graph.add((self.id_, ilxtr.localLabel, rdflib.Literal(ll)))
if ol and ol != gl:
graph.add((self.id_, ilxtr.origLabel, rdflib.Literal(ol)))
members = [self.expand(self.owlClass)]
for pe in self.pes:
target = pe._graphify(graph=graph)
if isinstance(pe, NegPhenotype): # isinstance will match NegPhenotype -> Phenotype
#self.Class.disjointWith = [target] # FIXME for defined neurons this is what we need and I think it is strong than the complementOf version
djc = infixowl.Class(graph=graph) # TODO for generic neurons this is what we need
djc.complementOf = target
members.append(djc)
else:
members.append(target) # FIXME negative logical phenotypes :/
intersection = infixowl.BooleanClass(members=members, graph=graph) # FIXME dupes
#existing = list(self.Class.equivalentClass)
#if existing or str(pe.pLabel) == 'Htr3a':
#embed()
ec = [intersection]
self.Class.equivalentClass = ec
return self.Class | Lift phenotypeEdges to Restrictions | Below is the the instruction that describes the task:
### Input:
Lift phenotypeEdges to Restrictions
### Response:
def _graphify(self, *args, graph=None): # defined
""" Lift phenotypeEdges to Restrictions """
if graph is None:
graph = self.out_graph
################## LABELS ARE DEFINED HERE ##################
gl = self.genLabel
ll = self.localLabel
ol = self.origLabel
graph.add((self.id_, ilxtr.genLabel, rdflib.Literal(gl)))
if ll != gl:
graph.add((self.id_, ilxtr.localLabel, rdflib.Literal(ll)))
if ol and ol != gl:
graph.add((self.id_, ilxtr.origLabel, rdflib.Literal(ol)))
members = [self.expand(self.owlClass)]
for pe in self.pes:
target = pe._graphify(graph=graph)
if isinstance(pe, NegPhenotype): # isinstance will match NegPhenotype -> Phenotype
#self.Class.disjointWith = [target] # FIXME for defined neurons this is what we need and I think it is strong than the complementOf version
djc = infixowl.Class(graph=graph) # TODO for generic neurons this is what we need
djc.complementOf = target
members.append(djc)
else:
members.append(target) # FIXME negative logical phenotypes :/
intersection = infixowl.BooleanClass(members=members, graph=graph) # FIXME dupes
#existing = list(self.Class.equivalentClass)
#if existing or str(pe.pLabel) == 'Htr3a':
#embed()
ec = [intersection]
self.Class.equivalentClass = ec
return self.Class |
def deep_force_unicode(value):
"""
Recursively call force_text on value.
"""
if isinstance(value, (list, tuple, set)):
value = type(value)(map(deep_force_unicode, value))
elif isinstance(value, dict):
value = type(value)(map(deep_force_unicode, value.items()))
elif isinstance(value, Promise):
value = force_text(value)
return value | Recursively call force_text on value. | Below is the the instruction that describes the task:
### Input:
Recursively call force_text on value.
### Response:
def deep_force_unicode(value):
"""
Recursively call force_text on value.
"""
if isinstance(value, (list, tuple, set)):
value = type(value)(map(deep_force_unicode, value))
elif isinstance(value, dict):
value = type(value)(map(deep_force_unicode, value.items()))
elif isinstance(value, Promise):
value = force_text(value)
return value |
def requires_basic_auth(resource):
'''
Flask decorator protecting ressources using username/password scheme
'''
@functools.wraps(resource)
def decorated(*args, **kwargs):
''' Check provided username/password '''
auth = flask.request.authorization
user = check_credentials(auth.username, auth.password)
if not auth or user is None:
log.warn('authentification failed', credentials=auth)
return auth_failed()
log.info('authentification succeeded', credentials=auth)
flask.g.user = user
return resource(*args, **kwargs)
return decorated | Flask decorator protecting ressources using username/password scheme | Below is the the instruction that describes the task:
### Input:
Flask decorator protecting ressources using username/password scheme
### Response:
def requires_basic_auth(resource):
'''
Flask decorator protecting ressources using username/password scheme
'''
@functools.wraps(resource)
def decorated(*args, **kwargs):
''' Check provided username/password '''
auth = flask.request.authorization
user = check_credentials(auth.username, auth.password)
if not auth or user is None:
log.warn('authentification failed', credentials=auth)
return auth_failed()
log.info('authentification succeeded', credentials=auth)
flask.g.user = user
return resource(*args, **kwargs)
return decorated |
def end_tag(el):
""" The text representation of an end tag for a tag. Includes
trailing whitespace when appropriate. """
if el.tail and start_whitespace_re.search(el.tail):
extra = ' '
else:
extra = ''
return '</%s>%s' % (el.tag, extra) | The text representation of an end tag for a tag. Includes
trailing whitespace when appropriate. | Below is the the instruction that describes the task:
### Input:
The text representation of an end tag for a tag. Includes
trailing whitespace when appropriate.
### Response:
def end_tag(el):
""" The text representation of an end tag for a tag. Includes
trailing whitespace when appropriate. """
if el.tail and start_whitespace_re.search(el.tail):
extra = ' '
else:
extra = ''
return '</%s>%s' % (el.tag, extra) |
def _resolve_task_logging(job_metadata, job_resources, task_descriptors):
"""Resolve the logging path from job and task properties.
Args:
job_metadata: Job metadata, such as job-id, job-name, and user-id.
job_resources: Resources specified such as ram, cpu, and logging path.
task_descriptors: Task metadata, parameters, and resources.
Resolve the logging path, which may have substitution parameters such as
job-id, task-id, user-id, and job-name.
"""
if not job_resources.logging:
return
for task_descriptor in task_descriptors:
logging_uri = provider_base.format_logging_uri(
job_resources.logging.uri, job_metadata, task_descriptor.task_metadata)
logging_path = job_model.LoggingParam(logging_uri,
job_resources.logging.file_provider)
if task_descriptor.task_resources:
task_descriptor.task_resources = task_descriptor.task_resources._replace(
logging_path=logging_path)
else:
task_descriptor.task_resources = job_model.Resources(
logging_path=logging_path) | Resolve the logging path from job and task properties.
Args:
job_metadata: Job metadata, such as job-id, job-name, and user-id.
job_resources: Resources specified such as ram, cpu, and logging path.
task_descriptors: Task metadata, parameters, and resources.
Resolve the logging path, which may have substitution parameters such as
job-id, task-id, user-id, and job-name. | Below is the the instruction that describes the task:
### Input:
Resolve the logging path from job and task properties.
Args:
job_metadata: Job metadata, such as job-id, job-name, and user-id.
job_resources: Resources specified such as ram, cpu, and logging path.
task_descriptors: Task metadata, parameters, and resources.
Resolve the logging path, which may have substitution parameters such as
job-id, task-id, user-id, and job-name.
### Response:
def _resolve_task_logging(job_metadata, job_resources, task_descriptors):
"""Resolve the logging path from job and task properties.
Args:
job_metadata: Job metadata, such as job-id, job-name, and user-id.
job_resources: Resources specified such as ram, cpu, and logging path.
task_descriptors: Task metadata, parameters, and resources.
Resolve the logging path, which may have substitution parameters such as
job-id, task-id, user-id, and job-name.
"""
if not job_resources.logging:
return
for task_descriptor in task_descriptors:
logging_uri = provider_base.format_logging_uri(
job_resources.logging.uri, job_metadata, task_descriptor.task_metadata)
logging_path = job_model.LoggingParam(logging_uri,
job_resources.logging.file_provider)
if task_descriptor.task_resources:
task_descriptor.task_resources = task_descriptor.task_resources._replace(
logging_path=logging_path)
else:
task_descriptor.task_resources = job_model.Resources(
logging_path=logging_path) |
def get_hash(file_path, checksum='sha1'):
"""
Generate a hash for the given file
Args:
file_path (str): Path to the file to generate the hash for
checksum (str): hash to apply, one of the supported by hashlib, for
example sha1 or sha512
Returns:
str: hash for that file
"""
sha = getattr(hashlib, checksum)()
with open(file_path) as file_descriptor:
while True:
chunk = file_descriptor.read(65536)
if not chunk:
break
sha.update(chunk)
return sha.hexdigest() | Generate a hash for the given file
Args:
file_path (str): Path to the file to generate the hash for
checksum (str): hash to apply, one of the supported by hashlib, for
example sha1 or sha512
Returns:
str: hash for that file | Below is the the instruction that describes the task:
### Input:
Generate a hash for the given file
Args:
file_path (str): Path to the file to generate the hash for
checksum (str): hash to apply, one of the supported by hashlib, for
example sha1 or sha512
Returns:
str: hash for that file
### Response:
def get_hash(file_path, checksum='sha1'):
"""
Generate a hash for the given file
Args:
file_path (str): Path to the file to generate the hash for
checksum (str): hash to apply, one of the supported by hashlib, for
example sha1 or sha512
Returns:
str: hash for that file
"""
sha = getattr(hashlib, checksum)()
with open(file_path) as file_descriptor:
while True:
chunk = file_descriptor.read(65536)
if not chunk:
break
sha.update(chunk)
return sha.hexdigest() |
def show(ctx, short_name):
"""Show metadata for a specific subscription
Example:
\b
$ wva subscriptions show speed
{'buffer': 'queue', 'interval': 5, 'uri': 'vehicle/data/VehicleSpeed'}
"""
wva = get_wva(ctx)
subscription = wva.get_subscription(short_name)
cli_pprint(subscription.get_metadata()) | Show metadata for a specific subscription
Example:
\b
$ wva subscriptions show speed
{'buffer': 'queue', 'interval': 5, 'uri': 'vehicle/data/VehicleSpeed'} | Below is the the instruction that describes the task:
### Input:
Show metadata for a specific subscription
Example:
\b
$ wva subscriptions show speed
{'buffer': 'queue', 'interval': 5, 'uri': 'vehicle/data/VehicleSpeed'}
### Response:
def show(ctx, short_name):
"""Show metadata for a specific subscription
Example:
\b
$ wva subscriptions show speed
{'buffer': 'queue', 'interval': 5, 'uri': 'vehicle/data/VehicleSpeed'}
"""
wva = get_wva(ctx)
subscription = wva.get_subscription(short_name)
cli_pprint(subscription.get_metadata()) |
def _get_data(self, func):
""" This is the decorator for our DECORATED_METHODS.
Each of the decorated methods must return:
uri, params, method, body, headers, singleobject
"""
def wrapper(*args, **kwargs):
timeout = kwargs.pop('timeout', None)
uri, params, method, body, headers, singleobject = func(*args, **kwargs)
if headers is None:
headers = {}
# Use the JSON API by default, but remember we might request a PDF (application/pdf)
# so don't force the Accept header.
if 'Accept' not in headers:
headers['Accept'] = 'application/json'
# Set a user-agent so Xero knows the traffic is coming from pyxero
# or individual user/partner
headers['User-Agent'] = self.user_agent
response = getattr(requests, method)(
uri, data=body, headers=headers, auth=self.credentials.oauth,
params=params, timeout=timeout)
if response.status_code == 200:
# If we haven't got XML or JSON, assume we're being returned a binary file
if not response.headers['content-type'].startswith('application/json'):
return response.content
return self._parse_api_response(response, self.name)
elif response.status_code == 204:
return response.content
elif response.status_code == 400:
raise XeroBadRequest(response)
elif response.status_code == 401:
raise XeroUnauthorized(response)
elif response.status_code == 403:
raise XeroForbidden(response)
elif response.status_code == 404:
raise XeroNotFound(response)
elif response.status_code == 500:
raise XeroInternalError(response)
elif response.status_code == 501:
raise XeroNotImplemented(response)
elif response.status_code == 503:
# Two 503 responses are possible. Rate limit errors
# return encoded content; offline errors don't.
# If you parse the response text and there's nothing
# encoded, it must be a not-available error.
payload = parse_qs(response.text)
if payload:
raise XeroRateLimitExceeded(response, payload)
else:
raise XeroNotAvailable(response)
else:
raise XeroExceptionUnknown(response)
return wrapper | This is the decorator for our DECORATED_METHODS.
Each of the decorated methods must return:
uri, params, method, body, headers, singleobject | Below is the the instruction that describes the task:
### Input:
This is the decorator for our DECORATED_METHODS.
Each of the decorated methods must return:
uri, params, method, body, headers, singleobject
### Response:
def _get_data(self, func):
""" This is the decorator for our DECORATED_METHODS.
Each of the decorated methods must return:
uri, params, method, body, headers, singleobject
"""
def wrapper(*args, **kwargs):
timeout = kwargs.pop('timeout', None)
uri, params, method, body, headers, singleobject = func(*args, **kwargs)
if headers is None:
headers = {}
# Use the JSON API by default, but remember we might request a PDF (application/pdf)
# so don't force the Accept header.
if 'Accept' not in headers:
headers['Accept'] = 'application/json'
# Set a user-agent so Xero knows the traffic is coming from pyxero
# or individual user/partner
headers['User-Agent'] = self.user_agent
response = getattr(requests, method)(
uri, data=body, headers=headers, auth=self.credentials.oauth,
params=params, timeout=timeout)
if response.status_code == 200:
# If we haven't got XML or JSON, assume we're being returned a binary file
if not response.headers['content-type'].startswith('application/json'):
return response.content
return self._parse_api_response(response, self.name)
elif response.status_code == 204:
return response.content
elif response.status_code == 400:
raise XeroBadRequest(response)
elif response.status_code == 401:
raise XeroUnauthorized(response)
elif response.status_code == 403:
raise XeroForbidden(response)
elif response.status_code == 404:
raise XeroNotFound(response)
elif response.status_code == 500:
raise XeroInternalError(response)
elif response.status_code == 501:
raise XeroNotImplemented(response)
elif response.status_code == 503:
# Two 503 responses are possible. Rate limit errors
# return encoded content; offline errors don't.
# If you parse the response text and there's nothing
# encoded, it must be a not-available error.
payload = parse_qs(response.text)
if payload:
raise XeroRateLimitExceeded(response, payload)
else:
raise XeroNotAvailable(response)
else:
raise XeroExceptionUnknown(response)
return wrapper |
def time_zone_by_name(self, hostname):
"""
Returns time zone in tzdata format (e.g. America/New_York or Europe/Paris)
:arg hostname: Hostname (e.g. example.com)
"""
addr = self._gethostbyname(hostname)
return self.time_zone_by_addr(addr) | Returns time zone in tzdata format (e.g. America/New_York or Europe/Paris)
:arg hostname: Hostname (e.g. example.com) | Below is the the instruction that describes the task:
### Input:
Returns time zone in tzdata format (e.g. America/New_York or Europe/Paris)
:arg hostname: Hostname (e.g. example.com)
### Response:
def time_zone_by_name(self, hostname):
"""
Returns time zone in tzdata format (e.g. America/New_York or Europe/Paris)
:arg hostname: Hostname (e.g. example.com)
"""
addr = self._gethostbyname(hostname)
return self.time_zone_by_addr(addr) |
def credentials(self, credentials):
"""
Sets the credentials of this WebAuthorization.
The confidential portion of the `Authorization` header that follows the `type` field. This field is write-only. It is omitted by read operations. If authorization is required, the `credentials` value must be provided whenever a File Source is created or modified. An update to a dataset that does not change the File Source may omit the `credentials` field--the update will preserve the previous value.
:param credentials: The credentials of this WebAuthorization.
:type: str
"""
if credentials is not None and len(credentials) > 1024:
raise ValueError("Invalid value for `credentials`, length must be less than or equal to `1024`")
if credentials is not None and len(credentials) < 1:
raise ValueError("Invalid value for `credentials`, length must be greater than or equal to `1`")
if credentials is not None and not re.search('[\\x21-\\x7E \\t]*', credentials):
raise ValueError("Invalid value for `credentials`, must be a follow pattern or equal to `/[\\x21-\\x7E \\t]*/`")
self._credentials = credentials | Sets the credentials of this WebAuthorization.
The confidential portion of the `Authorization` header that follows the `type` field. This field is write-only. It is omitted by read operations. If authorization is required, the `credentials` value must be provided whenever a File Source is created or modified. An update to a dataset that does not change the File Source may omit the `credentials` field--the update will preserve the previous value.
:param credentials: The credentials of this WebAuthorization.
:type: str | Below is the the instruction that describes the task:
### Input:
Sets the credentials of this WebAuthorization.
The confidential portion of the `Authorization` header that follows the `type` field. This field is write-only. It is omitted by read operations. If authorization is required, the `credentials` value must be provided whenever a File Source is created or modified. An update to a dataset that does not change the File Source may omit the `credentials` field--the update will preserve the previous value.
:param credentials: The credentials of this WebAuthorization.
:type: str
### Response:
def credentials(self, credentials):
"""
Sets the credentials of this WebAuthorization.
The confidential portion of the `Authorization` header that follows the `type` field. This field is write-only. It is omitted by read operations. If authorization is required, the `credentials` value must be provided whenever a File Source is created or modified. An update to a dataset that does not change the File Source may omit the `credentials` field--the update will preserve the previous value.
:param credentials: The credentials of this WebAuthorization.
:type: str
"""
if credentials is not None and len(credentials) > 1024:
raise ValueError("Invalid value for `credentials`, length must be less than or equal to `1024`")
if credentials is not None and len(credentials) < 1:
raise ValueError("Invalid value for `credentials`, length must be greater than or equal to `1`")
if credentials is not None and not re.search('[\\x21-\\x7E \\t]*', credentials):
raise ValueError("Invalid value for `credentials`, must be a follow pattern or equal to `/[\\x21-\\x7E \\t]*/`")
self._credentials = credentials |
def require_user(self, *users, user=None):
"""A decorator to protect views with Negotiate authentication."""
# accept old-style single user keyword-argument as well
if user:
users = (*users, user)
def _require_auth(view_func):
@wraps(view_func)
def wrapper(*args, **kwargs):
""" Effective wrapper """
username, out_token = self.authenticate()
if username and out_token:
b64_token = base64.b64encode(out_token).decode('utf-8')
auth_data = 'Negotiate {0}'.format(b64_token)
if not users or username in users:
response = make_response(view_func(*args,
username=username,
**kwargs))
else:
response = Response(status=403)
response.headers['WWW-Authenticate'] = auth_data
return response
return Response(
status=401,
headers={'WWW-Authenticate': 'Negotiate'},
)
return wrapper
return _require_auth | A decorator to protect views with Negotiate authentication. | Below is the the instruction that describes the task:
### Input:
A decorator to protect views with Negotiate authentication.
### Response:
def require_user(self, *users, user=None):
"""A decorator to protect views with Negotiate authentication."""
# accept old-style single user keyword-argument as well
if user:
users = (*users, user)
def _require_auth(view_func):
@wraps(view_func)
def wrapper(*args, **kwargs):
""" Effective wrapper """
username, out_token = self.authenticate()
if username and out_token:
b64_token = base64.b64encode(out_token).decode('utf-8')
auth_data = 'Negotiate {0}'.format(b64_token)
if not users or username in users:
response = make_response(view_func(*args,
username=username,
**kwargs))
else:
response = Response(status=403)
response.headers['WWW-Authenticate'] = auth_data
return response
return Response(
status=401,
headers={'WWW-Authenticate': 'Negotiate'},
)
return wrapper
return _require_auth |
def wait(self):
"""
Block until a matched message appears.
"""
if not self._patterns:
raise RuntimeError('Listener has nothing to capture')
while 1:
msg = self._queue.get(block=True)
if any(map(lambda p: filtering.match_all(msg, p), self._patterns)):
return msg | Block until a matched message appears. | Below is the the instruction that describes the task:
### Input:
Block until a matched message appears.
### Response:
def wait(self):
"""
Block until a matched message appears.
"""
if not self._patterns:
raise RuntimeError('Listener has nothing to capture')
while 1:
msg = self._queue.get(block=True)
if any(map(lambda p: filtering.match_all(msg, p), self._patterns)):
return msg |
def plot_kde(self,
ax=None,
amax=None,
amin=None,
label=None,
return_fig=False):
"""
Plot a KDE for the curve. Very nice summary of KDEs:
https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/
Args:
ax (axis): Optional matplotlib (MPL) axis to plot into. Returned.
amax (float): Optional max value to permit.
amin (float): Optional min value to permit.
label (string): What to put on the y-axis. Defaults to curve name.
return_fig (bool): If you want to return the MPL figure object.
Returns:
None, axis, figure: depending on what you ask for.
"""
from scipy.stats import gaussian_kde
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
return_ax = False
else:
return_ax = True
a = self[~np.isnan(self)]
# Find values for common axis to exclude outliers.
if amax is None:
amax = np.percentile(a, 99)
if amin is None:
amin = np.percentile(a, 1)
x = a[np.abs(a - 0.5 * (amax + amin)) < 0.5 * (amax - amin)]
x_grid = np.linspace(amin, amax, 100)
kde = gaussian_kde(x)
std_a = kde.evaluate(x_grid)
img = np.array([std_a]) / np.max([std_a])
extent = [amin, amax, 0, 1]
ax.imshow(img, aspect='auto', cmap='viridis', extent=extent)
ax.set_yticklabels([])
ax.set_ylabel(label or self.mnemonic)
if return_ax:
return ax
elif return_fig:
return fig
else:
return None | Plot a KDE for the curve. Very nice summary of KDEs:
https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/
Args:
ax (axis): Optional matplotlib (MPL) axis to plot into. Returned.
amax (float): Optional max value to permit.
amin (float): Optional min value to permit.
label (string): What to put on the y-axis. Defaults to curve name.
return_fig (bool): If you want to return the MPL figure object.
Returns:
None, axis, figure: depending on what you ask for. | Below is the the instruction that describes the task:
### Input:
Plot a KDE for the curve. Very nice summary of KDEs:
https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/
Args:
ax (axis): Optional matplotlib (MPL) axis to plot into. Returned.
amax (float): Optional max value to permit.
amin (float): Optional min value to permit.
label (string): What to put on the y-axis. Defaults to curve name.
return_fig (bool): If you want to return the MPL figure object.
Returns:
None, axis, figure: depending on what you ask for.
### Response:
def plot_kde(self,
ax=None,
amax=None,
amin=None,
label=None,
return_fig=False):
"""
Plot a KDE for the curve. Very nice summary of KDEs:
https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/
Args:
ax (axis): Optional matplotlib (MPL) axis to plot into. Returned.
amax (float): Optional max value to permit.
amin (float): Optional min value to permit.
label (string): What to put on the y-axis. Defaults to curve name.
return_fig (bool): If you want to return the MPL figure object.
Returns:
None, axis, figure: depending on what you ask for.
"""
from scipy.stats import gaussian_kde
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
return_ax = False
else:
return_ax = True
a = self[~np.isnan(self)]
# Find values for common axis to exclude outliers.
if amax is None:
amax = np.percentile(a, 99)
if amin is None:
amin = np.percentile(a, 1)
x = a[np.abs(a - 0.5 * (amax + amin)) < 0.5 * (amax - amin)]
x_grid = np.linspace(amin, amax, 100)
kde = gaussian_kde(x)
std_a = kde.evaluate(x_grid)
img = np.array([std_a]) / np.max([std_a])
extent = [amin, amax, 0, 1]
ax.imshow(img, aspect='auto', cmap='viridis', extent=extent)
ax.set_yticklabels([])
ax.set_ylabel(label or self.mnemonic)
if return_ax:
return ax
elif return_fig:
return fig
else:
return None |
def get_video_transcript(video_id, language_code):
"""
Get video transcript info
Arguments:
video_id(unicode): A video id, it can be an edx_video_id or an external video id extracted from
external sources of a video component.
language_code(unicode): it will be the language code of the requested transcript.
"""
transcript = VideoTranscript.get_or_none(video_id=video_id, language_code=language_code)
return TranscriptSerializer(transcript).data if transcript else None | Get video transcript info
Arguments:
video_id(unicode): A video id, it can be an edx_video_id or an external video id extracted from
external sources of a video component.
language_code(unicode): it will be the language code of the requested transcript. | Below is the the instruction that describes the task:
### Input:
Get video transcript info
Arguments:
video_id(unicode): A video id, it can be an edx_video_id or an external video id extracted from
external sources of a video component.
language_code(unicode): it will be the language code of the requested transcript.
### Response:
def get_video_transcript(video_id, language_code):
"""
Get video transcript info
Arguments:
video_id(unicode): A video id, it can be an edx_video_id or an external video id extracted from
external sources of a video component.
language_code(unicode): it will be the language code of the requested transcript.
"""
transcript = VideoTranscript.get_or_none(video_id=video_id, language_code=language_code)
return TranscriptSerializer(transcript).data if transcript else None |
def xor(left, right):
"""xor 2 strings. They can be shorter than each other, in which case
the shortest will be padded with null bytes at its right.
:param left: a string to be the left side of the xor
:param right: a string to be the left side of the xor
"""
maxlength = max(map(len, (left, right)))
ileft = string_to_int(rpad(left, maxlength))
iright = string_to_int(rpad(right, maxlength))
xored = ileft ^ iright
return int_to_string(xored) | xor 2 strings. They can be shorter than each other, in which case
the shortest will be padded with null bytes at its right.
:param left: a string to be the left side of the xor
:param right: a string to be the left side of the xor | Below is the the instruction that describes the task:
### Input:
xor 2 strings. They can be shorter than each other, in which case
the shortest will be padded with null bytes at its right.
:param left: a string to be the left side of the xor
:param right: a string to be the left side of the xor
### Response:
def xor(left, right):
"""xor 2 strings. They can be shorter than each other, in which case
the shortest will be padded with null bytes at its right.
:param left: a string to be the left side of the xor
:param right: a string to be the left side of the xor
"""
maxlength = max(map(len, (left, right)))
ileft = string_to_int(rpad(left, maxlength))
iright = string_to_int(rpad(right, maxlength))
xored = ileft ^ iright
return int_to_string(xored) |
def getStretchTwistBendModulus(self, bp, frames=None, paxis='Z', masked=True, matrix=False):
r"""Calculate Bending-Stretching-Twisting matrix
It calculate elastic matrix and modulus matrix.
.. math::
\text{modulus matrix} = 4.1419464 \times \begin{bmatrix}
K_{Bx} & K_{Bx,By} & K_{Bx,S} & K_{Bx,T} \\
K_{Bx,By} & K_{By} & K_{By,S} & K_{By,T} \\
K_{Bx,S} & K_{By,S} & K_{S} & K_{S,T} \\
K_{Bx,T} & K_{Bx,T} & K_{S,T} & K_{T}
\end{bmatrix} \times L_0
.. currentmodule:: dnaMD
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
frames : list
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
paxis : str
Axis parallel to global helical-axis(``'X'``, or ``'Y'`` or ``'Z'``). Only require when bending motions are
included in the calculation.
masked : bool
``Default=False``. To skip specific frames/snapshots.
``DNA.mask`` array should be set to use this functionality.
This array contains boolean (either ``True`` or ``False``) value
for each frame to mask the frames. Presently, mask array is
automatically generated during :meth:`dnaMD.DNA.generate_smooth_axis` to
skip those frames where 3D fitting curve was not successful within
the given criteria.
matrix : bool
If it is ``True``, elastic constant matrix will be returned. Otherwise, by default modulus matrix will be
returned.
Return
------
mean : numpy.ndarray
Value of bending angles, contour length and twist angle (as 1D array) at which energy is zero. Minimum point
on free energy landscape.
.. math::
\begin{bmatrix}
\theta^{x}_0 & \theta^{y}_0 & L_0 & \phi_0
\end{bmatrix}
result : numpy.ndarray
Either elastic matrix or modulus matrix depending on ``matrix`` value.
"""
if self.esType == 'ST':
raise KeyError(' Use dnaEY.getStretchTwistModulus for Stretching-Twisting modulus.')
frames = self._validateFrames(frames)
name = '{0}-{1}-{2}-{3}'.format(bp[0], bp[1], frames[0], frames[1])
if name not in self.esMatrix:
time, array = self.extractGlobalParameters(self.dna, bp, frames=frames, paxis=paxis, masked=masked)
mean = np.mean(array, axis=1)
esMatrix = np.asarray(self.getElasticMatrix(array))
self.esMatrix[name] = esMatrix
self.minimumPoint[name] = mean
else:
esMatrix = self.esMatrix[name]
mean = self.minimumPoint[name]
if not matrix:
result = 4.1419464 * np.array(esMatrix) * mean[2] # Calculate modulus
else:
result = esMatrix
return mean, result | r"""Calculate Bending-Stretching-Twisting matrix
It calculate elastic matrix and modulus matrix.
.. math::
\text{modulus matrix} = 4.1419464 \times \begin{bmatrix}
K_{Bx} & K_{Bx,By} & K_{Bx,S} & K_{Bx,T} \\
K_{Bx,By} & K_{By} & K_{By,S} & K_{By,T} \\
K_{Bx,S} & K_{By,S} & K_{S} & K_{S,T} \\
K_{Bx,T} & K_{Bx,T} & K_{S,T} & K_{T}
\end{bmatrix} \times L_0
.. currentmodule:: dnaMD
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
frames : list
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
paxis : str
Axis parallel to global helical-axis(``'X'``, or ``'Y'`` or ``'Z'``). Only require when bending motions are
included in the calculation.
masked : bool
``Default=False``. To skip specific frames/snapshots.
``DNA.mask`` array should be set to use this functionality.
This array contains boolean (either ``True`` or ``False``) value
for each frame to mask the frames. Presently, mask array is
automatically generated during :meth:`dnaMD.DNA.generate_smooth_axis` to
skip those frames where 3D fitting curve was not successful within
the given criteria.
matrix : bool
If it is ``True``, elastic constant matrix will be returned. Otherwise, by default modulus matrix will be
returned.
Return
------
mean : numpy.ndarray
Value of bending angles, contour length and twist angle (as 1D array) at which energy is zero. Minimum point
on free energy landscape.
.. math::
\begin{bmatrix}
\theta^{x}_0 & \theta^{y}_0 & L_0 & \phi_0
\end{bmatrix}
result : numpy.ndarray
Either elastic matrix or modulus matrix depending on ``matrix`` value. | Below is the the instruction that describes the task:
### Input:
r"""Calculate Bending-Stretching-Twisting matrix
It calculate elastic matrix and modulus matrix.
.. math::
\text{modulus matrix} = 4.1419464 \times \begin{bmatrix}
K_{Bx} & K_{Bx,By} & K_{Bx,S} & K_{Bx,T} \\
K_{Bx,By} & K_{By} & K_{By,S} & K_{By,T} \\
K_{Bx,S} & K_{By,S} & K_{S} & K_{S,T} \\
K_{Bx,T} & K_{Bx,T} & K_{S,T} & K_{T}
\end{bmatrix} \times L_0
.. currentmodule:: dnaMD
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
frames : list
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
paxis : str
Axis parallel to global helical-axis(``'X'``, or ``'Y'`` or ``'Z'``). Only require when bending motions are
included in the calculation.
masked : bool
``Default=False``. To skip specific frames/snapshots.
``DNA.mask`` array should be set to use this functionality.
This array contains boolean (either ``True`` or ``False``) value
for each frame to mask the frames. Presently, mask array is
automatically generated during :meth:`dnaMD.DNA.generate_smooth_axis` to
skip those frames where 3D fitting curve was not successful within
the given criteria.
matrix : bool
If it is ``True``, elastic constant matrix will be returned. Otherwise, by default modulus matrix will be
returned.
Return
------
mean : numpy.ndarray
Value of bending angles, contour length and twist angle (as 1D array) at which energy is zero. Minimum point
on free energy landscape.
.. math::
\begin{bmatrix}
\theta^{x}_0 & \theta^{y}_0 & L_0 & \phi_0
\end{bmatrix}
result : numpy.ndarray
Either elastic matrix or modulus matrix depending on ``matrix`` value.
### Response:
def getStretchTwistBendModulus(self, bp, frames=None, paxis='Z', masked=True, matrix=False):
r"""Calculate Bending-Stretching-Twisting matrix
It calculate elastic matrix and modulus matrix.
.. math::
\text{modulus matrix} = 4.1419464 \times \begin{bmatrix}
K_{Bx} & K_{Bx,By} & K_{Bx,S} & K_{Bx,T} \\
K_{Bx,By} & K_{By} & K_{By,S} & K_{By,T} \\
K_{Bx,S} & K_{By,S} & K_{S} & K_{S,T} \\
K_{Bx,T} & K_{Bx,T} & K_{S,T} & K_{T}
\end{bmatrix} \times L_0
.. currentmodule:: dnaMD
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
frames : list
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
paxis : str
Axis parallel to global helical-axis(``'X'``, or ``'Y'`` or ``'Z'``). Only require when bending motions are
included in the calculation.
masked : bool
``Default=False``. To skip specific frames/snapshots.
``DNA.mask`` array should be set to use this functionality.
This array contains boolean (either ``True`` or ``False``) value
for each frame to mask the frames. Presently, mask array is
automatically generated during :meth:`dnaMD.DNA.generate_smooth_axis` to
skip those frames where 3D fitting curve was not successful within
the given criteria.
matrix : bool
If it is ``True``, elastic constant matrix will be returned. Otherwise, by default modulus matrix will be
returned.
Return
------
mean : numpy.ndarray
Value of bending angles, contour length and twist angle (as 1D array) at which energy is zero. Minimum point
on free energy landscape.
.. math::
\begin{bmatrix}
\theta^{x}_0 & \theta^{y}_0 & L_0 & \phi_0
\end{bmatrix}
result : numpy.ndarray
Either elastic matrix or modulus matrix depending on ``matrix`` value.
"""
if self.esType == 'ST':
raise KeyError(' Use dnaEY.getStretchTwistModulus for Stretching-Twisting modulus.')
frames = self._validateFrames(frames)
name = '{0}-{1}-{2}-{3}'.format(bp[0], bp[1], frames[0], frames[1])
if name not in self.esMatrix:
time, array = self.extractGlobalParameters(self.dna, bp, frames=frames, paxis=paxis, masked=masked)
mean = np.mean(array, axis=1)
esMatrix = np.asarray(self.getElasticMatrix(array))
self.esMatrix[name] = esMatrix
self.minimumPoint[name] = mean
else:
esMatrix = self.esMatrix[name]
mean = self.minimumPoint[name]
if not matrix:
result = 4.1419464 * np.array(esMatrix) * mean[2] # Calculate modulus
else:
result = esMatrix
return mean, result |
def create_analytic_backend(settings):
"""
Creates a new Analytics backend from the settings
:param settings: Dictionary of settings for the analytics backend
:returns: A backend object implementing the analytics api
>>>
>>> analytics = create_analytic({
>>> 'backend': 'analytics.backends.redis.Redis',
>>> 'settings': {
>>> 'defaults': {
>>> 'host': 'localhost',
>>> 'port': 6379,
>>> 'db': 0,
>>> },
>>> 'hosts': [{'db': 0}, {'db': 1}, {'host': 'redis.example.org'}]
>>> },
>>> })
"""
backend = settings.get('backend')
if isinstance(backend, basestring):
backend = import_string(backend)
elif backend:
backend = backend
else:
raise KeyError('backend')
return backend(settings.get("settings", {})) | Creates a new Analytics backend from the settings
:param settings: Dictionary of settings for the analytics backend
:returns: A backend object implementing the analytics api
>>>
>>> analytics = create_analytic({
>>> 'backend': 'analytics.backends.redis.Redis',
>>> 'settings': {
>>> 'defaults': {
>>> 'host': 'localhost',
>>> 'port': 6379,
>>> 'db': 0,
>>> },
>>> 'hosts': [{'db': 0}, {'db': 1}, {'host': 'redis.example.org'}]
>>> },
>>> }) | Below is the the instruction that describes the task:
### Input:
Creates a new Analytics backend from the settings
:param settings: Dictionary of settings for the analytics backend
:returns: A backend object implementing the analytics api
>>>
>>> analytics = create_analytic({
>>> 'backend': 'analytics.backends.redis.Redis',
>>> 'settings': {
>>> 'defaults': {
>>> 'host': 'localhost',
>>> 'port': 6379,
>>> 'db': 0,
>>> },
>>> 'hosts': [{'db': 0}, {'db': 1}, {'host': 'redis.example.org'}]
>>> },
>>> })
### Response:
def create_analytic_backend(settings):
"""
Creates a new Analytics backend from the settings
:param settings: Dictionary of settings for the analytics backend
:returns: A backend object implementing the analytics api
>>>
>>> analytics = create_analytic({
>>> 'backend': 'analytics.backends.redis.Redis',
>>> 'settings': {
>>> 'defaults': {
>>> 'host': 'localhost',
>>> 'port': 6379,
>>> 'db': 0,
>>> },
>>> 'hosts': [{'db': 0}, {'db': 1}, {'host': 'redis.example.org'}]
>>> },
>>> })
"""
backend = settings.get('backend')
if isinstance(backend, basestring):
backend = import_string(backend)
elif backend:
backend = backend
else:
raise KeyError('backend')
return backend(settings.get("settings", {})) |
def Read(self, length=None):
"""Read from the file."""
if self.progress_callback:
self.progress_callback()
available_to_read = max(0, (self.size or 0) - self.offset)
if length is None:
to_read = available_to_read
else:
to_read = min(length, available_to_read)
with FileHandleManager(self.filename) as fd:
offset = self.file_offset + self.offset
pre_padding = offset % self.alignment
# Due to alignment we read some more data than we need to.
aligned_offset = offset - pre_padding
fd.Seek(aligned_offset)
data = fd.Read(to_read + pre_padding)
self.offset += len(data) - pre_padding
return data[pre_padding:] | Read from the file. | Below is the the instruction that describes the task:
### Input:
Read from the file.
### Response:
def Read(self, length=None):
"""Read from the file."""
if self.progress_callback:
self.progress_callback()
available_to_read = max(0, (self.size or 0) - self.offset)
if length is None:
to_read = available_to_read
else:
to_read = min(length, available_to_read)
with FileHandleManager(self.filename) as fd:
offset = self.file_offset + self.offset
pre_padding = offset % self.alignment
# Due to alignment we read some more data than we need to.
aligned_offset = offset - pre_padding
fd.Seek(aligned_offset)
data = fd.Read(to_read + pre_padding)
self.offset += len(data) - pre_padding
return data[pre_padding:] |
def register(self, *actions):
"""Register `actions` in the current application. All `actions` must be
an instance of :class:`.Action` or one of its subclasses.
If `overwrite` is `True`, then it is allowed to overwrite an
existing action with same name and category; else `ValueError`
is raised.
"""
assert self.installed(), "Actions not enabled on this application"
assert all(isinstance(a, Action) for a in actions)
for action in actions:
cat = action.category
reg = self._state["categories"].setdefault(cat, [])
reg.append(action) | Register `actions` in the current application. All `actions` must be
an instance of :class:`.Action` or one of its subclasses.
If `overwrite` is `True`, then it is allowed to overwrite an
existing action with same name and category; else `ValueError`
is raised. | Below is the the instruction that describes the task:
### Input:
Register `actions` in the current application. All `actions` must be
an instance of :class:`.Action` or one of its subclasses.
If `overwrite` is `True`, then it is allowed to overwrite an
existing action with same name and category; else `ValueError`
is raised.
### Response:
def register(self, *actions):
"""Register `actions` in the current application. All `actions` must be
an instance of :class:`.Action` or one of its subclasses.
If `overwrite` is `True`, then it is allowed to overwrite an
existing action with same name and category; else `ValueError`
is raised.
"""
assert self.installed(), "Actions not enabled on this application"
assert all(isinstance(a, Action) for a in actions)
for action in actions:
cat = action.category
reg = self._state["categories"].setdefault(cat, [])
reg.append(action) |
def get_real_stored_key(self, session_key):
"""Return the real key name in redis storage
@return string
"""
prefix = settings.SESSION_REDIS_PREFIX
if not prefix:
return session_key
return ':'.join([prefix, session_key]) | Return the real key name in redis storage
@return string | Below is the the instruction that describes the task:
### Input:
Return the real key name in redis storage
@return string
### Response:
def get_real_stored_key(self, session_key):
"""Return the real key name in redis storage
@return string
"""
prefix = settings.SESSION_REDIS_PREFIX
if not prefix:
return session_key
return ':'.join([prefix, session_key]) |
def read_key(self, key, bucket_name=None):
"""
Reads a key from S3
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
"""
obj = self.get_key(key, bucket_name)
return obj.get()['Body'].read().decode('utf-8') | Reads a key from S3
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str | Below is the the instruction that describes the task:
### Input:
Reads a key from S3
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
### Response:
def read_key(self, key, bucket_name=None):
"""
Reads a key from S3
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
"""
obj = self.get_key(key, bucket_name)
return obj.get()['Body'].read().decode('utf-8') |
def set_options(cls, obj, options=None, backend=None, **kwargs):
"""
Pure Python function for customize HoloViews objects in terms of
their style, plot and normalization options.
The options specification is a dictionary containing the target
for customization as a {type}.{group}.{label} keys. An example of
such a key is 'Image' which would customize all Image components
in the object. The key 'Image.Channel' would only customize Images
in the object that have the group 'Channel'.
The corresponding value is then a list of Option objects specified
with an appropriate category ('plot', 'style' or 'norm'). For
instance, using the keys described above, the specs could be:
{'Image:[Options('style', cmap='jet')]}
Or setting two types of option at once:
{'Image.Channel':[Options('plot', size=50),
Options('style', cmap='Blues')]}
Relationship to the %%opts magic
----------------------------------
This function matches the functionality supplied by the %%opts
cell magic in the IPython extension. In fact, you can use the same
syntax as the IPython cell magic to achieve the same customization
as shown above:
from holoviews.util.parser import OptsSpec
set_options(my_image, OptsSpec.parse("Image (cmap='jet')"))
Then setting both plot and style options:
set_options(my_image, OptsSpec.parse("Image [size=50] (cmap='Blues')"))
"""
# Note that an alternate, more verbose and less recommended
# syntax can also be used:
# {'Image.Channel:{'plot': Options(size=50),
# 'style': Options('style', cmap='Blues')]}
options = cls.merge_options(Store.options(backend=backend).groups.keys(), options, **kwargs)
spec, compositor_applied = cls.expand_compositor_keys(options)
custom_trees, id_mapping = cls.create_custom_trees(obj, spec)
cls.update_backends(id_mapping, custom_trees, backend=backend)
# Propagate ids to the objects
not_used = []
for (match_id, new_id) in id_mapping:
applied = cls.propagate_ids(obj, match_id, new_id, compositor_applied+list(spec.keys()), backend=backend)
if not applied:
not_used.append(new_id)
# Clean up unused custom option trees
for new_id in set(not_used):
cleanup_custom_options(new_id)
return obj | Pure Python function for customize HoloViews objects in terms of
their style, plot and normalization options.
The options specification is a dictionary containing the target
for customization as a {type}.{group}.{label} keys. An example of
such a key is 'Image' which would customize all Image components
in the object. The key 'Image.Channel' would only customize Images
in the object that have the group 'Channel'.
The corresponding value is then a list of Option objects specified
with an appropriate category ('plot', 'style' or 'norm'). For
instance, using the keys described above, the specs could be:
{'Image:[Options('style', cmap='jet')]}
Or setting two types of option at once:
{'Image.Channel':[Options('plot', size=50),
Options('style', cmap='Blues')]}
Relationship to the %%opts magic
----------------------------------
This function matches the functionality supplied by the %%opts
cell magic in the IPython extension. In fact, you can use the same
syntax as the IPython cell magic to achieve the same customization
as shown above:
from holoviews.util.parser import OptsSpec
set_options(my_image, OptsSpec.parse("Image (cmap='jet')"))
Then setting both plot and style options:
set_options(my_image, OptsSpec.parse("Image [size=50] (cmap='Blues')")) | Below is the the instruction that describes the task:
### Input:
Pure Python function for customize HoloViews objects in terms of
their style, plot and normalization options.
The options specification is a dictionary containing the target
for customization as a {type}.{group}.{label} keys. An example of
such a key is 'Image' which would customize all Image components
in the object. The key 'Image.Channel' would only customize Images
in the object that have the group 'Channel'.
The corresponding value is then a list of Option objects specified
with an appropriate category ('plot', 'style' or 'norm'). For
instance, using the keys described above, the specs could be:
{'Image:[Options('style', cmap='jet')]}
Or setting two types of option at once:
{'Image.Channel':[Options('plot', size=50),
Options('style', cmap='Blues')]}
Relationship to the %%opts magic
----------------------------------
This function matches the functionality supplied by the %%opts
cell magic in the IPython extension. In fact, you can use the same
syntax as the IPython cell magic to achieve the same customization
as shown above:
from holoviews.util.parser import OptsSpec
set_options(my_image, OptsSpec.parse("Image (cmap='jet')"))
Then setting both plot and style options:
set_options(my_image, OptsSpec.parse("Image [size=50] (cmap='Blues')"))
### Response:
def set_options(cls, obj, options=None, backend=None, **kwargs):
"""
Pure Python function for customize HoloViews objects in terms of
their style, plot and normalization options.
The options specification is a dictionary containing the target
for customization as a {type}.{group}.{label} keys. An example of
such a key is 'Image' which would customize all Image components
in the object. The key 'Image.Channel' would only customize Images
in the object that have the group 'Channel'.
The corresponding value is then a list of Option objects specified
with an appropriate category ('plot', 'style' or 'norm'). For
instance, using the keys described above, the specs could be:
{'Image:[Options('style', cmap='jet')]}
Or setting two types of option at once:
{'Image.Channel':[Options('plot', size=50),
Options('style', cmap='Blues')]}
Relationship to the %%opts magic
----------------------------------
This function matches the functionality supplied by the %%opts
cell magic in the IPython extension. In fact, you can use the same
syntax as the IPython cell magic to achieve the same customization
as shown above:
from holoviews.util.parser import OptsSpec
set_options(my_image, OptsSpec.parse("Image (cmap='jet')"))
Then setting both plot and style options:
set_options(my_image, OptsSpec.parse("Image [size=50] (cmap='Blues')"))
"""
# Note that an alternate, more verbose and less recommended
# syntax can also be used:
# {'Image.Channel:{'plot': Options(size=50),
# 'style': Options('style', cmap='Blues')]}
options = cls.merge_options(Store.options(backend=backend).groups.keys(), options, **kwargs)
spec, compositor_applied = cls.expand_compositor_keys(options)
custom_trees, id_mapping = cls.create_custom_trees(obj, spec)
cls.update_backends(id_mapping, custom_trees, backend=backend)
# Propagate ids to the objects
not_used = []
for (match_id, new_id) in id_mapping:
applied = cls.propagate_ids(obj, match_id, new_id, compositor_applied+list(spec.keys()), backend=backend)
if not applied:
not_used.append(new_id)
# Clean up unused custom option trees
for new_id in set(not_used):
cleanup_custom_options(new_id)
return obj |
def workflow_overwrite(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /workflow-xxxx/overwrite API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Workflows-and-Analyses#API-method%3A-%2Fworkflow-xxxx%2Foverwrite
"""
return DXHTTPRequest('/%s/overwrite' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /workflow-xxxx/overwrite API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Workflows-and-Analyses#API-method%3A-%2Fworkflow-xxxx%2Foverwrite | Below is the the instruction that describes the task:
### Input:
Invokes the /workflow-xxxx/overwrite API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Workflows-and-Analyses#API-method%3A-%2Fworkflow-xxxx%2Foverwrite
### Response:
def workflow_overwrite(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /workflow-xxxx/overwrite API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Workflows-and-Analyses#API-method%3A-%2Fworkflow-xxxx%2Foverwrite
"""
return DXHTTPRequest('/%s/overwrite' % object_id, input_params, always_retry=always_retry, **kwargs) |
def grids(fig=None, value='solid'):
"""Sets the value of the grid_lines for the axis to the passed value.
The default value is `solid`.
Parameters
----------
fig: Figure or None(default: None)
The figure for which the axes should be edited. If the value is None,
the current figure is used.
value: {'none', 'solid', 'dashed'}
The display of the grid_lines
"""
if fig is None:
fig = current_figure()
for a in fig.axes:
a.grid_lines = value | Sets the value of the grid_lines for the axis to the passed value.
The default value is `solid`.
Parameters
----------
fig: Figure or None(default: None)
The figure for which the axes should be edited. If the value is None,
the current figure is used.
value: {'none', 'solid', 'dashed'}
The display of the grid_lines | Below is the the instruction that describes the task:
### Input:
Sets the value of the grid_lines for the axis to the passed value.
The default value is `solid`.
Parameters
----------
fig: Figure or None(default: None)
The figure for which the axes should be edited. If the value is None,
the current figure is used.
value: {'none', 'solid', 'dashed'}
The display of the grid_lines
### Response:
def grids(fig=None, value='solid'):
"""Sets the value of the grid_lines for the axis to the passed value.
The default value is `solid`.
Parameters
----------
fig: Figure or None(default: None)
The figure for which the axes should be edited. If the value is None,
the current figure is used.
value: {'none', 'solid', 'dashed'}
The display of the grid_lines
"""
if fig is None:
fig = current_figure()
for a in fig.axes:
a.grid_lines = value |
def remove_field(self, field):
    """
    Removes a field from this table.

    :param field: This can be a string of a field name, a dict of {'alias': field}, or
        a ``Field`` instance
    :type field: str or dict or :class:`Field <querybuilder.fields.Field>`
    :returns: the removed field object, or None when no field matched
    """
    new_field = FieldFactory(
        field,
    )
    new_field.set_table(self)
    new_field_identifier = new_field.get_identifier()
    # Compare identifiers rather than objects so that any of the accepted
    # input forms (name string, alias dict, Field instance) can match an
    # existing field. The loop variable is named `existing` so it no longer
    # shadows the `field` parameter (the original code rebound `field`,
    # which made the function harder to read and debug).
    for existing in self.fields:
        if existing.get_identifier() == new_field_identifier:
            # Safe to mutate while iterating because we return immediately.
            self.fields.remove(existing)
            return existing
    return None
:param field: This can be a string of a field name, a dict of {'alias': field}, or
a ``Field`` instance
:type field: str or dict or :class:`Field <querybuilder.fields.Field>` | Below is the the instruction that describes the task:
### Input:
Removes a field from this table
:param field: This can be a string of a field name, a dict of {'alias': field}, or
a ``Field`` instance
:type field: str or dict or :class:`Field <querybuilder.fields.Field>`
### Response:
def remove_field(self, field):
"""
Removes a field from this table
:param field: This can be a string of a field name, a dict of {'alias': field}, or
a ``Field`` instance
:type field: str or dict or :class:`Field <querybuilder.fields.Field>`
"""
new_field = FieldFactory(
field,
)
new_field.set_table(self)
new_field_identifier = new_field.get_identifier()
for field in self.fields:
if field.get_identifier() == new_field_identifier:
self.fields.remove(field)
return field
return None |
def auth(self):
    """
    Return a ``(username, password)`` tuple for the configured user.

    When ``use_keyring`` is enabled, the password is queried from the local
    keyring (and stored there first if missing) instead of being taken from
    the configuration file.

    :raises ValueError: if no username is configured.
    """
    settings = self._settings
    username = settings["username"]
    if not username:
        raise ValueError("Username was not configured in %s" % CONFIG_FILE)
    if settings["use_keyring"]:
        password = self.keyring_get_password(username)
        if not password:
            # Password not in the keyring yet: store it once, then re-read.
            self.keyring_set_password(username)
            password = self.keyring_get_password(username)
    else:
        password = settings["password"]
    return settings["username"], password
configuration file. | Below is the the instruction that describes the task:
### Input:
tuple of (username, password). if use_keyring is set to true the password will be queried from the local keyring instead of taken from the
configuration file.
### Response:
def auth(self):
"""
tuple of (username, password). if use_keyring is set to true the password will be queried from the local keyring instead of taken from the
configuration file.
"""
username = self._settings["username"]
if not username:
raise ValueError("Username was not configured in %s" % CONFIG_FILE)
if self._settings["use_keyring"]:
password = self.keyring_get_password(username)
if not password:
self.keyring_set_password(username)
password = self.keyring_get_password(username)
else:
password = self._settings["password"]
return self._settings["username"], password |
def load_objective(config):
    """
    Load the objective function described by a configuration mapping.

    Changes the working directory to ``config['prjpath']`` and, for Python
    problems, imports ``config['main-file']`` from there and returns the
    function of the same name defined inside it.
    """
    assert 'prjpath' in config
    assert 'main-file' in config, "The problem file ('main-file') is missing!"
    os.chdir(config['prjpath'])
    if config['language'].lower() == 'python':
        main_file = config['main-file']
        assert main_file.endswith('.py'), 'The python problem file has to end with .py!'
        import imp
        module_name = main_file[:-3]
        # By convention the objective function carries the module's name.
        loaded = imp.load_source(module_name, os.path.join(config['prjpath'], main_file))
        func = loaded.__dict__[module_name]
    # NOTE(review): for non-python languages `func` is unbound here and this
    # raises NameError, exactly as the original code did — confirm intent.
    return func
### Input:
Loads the objective function from a .json file.
### Response:
def load_objective(config):
"""
Loads the objective function from a .json file.
"""
assert 'prjpath' in config
assert 'main-file' in config, "The problem file ('main-file') is missing!"
os.chdir(config['prjpath'])
if config['language'].lower()=='python':
assert config['main-file'].endswith('.py'), 'The python problem file has to end with .py!'
import imp
m = imp.load_source(config['main-file'][:-3], os.path.join(config['prjpath'],config['main-file']))
func = m.__dict__[config['main-file'][:-3]]
return func |
def delete(self):
    """Close the file handle, then remove the file from disk if it exists."""
    self.close()
    if not self.does_file_exist():
        return
    os.remove(self.path)
### Input:
Delete the file.
### Response:
def delete(self):
"""Delete the file."""
self.close()
if self.does_file_exist():
os.remove(self.path) |
def delete_value(self, key):
    """
    Remove ``key`` from the cache file if it is present.

    Arg:
        key : cache key

    Returns a dict with a boolean ``status`` flag and a human-readable
    ``msg`` describing the outcome.
    """
    cached = self.read_file()
    if key not in cached:
        return {'status': False, 'msg': "key does not exist"}
    del cached[key]
    self.update_file(cached)
    return {'status': True, 'msg': "success"}
Arg:
key : cache key | Below is the the instruction that describes the task:
### Input:
Delete the key if the token is expired.
Arg:
key : cache key
### Response:
def delete_value(self, key):
"""
Delete the key if the token is expired.
Arg:
key : cache key
"""
response = {}
response['status'] = False
response['msg'] = "key does not exist"
file_cache = self.read_file()
if key in file_cache:
del file_cache[key]
self.update_file(file_cache)
response['status'] = True
response['msg'] = "success"
return response |
def G(self, v, t):
    """Aburn2012 equations right hand side, noise term.

    Args:
        v: (8,) array, state vector (unused by the noise term)
        t: number, scalar time (unused by the noise term)

    Returns:
        (8,1) array. Only one matrix column, meaning that the noise input
        to the pyramidal and spiny populations is modelled as fully
        correlated. To simulate uncorrelated inputs instead, use an array
        of shape (8, 2) with the second noise element at [5,1] instead
        of [5,0].
    """
    noise = np.zeros((8, 1))
    noise[4, 0] = self.ke1 * self.He1 * self.u_sdev
    noise[5, 0] = self.ke2 * self.He2 * self.p_sdev
    return noise
Args:
v: (8,) array
state vector
t: number
scalar time
Returns:
(8,1) array
Only one matrix column, meaning that in this example we are modelling
the noise input to pyramidal and spiny populations as fully
correlated. To simulate uncorrelated inputs instead, use an array of
shape (8, 2) with the second noise element [5,1] instead of [5,0]. | Below is the the instruction that describes the task:
### Input:
Aburn2012 equations right hand side, noise term
Args:
v: (8,) array
state vector
t: number
scalar time
Returns:
(8,1) array
Only one matrix column, meaning that in this example we are modelling
the noise input to pyramidal and spiny populations as fully
correlated. To simulate uncorrelated inputs instead, use an array of
shape (8, 2) with the second noise element [5,1] instead of [5,0].
### Response:
def G(self, v, t):
"""Aburn2012 equations right hand side, noise term
Args:
v: (8,) array
state vector
t: number
scalar time
Returns:
(8,1) array
Only one matrix column, meaning that in this example we are modelling
the noise input to pyramidal and spiny populations as fully
correlated. To simulate uncorrelated inputs instead, use an array of
shape (8, 2) with the second noise element [5,1] instead of [5,0].
"""
ret = np.zeros((8, 1))
ret[4,0] = self.ke1 * self.He1 * self.u_sdev
ret[5,0] = self.ke2 * self.He2 * self.p_sdev
return ret |
def service_status(self, name):
    """Pull the current status of a service by name.

    Returns:
        dict: A dictionary of service status
    """
    # Bridge the async client call onto the synchronous caller's thread.
    status_coro = self._client.service_status(name)
    return self._loop.run_coroutine(status_coro)
Returns:
dict: A dictionary of service status | Below is the the instruction that describes the task:
### Input:
Pull the current status of a service by name.
Returns:
dict: A dictionary of service status
### Response:
def service_status(self, name):
"""Pull the current status of a service by name.
Returns:
dict: A dictionary of service status
"""
return self._loop.run_coroutine(self._client.service_status(name)) |
def is_same_day(self, dt):
    """
    Check whether ``dt`` falls on the same calendar day as this instance.

    :type dt: DateTime or datetime or str or int
    :rtype: bool
    """
    other = pendulum.instance(dt)
    return other.to_date_string() == self.to_date_string()
as the instance current day.
:type dt: DateTime or datetime or str or int
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Checks if the passed in date is the same day
as the instance current day.
:type dt: DateTime or datetime or str or int
:rtype: bool
### Response:
def is_same_day(self, dt):
"""
Checks if the passed in date is the same day
as the instance current day.
:type dt: DateTime or datetime or str or int
:rtype: bool
"""
dt = pendulum.instance(dt)
return self.to_date_string() == dt.to_date_string() |
def handle_authorized_event(self, event):
    """Request the roster once the stream is authorized.

    When the server supports roster versioning, the locally cached roster
    version (or an empty string when none is cached) is sent so the server
    can reply with a delta; otherwise no version is sent at all.
    """
    self.server = event.authorized_jid.bare()
    if "versioning" not in self.server_features:
        version = None
    elif self.roster is not None and self.roster.version is not None:
        version = self.roster.version
    else:
        version = u""
    self.request_roster(version)
### Input:
Request roster upon login.
### Response:
def handle_authorized_event(self, event):
"""Request roster upon login."""
self.server = event.authorized_jid.bare()
if "versioning" in self.server_features:
if self.roster is not None and self.roster.version is not None:
version = self.roster.version
else:
version = u""
else:
version = None
self.request_roster(version) |
def _parse_members(self, contents, module):
    """Extracts any module-level members from the code. They must appear before
    any type declarations.

    :param contents: full source text of the module being parsed.
    :param module: module object whose ``preamble``, ``members`` and
        ``predocs`` attributes are populated in place.
    """
    #We need to get hold of the text before the module's main CONTAINS keyword
    #so that we don't find variables from executables and claim them as
    #belonging to the module.
    icontains = module.contains_index
    ichar = module.charindex(icontains, 0)
    module.preamble = module.refstring[:ichar]
    #Get a dictionary of all the members in this module body.
    #We only want to look at variable definitions before the first type.
    lowest = ichar
    remove = [] #(start, end) spans of type declarations; used further below
    for t in module.types:
        remove.append((module.types[t].start, module.types[t].end))
        if module.types[t].start < lowest:
            lowest = module.types[t].start
    #NOTE(review): the offset "module.start + 10 + len(module.name)" presumably
    #skips the "module <name>" header text — confirm against the tokenizer.
    module.members.update(self.vparser.parse(contents[:lowest-(module.start + 10 + len(module.name))], module))
    #The docstrings for these members will appear as member tags in the same
    #preamble text. We can't use the entire preamble for this because member
    #docs inside of a type declaration will show up as belonging to the
    #module, when in fact, they don't.
    remove.sort(key=lambda tup: tup[0])
    retain = []
    cur_end = 0
    for rem in remove:
        #Keep the text up to and including the first line of each type's
        #signature, then skip the rest of the type body.
        signature = module.refstring[rem[0]+1:rem[1]].index("\n") + 2
        keep = module.refstring[cur_end:rem[0] + signature]
        cur_end = rem[1]
        retain.append(keep)
    #If there weren't any types in the module, we still want to get at the docs in
    #the preamble. (A plain string is fine here: "".join(str) returns the string.)
    if len(remove) == 0:
        retain = module.preamble
    docsearch = "".join(retain)
    module.predocs = self.docparser.parse_docs(docsearch, module)
    if module.name in module.predocs:
        #We can only do member docstrings if the module had internal docstrings
        #that map to members.
        memdocs = self.docparser.to_doc(module.predocs[module.name][0], module.name)
        remainingdocs = self.docparser.process_memberdocs(memdocs, module)
        module.predocs[module.name] = remainingdocs
any type declalations. | Below is the the instruction that describes the task:
### Input:
Extracts any module-level members from the code. They must appear before
any type declalations.
### Response:
def _parse_members(self, contents, module):
"""Extracts any module-level members from the code. They must appear before
any type declalations."""
#We need to get hold of the text before the module's main CONTAINS keyword
#so that we don't find variables from executables and claim them as
#belonging to the module.
icontains = module.contains_index
ichar = module.charindex(icontains, 0)
module.preamble = module.refstring[:ichar]
#Get a dictionary of all the members in this module body
#We only want to look at variable definitions before the first type
lowest = ichar
remove = [] #Will use later below, see next comment
for t in module.types:
remove.append((module.types[t].start, module.types[t].end))
if module.types[t].start < lowest:
lowest = module.types[t].start
module.members.update(self.vparser.parse(contents[:lowest-(module.start + 10 + len(module.name))], module))
#The docstrings for these members will appear as member tags in the same
#preamble text. We can't use the entire preamble for this because member
#docs inside of a type declaration will show up as belonging to the
#module, when in fact, they don't.
remove.sort(key=lambda tup: tup[0])
retain = []
cur_end = 0
for rem in remove:
signature = module.refstring[rem[0]+1:rem[1]].index("\n") + 2
keep = module.refstring[cur_end:rem[0] + signature]
cur_end = rem[1]
retain.append(keep)
#If there weren't any types in the module, we still want to get at the docs in
#the preamble.
if len(remove) == 0:
retain = module.preamble
docsearch = "".join(retain)
module.predocs = self.docparser.parse_docs(docsearch, module)
if module.name in module.predocs:
#We can only do member docstrings if the module had internal docstrings
#that may to members.
memdocs = self.docparser.to_doc(module.predocs[module.name][0], module.name)
remainingdocs = self.docparser.process_memberdocs(memdocs, module)
module.predocs[module.name] = remainingdocs |
def open(self):
    """Open an SSH connection with a Pluribus machine.

    On success, marks the driver as connected and builds ``self.config``.
    On failure, raises :class:`pyPluribus.exceptions.ConnectionError` with
    a hint about the likely cause (credentials, port, or hostname).
    """
    self._connection = paramiko.SSHClient()
    self._connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        self._connection.connect(hostname=self._hostname,
                                 username=self._username,
                                 password=self._password,
                                 timeout=self._timeout,
                                 port=self._port)
        self._connection.get_transport().set_keepalive(self._keepalive)
        self.connected = True
        self.config = PluribusConfig(self)
    except paramiko.ssh_exception.AuthenticationException:
        raise pyPluribus.exceptions.ConnectionError("Unable to open connection with {hostname}: \
            invalid credentials!".format(hostname=self._hostname))
    except socket_error as sockerr:
        # Bug fix: exception objects have no `.message` attribute in
        # Python 3 (it was a Python 2-ism); str() works on both.
        raise pyPluribus.exceptions.ConnectionError("Cannot open connection: {skterr}. \
            Wrong port?".format(skterr=str(sockerr)))
    except socket_gaierror as sockgai:
        # Same `.message` fix as above for address-resolution errors.
        raise pyPluribus.exceptions.ConnectionError("Cannot open connection: {gaierr}. \
            Wrong hostname?".format(gaierr=str(sockgai)))
### Input:
Opens a SSH connection with a Pluribus machine.
### Response:
def open(self):
"""Opens a SSH connection with a Pluribus machine."""
self._connection = paramiko.SSHClient()
self._connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
self._connection.connect(hostname=self._hostname,
username=self._username,
password=self._password,
timeout=self._timeout,
port=self._port)
self._connection.get_transport().set_keepalive(self._keepalive)
self.connected = True
self.config = PluribusConfig(self)
except paramiko.ssh_exception.AuthenticationException:
raise pyPluribus.exceptions.ConnectionError("Unable to open connection with {hostname}: \
invalid credentials!".format(hostname=self._hostname))
except socket_error as sockerr:
raise pyPluribus.exceptions.ConnectionError("Cannot open connection: {skterr}. \
Wrong port?".format(skterr=sockerr.message))
except socket_gaierror as sockgai:
raise pyPluribus.exceptions.ConnectionError("Cannot open connection: {gaierr}. \
Wrong hostname?".format(gaierr=sockgai.message)) |
def UsersChangePassword (self, current_password, new_password):
    """
    Change the password for the current user.

    @param current_password (string) - md5 hash of the current password of the user
    @param new_password (string) - md5 hash of the new password of the user (make sure to doublecheck!)

    @return (bool) - Boolean indicating whether ChangePassword was successful.
    """
    payload = {"current_password": current_password, "new_password": new_password}
    if not self.__SenseApiCall__('/change_password', "POST", payload):
        self.__error__ = "api call unsuccessful"
        return False
    return True
@param current_password (string) - md5 hash of the current password of the user
@param new_password (string) - md5 hash of the new password of the user (make sure to doublecheck!)
@return (bool) - Boolean indicating whether ChangePassword was successful. | Below is the the instruction that describes the task:
### Input:
Change the password for the current user
@param current_password (string) - md5 hash of the current password of the user
@param new_password (string) - md5 hash of the new password of the user (make sure to doublecheck!)
@return (bool) - Boolean indicating whether ChangePassword was successful.
### Response:
def UsersChangePassword (self, current_password, new_password):
"""
Change the password for the current user
@param current_password (string) - md5 hash of the current password of the user
@param new_password (string) - md5 hash of the new password of the user (make sure to doublecheck!)
@return (bool) - Boolean indicating whether ChangePassword was successful.
"""
if self.__SenseApiCall__('/change_password', "POST", {"current_password":current_password, "new_password":new_password}):
return True
else:
self.__error__ = "api call unsuccessful"
return False |
def target_sequence(self):
    # type: () -> SeqRecord
    """Get the target sequence in the vector.

    The target sequence is the part of the plasmid that is not discarded
    during the assembly (everything except the placeholder sequence).
    """
    # Pick the match groups delimiting the placeholder depending on the
    # enzyme's overhang direction.
    if self.cutter.is_3overhang():
        span_start, span_end = self._match.span(2)[0], self._match.span(3)[1]
    else:
        span_start, span_end = self._match.span(1)[0], self._match.span(2)[1]
    rotated = self.record << span_start
    return add_as_source(self.record, rotated[span_end - span_start:])
The target sequence if the part of the plasmid that is not discarded
during the assembly (everything except the placeholder sequence). | Below is the the instruction that describes the task:
### Input:
Get the target sequence in the vector.
The target sequence if the part of the plasmid that is not discarded
during the assembly (everything except the placeholder sequence).
### Response:
def target_sequence(self):
# type: () -> SeqRecord
"""Get the target sequence in the vector.
The target sequence if the part of the plasmid that is not discarded
during the assembly (everything except the placeholder sequence).
"""
if self.cutter.is_3overhang():
start, end = self._match.span(2)[0], self._match.span(3)[1]
else:
start, end = self._match.span(1)[0], self._match.span(2)[1]
return add_as_source(self.record, (self.record << start)[end - start :]) |
def remove_specification(self, name):
    """Remove the specification that matches a query parameter, if present.

    No checks for the specified or any other parameter are made regarding
    specification removal.

    :param name: parameter name to remove
    :return: None
    """
    # pop with a default is a no-op when the key is absent.
    self.__specs.pop(name, None)
are made regarding specification removing
:param name: parameter name to remove
:return: None | Below is the the instruction that describes the task:
### Input:
Remove a specification that matches a query parameter. No checks for the specified or any parameter
are made regarding specification removing
:param name: parameter name to remove
:return: None
### Response:
def remove_specification(self, name):
""" Remove a specification that matches a query parameter. No checks for the specified or any parameter
are made regarding specification removing
:param name: parameter name to remove
:return: None
"""
if name in self.__specs:
self.__specs.pop(name) |
def image_to_rgb(self, path, n=10):
    """
    Append ``n`` colors sampled from pixel values in the image at ``path``.

    The Core Image library must be present to determine pixel colors.
    F. Albers: http://nodebox.net/code/index.php/shared_2007-06-11-11-37-05
    """
    from PIL import Image
    pixels = Image.open(path).getdata()
    for _ in _range(n):
        rgba = _list(choice(pixels))
        if len(rgba) == 3:
            # RGB image: pad with a fully opaque alpha channel.
            rgba.append(255)
        r, g, b, a = [channel / 255.0 for channel in rgba]
        self.append(color(r, g, b, a, mode="rgb"))
The Core Image library must be present to determine pixel colors.
F. Albers: http://nodebox.net/code/index.php/shared_2007-06-11-11-37-05 | Below is the the instruction that describes the task:
### Input:
Returns a list of colors based on pixel values in the image.
The Core Image library must be present to determine pixel colors.
F. Albers: http://nodebox.net/code/index.php/shared_2007-06-11-11-37-05
### Response:
def image_to_rgb(self, path, n=10):
"""
Returns a list of colors based on pixel values in the image.
The Core Image library must be present to determine pixel colors.
F. Albers: http://nodebox.net/code/index.php/shared_2007-06-11-11-37-05
"""
from PIL import Image
img = Image.open(path)
p = img.getdata()
f = lambda p: choice(p)
for i in _range(n):
rgba = f(p)
rgba = _list(rgba)
if len(rgba) == 3:
rgba.append(255)
r, g, b, a = [v / 255.0 for v in rgba]
clr = color(r, g, b, a, mode="rgb")
self.append(clr) |
def expand_internal_causal(universe: BELGraph, graph: BELGraph) -> None:
    """Add causal edges between entities in the sub-graph.

    Is an extremely thin wrapper around :func:`expand_internal`.

    :param universe: A BEL graph representing the universe of all knowledge
    :param graph: The target BEL graph to enrich with causal relations between contained nodes

    Equivalent to:

    >>> from pybel_tools.mutation import expand_internal
    >>> from pybel.struct.filters.edge_predicates import is_causal_relation
    >>> expand_internal(universe, graph, edge_predicates=is_causal_relation)
    """
    causal_only = is_causal_relation
    expand_internal(universe, graph, edge_predicates=causal_only)
Is an extremely thin wrapper around :func:`expand_internal`.
:param universe: A BEL graph representing the universe of all knowledge
:param graph: The target BEL graph to enrich with causal relations between contained nodes
Equivalent to:
>>> from pybel_tools.mutation import expand_internal
>>> from pybel.struct.filters.edge_predicates import is_causal_relation
>>> expand_internal(universe, graph, edge_predicates=is_causal_relation) | Below is the the instruction that describes the task:
### Input:
Add causal edges between entities in the sub-graph.
Is an extremely thin wrapper around :func:`expand_internal`.
:param universe: A BEL graph representing the universe of all knowledge
:param graph: The target BEL graph to enrich with causal relations between contained nodes
Equivalent to:
>>> from pybel_tools.mutation import expand_internal
>>> from pybel.struct.filters.edge_predicates import is_causal_relation
>>> expand_internal(universe, graph, edge_predicates=is_causal_relation)
### Response:
def expand_internal_causal(universe: BELGraph, graph: BELGraph) -> None:
"""Add causal edges between entities in the sub-graph.
Is an extremely thin wrapper around :func:`expand_internal`.
:param universe: A BEL graph representing the universe of all knowledge
:param graph: The target BEL graph to enrich with causal relations between contained nodes
Equivalent to:
>>> from pybel_tools.mutation import expand_internal
>>> from pybel.struct.filters.edge_predicates import is_causal_relation
>>> expand_internal(universe, graph, edge_predicates=is_causal_relation)
"""
expand_internal(universe, graph, edge_predicates=is_causal_relation) |
def get_action_by_id(self, action_id):
    """Fetch the detailed description of a single event by its id."""
    parsed_json, raw_json = self._call('getactionbyid', element_id=action_id)
    return Action.object_from_api(parsed_json, raw_json)
### Input:
Получение детального описания события
### Response:
def get_action_by_id(self, action_id):
""" Получение детального описания события
"""
parsed_json, raw_json = self._call('getactionbyid', element_id=action_id)
action = Action.object_from_api(parsed_json, raw_json)
return action |
def find_state_op_colocation_error(graph, reported_tags=None):
    """Returns error message for colocation of state ops, or None if ok."""
    state_op_types = list_registered_stateful_ops_without_inputs()
    state_op_map = {op.name: op for op in graph.get_operations()
                    if op.type in state_op_types}
    # A colocation group is acceptable only when it points at another
    # state-holding node (the "loc:@<name>" form).
    loc_prefix = tf.compat.as_bytes("loc:@")
    for op in state_op_map.values():
        for group in op.colocation_groups():
            group_ok = (group.startswith(loc_prefix) and
                        tf.compat.as_str_any(group[5:]) in state_op_map)
            if group_ok:
                continue
            if reported_tags is None:
                tags_prefix = ""
            else:
                tags_prefix = "in the graph for tags %s, " % reported_tags
            return (
                "A state-holding node x of a module's graph (e.g., a Variable op) "
                "must not be subject to a tf.colocate_with(y) constraint "
                "unless y is also a state-holding node.\n"
                "Details: %snode '%s' has op '%s', which counts as state-holding, "
                "but Operation.colocation_groups() == %s. " %
                (tags_prefix, op.name, op.type, op.colocation_groups()))
    return None
### Input:
Returns error message for colocation of state ops, or None if ok.
### Response:
def find_state_op_colocation_error(graph, reported_tags=None):
"""Returns error message for colocation of state ops, or None if ok."""
state_op_types = list_registered_stateful_ops_without_inputs()
state_op_map = {op.name: op for op in graph.get_operations()
if op.type in state_op_types}
for op in state_op_map.values():
for colocation_group in op.colocation_groups():
if not (colocation_group.startswith(tf.compat.as_bytes("loc:@")) and
tf.compat.as_str_any(colocation_group[5:]) in state_op_map):
tags_prefix = ("" if reported_tags is None else
"in the graph for tags %s, " % reported_tags)
return (
"A state-holding node x of a module's graph (e.g., a Variable op) "
"must not be subject to a tf.colocate_with(y) constraint "
"unless y is also a state-holding node.\n"
"Details: %snode '%s' has op '%s', which counts as state-holding, "
"but Operation.colocation_groups() == %s. " %
(tags_prefix, op.name, op.type, op.colocation_groups()))
return None |
def parse(msg):
    """
    Helper method for parsing a Mongrel2 request string and returning a new
    `MongrelRequest` instance.

    The wire format is ``sender conn_id path rest`` where ``rest`` holds a
    tnetstring-encoded headers value (possibly JSON text) followed by a
    tnetstring-encoded body.
    """
    sender, conn_id, path, rest = msg.split(' ', 3)
    headers, rest = tnetstring.pop(rest)
    body, _ = tnetstring.pop(rest)
    # isinstance is the idiomatic type check and also accepts str
    # subclasses, unlike an exact `type(...) is str` comparison.
    if isinstance(headers, str):
        headers = json.loads(headers)
    return MongrelRequest(sender, conn_id, path, headers, body)
`MongrelRequest` instance. | Below is the the instruction that describes the task:
### Input:
Helper method for parsing a Mongrel2 request string and returning a new
`MongrelRequest` instance.
### Response:
def parse(msg):
"""
Helper method for parsing a Mongrel2 request string and returning a new
`MongrelRequest` instance.
"""
sender, conn_id, path, rest = msg.split(' ', 3)
headers, rest = tnetstring.pop(rest)
body, _ = tnetstring.pop(rest)
if type(headers) is str:
headers = json.loads(headers)
return MongrelRequest(sender, conn_id, path, headers, body) |
def load_secrets(self, secret_path):
    """Render secrets into the config object.

    :param secret_path: path to the secrets source that is merged with the
        config found at ``self.config_path``; the rendered result replaces
        ``self._config``.
    """
    self._config = p_config.render_secrets(self.config_path, secret_path)
### Input:
render secrets into config object
### Response:
def load_secrets(self, secret_path):
"""render secrets into config object"""
self._config = p_config.render_secrets(self.config_path, secret_path) |
def fix_timezone(df, freq, tz=None):
    """Normalize a pandas frame's index to a timezone-aware DatetimeIndex.

    :param df: frame whose index holds timestamps, possibly as strings.
    :param freq: resolution string (e.g. ``'15min'``) used when a naive
        string index must be resampled before localizing.
    :param tz: target timezone name passed through to the tz conversion;
        ``None`` keeps the downstream default.
    :return: the frame with a timezone-aware index and its original
        index name restored.

    NOTE(review): ``resample`` and ``set_timezone`` are module-level
    helpers defined elsewhere in this project — confirm their contracts.
    """
    index_name = df.index.name
    # fix timezone
    if isinstance(df.index[0], str):
        # timezone df exists: the string timestamps end with a UTC offset
        # such as "+01:00" or "-05:00" (`|` is bitwise-or on two bools here,
        # equivalent to `or` for this use)
        if ("-" in df.index[0][-6:]) | ("+" in df.index[0][-6:]):
            df.index = pd.to_datetime(df.index, utc=False)
            df.index = df.index.tz_localize('UTC').tz_convert(tz)
        # no timezone df - do some resampling
        else:
            # original range (still the raw strings at this point)
            start_range = df.index[0]
            end_range = df.index[-1]
            # resample df
            df.index = pd.to_datetime(df.index, utc=True)
            df = resample(df, resolution=freq, ffill=False, dropna=False)
            # create date range; strip digits from freq so e.g. "15min"
            # becomes "min" for pd.date_range
            new_freq = ''.join(i for i in freq if not i.isdigit())
            rng = pd.date_range(start=start_range,
                                end=end_range, tz=tz, freq=new_freq)
            # assign date range to df and drop empty rows
            df.index = rng
            df.dropna(inplace=True)
    # finalize timezone (also for timezone-aware df)
    df = set_timezone(df, tz=tz)
    df.index.name = index_name
    return df
### Input:
set timezone for pandas
### Response:
def fix_timezone(df, freq, tz=None):
""" set timezone for pandas """
index_name = df.index.name
# fix timezone
if isinstance(df.index[0], str):
# timezone df exists
if ("-" in df.index[0][-6:]) | ("+" in df.index[0][-6:]):
df.index = pd.to_datetime(df.index, utc=False)
df.index = df.index.tz_localize('UTC').tz_convert(tz)
# no timezone df - do some resampling
else:
# original range
start_range = df.index[0]
end_range = df.index[-1]
# resample df
df.index = pd.to_datetime(df.index, utc=True)
df = resample(df, resolution=freq, ffill=False, dropna=False)
# create date range
new_freq = ''.join(i for i in freq if not i.isdigit())
rng = pd.date_range(start=start_range,
end=end_range, tz=tz, freq=new_freq)
# assign date range to df and drop empty rows
df.index = rng
df.dropna(inplace=True)
# finalize timezone (also for timezone-aware df)
df = set_timezone(df, tz=tz)
df.index.name = index_name
return df |
def set_annotation(self):
    """Appends the context's ``pending_symbol`` to its ``annotations`` sequence."""
    assert self.pending_symbol is not None
    assert not self.value
    # The pending symbol becomes an annotation.
    new_annotations = (_as_symbol(self.pending_symbol, is_symbol_value=False),)
    if self.annotations:
        self.annotations = self.annotations + new_annotations
    else:
        self.annotations = new_annotations
    self.ion_type = None
    self.pending_symbol = None  # reset pending symbol
    self.quoted_text = False
    self.line_comment = False
    self.is_self_delimiting = False
    return self
### Input:
Appends the context's ``pending_symbol`` to its ``annotations`` sequence.
### Response:
def set_annotation(self):
"""Appends the context's ``pending_symbol`` to its ``annotations`` sequence."""
assert self.pending_symbol is not None
assert not self.value
annotations = (_as_symbol(self.pending_symbol, is_symbol_value=False),) # pending_symbol becomes an annotation
self.annotations = annotations if not self.annotations else self.annotations + annotations
self.ion_type = None
self.pending_symbol = None # reset pending symbol
self.quoted_text = False
self.line_comment = False
self.is_self_delimiting = False
return self |
def best_fit_plane(self):
    """Fits a plane to the point cloud using least squares.

    Returns
    -------
    :obj:`tuple` of :obj:`numpy.ndarray` of float
        A normal vector to and point in the fitted plane.
    """
    # Set up the normal equations A w = b for the model z = w0*x + w1*y + w2.
    X = np.c_[self.x_coords, self.y_coords, np.ones(self.num_points)]
    y = self.z_coords
    A = X.T.dot(X)
    b = X.T.dot(y)
    # np.linalg.solve is both cheaper and more numerically stable than
    # forming the explicit inverse with np.linalg.inv(A).dot(b).
    w = np.linalg.solve(A, b)
    # Normal of the plane z - (w0*x + w1*y + w2) = 0, normalized to unit length.
    n = np.array([w[0], w[1], -1])
    n = n / np.linalg.norm(n)
    n = Direction(n, self._frame)
    x0 = self.mean()
    return n, x0
Returns
-------
:obj:`tuple` of :obj:`numpy.ndarray` of float
A normal vector to and point in the fitted plane. | Below is the the instruction that describes the task:
### Input:
Fits a plane to the point cloud using least squares.
Returns
-------
:obj:`tuple` of :obj:`numpy.ndarray` of float
A normal vector to and point in the fitted plane.
### Response:
def best_fit_plane(self):
"""Fits a plane to the point cloud using least squares.
Returns
-------
:obj:`tuple` of :obj:`numpy.ndarray` of float
A normal vector to and point in the fitted plane.
"""
X = np.c_[self.x_coords, self.y_coords, np.ones(self.num_points)]
y = self.z_coords
A = X.T.dot(X)
b = X.T.dot(y)
w = np.linalg.inv(A).dot(b)
n = np.array([w[0], w[1], -1])
n = n / np.linalg.norm(n)
n = Direction(n, self._frame)
x0 = self.mean()
return n, x0 |
def sargasso_chart (self):
    """ Build the Sargasso assigned-reads bar graph. """
    plot_config = {
        'id': 'sargasso_assignment_plot',
        'title': 'Sargasso: Assigned Reads',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads'
    }
    # Restrict the plot to the read-count categories only.
    read_keys = [key for key in self.sargasso_keys if 'Reads' in key]
    return bargraph.plot(self.sargasso_data, read_keys, plot_config)
### Input:
Make the sargasso plot
### Response:
def sargasso_chart (self):
""" Make the sargasso plot """
# Config for the plot
config = {
'id': 'sargasso_assignment_plot',
'title': 'Sargasso: Assigned Reads',
'ylab': '# Reads',
'cpswitch_counts_label': 'Number of Reads'
}
#We only want to plot the READs at the moment
return bargraph.plot(self.sargasso_data, [name for name in self.sargasso_keys if 'Reads' in name], config) |
def __makeShowColumnFunction(self, column_idx):
    """Return a callback that shows or hides the column at ``column_idx``.

    The returned callable is suitable as a toggle handler: it receives a
    boolean ``checked`` flag and shows the column when checked, hides it
    otherwise.
    """
    # A named inner function is preferred over assigning a lambda (PEP 8).
    def show_column(checked):
        self.setColumnHidden(column_idx, not checked)
    return show_column
### Input:
Creates a function that shows or hides a column.
### Response:
def __makeShowColumnFunction(self, column_idx):
""" Creates a function that shows or hides a column."""
show_column = lambda checked: self.setColumnHidden(column_idx, not checked)
return show_column |
def log(self, x, base=2):
    """Computes the logarithm of x with the given base
    (the default base is 2).

    :param x: value to take the logarithm of; coerced with ``float()``
    :param base: logarithm base (default 2)
    :return: the result as produced by ``self._format_result``
    """
    # NOTE(review): ``log`` here resolves to math.log imported at module
    # level (not this method) — confirm against the file's imports.
    return self._format_result(log(float(x), base))
(the default base is 2). | Below is the the instruction that describes the task:
### Input:
Computes the logarithm of x with the given base
(the default base is 2).
### Response:
def log(self, x, base=2):
"""Computes the logarithm of x with the given base
(the default base is 2)."""
return self._format_result(log(float(x), base)) |
def get_all_firmwares(self, filter='', start=0, count=-1, query='', sort=''):
    """
    Retrieve the firmware inventory across all servers.

    A filter expression may be supplied to narrow the results to a
    particular server model, component name, and/or component firmware
    version.

    Note:
        This method is available for API version 300 or later.

    Args:
        start:
            Index (0-based) of the first item to return; defaults to 0.
        count:
            Number of resources to return; -1 requests all items. The
            actual count may differ if start + count exceeds the total.
        filter (list or str):
            General filter/query string restricting the returned items.
            Empty means no filtering.
        query:
            General query string; empty returns all resources.
        sort:
            Sort order for the result set; defaults to oldest-first by
            creation time.

    Returns:
        list: Firmware inventory entries.
    """
    firmware_uri = '%s/*/firmware' % self.URI
    return self._helper.get_all(start, count, filter, query, sort, '', '', firmware_uri)
expression to select a particular server model, component name, and/or component firmware version.
Note:
This method is available for API version 300 or later.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
query:
A general query string to narrow the list of resources returned. The default is no query; all resources
are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
list: List of firmware inventory. | Below is the the instruction that describes the task:
### Input:
Gets a list of firmware inventory across all servers. To filter the returned data, specify a filter
expression to select a particular server model, component name, and/or component firmware version.
Note:
This method is available for API version 300 or later.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
query:
A general query string to narrow the list of resources returned. The default is no query; all resources
are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
list: List of firmware inventory.
### Response:
def get_all_firmwares(self, filter='', start=0, count=-1, query='', sort=''):
"""
Gets a list of firmware inventory across all servers. To filter the returned data, specify a filter
expression to select a particular server model, component name, and/or component firmware version.
Note:
This method is available for API version 300 or later.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
query:
A general query string to narrow the list of resources returned. The default is no query; all resources
are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
list: List of firmware inventory.
"""
uri = self.URI + "/*/firmware"
return self._helper.get_all(start, count, filter, query, sort, '', '', uri) |
def returner(ret):
    '''
    Return data to an odbc server
    '''
    conn = _get_conn(ret)
    cur = conn.cursor()
    sql = '''INSERT INTO salt_returns
            (fun, jid, retval, id, success, full_ret)
            VALUES (?, ?, ?, ?, ?, ?)'''
    # Bind all values positionally; structured fields are JSON-encoded.
    bind_values = (
        ret['fun'],
        ret['jid'],
        salt.utils.json.dumps(ret['return']),
        ret['id'],
        ret['success'],
        salt.utils.json.dumps(ret),
    )
    cur.execute(sql, bind_values)
    _close_conn(conn)
### Input:
Return data to an odbc server
### Response:
def returner(ret):
'''
Return data to an odbc server
'''
conn = _get_conn(ret)
cur = conn.cursor()
sql = '''INSERT INTO salt_returns
(fun, jid, retval, id, success, full_ret)
VALUES (?, ?, ?, ?, ?, ?)'''
cur.execute(
sql, (
ret['fun'],
ret['jid'],
salt.utils.json.dumps(ret['return']),
ret['id'],
ret['success'],
salt.utils.json.dumps(ret)
)
)
_close_conn(conn) |
def enter_maintenance_mode(self):
    """
    Put the service in maintenance mode.

    @return: Reference to the completed command.
    @since: API v2
    """
    command = self._cmd('enterMaintenanceMode')
    if not command.success:
        return command
    # Refresh this object's state now that maintenance mode is active.
    self._update(_get_service(self._get_resource_root(), self._path()))
    return command
@return: Reference to the completed command.
@since: API v2 | Below is the the instruction that describes the task:
### Input:
Put the service in maintenance mode.
@return: Reference to the completed command.
@since: API v2
### Response:
def enter_maintenance_mode(self):
"""
Put the service in maintenance mode.
@return: Reference to the completed command.
@since: API v2
"""
cmd = self._cmd('enterMaintenanceMode')
if cmd.success:
self._update(_get_service(self._get_resource_root(), self._path()))
return cmd |
def do_bucket(self, count=1):
    '''Set self.bucket and return results.

    :param count: Number of rolls to make
    :return: List of tuples (total of roll, times it was rolled)
    '''
    # Tally each roll total into a fresh frequency map.
    self._bucket = {}
    for total in self.roll.roll(count):
        if total in self._bucket:
            self._bucket[total] += 1
        else:
            self._bucket[total] = 1
    return self.bucket
:param count: Number of rolls to make
:return: List of tuples (total of roll, times it was rolled) | Below is the the instruction that describes the task:
### Input:
Set self.bucket and return results.
:param count: Number of rolls to make
:return: List of tuples (total of roll, times it was rolled)
### Response:
def do_bucket(self, count=1):
'''Set self.bucket and return results.
:param count: Number of rolls to make
:return: List of tuples (total of roll, times it was rolled)
'''
self._bucket = dict()
for roll in self.roll.roll(count):
self._bucket[roll] = self._bucket.get(roll, 0) + 1
return self.bucket |
def get_root(self, drive):
    """
    Returns the root directory for the specified drive, creating
    it if necessary.
    """
    drive = _my_normcase(drive)
    if drive in self.Root:
        return self.Root[drive]
    # Not cached yet: build the root and register it under every alias.
    root = RootDir(drive, self)
    self.Root[drive] = root
    if not drive:
        # The empty drive name is an alias for the default drive...
        self.Root[self.defaultDrive] = root
    elif drive == self.defaultDrive:
        # ...and vice versa.
        self.Root[''] = root
    return root
it if necessary. | Below is the the instruction that describes the task:
### Input:
Returns the root directory for the specified drive, creating
it if necessary.
### Response:
def get_root(self, drive):
"""
Returns the root directory for the specified drive, creating
it if necessary.
"""
drive = _my_normcase(drive)
try:
return self.Root[drive]
except KeyError:
root = RootDir(drive, self)
self.Root[drive] = root
if not drive:
self.Root[self.defaultDrive] = root
elif drive == self.defaultDrive:
self.Root[''] = root
return root |
def enqueue(self, destination):
    """Enqueues given destination for processing.

    Given instance should be a valid destination.
    """
    if not destination:
        raise BgpProcessorError('Invalid destination %s.' % destination)
    # RtDest are queued in a separate queue
    if destination.route_family == RF_RTC_UC:
        queue = self._rtdest_queue
    else:
        queue = self._dest_queue
    # Skip duplicates: a destination already queued is only processed once.
    if not queue.is_on_list(destination):
        queue.append(destination)
    # Wake-up processing thread if sleeping.
    self.dest_que_evt.set()
Given instance should be a valid destination. | Below is the the instruction that describes the task:
### Input:
Enqueues given destination for processing.
Given instance should be a valid destination.
### Response:
def enqueue(self, destination):
"""Enqueues given destination for processing.
Given instance should be a valid destination.
"""
if not destination:
raise BgpProcessorError('Invalid destination %s.' % destination)
dest_queue = self._dest_queue
# RtDest are queued in a separate queue
if destination.route_family == RF_RTC_UC:
dest_queue = self._rtdest_queue
# We do not add given destination to the queue for processing if
# it is already on the queue.
if not dest_queue.is_on_list(destination):
dest_queue.append(destination)
# Wake-up processing thread if sleeping.
self.dest_que_evt.set() |
def get_build_params(metadata):
    '''get_build_params uses get_build_metadata to retrieve corresponding meta data values for a build.

    Each item whose ``value`` is None is resolved via ``get_build_metadata``
    using its ``key``. Non-sensitive values are logged as they are resolved.

    :param metadata: a list, each item a dictionary of metadata, in format:
       metadata = [{'key': 'repo_url', 'value': repo_url },
                   {'key': 'repo_id', 'value': repo_id },
                   {'key': 'credential', 'value': credential },
                   {'key': 'response_url', 'value': response_url },
                   {'key': 'token', 'value': token},
                   {'key': 'commit', 'value': commit }]
    :return: dict mapping each key to its resolved value.
    '''
    params = dict()
    for item in metadata:
        # Identity check: '== None' invokes __eq__ and can misfire on
        # objects that compare equal to None.
        if item['value'] is None:
            item['value'] = get_build_metadata(key=item['key'])
        params[item['key']] = item['value']
        # Never log sensitive values.
        if item['key'] not in ['token', 'secret', 'credential']:
            bot.info('%s is set to %s' % (item['key'], item['value']))
    return params
:param metadata: a list, each item a dictionary of metadata, in format:
metadata = [{'key': 'repo_url', 'value': repo_url },
{'key': 'repo_id', 'value': repo_id },
{'key': 'credential', 'value': credential },
{'key': 'response_url', 'value': response_url },
{'key': 'token', 'value': token},
{'key': 'commit', 'value': commit }] | Below is the the instruction that describes the task:
### Input:
get_build_params uses get_build_metadata to retrieve corresponding meta data values for a build
:param metadata: a list, each item a dictionary of metadata, in format:
metadata = [{'key': 'repo_url', 'value': repo_url },
{'key': 'repo_id', 'value': repo_id },
{'key': 'credential', 'value': credential },
{'key': 'response_url', 'value': response_url },
{'key': 'token', 'value': token},
{'key': 'commit', 'value': commit }]
### Response:
def get_build_params(metadata):
'''get_build_params uses get_build_metadata to retrieve corresponding meta data values for a build
:param metadata: a list, each item a dictionary of metadata, in format:
metadata = [{'key': 'repo_url', 'value': repo_url },
{'key': 'repo_id', 'value': repo_id },
{'key': 'credential', 'value': credential },
{'key': 'response_url', 'value': response_url },
{'key': 'token', 'value': token},
{'key': 'commit', 'value': commit }]
'''
params = dict()
for item in metadata:
if item['value'] == None:
response = get_build_metadata(key=item['key'])
item['value'] = response
params[item['key']] = item['value']
if item['key'] not in ['token', 'secret', 'credential']:
bot.info('%s is set to %s' %(item['key'],item['value']))
return params |
def inverse_transform(self, sequences):
    """Transform a list of sequences from internal indexing into
    labels

    Parameters
    ----------
    sequences : list
        List of sequences, each of which is one-dimensional array of
        integers in ``0, ..., n_states_ - 1``.

    Returns
    -------
    sequences : list
        List of sequences, each of which is one-dimensional array
        of labels.
    """
    sequences = list_of_1d(sequences)
    # Invert the label -> index mapping and vectorize the lookup.
    inverse_mapping = {index: label for label, index in self.mapping_.items()}
    to_label = np.vectorize(inverse_mapping.get)
    out = []
    for seq in sequences:
        unique_states = np.unique(seq)
        in_range = np.logical_and(0 <= unique_states, unique_states < self.n_states_)
        if not np.all(in_range):
            raise ValueError('sequence must be between 0 and n_states-1')
        out.append(to_label(seq))
    return out
labels
Parameters
----------
sequences : list
List of sequences, each of which is one-dimensional array of
integers in ``0, ..., n_states_ - 1``.
Returns
-------
sequences : list
List of sequences, each of which is one-dimensional array
of labels. | Below is the the instruction that describes the task:
### Input:
Transform a list of sequences from internal indexing into
labels
Parameters
----------
sequences : list
List of sequences, each of which is one-dimensional array of
integers in ``0, ..., n_states_ - 1``.
Returns
-------
sequences : list
List of sequences, each of which is one-dimensional array
of labels.
### Response:
def inverse_transform(self, sequences):
"""Transform a list of sequences from internal indexing into
labels
Parameters
----------
sequences : list
List of sequences, each of which is one-dimensional array of
integers in ``0, ..., n_states_ - 1``.
Returns
-------
sequences : list
List of sequences, each of which is one-dimensional array
of labels.
"""
sequences = list_of_1d(sequences)
inverse_mapping = {v: k for k, v in self.mapping_.items()}
f = np.vectorize(inverse_mapping.get)
result = []
for y in sequences:
uq = np.unique(y)
if not np.all(np.logical_and(0 <= uq, uq < self.n_states_)):
raise ValueError('sequence must be between 0 and n_states-1')
result.append(f(y))
return result |
def col_iscat(df,col_name = None):
    """ Report which columns of ``df`` are of type 'category'.

    Parameters:
        df - DataFrame
            DataFrame to check
        col_name - string, default None
            If specified, return True iff df[col_name] is of type
            'category'; otherwise return the list of categorical columns.
    """
    categorical_cols = df.select_dtypes(include = 'category').columns
    if col_name is not None:
        return col_name in categorical_cols
    return categorical_cols
whether the column in the DataFrame is of type 'category' instead.
Parameters:
df - DataFrame
DataFrame to check
col_name - string, default None
If specified, this function will True if df[col_name] is of type 'category' | Below is the the instruction that describes the task:
### Input:
Returns a list of columns that are of type 'category'. If col_name is specified, returns
whether the column in the DataFrame is of type 'category' instead.
Parameters:
df - DataFrame
DataFrame to check
col_name - string, default None
If specified, this function will True if df[col_name] is of type 'category'
### Response:
def col_iscat(df,col_name = None):
""" Returns a list of columns that are of type 'category'. If col_name is specified, returns
whether the column in the DataFrame is of type 'category' instead.
Parameters:
df - DataFrame
DataFrame to check
col_name - string, default None
If specified, this function will True if df[col_name] is of type 'category'
"""
col_list = df.select_dtypes(include = 'category').columns
if col_name is None:
return col_list
else:
return col_name in col_list |
def nsorted(
    to_sort: Iterable[str],
    key: Optional[Callable[[str], Any]] = None
) -> List[str]:
    """Returns a naturally sorted list"""
    if key is None:
        return sorted(to_sort, key=_natural_keys)
    # Compose the caller's key with the natural-order key.
    return sorted(to_sort, key=lambda text: _natural_keys(key(text)))  # type: ignore
### Input:
Returns a naturally sorted list
### Response:
def nsorted(
to_sort: Iterable[str],
key: Optional[Callable[[str], Any]] = None
) -> List[str]:
"""Returns a naturally sorted list"""
if key is None:
key_callback = _natural_keys
else:
def key_callback(text: str) -> List[Any]:
return _natural_keys(key(text)) # type: ignore
return sorted(to_sort, key=key_callback) |
def content_to_html(content, article_id):
    """Returns article/page content as HTML.

    ``content`` is a sequence of node dicts (each with ``type`` and
    ``data``); nodes are rendered in order and concatenated.
    """
    def render_node(html, node, index):
        """Renders node as HTML and appends it to the accumulated string."""
        if node['type'] == 'paragraph':
            return html + '<p>%s</p>' % node['data']
        else:
            if node['type'] == 'ad':
                # Ad slot ids and DFP types are numbered per ad seen so far
                # (``index``), not per node position.
                id = 'div-gpt-ad-1443288719995-' + str(10 + index) + '-' + str(article_id)
                dfp_type = 'Intra_Article_' + str(index + 1)
                size = 'banner'
                if node['data'] == 'mobile':
                    size = 'box'
                newString = '<div class="o-article-embed__advertisement"><div class="o-advertisement o-advertisment--banner o-advertisement--center"><div class="adslot" id="' + id + '" data-size="' + size + '" data-dfp="' + dfp_type + '"></div></div></div>'
                return html + '<div class="o-article-embed o-article-embed--advertisement">%s</div>\n' % newString
            try:
                # Polls are rendered through the generic widget embed,
                # with the node's own data nested under 'data'.
                if node['type'] == 'poll':
                    node['type'] = 'widget'
                    node['data']['data'] = node['data']
                return html + embeds.render(node['type'], node['data'])
            except EmbedException:
                # Unrenderable embeds are silently dropped from the output.
                return html
    html = ''
    index = 0
    for node in content:
        html = render_node(html, node, index)
        # Only ads advance the ad index used for slot numbering.
        if (node['type'] == 'ad'):
            index += 1
    # return mark_safe(reduce(render_node, content, ''))
    return mark_safe(html)
### Input:
Returns artilce/page content as HTML
### Response:
def content_to_html(content, article_id):
"""Returns artilce/page content as HTML"""
def render_node(html, node, index):
"""Renders node as HTML"""
if node['type'] == 'paragraph':
return html + '<p>%s</p>' % node['data']
else:
if node['type'] == 'ad':
id = 'div-gpt-ad-1443288719995-' + str(10 + index) + '-' + str(article_id)
dfp_type = 'Intra_Article_' + str(index + 1)
size = 'banner'
if node['data'] == 'mobile':
size = 'box'
newString = '<div class="o-article-embed__advertisement"><div class="o-advertisement o-advertisment--banner o-advertisement--center"><div class="adslot" id="' + id + '" data-size="' + size + '" data-dfp="' + dfp_type + '"></div></div></div>'
return html + '<div class="o-article-embed o-article-embed--advertisement">%s</div>\n' % newString
try:
if node['type'] == 'poll':
node['type'] = 'widget'
node['data']['data'] = node['data']
return html + embeds.render(node['type'], node['data'])
except EmbedException:
return html
html = ''
index = 0
for node in content:
html = render_node(html, node, index)
if (node['type'] == 'ad'):
index += 1
# return mark_safe(reduce(render_node, content, ''))
return mark_safe(html) |
def _set_tpvm(self, v, load=False):
    """
    Setter method for tpvm, mapped from YANG variable /tpvm (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_tpvm is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_tpvm() directly.
    """
    # Auto-generated pyangbind-style setter: coerce, validate, assign.
    if hasattr(v, "_utype"):
        # Unwrap union-typed values to their concrete representation first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=tpvm.tpvm, is_container='container', presence=False, yang_name="tpvm", rest_name="tpvm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'TPVM administration command', u'action': u'stop'}}, namespace='urn:brocade.com:mgmt:brocade-tpvm', defining_module='brocade-tpvm', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error describing the expected type.
        raise ValueError({
            'error-string': """tpvm must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=tpvm.tpvm, is_container='container', presence=False, yang_name="tpvm", rest_name="tpvm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'TPVM administration command', u'action': u'stop'}}, namespace='urn:brocade.com:mgmt:brocade-tpvm', defining_module='brocade-tpvm', yang_type='container', is_config=True)""",
        })
    self.__tpvm = t
    if hasattr(self, '_set'):
        # Notify the parent binding that a child value changed.
        self._set()
If this variable is read-only (config: false) in the
source YANG file, then _set_tpvm is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tpvm() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for tpvm, mapped from YANG variable /tpvm (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_tpvm is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tpvm() directly.
### Response:
def _set_tpvm(self, v, load=False):
"""
Setter method for tpvm, mapped from YANG variable /tpvm (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_tpvm is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tpvm() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=tpvm.tpvm, is_container='container', presence=False, yang_name="tpvm", rest_name="tpvm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'TPVM administration command', u'action': u'stop'}}, namespace='urn:brocade.com:mgmt:brocade-tpvm', defining_module='brocade-tpvm', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tpvm must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=tpvm.tpvm, is_container='container', presence=False, yang_name="tpvm", rest_name="tpvm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'TPVM administration command', u'action': u'stop'}}, namespace='urn:brocade.com:mgmt:brocade-tpvm', defining_module='brocade-tpvm', yang_type='container', is_config=True)""",
})
self.__tpvm = t
if hasattr(self, '_set'):
self._set() |
def loaddata(self, path, site=None):
    """
    Runs the Django loaddata management command.

    By default, runs on only the current site.
    Pass site=all to run on all sites.

    :param path: fixture path passed through to ``manage.py loaddata``.
    :param site: site name, or ``all`` for every site; defaults to the
        current ``SITE`` from the environment.
    """
    site = site or self.genv.SITE
    r = self.local_renderer
    r.env._loaddata_path = path
    for _site, site_data in self.iter_sites(site=site, no_secure=True):
        try:
            self.set_db(site=_site)
            r.env.SITE = _site
            # Run loaddata remotely with the site/role environment exported.
            r.sudo('export SITE={SITE}; export ROLE={ROLE}; '
                'cd {project_dir}; '
                '{manage_cmd} loaddata {_loaddata_path}')
        except KeyError:
            # NOTE(review): presumably a site without a configured database
            # raises KeyError in set_db and is skipped — confirm.
            pass
By default, runs on only the current site.
Pass site=all to run on all sites. | Below is the the instruction that describes the task:
### Input:
Runs the Dango loaddata management command.
By default, runs on only the current site.
Pass site=all to run on all sites.
### Response:
def loaddata(self, path, site=None):
"""
Runs the Dango loaddata management command.
By default, runs on only the current site.
Pass site=all to run on all sites.
"""
site = site or self.genv.SITE
r = self.local_renderer
r.env._loaddata_path = path
for _site, site_data in self.iter_sites(site=site, no_secure=True):
try:
self.set_db(site=_site)
r.env.SITE = _site
r.sudo('export SITE={SITE}; export ROLE={ROLE}; '
'cd {project_dir}; '
'{manage_cmd} loaddata {_loaddata_path}')
except KeyError:
pass |
def start(self, text=None):
    """Starts the spinner on a separate thread.

    Parameters
    ----------
    text : None, optional
        Text to be used alongside spinner

    Returns
    -------
    self
    """
    if text is not None:
        self.text = text
    # No-op when disabled or already spinning.
    if not self._enabled or self._spinner_id is not None:
        return self
    if self._stream.isatty():
        cursor.hide(stream=self._stream)
    self._stop_spinner = threading.Event()
    self._spinner_thread = threading.Thread(target=self.render)
    # Daemonize so a lingering spinner never blocks interpreter exit.
    # (Attribute assignment replaces the deprecated setDaemon().)
    self._spinner_thread.daemon = True
    # Draw the first frame synchronously before handing off to the thread.
    self._render_frame()
    self._spinner_id = self._spinner_thread.name
    self._spinner_thread.start()
    return self
Parameters
----------
text : None, optional
Text to be used alongside spinner
Returns
-------
self | Below is the the instruction that describes the task:
### Input:
Starts the spinner on a separate thread.
Parameters
----------
text : None, optional
Text to be used alongside spinner
Returns
-------
self
### Response:
def start(self, text=None):
"""Starts the spinner on a separate thread.
Parameters
----------
text : None, optional
Text to be used alongside spinner
Returns
-------
self
"""
if text is not None:
self.text = text
if not self._enabled or self._spinner_id is not None:
return self
if self._stream.isatty():
cursor.hide(stream=self._stream)
self._stop_spinner = threading.Event()
self._spinner_thread = threading.Thread(target=self.render)
self._spinner_thread.setDaemon(True)
self._render_frame()
self._spinner_id = self._spinner_thread.name
self._spinner_thread.start()
return self |
def printo(msg, encoding=None, errors='replace', std_type='stdout'):
    """Write msg on stdout. If no encoding is specified
    the detected encoding of stdout is used. If the encoding
    can't encode some chars they are replaced by '?'

    :param msg: message
    :type msg: unicode on python2 | str on python3
    """
    stream = getattr(sys, std_type, sys.stdout)
    if encoding is None:
        # Probe the stream's declared encoding, if any.
        encoding = getattr(stream, 'encoding', None)
    if encoding is None:
        # Fallback to ascii if no encoding is found
        encoding = 'ascii'
    # https://docs.python.org/3/library/sys.html#sys.stdout
    # write in the binary buffer directly in python3
    stream = getattr(stream, 'buffer', stream)
    stream.write(msg.encode(encoding, errors=errors))
    stream.write(b'\n')
    stream.flush()
the detected encoding of stdout is used. If the encoding
can't encode some chars they are replaced by '?'
:param msg: message
:type msg: unicode on python2 | str on python3 | Below is the the instruction that describes the task:
### Input:
Write msg on stdout. If no encoding is specified
the detected encoding of stdout is used. If the encoding
can't encode some chars they are replaced by '?'
:param msg: message
:type msg: unicode on python2 | str on python3
### Response:
def printo(msg, encoding=None, errors='replace', std_type='stdout'):
"""Write msg on stdout. If no encoding is specified
the detected encoding of stdout is used. If the encoding
can't encode some chars they are replaced by '?'
:param msg: message
:type msg: unicode on python2 | str on python3
"""
std = getattr(sys, std_type, sys.stdout)
if encoding is None:
try:
encoding = std.encoding
except AttributeError:
encoding = None
# Fallback to ascii if no encoding is found
if encoding is None:
encoding = 'ascii'
# https://docs.python.org/3/library/sys.html#sys.stdout
# write in the binary buffer directly in python3
if hasattr(std, 'buffer'):
std = std.buffer
std.write(msg.encode(encoding, errors=errors))
std.write(b'\n')
std.flush() |
def unshare(self, group_id, **kwargs):
    """Delete a shared project link within a group.

    Args:
        group_id (int): ID of the group.
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabDeleteError: If the server failed to perform the request
    """
    project_id = self.get_id()
    share_path = '/projects/%s/share/%s' % (project_id, group_id)
    self.manager.gitlab.http_delete(share_path, **kwargs)
Args:
group_id (int): ID of the group.
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server failed to perform the request | Below is the the instruction that describes the task:
### Input:
Delete a shared project link within a group.
Args:
group_id (int): ID of the group.
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server failed to perform the request
### Response:
def unshare(self, group_id, **kwargs):
"""Delete a shared project link within a group.
Args:
group_id (int): ID of the group.
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server failed to perform the request
"""
path = '/projects/%s/share/%s' % (self.get_id(), group_id)
self.manager.gitlab.http_delete(path, **kwargs) |
def getPeopleTags(self):
    """
    Return a sequence of tags which have been applied to L{Person} items.

    @rtype: C{set}
    """
    person_tags = self.store.query(Tag, Tag.object == Person.storeID)
    # Deduplicate tag names across all tagged people.
    return set(person_tags.getColumn('name').distinct())
@rtype: C{set} | Below is the the instruction that describes the task:
### Input:
Return a sequence of tags which have been applied to L{Person} items.
@rtype: C{set}
### Response:
def getPeopleTags(self):
"""
Return a sequence of tags which have been applied to L{Person} items.
@rtype: C{set}
"""
query = self.store.query(
Tag, Tag.object == Person.storeID)
return set(query.getColumn('name').distinct()) |
def get_keys(self, bucket, timeout=None):
"""
Fetch a list of keys for the bucket
"""
bucket_type = self._get_bucket_type(bucket.bucket_type)
url = self.key_list_path(bucket.name, bucket_type=bucket_type,
timeout=timeout)
status, _, body = self._request('GET', url)
if status == 200:
props = json.loads(bytes_to_str(body))
return props['keys']
else:
raise RiakError('Error listing keys.') | Fetch a list of keys for the bucket | Below is the the instruction that describes the task:
### Input:
Fetch a list of keys for the bucket
### Response:
def get_keys(self, bucket, timeout=None):
"""
Fetch a list of keys for the bucket
"""
bucket_type = self._get_bucket_type(bucket.bucket_type)
url = self.key_list_path(bucket.name, bucket_type=bucket_type,
timeout=timeout)
status, _, body = self._request('GET', url)
if status == 200:
props = json.loads(bytes_to_str(body))
return props['keys']
else:
raise RiakError('Error listing keys.') |
def within_set(df, items=None):
"""
Assert that df is a subset of items
Parameters
==========
df : DataFrame
items : dict
mapping of columns (k) to array-like of values (v) that
``df[k]`` is expected to be a subset of
Returns
=======
df : DataFrame
"""
for k, v in items.items():
if not df[k].isin(v).all():
bad = df.loc[~df[k].isin(v), k]
raise AssertionError('Not in set', bad)
return df | Assert that df is a subset of items
Parameters
==========
df : DataFrame
items : dict
mapping of columns (k) to array-like of values (v) that
``df[k]`` is expected to be a subset of
Returns
=======
df : DataFrame | Below is the the instruction that describes the task:
### Input:
Assert that df is a subset of items
Parameters
==========
df : DataFrame
items : dict
mapping of columns (k) to array-like of values (v) that
``df[k]`` is expected to be a subset of
Returns
=======
df : DataFrame
### Response:
def within_set(df, items=None):
"""
Assert that df is a subset of items
Parameters
==========
df : DataFrame
items : dict
mapping of columns (k) to array-like of values (v) that
``df[k]`` is expected to be a subset of
Returns
=======
df : DataFrame
"""
for k, v in items.items():
if not df[k].isin(v).all():
bad = df.loc[~df[k].isin(v), k]
raise AssertionError('Not in set', bad)
return df |
def optimized(code, silent=True, ignore_errors=True):
"""Performs optimizations on already parsed code."""
return constant_fold(code, silent=silent, ignore_errors=ignore_errors) | Performs optimizations on already parsed code. | Below is the the instruction that describes the task:
### Input:
Performs optimizations on already parsed code.
### Response:
def optimized(code, silent=True, ignore_errors=True):
"""Performs optimizations on already parsed code."""
return constant_fold(code, silent=silent, ignore_errors=ignore_errors) |
def discrete_mean_curvature_measure(mesh, points, radius):
"""
Return the discrete mean curvature measure of a sphere centered
at a point as detailed in 'Restricted Delaunay triangulations and normal
cycle', Cohen-Steiner and Morvan.
Parameters
----------
points : (n,3) float, list of points in space
radius : float, the sphere radius
Returns
--------
mean_curvature: (n,) float, discrete mean curvature measure.
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
# axis aligned bounds
bounds = np.column_stack((points - radius,
points + radius))
# line segments that intersect axis aligned bounding box
candidates = [list(mesh.face_adjacency_tree.intersection(b))
for b in bounds]
mean_curv = np.empty(len(points))
for i, (x, x_candidates) in enumerate(zip(points, candidates)):
endpoints = mesh.vertices[mesh.face_adjacency_edges[x_candidates]]
lengths = line_ball_intersection(
endpoints[:, 0],
endpoints[:, 1],
center=x,
radius=radius)
angles = mesh.face_adjacency_angles[x_candidates]
signs = np.where(mesh.face_adjacency_convex[x_candidates], 1, -1)
mean_curv[i] = (lengths * angles * signs).sum() / 2
return mean_curv | Return the discrete mean curvature measure of a sphere centered
at a point as detailed in 'Restricted Delaunay triangulations and normal
cycle', Cohen-Steiner and Morvan.
Parameters
----------
points : (n,3) float, list of points in space
radius : float, the sphere radius
Returns
--------
mean_curvature: (n,) float, discrete mean curvature measure. | Below is the the instruction that describes the task:
### Input:
Return the discrete mean curvature measure of a sphere centered
at a point as detailed in 'Restricted Delaunay triangulations and normal
cycle', Cohen-Steiner and Morvan.
Parameters
----------
points : (n,3) float, list of points in space
radius : float, the sphere radius
Returns
--------
mean_curvature: (n,) float, discrete mean curvature measure.
### Response:
def discrete_mean_curvature_measure(mesh, points, radius):
"""
Return the discrete mean curvature measure of a sphere centered
at a point as detailed in 'Restricted Delaunay triangulations and normal
cycle', Cohen-Steiner and Morvan.
Parameters
----------
points : (n,3) float, list of points in space
radius : float, the sphere radius
Returns
--------
mean_curvature: (n,) float, discrete mean curvature measure.
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
# axis aligned bounds
bounds = np.column_stack((points - radius,
points + radius))
# line segments that intersect axis aligned bounding box
candidates = [list(mesh.face_adjacency_tree.intersection(b))
for b in bounds]
mean_curv = np.empty(len(points))
for i, (x, x_candidates) in enumerate(zip(points, candidates)):
endpoints = mesh.vertices[mesh.face_adjacency_edges[x_candidates]]
lengths = line_ball_intersection(
endpoints[:, 0],
endpoints[:, 1],
center=x,
radius=radius)
angles = mesh.face_adjacency_angles[x_candidates]
signs = np.where(mesh.face_adjacency_convex[x_candidates], 1, -1)
mean_curv[i] = (lengths * angles * signs).sum() / 2
return mean_curv |
def per_chat_id_in(s, types='all'):
"""
:param s:
a list or set of chat id
:param types:
``all`` or a list of chat types (``private``, ``group``, ``channel``)
:return:
a seeder function that returns the chat id only if the chat id is in ``s``
and chat type is in ``types``.
"""
return _wrap_none(lambda msg:
msg['chat']['id']
if (types == 'all' or msg['chat']['type'] in types) and msg['chat']['id'] in s
else None) | :param s:
a list or set of chat id
:param types:
``all`` or a list of chat types (``private``, ``group``, ``channel``)
:return:
a seeder function that returns the chat id only if the chat id is in ``s``
and chat type is in ``types``. | Below is the the instruction that describes the task:
### Input:
:param s:
a list or set of chat id
:param types:
``all`` or a list of chat types (``private``, ``group``, ``channel``)
:return:
a seeder function that returns the chat id only if the chat id is in ``s``
and chat type is in ``types``.
### Response:
def per_chat_id_in(s, types='all'):
"""
:param s:
a list or set of chat id
:param types:
``all`` or a list of chat types (``private``, ``group``, ``channel``)
:return:
a seeder function that returns the chat id only if the chat id is in ``s``
and chat type is in ``types``.
"""
return _wrap_none(lambda msg:
msg['chat']['id']
if (types == 'all' or msg['chat']['type'] in types) and msg['chat']['id'] in s
else None) |
def open_mask(fn:PathOrStr, div=False, convert_mode='L', after_open:Callable=None)->ImageSegment:
"Return `ImageSegment` object create from mask in file `fn`. If `div`, divides pixel values by 255."
return open_image(fn, div=div, convert_mode=convert_mode, cls=ImageSegment, after_open=after_open) | Return `ImageSegment` object create from mask in file `fn`. If `div`, divides pixel values by 255. | Below is the the instruction that describes the task:
### Input:
Return `ImageSegment` object create from mask in file `fn`. If `div`, divides pixel values by 255.
### Response:
def open_mask(fn:PathOrStr, div=False, convert_mode='L', after_open:Callable=None)->ImageSegment:
"Return `ImageSegment` object create from mask in file `fn`. If `div`, divides pixel values by 255."
return open_image(fn, div=div, convert_mode=convert_mode, cls=ImageSegment, after_open=after_open) |
def maf(genotypes):
"""Computes the MAF and returns a boolean indicating if the minor allele
is currently the coded allele.
"""
warnings.warn("deprecated: use 'Genotypes.maf'", DeprecationWarning)
g = genotypes.genotypes
maf = np.nansum(g) / (2 * np.sum(~np.isnan(g)))
if maf > 0.5:
maf = 1 - maf
return maf, False
return maf, True | Computes the MAF and returns a boolean indicating if the minor allele
is currently the coded allele. | Below is the the instruction that describes the task:
### Input:
Computes the MAF and returns a boolean indicating if the minor allele
is currently the coded allele.
### Response:
def maf(genotypes):
"""Computes the MAF and returns a boolean indicating if the minor allele
is currently the coded allele.
"""
warnings.warn("deprecated: use 'Genotypes.maf'", DeprecationWarning)
g = genotypes.genotypes
maf = np.nansum(g) / (2 * np.sum(~np.isnan(g)))
if maf > 0.5:
maf = 1 - maf
return maf, False
return maf, True |
def make_pixel_mask_from_col_row(column, row, default=0, value=1):
'''Generate mask from column and row lists
Parameters
----------
column : iterable, int
List of colums values.
row : iterable, int
List of row values.
default : int
Value of pixels that are not selected by the mask.
value : int
Value of pixels that are selected by the mask.
Returns
-------
mask : numpy.ndarray
'''
# FE columns and rows start from 1
col_array = np.array(column) - 1
row_array = np.array(row) - 1
if np.any(col_array >= 80) or np.any(col_array < 0) or np.any(row_array >= 336) or np.any(row_array < 0):
raise ValueError('Column and/or row out of range')
shape = (80, 336)
mask = np.full(shape, default, dtype=np.uint8)
mask[col_array, row_array] = value # advanced indexing
return mask | Generate mask from column and row lists
Parameters
----------
column : iterable, int
List of colums values.
row : iterable, int
List of row values.
default : int
Value of pixels that are not selected by the mask.
value : int
Value of pixels that are selected by the mask.
Returns
-------
mask : numpy.ndarray | Below is the the instruction that describes the task:
### Input:
Generate mask from column and row lists
Parameters
----------
column : iterable, int
List of colums values.
row : iterable, int
List of row values.
default : int
Value of pixels that are not selected by the mask.
value : int
Value of pixels that are selected by the mask.
Returns
-------
mask : numpy.ndarray
### Response:
def make_pixel_mask_from_col_row(column, row, default=0, value=1):
'''Generate mask from column and row lists
Parameters
----------
column : iterable, int
List of colums values.
row : iterable, int
List of row values.
default : int
Value of pixels that are not selected by the mask.
value : int
Value of pixels that are selected by the mask.
Returns
-------
mask : numpy.ndarray
'''
# FE columns and rows start from 1
col_array = np.array(column) - 1
row_array = np.array(row) - 1
if np.any(col_array >= 80) or np.any(col_array < 0) or np.any(row_array >= 336) or np.any(row_array < 0):
raise ValueError('Column and/or row out of range')
shape = (80, 336)
mask = np.full(shape, default, dtype=np.uint8)
mask[col_array, row_array] = value # advanced indexing
return mask |
def all_units_idle(self):
"""Return True if all units are idle.
"""
for unit in self.units.values():
unit_status = unit.data['agent-status']['current']
if unit_status != 'idle':
return False
return True | Return True if all units are idle. | Below is the the instruction that describes the task:
### Input:
Return True if all units are idle.
### Response:
def all_units_idle(self):
"""Return True if all units are idle.
"""
for unit in self.units.values():
unit_status = unit.data['agent-status']['current']
if unit_status != 'idle':
return False
return True |
def command(self, dbname, spec, slave_ok=False,
read_preference=ReadPreference.PRIMARY,
codec_options=DEFAULT_CODEC_OPTIONS, check=True,
allowable_errors=None, check_keys=False,
read_concern=None,
write_concern=None,
parse_write_concern_error=False,
collation=None,
session=None,
client=None,
retryable_write=False,
publish_events=True,
user_fields=None):
"""Execute a command or raise an error.
:Parameters:
- `dbname`: name of the database on which to run the command
- `spec`: a command document as a dict, SON, or mapping object
- `slave_ok`: whether to set the SlaveOkay wire protocol bit
- `read_preference`: a read preference
- `codec_options`: a CodecOptions instance
- `check`: raise OperationFailure if there are errors
- `allowable_errors`: errors to ignore if `check` is True
- `check_keys`: if True, check `spec` for invalid keys
- `read_concern`: The read concern for this command.
- `write_concern`: The write concern for this command.
- `parse_write_concern_error`: Whether to parse the
``writeConcernError`` field in the command response.
- `collation`: The collation for this command.
- `session`: optional ClientSession instance.
- `client`: optional MongoClient for gossipping $clusterTime.
- `retryable_write`: True if this command is a retryable write.
- `publish_events`: Should we publish events for this command?
- `user_fields` (optional): Response fields that should be decoded
using the TypeDecoders from codec_options, passed to
bson._decode_all_selective.
"""
self.validate_session(client, session)
session = _validate_session_write_concern(session, write_concern)
# Ensure command name remains in first place.
if not isinstance(spec, ORDERED_TYPES):
spec = SON(spec)
if (read_concern and self.max_wire_version < 4
and not read_concern.ok_for_legacy):
raise ConfigurationError(
'read concern level of %s is not valid '
'with a max wire version of %d.'
% (read_concern.level, self.max_wire_version))
if not (write_concern is None or write_concern.acknowledged or
collation is None):
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
if (self.max_wire_version >= 5 and
write_concern and
not write_concern.is_server_default):
spec['writeConcern'] = write_concern.document
elif self.max_wire_version < 5 and collation is not None:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use a collation.')
if session:
session._apply_to(spec, retryable_write, read_preference)
self.send_cluster_time(spec, session, client)
listeners = self.listeners if publish_events else None
unacknowledged = write_concern and not write_concern.acknowledged
if self.op_msg_enabled:
self._raise_if_not_writable(unacknowledged)
try:
return command(self.sock, dbname, spec, slave_ok,
self.is_mongos, read_preference, codec_options,
session, client, check, allowable_errors,
self.address, check_keys, listeners,
self.max_bson_size, read_concern,
parse_write_concern_error=parse_write_concern_error,
collation=collation,
compression_ctx=self.compression_context,
use_op_msg=self.op_msg_enabled,
unacknowledged=unacknowledged,
user_fields=user_fields)
except OperationFailure:
raise
# Catch socket.error, KeyboardInterrupt, etc. and close ourselves.
except BaseException as error:
self._raise_connection_failure(error) | Execute a command or raise an error.
:Parameters:
- `dbname`: name of the database on which to run the command
- `spec`: a command document as a dict, SON, or mapping object
- `slave_ok`: whether to set the SlaveOkay wire protocol bit
- `read_preference`: a read preference
- `codec_options`: a CodecOptions instance
- `check`: raise OperationFailure if there are errors
- `allowable_errors`: errors to ignore if `check` is True
- `check_keys`: if True, check `spec` for invalid keys
- `read_concern`: The read concern for this command.
- `write_concern`: The write concern for this command.
- `parse_write_concern_error`: Whether to parse the
``writeConcernError`` field in the command response.
- `collation`: The collation for this command.
- `session`: optional ClientSession instance.
- `client`: optional MongoClient for gossipping $clusterTime.
- `retryable_write`: True if this command is a retryable write.
- `publish_events`: Should we publish events for this command?
- `user_fields` (optional): Response fields that should be decoded
using the TypeDecoders from codec_options, passed to
bson._decode_all_selective. | Below is the the instruction that describes the task:
### Input:
Execute a command or raise an error.
:Parameters:
- `dbname`: name of the database on which to run the command
- `spec`: a command document as a dict, SON, or mapping object
- `slave_ok`: whether to set the SlaveOkay wire protocol bit
- `read_preference`: a read preference
- `codec_options`: a CodecOptions instance
- `check`: raise OperationFailure if there are errors
- `allowable_errors`: errors to ignore if `check` is True
- `check_keys`: if True, check `spec` for invalid keys
- `read_concern`: The read concern for this command.
- `write_concern`: The write concern for this command.
- `parse_write_concern_error`: Whether to parse the
``writeConcernError`` field in the command response.
- `collation`: The collation for this command.
- `session`: optional ClientSession instance.
- `client`: optional MongoClient for gossipping $clusterTime.
- `retryable_write`: True if this command is a retryable write.
- `publish_events`: Should we publish events for this command?
- `user_fields` (optional): Response fields that should be decoded
using the TypeDecoders from codec_options, passed to
bson._decode_all_selective.
### Response:
def command(self, dbname, spec, slave_ok=False,
read_preference=ReadPreference.PRIMARY,
codec_options=DEFAULT_CODEC_OPTIONS, check=True,
allowable_errors=None, check_keys=False,
read_concern=None,
write_concern=None,
parse_write_concern_error=False,
collation=None,
session=None,
client=None,
retryable_write=False,
publish_events=True,
user_fields=None):
"""Execute a command or raise an error.
:Parameters:
- `dbname`: name of the database on which to run the command
- `spec`: a command document as a dict, SON, or mapping object
- `slave_ok`: whether to set the SlaveOkay wire protocol bit
- `read_preference`: a read preference
- `codec_options`: a CodecOptions instance
- `check`: raise OperationFailure if there are errors
- `allowable_errors`: errors to ignore if `check` is True
- `check_keys`: if True, check `spec` for invalid keys
- `read_concern`: The read concern for this command.
- `write_concern`: The write concern for this command.
- `parse_write_concern_error`: Whether to parse the
``writeConcernError`` field in the command response.
- `collation`: The collation for this command.
- `session`: optional ClientSession instance.
- `client`: optional MongoClient for gossipping $clusterTime.
- `retryable_write`: True if this command is a retryable write.
- `publish_events`: Should we publish events for this command?
- `user_fields` (optional): Response fields that should be decoded
using the TypeDecoders from codec_options, passed to
bson._decode_all_selective.
"""
self.validate_session(client, session)
session = _validate_session_write_concern(session, write_concern)
# Ensure command name remains in first place.
if not isinstance(spec, ORDERED_TYPES):
spec = SON(spec)
if (read_concern and self.max_wire_version < 4
and not read_concern.ok_for_legacy):
raise ConfigurationError(
'read concern level of %s is not valid '
'with a max wire version of %d.'
% (read_concern.level, self.max_wire_version))
if not (write_concern is None or write_concern.acknowledged or
collation is None):
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
if (self.max_wire_version >= 5 and
write_concern and
not write_concern.is_server_default):
spec['writeConcern'] = write_concern.document
elif self.max_wire_version < 5 and collation is not None:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use a collation.')
if session:
session._apply_to(spec, retryable_write, read_preference)
self.send_cluster_time(spec, session, client)
listeners = self.listeners if publish_events else None
unacknowledged = write_concern and not write_concern.acknowledged
if self.op_msg_enabled:
self._raise_if_not_writable(unacknowledged)
try:
return command(self.sock, dbname, spec, slave_ok,
self.is_mongos, read_preference, codec_options,
session, client, check, allowable_errors,
self.address, check_keys, listeners,
self.max_bson_size, read_concern,
parse_write_concern_error=parse_write_concern_error,
collation=collation,
compression_ctx=self.compression_context,
use_op_msg=self.op_msg_enabled,
unacknowledged=unacknowledged,
user_fields=user_fields)
except OperationFailure:
raise
# Catch socket.error, KeyboardInterrupt, etc. and close ourselves.
except BaseException as error:
self._raise_connection_failure(error) |
def _URange(s):
"""Converts string to Unicode range.
'0001..0003' => [1, 2, 3].
'0001' => [1].
Args:
s: string to convert
Returns:
Unicode range
Raises:
InputError: the string is not a valid Unicode range.
"""
a = s.split("..")
if len(a) == 1:
return [_UInt(a[0])]
if len(a) == 2:
lo = _UInt(a[0])
hi = _UInt(a[1])
if lo < hi:
return range(lo, hi + 1)
raise InputError("invalid Unicode range %s" % (s,)) | Converts string to Unicode range.
'0001..0003' => [1, 2, 3].
'0001' => [1].
Args:
s: string to convert
Returns:
Unicode range
Raises:
InputError: the string is not a valid Unicode range. | Below is the the instruction that describes the task:
### Input:
Converts string to Unicode range.
'0001..0003' => [1, 2, 3].
'0001' => [1].
Args:
s: string to convert
Returns:
Unicode range
Raises:
InputError: the string is not a valid Unicode range.
### Response:
def _URange(s):
"""Converts string to Unicode range.
'0001..0003' => [1, 2, 3].
'0001' => [1].
Args:
s: string to convert
Returns:
Unicode range
Raises:
InputError: the string is not a valid Unicode range.
"""
a = s.split("..")
if len(a) == 1:
return [_UInt(a[0])]
if len(a) == 2:
lo = _UInt(a[0])
hi = _UInt(a[1])
if lo < hi:
return range(lo, hi + 1)
raise InputError("invalid Unicode range %s" % (s,)) |
def is_list_of_list(item):
"""
check whether the item is list (tuple)
and consist of list (tuple) elements
"""
if (
type(item) in (list, tuple)
and len(item)
and isinstance(item[0], (list, tuple))
):
return True
return False | check whether the item is list (tuple)
and consist of list (tuple) elements | Below is the the instruction that describes the task:
### Input:
check whether the item is list (tuple)
and consist of list (tuple) elements
### Response:
def is_list_of_list(item):
"""
check whether the item is list (tuple)
and consist of list (tuple) elements
"""
if (
type(item) in (list, tuple)
and len(item)
and isinstance(item[0], (list, tuple))
):
return True
return False |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.