code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def preview(request):
""" Render preview page.
:returns: A rendered preview
"""
if settings.MARKDOWN_PROTECT_PREVIEW:
user = getattr(request, 'user', None)
if not user or not user.is_staff:
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.get_full_path())
return render(
request, settings.MARKDOWN_PREVIEW_TEMPLATE, dict(
content=request.POST.get('data', 'No content posted'),
css=settings.MARKDOWN_STYLE
)) | Render preview page.
:returns: A rendered preview | Below is the instruction that describes the task:
### Input:
Render preview page.
:returns: A rendered preview
### Response:
def preview(request):
    """Render the markdown preview page.

    When ``MARKDOWN_PROTECT_PREVIEW`` is enabled, only staff users may
    see the preview; anyone else is redirected to the login page.

    :returns: A rendered preview
    """
    if settings.MARKDOWN_PROTECT_PREVIEW:
        user = getattr(request, 'user', None)
        if not user or not user.is_staff:
            # Imported lazily so the auth app is only touched when the
            # preview is actually protected.
            from django.contrib.auth.views import redirect_to_login
            return redirect_to_login(request.get_full_path())

    context = {
        'content': request.POST.get('data', 'No content posted'),
        'css': settings.MARKDOWN_STYLE,
    }
    return render(request, settings.MARKDOWN_PREVIEW_TEMPLATE, context)
def overlaps(self, other, permissive=False):
"""
Test if intervals have any overlapping value.
If 'permissive' is set to True (default is False), then intervals that are contiguous
are considered as overlapping intervals as well (e.g. [1, 2) and [2, 3],
but not [1, 2) and (2, 3] because 2 is not part of their union).
:param other: an interval or atomic interval.
:param permissive: set to True to consider contiguous intervals as well.
:return True if intervals overlap, False otherwise.
"""
if isinstance(other, AtomicInterval):
for interval in self._intervals:
if interval.overlaps(other, permissive=permissive):
return True
return False
elif isinstance(other, Interval):
for o_interval in other._intervals:
if self.overlaps(o_interval, permissive=permissive):
return True
return False
else:
raise TypeError('Unsupported type {} for {}'.format(type(other), other)) | Test if intervals have any overlapping value.
If 'permissive' is set to True (default is False), then intervals that are contiguous
are considered as overlapping intervals as well (e.g. [1, 2) and [2, 3],
but not [1, 2) and (2, 3] because 2 is not part of their union).
:param other: an interval or atomic interval.
:param permissive: set to True to consider contiguous intervals as well.
:return True if intervals overlap, False otherwise. | Below is the the instruction that describes the task:
### Input:
Test if intervals have any overlapping value.
If 'permissive' is set to True (default is False), then intervals that are contiguous
are considered as overlapping intervals as well (e.g. [1, 2) and [2, 3],
but not [1, 2) and (2, 3] because 2 is not part of their union).
:param other: an interval or atomic interval.
:param permissive: set to True to consider contiguous intervals as well.
:return True if intervals overlap, False otherwise.
### Response:
def overlaps(self, other, permissive=False):
    """Report whether this interval shares at least one value with *other*.

    With ``permissive`` set to True (default is False), contiguous
    intervals are treated as overlapping as well (e.g. [1, 2) and [2, 3],
    but not [1, 2) and (2, 3] because 2 is not part of their union).

    :param other: an interval or atomic interval.
    :param permissive: set to True to consider contiguous intervals as well.
    :return True if intervals overlap, False otherwise.
    """
    if isinstance(other, AtomicInterval):
        return any(
            part.overlaps(other, permissive=permissive)
            for part in self._intervals
        )
    if isinstance(other, Interval):
        return any(
            self.overlaps(part, permissive=permissive)
            for part in other._intervals
        )
    raise TypeError('Unsupported type {} for {}'.format(type(other), other))
def set_mode(self, value):
"""Set the currently active mode on the device (DAB, FM, Spotify)."""
mode = -1
modes = yield from self.get_modes()
for temp_mode in modes:
if temp_mode['label'] == value:
mode = temp_mode['band']
    return (yield from self.handle_set(self.API.get('mode'), mode)) | Set the currently active mode on the device (DAB, FM, Spotify). | Below is the instruction that describes the task:
### Input:
Set the currently active mode on the device (DAB, FM, Spotify).
### Response:
def set_mode(self, value):
    """Set the currently active mode on the device (DAB, FM, Spotify)."""
    available_modes = yield from self.get_modes()
    # Resolve the label to its band number; -1 is sent when no mode with
    # the requested label exists (last matching label wins, as before).
    band = -1
    for candidate in available_modes:
        if candidate['label'] == value:
            band = candidate['band']
    result = yield from self.handle_set(self.API.get('mode'), band)
    return result
def app0(self):
"""
First APP0 marker in image markers.
"""
for m in self._markers:
if m.marker_code == JPEG_MARKER_CODE.APP0:
return m
raise KeyError('no APP0 marker in image') | First APP0 marker in image markers. | Below is the the instruction that describes the task:
### Input:
First APP0 marker in image markers.
### Response:
def app0(self):
    """
    Return the first APP0 marker among this image's markers.

    Raises ``KeyError`` when the image contains no APP0 marker.
    """
    for marker in self._markers:
        if marker.marker_code == JPEG_MARKER_CODE.APP0:
            return marker
    raise KeyError('no APP0 marker in image')
def get_annotation(self, id_):
"""Data for a specific annotation."""
endpoint = "annotations/{id}".format(id=id_)
return self._make_request(endpoint) | Data for a specific annotation. | Below is the the instruction that describes the task:
### Input:
Data for a specific annotation.
### Response:
def get_annotation(self, id_):
    """Fetch the data for a single annotation, identified by *id_*."""
    return self._make_request("annotations/{id}".format(id=id_))
def tatoeba(language, word, minlength = 10, maxlength = 100):
''' Returns a list of suitable textsamples for a given word using Tatoeba.org. '''
word, sentences = unicode(word), []
page = requests.get('http://tatoeba.org/deu/sentences/search?query=%s&from=%s&to=und' % (word, lltk.locale.iso639_1to3(language)))
tree = html.fromstring(page.text)
for sentence in tree.xpath('//div[contains(concat(" ", normalize-space(@class), " "), " mainSentence ")]/div/a/text()'):
sentence = sentence.strip(u' "ββ').replace(u'β β', u' β ').replace('" "', u' β ')
if word in sentence and len(sentence) < maxlength and len(sentence) > minlength:
sentences.append(sentence)
    return sentences | Returns a list of suitable textsamples for a given word using Tatoeba.org. | Below is the instruction that describes the task:
### Input:
Returns a list of suitable textsamples for a given word using Tatoeba.org.
### Response:
def tatoeba(language, word, minlength = 10, maxlength = 100):
    ''' Returns a list of suitable textsamples for a given word using Tatoeba.org.

    :param language: ISO 639-1 code of the word's language (converted to
        ISO 639-3 for the Tatoeba query).
    :param word: word each returned sentence must contain (substring match).
    :param minlength: keep only sentences strictly longer than this.
    :param maxlength: keep only sentences strictly shorter than this.
    '''
    # NOTE(review): `unicode` makes this Python-2-only code.
    word, sentences = unicode(word), []
    page = requests.get('http://tatoeba.org/deu/sentences/search?query=%s&from=%s&to=und' % (word, lltk.locale.iso639_1to3(language)))
    tree = html.fromstring(page.text)
    # Each search hit is rendered inside a div carrying the
    # 'mainSentence' CSS class; the xpath matches it as a whole word.
    for sentence in tree.xpath('//div[contains(concat(" ", normalize-space(@class), " "), " mainSentence ")]/div/a/text()'):
        # Strip surrounding quote characters and normalise inner quoting
        # (literals look mojibake'd -- presumably curly quotes; confirm
        # against the file's declared encoding).
        sentence = sentence.strip(u' "ββ').replace(u'β β', u' β ').replace('" "', u' β ')
        if word in sentence and len(sentence) < maxlength and len(sentence) > minlength:
            sentences.append(sentence)
    return sentences |
def _wrap_jinja_filter(self, function):
"""Propagate exceptions as undefined values filter."""
def wrapper(*args, **kwargs):
"""Filter wrapper."""
try:
return function(*args, **kwargs)
except Exception: # pylint: disable=broad-except
return NestedUndefined()
# Copy over Jinja filter decoration attributes.
for attribute in dir(function):
if attribute.endswith('filter'):
setattr(wrapper, attribute, getattr(function, attribute))
return wrapper | Propagate exceptions as undefined values filter. | Below is the the instruction that describes the task:
### Input:
Propagate exceptions as undefined values filter.
### Response:
def _wrap_jinja_filter(self, function):
"""Propagate exceptions as undefined values filter."""
def wrapper(*args, **kwargs):
"""Filter wrapper."""
try:
return function(*args, **kwargs)
except Exception: # pylint: disable=broad-except
return NestedUndefined()
# Copy over Jinja filter decoration attributes.
for attribute in dir(function):
if attribute.endswith('filter'):
setattr(wrapper, attribute, getattr(function, attribute))
return wrapper |
def renew_step_dir(step_dir: str):
"""Delete step directory if exists and create, reporting actions."""
if os.path.exists(step_dir):
logging.info("Remove unfinished step %s", step_dir)
shutil.rmtree(step_dir)
logging.info("Create: %s", step_dir)
os.makedirs(step_dir) | Delete step directory if exists and create, reporting actions. | Below is the the instruction that describes the task:
### Input:
Delete step directory if exists and create, reporting actions.
### Response:
def renew_step_dir(step_dir: str):
    """Ensure *step_dir* exists and is empty, logging each action.

    A leftover directory from an interrupted run is removed before a
    fresh one is created.
    """
    if os.path.exists(step_dir):
        logging.info("Remove unfinished step %s", step_dir)
        shutil.rmtree(step_dir)
    logging.info("Create: %s", step_dir)
    os.makedirs(step_dir)
def root_directory(self):
"""Map the root directory to user profile/inasafe so the minimum needs
profile will be placed there (user profile/inasafe/minimum_needs).
:returns: root directory
:rtype: QString
"""
if not QgsApplication.qgisSettingsDirPath() or (
QgsApplication.qgisSettingsDirPath() == ''):
self._root_directory = None
else:
# noinspection PyArgumentList
self._root_directory = os.path.join(
QgsApplication.qgisSettingsDirPath(),
'inasafe')
return self._root_directory | Map the root directory to user profile/inasafe so the minimum needs
profile will be placed there (user profile/inasafe/minimum_needs).
:returns: root directory
:rtype: QString | Below is the the instruction that describes the task:
### Input:
Map the root directory to user profile/inasafe so the minimum needs
profile will be placed there (user profile/inasafe/minimum_needs).
:returns: root directory
:rtype: QString
### Response:
def root_directory(self):
    """Map the root directory to user profile/inasafe so the minimum needs
    profile will be placed there (user profile/inasafe/minimum_needs).

    :returns: root directory (``None`` when QGIS reports no settings path)
    :rtype: QString
    """
    # noinspection PyArgumentList
    settings_path = QgsApplication.qgisSettingsDirPath()
    # Hoisted the repeated qgisSettingsDirPath() calls into one lookup;
    # `not settings_path` already covers both None and the empty string,
    # so the original extra `== ''` comparison was redundant.
    if not settings_path:
        self._root_directory = None
    else:
        self._root_directory = os.path.join(settings_path, 'inasafe')
    return self._root_directory
def get_network_as_xml_template(network_id,**kwargs):
"""
Turn an existing network into an xml template
using its attributes.
If an optional scenario ID is passed in, default
values will be populated from that scenario.
"""
template_xml = etree.Element("template_definition")
net_i = db.DBSession.query(Network).filter(Network.id==network_id).one()
template_name = etree.SubElement(template_xml, "template_name")
template_name.text = "TemplateType from Network %s"%(net_i.name)
layout = _get_layout_as_etree(net_i.layout)
resources = etree.SubElement(template_xml, "resources")
if net_i.attributes:
net_resource = etree.SubElement(resources, "resource")
resource_type = etree.SubElement(net_resource, "type")
resource_type.text = "NETWORK"
resource_name = etree.SubElement(net_resource, "name")
resource_name.text = net_i.name
layout = _get_layout_as_etree(net_i.layout)
if layout is not None:
net_resource.append(layout)
for net_attr in net_i.attributes:
_make_attr_element_from_resourceattr(net_resource, net_attr)
resources.append(net_resource)
existing_types = {'NODE': [], 'LINK': [], 'GROUP': []}
for node_i in net_i.nodes:
node_attributes = node_i.attributes
attr_ids = [res_attr.attr_id for res_attr in node_attributes]
if len(attr_ids) > 0 and attr_ids not in existing_types['NODE']:
node_resource = etree.Element("resource")
resource_type = etree.SubElement(node_resource, "type")
resource_type.text = "NODE"
resource_name = etree.SubElement(node_resource, "name")
resource_name.text = node_i.node_name
layout = _get_layout_as_etree(node_i.layout)
if layout is not None:
node_resource.append(layout)
for node_attr in node_attributes:
_make_attr_element_from_resourceattr(node_resource, node_attr)
existing_types['NODE'].append(attr_ids)
resources.append(node_resource)
for link_i in net_i.links:
link_attributes = link_i.attributes
attr_ids = [link_attr.attr_id for link_attr in link_attributes]
if len(attr_ids) > 0 and attr_ids not in existing_types['LINK']:
link_resource = etree.Element("resource")
resource_type = etree.SubElement(link_resource, "type")
resource_type.text = "LINK"
resource_name = etree.SubElement(link_resource, "name")
resource_name.text = link_i.link_name
layout = _get_layout_as_etree(link_i.layout)
if layout is not None:
link_resource.append(layout)
for link_attr in link_attributes:
_make_attr_element_from_resourceattr(link_resource, link_attr)
existing_types['LINK'].append(attr_ids)
resources.append(link_resource)
for group_i in net_i.resourcegroups:
group_attributes = group_i.attributes
attr_ids = [group_attr.attr_id for group_attr in group_attributes]
if len(attr_ids) > 0 and attr_ids not in existing_types['GROUP']:
group_resource = etree.Element("resource")
resource_type = etree.SubElement(group_resource, "type")
resource_type.text = "GROUP"
resource_name = etree.SubElement(group_resource, "name")
resource_name.text = group_i.group_name
for group_attr in group_attributes:
_make_attr_element_from_resourceattr(group_resource, group_attr)
existing_types['GROUP'].append(attr_ids)
resources.append(group_resource)
xml_string = etree.tostring(template_xml, encoding="unicode")
return xml_string | Turn an existing network into an xml template
using its attributes.
If an optional scenario ID is passed in, default
values will be populated from that scenario. | Below is the the instruction that describes the task:
### Input:
Turn an existing network into an xml template
using its attributes.
If an optional scenario ID is passed in, default
values will be populated from that scenario.
### Response:
def get_network_as_xml_template(network_id,**kwargs):
    """
    Turn an existing network into an xml template
    using its attributes.
    If an optional scenario ID is passed in, default
    values will be populated from that scenario.

    :param network_id: id of the network to convert.
    :returns: the template definition serialised as an XML string.
    """
    template_xml = etree.Element("template_definition")
    # .one() raises if the network does not exist or is not unique.
    net_i = db.DBSession.query(Network).filter(Network.id==network_id).one()
    template_name = etree.SubElement(template_xml, "template_name")
    template_name.text = "TemplateType from Network %s"%(net_i.name)
    # NOTE(review): this value is never used -- it is recomputed inside
    # the `if net_i.attributes:` branch below.  Presumably dead code.
    layout = _get_layout_as_etree(net_i.layout)
    resources = etree.SubElement(template_xml, "resources")
    # The network itself becomes a NETWORK resource when it has attributes.
    if net_i.attributes:
        net_resource = etree.SubElement(resources, "resource")
        resource_type = etree.SubElement(net_resource, "type")
        resource_type.text = "NETWORK"
        resource_name = etree.SubElement(net_resource, "name")
        resource_name.text = net_i.name
        layout = _get_layout_as_etree(net_i.layout)
        if layout is not None:
            net_resource.append(layout)
        for net_attr in net_i.attributes:
            _make_attr_element_from_resourceattr(net_resource, net_attr)
        # NOTE(review): net_resource was created via SubElement and is
        # already a child of `resources`; in lxml appending an existing
        # child only moves it, so this looks redundant -- confirm.
        resources.append(net_resource)
    # Track attribute-id sets already emitted per resource kind so that
    # structurally identical nodes/links/groups produce only one
    # template resource each.
    existing_types = {'NODE': [], 'LINK': [], 'GROUP': []}
    for node_i in net_i.nodes:
        node_attributes = node_i.attributes
        attr_ids = [res_attr.attr_id for res_attr in node_attributes]
        if len(attr_ids) > 0 and attr_ids not in existing_types['NODE']:
            node_resource = etree.Element("resource")
            resource_type = etree.SubElement(node_resource, "type")
            resource_type.text = "NODE"
            resource_name = etree.SubElement(node_resource, "name")
            resource_name.text = node_i.node_name
            layout = _get_layout_as_etree(node_i.layout)
            if layout is not None:
                node_resource.append(layout)
            for node_attr in node_attributes:
                _make_attr_element_from_resourceattr(node_resource, node_attr)
            existing_types['NODE'].append(attr_ids)
            resources.append(node_resource)
    for link_i in net_i.links:
        link_attributes = link_i.attributes
        attr_ids = [link_attr.attr_id for link_attr in link_attributes]
        if len(attr_ids) > 0 and attr_ids not in existing_types['LINK']:
            link_resource = etree.Element("resource")
            resource_type = etree.SubElement(link_resource, "type")
            resource_type.text = "LINK"
            resource_name = etree.SubElement(link_resource, "name")
            resource_name.text = link_i.link_name
            layout = _get_layout_as_etree(link_i.layout)
            if layout is not None:
                link_resource.append(layout)
            for link_attr in link_attributes:
                _make_attr_element_from_resourceattr(link_resource, link_attr)
            existing_types['LINK'].append(attr_ids)
            resources.append(link_resource)
    # Groups carry no layout element, unlike nodes and links.
    for group_i in net_i.resourcegroups:
        group_attributes = group_i.attributes
        attr_ids = [group_attr.attr_id for group_attr in group_attributes]
        if len(attr_ids) > 0 and attr_ids not in existing_types['GROUP']:
            group_resource = etree.Element("resource")
            resource_type = etree.SubElement(group_resource, "type")
            resource_type.text = "GROUP"
            resource_name = etree.SubElement(group_resource, "name")
            resource_name.text = group_i.group_name
            for group_attr in group_attributes:
                _make_attr_element_from_resourceattr(group_resource, group_attr)
            existing_types['GROUP'].append(attr_ids)
            resources.append(group_resource)
    xml_string = etree.tostring(template_xml, encoding="unicode")
    return xml_string |
def board_name(default):
"""Returns the boards name (if available)."""
try:
import board
try:
name = board.name
except AttributeError:
# There was a board.py file, but it didn't have an name attribute
# We also ignore this as an error
name = default
except ImportError:
# No board.py file on the pyboard - not an error
name = default
except BaseException as err:
print('Error encountered executing board.py')
import sys
sys.print_exception(err)
name = default
    return repr(name) | Returns the boards name (if available). | Below is the instruction that describes the task:
### Input:
Returns the boards name (if available).
### Response:
def board_name(default):
    """Return a repr of the board's name, falling back to *default*.

    Looks for a ``board`` module with a ``name`` attribute; a missing
    module or attribute is not an error (MicroPython boards may simply
    not ship a board.py).
    """
    name = default
    try:
        import board
        try:
            name = board.name
        except AttributeError:
            # board.py exists but defines no ``name`` attribute; keep
            # the fallback -- this is not an error either.
            pass
    except ImportError:
        # No board.py on the pyboard - not an error.
        pass
    except BaseException as err:
        # board.py itself crashed while importing: report it.
        # NOTE: sys.print_exception is MicroPython-specific.
        print('Error encountered executing board.py')
        import sys
        sys.print_exception(err)
    return repr(name)
def serve_forever_stoppable(self):
"""Handle one request at a time until stop_serve_forever().
http://code.activestate.com/recipes/336012/
"""
self.stop_request = False
self.stopped = False
while not self.stop_request:
self.handle_request()
# _logger.info "serve_forever_stoppable() stopped."
self.stopped = True | Handle one request at a time until stop_serve_forever().
http://code.activestate.com/recipes/336012/ | Below is the the instruction that describes the task:
### Input:
Handle one request at a time until stop_serve_forever().
http://code.activestate.com/recipes/336012/
### Response:
def serve_forever_stoppable(self):
    """Serve one request at a time until stop_serve_forever() is called.

    Based on http://code.activestate.com/recipes/336012/
    """
    self.stop_request = False
    self.stopped = False
    # Poll the stop flag between requests; handle_request() blocks, so a
    # stop takes effect after the in-flight request completes.
    while not self.stop_request:
        self.handle_request()
    self.stopped = True
def process(self, context, lines):
"""Chop up individual lines into static and dynamic parts.
Applies light optimizations, such as empty chunk removal, and calls out to other methods to process different
chunk types.
The processor protocol here requires the method to accept values by yielding resulting lines while accepting
sent chunks. Deferral of multiple chunks is possible by yielding None. The processor will be sent None to
be given a chance to yield a final line and perform any clean-up.
"""
handler = None
for line in lines:
for chunk in chunk_(line):
if 'strip' in context.flag:
chunk.line = chunk.stripped
if not chunk.line: continue # Eliminate empty chunks, i.e. trailing text segments, ${}, etc.
if not handler or handler[0] != chunk.kind:
if handler:
try:
result = next(handler[1])
except StopIteration:
result = None
if result: yield result
handler = getattr(self, 'process_' + chunk.kind, self.process_generic)(chunk.kind, context)
handler = (chunk.kind, handler)
try:
next(handler[1]) # We fast-forward to the first yield.
except StopIteration:
return
result = handler[1].send(chunk) # Send the handler the next contiguous chunk.
if result: yield result
if __debug__: # In development mode we skip the contiguous chunk compaction optimization.
handler = (None, handler[1])
# Clean up the final iteration.
if handler:
try:
result = next(handler[1])
except StopIteration:
return
if result: yield result | Chop up individual lines into static and dynamic parts.
Applies light optimizations, such as empty chunk removal, and calls out to other methods to process different
chunk types.
The processor protocol here requires the method to accept values by yielding resulting lines while accepting
sent chunks. Deferral of multiple chunks is possible by yielding None. The processor will be sent None to
be given a chance to yield a final line and perform any clean-up. | Below is the the instruction that describes the task:
### Input:
Chop up individual lines into static and dynamic parts.
Applies light optimizations, such as empty chunk removal, and calls out to other methods to process different
chunk types.
The processor protocol here requires the method to accept values by yielding resulting lines while accepting
sent chunks. Deferral of multiple chunks is possible by yielding None. The processor will be sent None to
be given a chance to yield a final line and perform any clean-up.
### Response:
def process(self, context, lines):
    """Chop up individual lines into static and dynamic parts.
    Applies light optimizations, such as empty chunk removal, and calls out to other methods to process different
    chunk types.
    The processor protocol here requires the method to accept values by yielding resulting lines while accepting
    sent chunks. Deferral of multiple chunks is possible by yielding None. The processor will be sent None to
    be given a chance to yield a final line and perform any clean-up.
    """
    # `handler` is a (kind, generator) pair for the currently active
    # chunk processor, or None before the first chunk is seen.
    handler = None
    for line in lines:
        for chunk in chunk_(line):
            if 'strip' in context.flag:
                chunk.line = chunk.stripped
            if not chunk.line: continue # Eliminate empty chunks, i.e. trailing text segments, ${}, etc.
            # A change of chunk kind flushes the previous handler (one
            # final next() so it can emit a closing line) and starts a
            # fresh handler chosen by the chunk's kind.
            if not handler or handler[0] != chunk.kind:
                if handler:
                    try:
                        result = next(handler[1])
                    except StopIteration:
                        result = None
                    if result: yield result
                handler = getattr(self, 'process_' + chunk.kind, self.process_generic)(chunk.kind, context)
                handler = (chunk.kind, handler)
                try:
                    next(handler[1]) # We fast-forward to the first yield.
                except StopIteration:
                    return
            result = handler[1].send(chunk) # Send the handler the next contiguous chunk.
            if result: yield result
            if __debug__: # In development mode we skip the contiguous chunk compaction optimization.
                # Setting kind to None forces a new handler per chunk.
                handler = (None, handler[1])
    # Clean up the final iteration.
    if handler:
        try:
            result = next(handler[1])
        except StopIteration:
            return
        if result: yield result |
def __get_vibration_code(self, left_motor, right_motor, duration):
"""This is some crazy voodoo, if you can simplify it, please do."""
inner_event = struct.pack(
'2h6x2h2x2H28x',
0x50,
-1,
duration,
0,
int(left_motor * 65535),
int(right_motor * 65535))
buf_conts = ioctl(self._write_device, 1076905344, inner_event)
return int(codecs.encode(buf_conts[1:3], 'hex'), 16) | This is some crazy voodoo, if you can simplify it, please do. | Below is the the instruction that describes the task:
### Input:
This is some crazy voodoo, if you can simplify it, please do.
### Response:
def __get_vibration_code(self, left_motor, right_motor, duration):
    """This is some crazy voodoo, if you can simplify it, please do.

    Uploads a rumble force-feedback effect to the device and returns the
    id the kernel assigned to it.

    :param left_motor: motor strength; scaled by 65535, so presumably
        expected in [0.0, 1.0] -- confirm.
    :param right_motor: motor strength; same scaling as left_motor.
    :param duration: effect length -- units not visible here; TODO confirm.
    """
    # Pack what appears to be a ``struct ff_effect`` for a rumble effect:
    #   0x50      -- presumably FF_RUMBLE (effect type); confirm vs linux/input.h
    #   -1        -- effect id; -1 asks the kernel to allocate a slot
    #   duration, 0 -- presumably replay length and delay
    #   left/right scaled into the two u16 magnitude fields
    # NOTE(review): layout inferred from the '2h6x2h2x2H28x' format
    # string only; verify against the kernel headers.
    inner_event = struct.pack(
        '2h6x2h2x2H28x',
        0x50,
        -1,
        duration,
        0,
        int(left_motor * 65535),
        int(right_motor * 65535))
    # 1076905344 == 0x40304580 -- presumably the EVIOCSFF ioctl number
    # for a 48-byte ff_effect; confirm.
    buf_conts = ioctl(self._write_device, 1076905344, inner_event)
    # Bytes 1:3 of the returned buffer appear to hold the kernel-assigned
    # effect id (decoded via hex) -- TODO confirm byte order.
    return int(codecs.encode(buf_conts[1:3], 'hex'), 16) |
def zero_fill(self, corpus):
"""Adds rows to the results to ensure that, for every n-gram that is
attested in at least one witness, every witness for that text
has a row, with added rows having a count of zero.
:param corpus: corpus containing the texts appearing in the results
:type corpus: `Corpus`
"""
self._logger.info('Zero-filling results')
zero_rows = []
work_sigla = {}
grouping_cols = [constants.LABEL_FIELDNAME, constants.NGRAM_FIELDNAME,
constants.SIZE_FIELDNAME, constants.WORK_FIELDNAME]
grouped = self._matches.groupby(grouping_cols, sort=False)
for (label, ngram, size, work), group in grouped:
row_data = {
constants.NGRAM_FIELDNAME: ngram,
constants.LABEL_FIELDNAME: label,
constants.SIZE_FIELDNAME: size,
constants.COUNT_FIELDNAME: 0,
constants.WORK_FIELDNAME: work,
}
if work not in work_sigla:
work_sigla[work] = corpus.get_sigla(work)
for siglum in work_sigla[work]:
if group[group[constants.SIGLUM_FIELDNAME] == siglum].empty:
row_data[constants.SIGLUM_FIELDNAME] = siglum
zero_rows.append(row_data)
zero_df = pd.DataFrame(zero_rows, columns=constants.QUERY_FIELDNAMES)
self._matches = pd.concat([self._matches, zero_df], ignore_index=True,
sort=False) | Adds rows to the results to ensure that, for every n-gram that is
attested in at least one witness, every witness for that text
has a row, with added rows having a count of zero.
:param corpus: corpus containing the texts appearing in the results
:type corpus: `Corpus` | Below is the the instruction that describes the task:
### Input:
Adds rows to the results to ensure that, for every n-gram that is
attested in at least one witness, every witness for that text
has a row, with added rows having a count of zero.
:param corpus: corpus containing the texts appearing in the results
:type corpus: `Corpus`
### Response:
def zero_fill(self, corpus):
    """Adds rows to the results to ensure that, for every n-gram that is
    attested in at least one witness, every witness for that text
    has a row, with added rows having a count of zero.

    :param corpus: corpus containing the texts appearing in the results
    :type corpus: `Corpus`
    """
    self._logger.info('Zero-filling results')
    zero_rows = []
    # Cache sigla per work; corpus.get_sigla() may be expensive.
    work_sigla = {}
    grouping_cols = [constants.LABEL_FIELDNAME, constants.NGRAM_FIELDNAME,
                     constants.SIZE_FIELDNAME, constants.WORK_FIELDNAME]
    grouped = self._matches.groupby(grouping_cols, sort=False)
    for (label, ngram, size, work), group in grouped:
        if work not in work_sigla:
            work_sigla[work] = corpus.get_sigla(work)
        for siglum in work_sigla[work]:
            if group[group[constants.SIGLUM_FIELDNAME] == siglum].empty:
                # BUG FIX: build a fresh dict for each zero row.  The
                # original reused one mutable dict across sigla, so every
                # row appended for a group aliased the same object and
                # all ended up carrying the *last* missing siglum.
                zero_rows.append({
                    constants.NGRAM_FIELDNAME: ngram,
                    constants.LABEL_FIELDNAME: label,
                    constants.SIZE_FIELDNAME: size,
                    constants.COUNT_FIELDNAME: 0,
                    constants.WORK_FIELDNAME: work,
                    constants.SIGLUM_FIELDNAME: siglum,
                })
    zero_df = pd.DataFrame(zero_rows, columns=constants.QUERY_FIELDNAMES)
    self._matches = pd.concat([self._matches, zero_df], ignore_index=True,
                              sort=False)
def get_output_fields(self):
""" Get field names from output template.
"""
# Re-engineer list from output format
# XXX TODO: Would be better to use a FieldRecorder class to catch the full field names
emit_fields = list(i.lower() for i in re.sub(r"[^_A-Z]+", ' ', self.format_item(None)).split())
# Validate result
result = []
for name in emit_fields[:]:
if name not in engine.FieldDefinition.FIELDS:
self.LOG.warn("Omitted unknown name '%s' from statistics and output format sorting" % name)
else:
result.append(name)
    return result | Get field names from output template. | Below is the instruction that describes the task:
### Input:
Get field names from output template.
### Response:
def get_output_fields(self):
    """ Return the validated field names used by the output template.
    """
    # Recover candidate field names by keeping only upper-case
    # identifier characters from the rendered format string.
    # XXX TODO: a FieldRecorder class would capture full names reliably.
    rendered = self.format_item(None)
    candidates = [token.lower() for token in re.sub(r"[^_A-Z]+", ' ', rendered).split()]
    result = []
    for name in candidates:
        if name in engine.FieldDefinition.FIELDS:
            result.append(name)
        else:
            self.LOG.warn("Omitted unknown name '%s' from statistics and output format sorting" % name)
    return result
def validate_refresh_token(self, refresh_token, client, request,
*args, **kwargs):
"""Ensure the token is valid and belongs to the client
This method is used by the authorization code grant indirectly by
issuing refresh tokens, resource owner password credentials grant
(also indirectly) and the refresh token grant.
"""
token = self._tokengetter(refresh_token=refresh_token)
if token and token.client_id == client.client_id:
# Make sure the request object contains user and client_id
request.client_id = token.client_id
request.user = token.user
return True
return False | Ensure the token is valid and belongs to the client
This method is used by the authorization code grant indirectly by
issuing refresh tokens, resource owner password credentials grant
(also indirectly) and the refresh token grant. | Below is the the instruction that describes the task:
### Input:
Ensure the token is valid and belongs to the client
This method is used by the authorization code grant indirectly by
issuing refresh tokens, resource owner password credentials grant
(also indirectly) and the refresh token grant.
### Response:
def validate_refresh_token(self, refresh_token, client, request,
                           *args, **kwargs):
    """Check that *refresh_token* is known and was issued to *client*.

    Used indirectly by the authorization code grant and the resource
    owner password credentials grant when issuing refresh tokens, and
    directly by the refresh token grant.
    """
    token = self._tokengetter(refresh_token=refresh_token)
    if not token or token.client_id != client.client_id:
        return False
    # Downstream validators expect the requesting user and client id on
    # the request object.
    request.client_id = token.client_id
    request.user = token.user
    return True
def unsubscribe(self, connection, destination):
"""
Unsubscribes a connection from the specified topic destination.
@param connection: The client connection to unsubscribe.
@type connection: L{coilmq.server.StompConnection}
@param destination: The topic destination (e.g. '/topic/foo')
@type destination: C{str}
"""
self.log.debug("Unsubscribing %s from %s" % (connection, destination))
if connection in self._topics[destination]:
self._topics[destination].remove(connection)
if not self._topics[destination]:
del self._topics[destination] | Unsubscribes a connection from the specified topic destination.
@param connection: The client connection to unsubscribe.
@type connection: L{coilmq.server.StompConnection}
@param destination: The topic destination (e.g. '/topic/foo')
@type destination: C{str} | Below is the the instruction that describes the task:
### Input:
Unsubscribes a connection from the specified topic destination.
@param connection: The client connection to unsubscribe.
@type connection: L{coilmq.server.StompConnection}
@param destination: The topic destination (e.g. '/topic/foo')
@type destination: C{str}
### Response:
def unsubscribe(self, connection, destination):
    """
    Remove *connection*'s subscription to the given topic destination.

    @param connection: The client connection to unsubscribe.
    @type connection: L{coilmq.server.StompConnection}
    @param destination: The topic destination (e.g. '/topic/foo')
    @type destination: C{str}
    """
    self.log.debug("Unsubscribing %s from %s" % (connection, destination))
    subscribers = self._topics[destination]
    if connection in subscribers:
        subscribers.remove(connection)
    # Drop empty topics so the mapping does not accumulate dead keys.
    if not self._topics[destination]:
        del self._topics[destination]
def readImages(path, sc=None, minParitions = 1, bigdl_type="float"):
"""
Read the directory of images into DataFrame from the local or remote source.
:param path Directory to the input data files, the path can be comma separated paths as the
list of inputs. Wildcards path are supported similarly to sc.binaryFiles(path).
:param min_partitions A suggestion value of the minimal splitting number for input data.
:return DataFrame with a single column "image"; Each record in the column represents one image
record: Row (uri, height, width, channels, CvType, bytes)
"""
df = callBigDlFunc(bigdl_type, "dlReadImage", path, sc, minParitions)
df._sc._jsc = sc._jsc
return df | Read the directory of images into DataFrame from the local or remote source.
:param path Directory to the input data files, the path can be comma separated paths as the
list of inputs. Wildcards path are supported similarly to sc.binaryFiles(path).
:param min_partitions A suggestion value of the minimal splitting number for input data.
:return DataFrame with a single column "image"; Each record in the column represents one image
record: Row (uri, height, width, channels, CvType, bytes) | Below is the the instruction that describes the task:
### Input:
Read the directory of images into DataFrame from the local or remote source.
:param path Directory to the input data files, the path can be comma separated paths as the
list of inputs. Wildcards path are supported similarly to sc.binaryFiles(path).
:param min_partitions A suggestion value of the minimal splitting number for input data.
:return DataFrame with a single column "image"; Each record in the column represents one image
record: Row (uri, height, width, channels, CvType, bytes)
### Response:
def readImages(path, sc=None, minParitions = 1, bigdl_type="float"):
"""
Read the directory of images into DataFrame from the local or remote source.
:param path Directory to the input data files, the path can be comma separated paths as the
list of inputs. Wildcards path are supported similarly to sc.binaryFiles(path).
:param min_partitions A suggestion value of the minimal splitting number for input data.
:return DataFrame with a single column "image"; Each record in the column represents one image
record: Row (uri, height, width, channels, CvType, bytes)
"""
df = callBigDlFunc(bigdl_type, "dlReadImage", path, sc, minParitions)
df._sc._jsc = sc._jsc
return df |
def synchronize(self):
"""Perform sync of the security groups between ML2 and EOS."""
# Get expected ACLs and rules
expected_acls = self.get_expected_acls()
# Get expected interface to ACL mappings
all_expected_bindings = self.get_expected_bindings()
# Check that config is correct on every registered switch
for switch_ip in self._switches.keys():
expected_bindings = all_expected_bindings.get(switch_ip, [])
try:
self.synchronize_switch(switch_ip, expected_acls,
expected_bindings)
except Exception:
LOG.exception("Failed to sync SGs for %(switch)s",
{'switch': switch_ip}) | Perform sync of the security groups between ML2 and EOS. | Below is the the instruction that describes the task:
### Input:
Perform sync of the security groups between ML2 and EOS.
### Response:
def synchronize(self):
"""Perform sync of the security groups between ML2 and EOS."""
# Get expected ACLs and rules
expected_acls = self.get_expected_acls()
# Get expected interface to ACL mappings
all_expected_bindings = self.get_expected_bindings()
# Check that config is correct on every registered switch
for switch_ip in self._switches.keys():
expected_bindings = all_expected_bindings.get(switch_ip, [])
try:
self.synchronize_switch(switch_ip, expected_acls,
expected_bindings)
except Exception:
LOG.exception("Failed to sync SGs for %(switch)s",
{'switch': switch_ip}) |
def n_forking_points(neurites, neurite_type=NeuriteType.all):
'''number of forking points in a collection of neurites'''
return n_sections(neurites, neurite_type=neurite_type, iterator_type=Tree.iforking_point) | number of forking points in a collection of neurites | Below is the the instruction that describes the task:
### Input:
number of forking points in a collection of neurites
### Response:
def n_forking_points(neurites, neurite_type=NeuriteType.all):
'''number of forking points in a collection of neurites'''
return n_sections(neurites, neurite_type=neurite_type, iterator_type=Tree.iforking_point) |
def _send_tune_ok(self, frame_in):
"""Send Tune OK frame.
:param specification.Connection.Tune frame_in: Tune frame.
:return:
"""
self.max_allowed_channels = self._negotiate(frame_in.channel_max,
MAX_CHANNELS)
self.max_frame_size = self._negotiate(frame_in.frame_max,
MAX_FRAME_SIZE)
LOGGER.debug(
'Negotiated max frame size %d, max channels %d',
self.max_frame_size, self.max_allowed_channels
)
tune_ok_frame = specification.Connection.TuneOk(
channel_max=self.max_allowed_channels,
frame_max=self.max_frame_size,
heartbeat=self._heartbeat)
self._write_frame(tune_ok_frame) | Send Tune OK frame.
:param specification.Connection.Tune frame_in: Tune frame.
:return: | Below is the the instruction that describes the task:
### Input:
Send Tune OK frame.
:param specification.Connection.Tune frame_in: Tune frame.
:return:
### Response:
def _send_tune_ok(self, frame_in):
"""Send Tune OK frame.
:param specification.Connection.Tune frame_in: Tune frame.
:return:
"""
self.max_allowed_channels = self._negotiate(frame_in.channel_max,
MAX_CHANNELS)
self.max_frame_size = self._negotiate(frame_in.frame_max,
MAX_FRAME_SIZE)
LOGGER.debug(
'Negotiated max frame size %d, max channels %d',
self.max_frame_size, self.max_allowed_channels
)
tune_ok_frame = specification.Connection.TuneOk(
channel_max=self.max_allowed_channels,
frame_max=self.max_frame_size,
heartbeat=self._heartbeat)
self._write_frame(tune_ok_frame) |
def multivariate_multiply(m1, c1, m2, c2):
"""
Multiplies the two multivariate Gaussians together and returns the
results as the tuple (mean, covariance).
Examples
--------
.. code-block:: Python
m, c = multivariate_multiply([7.0, 2], [[1.0, 2.0], [2.0, 1.0]],
[3.2, 0], [[8.0, 1.1], [1.1,8.0]])
Parameters
----------
m1 : array-like
Mean of first Gaussian. Must be convertable to an 1D array via
numpy.asarray(), For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
are all valid.
c1 : matrix-like
Covariance of first Gaussian. Must be convertable to an 2D array via
numpy.asarray().
m2 : array-like
Mean of second Gaussian. Must be convertable to an 1D array via
numpy.asarray(), For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
are all valid.
c2 : matrix-like
Covariance of second Gaussian. Must be convertable to an 2D array via
numpy.asarray().
Returns
-------
m : ndarray
mean of the result
c : ndarray
covariance of the result
"""
C1 = np.asarray(c1)
C2 = np.asarray(c2)
M1 = np.asarray(m1)
M2 = np.asarray(m2)
sum_inv = np.linalg.inv(C1+C2)
C3 = np.dot(C1, sum_inv).dot(C2)
M3 = (np.dot(C2, sum_inv).dot(M1) +
np.dot(C1, sum_inv).dot(M2))
return M3, C3 | Multiplies the two multivariate Gaussians together and returns the
results as the tuple (mean, covariance).
Examples
--------
.. code-block:: Python
m, c = multivariate_multiply([7.0, 2], [[1.0, 2.0], [2.0, 1.0]],
[3.2, 0], [[8.0, 1.1], [1.1,8.0]])
Parameters
----------
m1 : array-like
Mean of first Gaussian. Must be convertable to an 1D array via
numpy.asarray(), For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
are all valid.
c1 : matrix-like
Covariance of first Gaussian. Must be convertable to an 2D array via
numpy.asarray().
m2 : array-like
Mean of second Gaussian. Must be convertable to an 1D array via
numpy.asarray(), For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
are all valid.
c2 : matrix-like
Covariance of second Gaussian. Must be convertable to an 2D array via
numpy.asarray().
Returns
-------
m : ndarray
mean of the result
c : ndarray
covariance of the result | Below is the the instruction that describes the task:
### Input:
Multiplies the two multivariate Gaussians together and returns the
results as the tuple (mean, covariance).
Examples
--------
.. code-block:: Python
m, c = multivariate_multiply([7.0, 2], [[1.0, 2.0], [2.0, 1.0]],
[3.2, 0], [[8.0, 1.1], [1.1,8.0]])
Parameters
----------
m1 : array-like
Mean of first Gaussian. Must be convertable to an 1D array via
numpy.asarray(), For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
are all valid.
c1 : matrix-like
Covariance of first Gaussian. Must be convertable to an 2D array via
numpy.asarray().
m2 : array-like
Mean of second Gaussian. Must be convertable to an 1D array via
numpy.asarray(), For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
are all valid.
c2 : matrix-like
Covariance of second Gaussian. Must be convertable to an 2D array via
numpy.asarray().
Returns
-------
m : ndarray
mean of the result
c : ndarray
covariance of the result
### Response:
def multivariate_multiply(m1, c1, m2, c2):
"""
Multiplies the two multivariate Gaussians together and returns the
results as the tuple (mean, covariance).
Examples
--------
.. code-block:: Python
m, c = multivariate_multiply([7.0, 2], [[1.0, 2.0], [2.0, 1.0]],
[3.2, 0], [[8.0, 1.1], [1.1,8.0]])
Parameters
----------
m1 : array-like
Mean of first Gaussian. Must be convertable to an 1D array via
numpy.asarray(), For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
are all valid.
c1 : matrix-like
Covariance of first Gaussian. Must be convertable to an 2D array via
numpy.asarray().
m2 : array-like
Mean of second Gaussian. Must be convertable to an 1D array via
numpy.asarray(), For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
are all valid.
c2 : matrix-like
Covariance of second Gaussian. Must be convertable to an 2D array via
numpy.asarray().
Returns
-------
m : ndarray
mean of the result
c : ndarray
covariance of the result
"""
C1 = np.asarray(c1)
C2 = np.asarray(c2)
M1 = np.asarray(m1)
M2 = np.asarray(m2)
sum_inv = np.linalg.inv(C1+C2)
C3 = np.dot(C1, sum_inv).dot(C2)
M3 = (np.dot(C2, sum_inv).dot(M1) +
np.dot(C1, sum_inv).dot(M2))
return M3, C3 |
def keyword(self, name=None, identifier=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.Keyword` objects in database
:param name: keyword name(s)
:type name: str or tuple(str) or None
:param identifier: keyword identifier(s)
:type identifier: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type identifier: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Keyword`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Keyword`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Keyword)
model_queries_config = (
(name, models.Keyword.name),
(identifier, models.Keyword.identifier)
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_many_to_many_queries(q, ((entry_name, models.Keyword.entries, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | Method to query :class:`.models.Keyword` objects in database
:param name: keyword name(s)
:type name: str or tuple(str) or None
:param identifier: keyword identifier(s)
:type identifier: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type identifier: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Keyword`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Keyword`) or :class:`pandas.DataFrame` | Below is the the instruction that describes the task:
### Input:
Method to query :class:`.models.Keyword` objects in database
:param name: keyword name(s)
:type name: str or tuple(str) or None
:param identifier: keyword identifier(s)
:type identifier: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type identifier: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Keyword`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Keyword`) or :class:`pandas.DataFrame`
### Response:
def keyword(self, name=None, identifier=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.Keyword` objects in database
:param name: keyword name(s)
:type name: str or tuple(str) or None
:param identifier: keyword identifier(s)
:type identifier: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type identifier: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Keyword`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Keyword`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Keyword)
model_queries_config = (
(name, models.Keyword.name),
(identifier, models.Keyword.identifier)
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_many_to_many_queries(q, ((entry_name, models.Keyword.entries, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) |
def run(self, order=None):
"""
self.runner must be present
"""
for event in self.runner.run(order=order):
self.receive(event) | self.runner must be present | Below is the the instruction that describes the task:
### Input:
self.runner must be present
### Response:
def run(self, order=None):
"""
self.runner must be present
"""
for event in self.runner.run(order=order):
self.receive(event) |
def search(request, template="search_results.html", extra_context=None):
"""
Display search results. Takes an optional "contenttype" GET parameter
in the form "app-name.ModelName" to limit search results to a single model.
"""
query = request.GET.get("q", "")
page = request.GET.get("page", 1)
per_page = settings.SEARCH_PER_PAGE
max_paging_links = settings.MAX_PAGING_LINKS
try:
parts = request.GET.get("type", "").split(".", 1)
search_model = apps.get_model(*parts)
search_model.objects.search # Attribute check
except (ValueError, TypeError, LookupError, AttributeError):
search_model = Displayable
search_type = _("Everything")
else:
search_type = search_model._meta.verbose_name_plural.capitalize()
results = search_model.objects.search(query, for_user=request.user)
paginated = paginate(results, page, per_page, max_paging_links)
context = {"query": query, "results": paginated,
"search_type": search_type}
context.update(extra_context or {})
return TemplateResponse(request, template, context) | Display search results. Takes an optional "contenttype" GET parameter
in the form "app-name.ModelName" to limit search results to a single model. | Below is the the instruction that describes the task:
### Input:
Display search results. Takes an optional "contenttype" GET parameter
in the form "app-name.ModelName" to limit search results to a single model.
### Response:
def search(request, template="search_results.html", extra_context=None):
"""
Display search results. Takes an optional "contenttype" GET parameter
in the form "app-name.ModelName" to limit search results to a single model.
"""
query = request.GET.get("q", "")
page = request.GET.get("page", 1)
per_page = settings.SEARCH_PER_PAGE
max_paging_links = settings.MAX_PAGING_LINKS
try:
parts = request.GET.get("type", "").split(".", 1)
search_model = apps.get_model(*parts)
search_model.objects.search # Attribute check
except (ValueError, TypeError, LookupError, AttributeError):
search_model = Displayable
search_type = _("Everything")
else:
search_type = search_model._meta.verbose_name_plural.capitalize()
results = search_model.objects.search(query, for_user=request.user)
paginated = paginate(results, page, per_page, max_paging_links)
context = {"query": query, "results": paginated,
"search_type": search_type}
context.update(extra_context or {})
return TemplateResponse(request, template, context) |
def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_12l_16h_imagenet_large()
hparams.num_hidden_layers = 16
hparams.local_attention = True
hparams.batch_size = 1
hparams.block_length = 256
return hparams | separate rgb embeddings. | Below is the the instruction that describes the task:
### Input:
separate rgb embeddings.
### Response:
def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_12l_16h_imagenet_large()
hparams.num_hidden_layers = 16
hparams.local_attention = True
hparams.batch_size = 1
hparams.block_length = 256
return hparams |
def hello(self):
"""http://docs.fiesta.cc/index.html#getting-started"""
path = 'hello'
response = self.request(path, do_authentication=False)
return response | http://docs.fiesta.cc/index.html#getting-started | Below is the the instruction that describes the task:
### Input:
http://docs.fiesta.cc/index.html#getting-started
### Response:
def hello(self):
"""http://docs.fiesta.cc/index.html#getting-started"""
path = 'hello'
response = self.request(path, do_authentication=False)
return response |
def list_revisions_groups(self, url, group_id):
"""
List revisions.
List the revisions of a page. Callers must have update rights on the page in order to see page history.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - group_id
"""ID"""
path["group_id"] = group_id
# REQUIRED - PATH - url
"""ID"""
path["url"] = url
self.logger.debug("GET /api/v1/groups/{group_id}/pages/{url}/revisions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/groups/{group_id}/pages/{url}/revisions".format(**path), data=data, params=params, all_pages=True) | List revisions.
List the revisions of a page. Callers must have update rights on the page in order to see page history. | Below is the the instruction that describes the task:
### Input:
List revisions.
List the revisions of a page. Callers must have update rights on the page in order to see page history.
### Response:
def list_revisions_groups(self, url, group_id):
"""
List revisions.
List the revisions of a page. Callers must have update rights on the page in order to see page history.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - group_id
"""ID"""
path["group_id"] = group_id
# REQUIRED - PATH - url
"""ID"""
path["url"] = url
self.logger.debug("GET /api/v1/groups/{group_id}/pages/{url}/revisions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/groups/{group_id}/pages/{url}/revisions".format(**path), data=data, params=params, all_pages=True) |
def find_subdirectories(basepath):
'''
Return directories (and sub) starting from a base
'''
directories = []
for root, dirnames, filenames in os.walk(basepath):
new_directories = [d for d in dirnames if d not in directories]
directories = directories + new_directories
return directories | Return directories (and sub) starting from a base | Below is the the instruction that describes the task:
### Input:
Return directories (and sub) starting from a base
### Response:
def find_subdirectories(basepath):
'''
Return directories (and sub) starting from a base
'''
directories = []
for root, dirnames, filenames in os.walk(basepath):
new_directories = [d for d in dirnames if d not in directories]
directories = directories + new_directories
return directories |
def get(self):
"""
Get selected color.
:return: color under cursor as a (RGB, HSV, HEX) tuple
"""
x = self.coords('cross_v')[0]
y = self.coords('cross_h')[1]
xp = min(x, self.bg.width() - 1)
yp = min(y, self.bg.height() - 1)
try:
r, g, b = self.bg.get(round2(xp), round2(yp))
except ValueError:
r, g, b = self.bg.get(round2(xp), round2(yp)).split()
r, g, b = int(r), int(g), int(b)
hexa = rgb_to_hexa(r, g, b)
h = self.get_hue()
s = round2((1 - float(y) / self.winfo_height()) * 100)
v = round2(100 * float(x) / self.winfo_width())
return (r, g, b), (h, s, v), hexa | Get selected color.
:return: color under cursor as a (RGB, HSV, HEX) tuple | Below is the the instruction that describes the task:
### Input:
Get selected color.
:return: color under cursor as a (RGB, HSV, HEX) tuple
### Response:
def get(self):
"""
Get selected color.
:return: color under cursor as a (RGB, HSV, HEX) tuple
"""
x = self.coords('cross_v')[0]
y = self.coords('cross_h')[1]
xp = min(x, self.bg.width() - 1)
yp = min(y, self.bg.height() - 1)
try:
r, g, b = self.bg.get(round2(xp), round2(yp))
except ValueError:
r, g, b = self.bg.get(round2(xp), round2(yp)).split()
r, g, b = int(r), int(g), int(b)
hexa = rgb_to_hexa(r, g, b)
h = self.get_hue()
s = round2((1 - float(y) / self.winfo_height()) * 100)
v = round2(100 * float(x) / self.winfo_width())
return (r, g, b), (h, s, v), hexa |
def overlaps(self, other):
"""Tell if self is partly contained in other."""
return self.network_address in other or (
self.broadcast_address in other or (
other.network_address in self or (
other.broadcast_address in self))) | Tell if self is partly contained in other. | Below is the the instruction that describes the task:
### Input:
Tell if self is partly contained in other.
### Response:
def overlaps(self, other):
"""Tell if self is partly contained in other."""
return self.network_address in other or (
self.broadcast_address in other or (
other.network_address in self or (
other.broadcast_address in self))) |
def on_namreply(self, connection, event):
"""
Initial list of nicknames received - remove op/voice prefixes,
and send the list to the WebSocket.
"""
for nickname in event.arguments()[-1].split():
nickname = nickname.lstrip("@+")
self.nicknames[nickname] = color(nickname)
self.emit_nicknames() | Initial list of nicknames received - remove op/voice prefixes,
and send the list to the WebSocket. | Below is the the instruction that describes the task:
### Input:
Initial list of nicknames received - remove op/voice prefixes,
and send the list to the WebSocket.
### Response:
def on_namreply(self, connection, event):
"""
Initial list of nicknames received - remove op/voice prefixes,
and send the list to the WebSocket.
"""
for nickname in event.arguments()[-1].split():
nickname = nickname.lstrip("@+")
self.nicknames[nickname] = color(nickname)
self.emit_nicknames() |
def start(self, timeout=None):
"""
Start child process
:param timeout: the maximum time to wait for child process to report it has actually started.
None waits until the context manager has been entered, but update might not have been called yet.
"""
# we lazily create our process delegate (with same arguments)
if self.daemon:
daemonic = True
else:
daemonic = False
pargs = self._pargs.copy()
pargs.pop('daemonic', None)
self._process = multiprocessing.Process(**pargs)
self._process.daemon = daemonic
if self.is_alive():
# if already started, we shutdown and join before restarting
# not timeout will bock here (default join behavior).
# otherwise we simply use the same timeout.
self.shutdown(join=True, timeout=timeout) # TODO : only restart if no error (check exitcode)
self.start(timeout=timeout) # recursive to try again if needed
else:
self._process.start()
# timeout None means we want to wait and ensure it has started
# deterministic behavior, like is_alive() from multiprocess.Process is always true after start()
if self.started.wait(timeout=timeout): # blocks until we know true or false
return True
# return self._svc_address # returning the zmp url as a way to connect to the node
# CAREFUL : doesnt make sense if this node only run a one-time task...
# TODO: futures and ThreadPoolExecutor (so we dont need to manage the pool ourselves)
else:
return False | Start child process
:param timeout: the maximum time to wait for child process to report it has actually started.
None waits until the context manager has been entered, but update might not have been called yet. | Below is the the instruction that describes the task:
### Input:
Start child process
:param timeout: the maximum time to wait for child process to report it has actually started.
None waits until the context manager has been entered, but update might not have been called yet.
### Response:
def start(self, timeout=None):
"""
Start child process
:param timeout: the maximum time to wait for child process to report it has actually started.
None waits until the context manager has been entered, but update might not have been called yet.
"""
# we lazily create our process delegate (with same arguments)
if self.daemon:
daemonic = True
else:
daemonic = False
pargs = self._pargs.copy()
pargs.pop('daemonic', None)
self._process = multiprocessing.Process(**pargs)
self._process.daemon = daemonic
if self.is_alive():
# if already started, we shutdown and join before restarting
# not timeout will bock here (default join behavior).
# otherwise we simply use the same timeout.
self.shutdown(join=True, timeout=timeout) # TODO : only restart if no error (check exitcode)
self.start(timeout=timeout) # recursive to try again if needed
else:
self._process.start()
# timeout None means we want to wait and ensure it has started
# deterministic behavior, like is_alive() from multiprocess.Process is always true after start()
if self.started.wait(timeout=timeout): # blocks until we know true or false
return True
# return self._svc_address # returning the zmp url as a way to connect to the node
# CAREFUL : doesnt make sense if this node only run a one-time task...
# TODO: futures and ThreadPoolExecutor (so we dont need to manage the pool ourselves)
else:
return False |
def toxml(self):
"""
Exports this object into a LEMS XML object
"""
return '<With ' + \
(' instance="{0}"'.format(self.instance) if self.instance else '') +\
(' list="{0}"'.format(self.list) if self.list else '') + \
(' index="{0}"'.format(self.index) if self.index else '') + \
' as="{1}"/>'.format(self.instance, self.as_) | Exports this object into a LEMS XML object | Below is the the instruction that describes the task:
### Input:
Exports this object into a LEMS XML object
### Response:
def toxml(self):
"""
Exports this object into a LEMS XML object
"""
return '<With ' + \
(' instance="{0}"'.format(self.instance) if self.instance else '') +\
(' list="{0}"'.format(self.list) if self.list else '') + \
(' index="{0}"'.format(self.index) if self.index else '') + \
' as="{1}"/>'.format(self.instance, self.as_) |
def get_fieldsets(self, request, obj=None):
"""
Add fieldsets of placeholders to the list of already
existing fieldsets.
"""
# some ugly business to remove freeze_date
# from the field list
general_module = {
'fields': list(self.general_fields),
'classes': ('module-general',),
}
default_fieldsets = list(self.fieldsets)
if not request.user.has_perm('pages.can_freeze'):
general_module['fields'].remove('freeze_date')
if not request.user.has_perm('pages.can_publish'):
general_module['fields'].remove('status')
default_fieldsets[0][1] = general_module
placeholder_fieldsets = []
template = get_template_from_request(request, obj)
for placeholder in get_placeholders(template):
if placeholder.name not in self.mandatory_placeholders:
placeholder_fieldsets.append(placeholder.name)
additional_fieldsets = []
# meta fields
metadata_fieldsets = [f['name'] for f in self.metadata_fields]
additional_fieldsets.append((_('Metadata'), {
'fields': metadata_fieldsets,
'classes': ('module-content', 'grp-collapse grp-closed'),
}))
additional_fieldsets.append((_('Content'), {
'fields': placeholder_fieldsets,
'classes': ('module-content',),
}))
return default_fieldsets + additional_fieldsets | Add fieldsets of placeholders to the list of already
existing fieldsets. | Below is the the instruction that describes the task:
### Input:
Add fieldsets of placeholders to the list of already
existing fieldsets.
### Response:
def get_fieldsets(self, request, obj=None):
"""
Add fieldsets of placeholders to the list of already
existing fieldsets.
"""
# some ugly business to remove freeze_date
# from the field list
general_module = {
'fields': list(self.general_fields),
'classes': ('module-general',),
}
default_fieldsets = list(self.fieldsets)
if not request.user.has_perm('pages.can_freeze'):
general_module['fields'].remove('freeze_date')
if not request.user.has_perm('pages.can_publish'):
general_module['fields'].remove('status')
default_fieldsets[0][1] = general_module
placeholder_fieldsets = []
template = get_template_from_request(request, obj)
for placeholder in get_placeholders(template):
if placeholder.name not in self.mandatory_placeholders:
placeholder_fieldsets.append(placeholder.name)
additional_fieldsets = []
# meta fields
metadata_fieldsets = [f['name'] for f in self.metadata_fields]
additional_fieldsets.append((_('Metadata'), {
'fields': metadata_fieldsets,
'classes': ('module-content', 'grp-collapse grp-closed'),
}))
additional_fieldsets.append((_('Content'), {
'fields': placeholder_fieldsets,
'classes': ('module-content',),
}))
return default_fieldsets + additional_fieldsets |
def _write_tree(self, tree: dict, output: Union[str, BinaryIO], file_mode: int=0o666) -> None:
"""
Write the model to disk.
:param tree: The data dict - will be the ASDF tree.
:param output: The output file path or a file object.
:param file_mode: The output file's permissions.
:return: None
"""
self.meta["created_at"] = get_datetime_now()
meta = self.meta.copy()
meta["environment"] = collect_environment()
final_tree = {}
final_tree.update(tree)
final_tree["meta"] = meta
isfileobj = not isinstance(output, str)
if not isfileobj:
self._source = output
path = output
output = open(output, "wb")
os.chmod(path, file_mode)
pos = 0
else:
pos = output.tell()
try:
with asdf.AsdfFile(final_tree) as file:
queue = [("", tree)]
while queue:
path, element = queue.pop()
if isinstance(element, dict):
for key, val in element.items():
queue.append((path + "/" + key, val))
elif isinstance(element, (list, tuple)):
for child in element:
queue.append((path, child))
elif isinstance(element, numpy.ndarray):
path += "/"
if path not in self._compression_prefixes:
self._log.debug("%s -> %s compression", path, self.ARRAY_COMPRESSION)
file.set_array_compression(element, self.ARRAY_COMPRESSION)
else:
self._log.debug("%s -> compression disabled", path)
file.write_to(output)
self._size = output.seek(0, os.SEEK_END) - pos
finally:
if not isfileobj:
output.close() | Write the model to disk.
:param tree: The data dict - will be the ASDF tree.
:param output: The output file path or a file object.
:param file_mode: The output file's permissions.
:return: None | Below is the the instruction that describes the task:
### Input:
Write the model to disk.
:param tree: The data dict - will be the ASDF tree.
:param output: The output file path or a file object.
:param file_mode: The output file's permissions.
:return: None
### Response:
def _write_tree(self, tree: dict, output: Union[str, BinaryIO], file_mode: int=0o666) -> None:
"""
Write the model to disk.
:param tree: The data dict - will be the ASDF tree.
:param output: The output file path or a file object.
:param file_mode: The output file's permissions.
:return: None
"""
self.meta["created_at"] = get_datetime_now()
meta = self.meta.copy()
meta["environment"] = collect_environment()
final_tree = {}
final_tree.update(tree)
final_tree["meta"] = meta
isfileobj = not isinstance(output, str)
if not isfileobj:
self._source = output
path = output
output = open(output, "wb")
os.chmod(path, file_mode)
pos = 0
else:
pos = output.tell()
try:
with asdf.AsdfFile(final_tree) as file:
queue = [("", tree)]
while queue:
path, element = queue.pop()
if isinstance(element, dict):
for key, val in element.items():
queue.append((path + "/" + key, val))
elif isinstance(element, (list, tuple)):
for child in element:
queue.append((path, child))
elif isinstance(element, numpy.ndarray):
path += "/"
if path not in self._compression_prefixes:
self._log.debug("%s -> %s compression", path, self.ARRAY_COMPRESSION)
file.set_array_compression(element, self.ARRAY_COMPRESSION)
else:
self._log.debug("%s -> compression disabled", path)
file.write_to(output)
self._size = output.seek(0, os.SEEK_END) - pos
finally:
if not isfileobj:
output.close() |
def inspect_image(name):
'''
Retrieves image information. Equivalent to running the ``docker inspect``
Docker CLI command, but will only look for image information.
.. note::
To inspect an image, it must have been pulled from a registry or built
locally. Images on a Docker registry which have not been pulled cannot
be inspected.
name
Image name or ID
**RETURN DATA**
A dictionary of image information
CLI Examples:
.. code-block:: bash
salt myminion docker.inspect_image busybox
salt myminion docker.inspect_image centos:6
salt myminion docker.inspect_image 0123456789ab
'''
ret = _client_wrapper('inspect_image', name)
for param in ('Size', 'VirtualSize'):
if param in ret:
ret['{0}_Human'.format(param)] = _size_fmt(ret[param])
return ret | Retrieves image information. Equivalent to running the ``docker inspect``
Docker CLI command, but will only look for image information.
.. note::
To inspect an image, it must have been pulled from a registry or built
locally. Images on a Docker registry which have not been pulled cannot
be inspected.
name
Image name or ID
**RETURN DATA**
A dictionary of image information
CLI Examples:
.. code-block:: bash
salt myminion docker.inspect_image busybox
salt myminion docker.inspect_image centos:6
salt myminion docker.inspect_image 0123456789ab | Below is the instruction that describes the task:
### Input:
Retrieves image information. Equivalent to running the ``docker inspect``
Docker CLI command, but will only look for image information.
.. note::
To inspect an image, it must have been pulled from a registry or built
locally. Images on a Docker registry which have not been pulled cannot
be inspected.
name
Image name or ID
**RETURN DATA**
A dictionary of image information
CLI Examples:
.. code-block:: bash
salt myminion docker.inspect_image busybox
salt myminion docker.inspect_image centos:6
salt myminion docker.inspect_image 0123456789ab
### Response:
def inspect_image(name):
'''
Retrieves image information. Equivalent to running the ``docker inspect``
Docker CLI command, but will only look for image information.
.. note::
To inspect an image, it must have been pulled from a registry or built
locally. Images on a Docker registry which have not been pulled cannot
be inspected.
name
Image name or ID
**RETURN DATA**
A dictionary of image information
CLI Examples:
.. code-block:: bash
salt myminion docker.inspect_image busybox
salt myminion docker.inspect_image centos:6
salt myminion docker.inspect_image 0123456789ab
'''
ret = _client_wrapper('inspect_image', name)
for param in ('Size', 'VirtualSize'):
if param in ret:
ret['{0}_Human'.format(param)] = _size_fmt(ret[param])
return ret |
def refresh(self):
"""Force a refresh of the Elasticsearch index
"""
self.client.indices.refresh(index=self.model.search_objects.mapping.index) | Force a refresh of the Elasticsearch index | Below is the the instruction that describes the task:
### Input:
Force a refresh of the Elasticsearch index
### Response:
def refresh(self):
"""Force a refresh of the Elasticsearch index
"""
self.client.indices.refresh(index=self.model.search_objects.mapping.index) |
def mean_imls(self):
"""
Compute the mean IMLs (Intensity Measure Level)
for the given vulnerability function.
:param vulnerability_function: the vulnerability function where
the IMLs (Intensity Measure Level) are taken from.
:type vuln_function:
:py:class:`openquake.risklib.vulnerability_function.\
VulnerabilityFunction`
"""
return numpy.array(
[max(0, self.imls[0] - (self.imls[1] - self.imls[0]) / 2.)] +
[numpy.mean(pair) for pair in pairwise(self.imls)] +
[self.imls[-1] + (self.imls[-1] - self.imls[-2]) / 2.]) | Compute the mean IMLs (Intensity Measure Level)
for the given vulnerability function.
:param vulnerability_function: the vulnerability function where
the IMLs (Intensity Measure Level) are taken from.
:type vuln_function:
:py:class:`openquake.risklib.vulnerability_function.\
VulnerabilityFunction` | Below is the the instruction that describes the task:
### Input:
Compute the mean IMLs (Intensity Measure Level)
for the given vulnerability function.
:param vulnerability_function: the vulnerability function where
the IMLs (Intensity Measure Level) are taken from.
:type vuln_function:
:py:class:`openquake.risklib.vulnerability_function.\
VulnerabilityFunction`
### Response:
def mean_imls(self):
"""
Compute the mean IMLs (Intensity Measure Level)
for the given vulnerability function.
:param vulnerability_function: the vulnerability function where
the IMLs (Intensity Measure Level) are taken from.
:type vuln_function:
:py:class:`openquake.risklib.vulnerability_function.\
VulnerabilityFunction`
"""
return numpy.array(
[max(0, self.imls[0] - (self.imls[1] - self.imls[0]) / 2.)] +
[numpy.mean(pair) for pair in pairwise(self.imls)] +
[self.imls[-1] + (self.imls[-1] - self.imls[-2]) / 2.]) |
def fcsp_sa_fcsp_auth_proto_group(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcsp_sa = ET.SubElement(config, "fcsp-sa", xmlns="urn:brocade.com:mgmt:brocade-fc-auth")
fcsp = ET.SubElement(fcsp_sa, "fcsp")
auth = ET.SubElement(fcsp, "auth")
proto = ET.SubElement(auth, "proto")
group = ET.SubElement(proto, "group")
group.text = kwargs.pop('group')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def fcsp_sa_fcsp_auth_proto_group(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcsp_sa = ET.SubElement(config, "fcsp-sa", xmlns="urn:brocade.com:mgmt:brocade-fc-auth")
fcsp = ET.SubElement(fcsp_sa, "fcsp")
auth = ET.SubElement(fcsp, "auth")
proto = ET.SubElement(auth, "proto")
group = ET.SubElement(proto, "group")
group.text = kwargs.pop('group')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def require_openid(f):
"""Require user to be logged in."""
@wraps(f)
def decorator(*args, **kwargs):
if g.user is None:
next_url = url_for("login") + "?next=" + request.url
return redirect(next_url)
else:
return f(*args, **kwargs)
return decorator | Require user to be logged in. | Below is the the instruction that describes the task:
### Input:
Require user to be logged in.
### Response:
def require_openid(f):
"""Require user to be logged in."""
@wraps(f)
def decorator(*args, **kwargs):
if g.user is None:
next_url = url_for("login") + "?next=" + request.url
return redirect(next_url)
else:
return f(*args, **kwargs)
return decorator |
def invoke(self, function_name, raw_python=False, command=None, no_color=False):
    """
    Invoke a remote function.

    :param function_name: name of the command to run remotely, or a string of
        raw Python when `raw_python` is True.
    :param raw_python: if True, send `function_name` as raw Python code.
    :param command: optional key under which the command is sent (e.g.
        'manage' for Django management commands); defaults to 'command'.
    :param no_color: if True, print the decoded log output without formatting.
    :raises ClickException: if the Lambda response reports a FunctionError.
    """
    # There are three likely scenarios for 'command' here:
    #   command, which is a modular function path
    #   raw_command, which is a string of python to execute directly
    #   manage, which is a Django-specific management command invocation
    key = command if command is not None else 'command'
    if raw_python:
        command = {'raw_command': function_name}
    else:
        command = {key: function_name}

    # Can't use hjson
    import json as json

    response = self.zappa.invoke_lambda_function(
        self.lambda_name,
        json.dumps(command),
        invocation_type='RequestResponse',
    )

    if 'LogResult' in response:
        if no_color:
            # NOTE(review): b64decode returns bytes, so this prints the
            # b'...' repr rather than plain text -- confirm intended.
            print(base64.b64decode(response['LogResult']))
        else:
            decoded = base64.b64decode(response['LogResult']).decode()
            formatted = self.format_invoke_command(decoded)
            colorized = self.colorize_invoke_command(formatted)
            print(colorized)
    else:
        print(response)

    # For a successful request FunctionError is not in response.
    # https://github.com/Miserlou/Zappa/pull/1254/
    if 'FunctionError' in response:
        raise ClickException(
            "{} error occurred while invoking command.".format(response['FunctionError'])
        ) | Invoke a remote function.
) | Invoke a remote function. | Below is the the instruction that describes the task:
### Input:
Invoke a remote function.
### Response:
def invoke(self, function_name, raw_python=False, command=None, no_color=False):
"""
Invoke a remote function.
"""
# There are three likely scenarios for 'command' here:
# command, which is a modular function path
# raw_command, which is a string of python to execute directly
# manage, which is a Django-specific management command invocation
key = command if command is not None else 'command'
if raw_python:
command = {'raw_command': function_name}
else:
command = {key: function_name}
# Can't use hjson
import json as json
response = self.zappa.invoke_lambda_function(
self.lambda_name,
json.dumps(command),
invocation_type='RequestResponse',
)
if 'LogResult' in response:
if no_color:
print(base64.b64decode(response['LogResult']))
else:
decoded = base64.b64decode(response['LogResult']).decode()
formatted = self.format_invoke_command(decoded)
colorized = self.colorize_invoke_command(formatted)
print(colorized)
else:
print(response)
# For a successful request FunctionError is not in response.
# https://github.com/Miserlou/Zappa/pull/1254/
if 'FunctionError' in response:
raise ClickException(
"{} error occurred while invoking command.".format(response['FunctionError'])
) |
def run_step(self, is_shell):
    """Run a command.

    Runs a program or executable. If is_shell is True, executes the command
    through the shell.

    Args:
        is_shell: bool. Set to True to execute cmd through the default
            shell. Must not be None.

    Raises:
        subprocess.CalledProcessError: if the command exits non-zero.
    """
    assert is_shell is not None, ("is_shell param must exist for CmdStep.")

    # why? If shell is True, it is recommended to pass args as a string
    # rather than as a sequence.
    if is_shell:
        args = self.cmd_text
    else:
        args = shlex.split(self.cmd_text)

    if self.is_save:
        # Capture stdout/stderr so they can be saved into context['cmdOut'].
        completed_process = subprocess.run(args,
                                           cwd=self.cwd,
                                           shell=is_shell,
                                           # capture_output=True,only>py3.7
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE,
                                           # text=True, only>=py3.7,
                                           universal_newlines=True)
        self.context['cmdOut'] = {
            'returncode': completed_process.returncode,
            'stdout': completed_process.stdout,
            'stderr': completed_process.stderr
        }

        # when capture is true, output doesn't write to stdout
        self.logger.info(f"stdout: {completed_process.stdout}")
        if completed_process.stderr:
            self.logger.error(f"stderr: {completed_process.stderr}")

        # don't swallow the error, because it's the Step swallow decorator
        # responsibility to decide to ignore or not.
        completed_process.check_returncode()
    else:
        # check=True throws CalledProcessError if exit code != 0
        subprocess.run(args, shell=is_shell, check=True, cwd=self.cwd) | Run a command.
Runs a program or executable. If is_shell is True, executes the command
through the shell.
Args:
is_shell: bool. defaults False. Set to true to execute cmd through
the default shell. | Below is the the instruction that describes the task:
### Input:
Run a command.
Runs a program or executable. If is_shell is True, executes the command
through the shell.
Args:
is_shell: bool. defaults False. Set to true to execute cmd through
the default shell.
### Response:
def run_step(self, is_shell):
"""Run a command.
Runs a program or executable. If is_shell is True, executes the command
through the shell.
Args:
is_shell: bool. defaults False. Set to true to execute cmd through
the default shell.
"""
assert is_shell is not None, ("is_shell param must exist for CmdStep.")
# why? If shell is True, it is recommended to pass args as a string
# rather than as a sequence.
if is_shell:
args = self.cmd_text
else:
args = shlex.split(self.cmd_text)
if self.is_save:
completed_process = subprocess.run(args,
cwd=self.cwd,
shell=is_shell,
# capture_output=True,only>py3.7
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
# text=True, only>=py3.7,
universal_newlines=True)
self.context['cmdOut'] = {
'returncode': completed_process.returncode,
'stdout': completed_process.stdout,
'stderr': completed_process.stderr
}
# when capture is true, output doesn't write to stdout
self.logger.info(f"stdout: {completed_process.stdout}")
if completed_process.stderr:
self.logger.error(f"stderr: {completed_process.stderr}")
# don't swallow the error, because it's the Step swallow decorator
# responsibility to decide to ignore or not.
completed_process.check_returncode()
else:
# check=True throws CalledProcessError if exit code != 0
subprocess.run(args, shell=is_shell, check=True, cwd=self.cwd) |
def connexity(self):
"""
A boolean matrix, m[i, j] == True if there is a relation term(i) -> term(j)
:return: a np.matrix (len(dictionary), len(dictionary)) of boolean
"""
return np.matrix(sum(self.relations.values()).todense(), dtype=bool) | A boolean matrix, m[i, j] == True if there is a relation term(i) -> term(j)
:return: a np.matrix (len(dictionary), len(dictionary)) of boolean | Below is the the instruction that describes the task:
### Input:
A boolean matrix, m[i, j] == True if there is a relation term(i) -> term(j)
:return: a np.matrix (len(dictionary), len(dictionary)) of boolean
### Response:
def connexity(self):
"""
A boolean matrix, m[i, j] == True if there is a relation term(i) -> term(j)
:return: a np.matrix (len(dictionary), len(dictionary)) of boolean
"""
return np.matrix(sum(self.relations.values()).todense(), dtype=bool) |
def public_key_to_connection_id(self, public_key):
"""
Get stored connection id for a public key.
"""
with self._connections_lock:
for connection_id, connection_info in self._connections.items():
if connection_info.public_key == public_key:
return connection_id
return None | Get stored connection id for a public key. | Below is the the instruction that describes the task:
### Input:
Get stored connection id for a public key.
### Response:
def public_key_to_connection_id(self, public_key):
"""
Get stored connection id for a public key.
"""
with self._connections_lock:
for connection_id, connection_info in self._connections.items():
if connection_info.public_key == public_key:
return connection_id
return None |
async def get_txn(self, seq_no: int) -> str:
"""
Find a transaction on the distributed ledger by its sequence number.
:param seq_no: transaction number
:return: json sequence number of transaction, null for no match
"""
LOGGER.debug('BaseAnchor.get_txn >>> seq_no: %s', seq_no)
rv_json = json.dumps({})
req_json = await ledger.build_get_txn_request(self.did, None, seq_no)
resp = json.loads(await self._submit(req_json))
rv_json = self.pool.protocol.txn2data(resp)
LOGGER.debug('BaseAnchor.get_txn <<< %s', rv_json)
return rv_json | Find a transaction on the distributed ledger by its sequence number.
:param seq_no: transaction number
:return: json sequence number of transaction, null for no match | Below is the the instruction that describes the task:
### Input:
Find a transaction on the distributed ledger by its sequence number.
:param seq_no: transaction number
:return: json sequence number of transaction, null for no match
### Response:
async def get_txn(self, seq_no: int) -> str:
"""
Find a transaction on the distributed ledger by its sequence number.
:param seq_no: transaction number
:return: json sequence number of transaction, null for no match
"""
LOGGER.debug('BaseAnchor.get_txn >>> seq_no: %s', seq_no)
rv_json = json.dumps({})
req_json = await ledger.build_get_txn_request(self.did, None, seq_no)
resp = json.loads(await self._submit(req_json))
rv_json = self.pool.protocol.txn2data(resp)
LOGGER.debug('BaseAnchor.get_txn <<< %s', rv_json)
return rv_json |
def decode_complementarity(comp, control):
'''
# comp can be either:
- None
- "a<=expr" where a is a controls
- "expr<=a" where a is a control
- "expr1<=a<=expr2"
'''
try:
res = regex.match(comp).groups()
except:
raise Exception("Unable to parse complementarity condition '{}'".format(comp))
res = [r.strip() for r in res]
if res[1] != control:
msg = "Complementarity condition '{}' incorrect. Expected {} instead of {}.".format(comp, control, res[1])
raise Exception(msg)
return [res[0], res[2]] | # comp can be either:
- None
- "a<=expr" where a is a controls
- "expr<=a" where a is a control
- "expr1<=a<=expr2" | Below is the the instruction that describes the task:
### Input:
# comp can be either:
- None
- "a<=expr" where a is a controls
- "expr<=a" where a is a control
- "expr1<=a<=expr2"
### Response:
def decode_complementarity(comp, control):
'''
# comp can be either:
- None
- "a<=expr" where a is a controls
- "expr<=a" where a is a control
- "expr1<=a<=expr2"
'''
try:
res = regex.match(comp).groups()
except:
raise Exception("Unable to parse complementarity condition '{}'".format(comp))
res = [r.strip() for r in res]
if res[1] != control:
msg = "Complementarity condition '{}' incorrect. Expected {} instead of {}.".format(comp, control, res[1])
raise Exception(msg)
return [res[0], res[2]] |
def press_enter(multiple=False, silent=False):
"""Return a generator function which yields every time the user presses
return."""
def f():
try:
while True:
if silent:
yield input()
else:
sys.stderr.write("<press enter> ")
sys.stderr.flush()
yield input()
if not multiple:
break
except (EOFError, KeyboardInterrupt):
# User Ctrl+D or Ctrl+C'd
if not silent:
# Prevents the user's terminal getting clobbered
sys.stderr.write("\n")
sys.stderr.flush()
return
return f | Return a generator function which yields every time the user presses
return. | Below is the instruction that describes the task:
### Input:
Return a generator function which yields every time the user presses
return.
### Response:
def press_enter(multiple=False, silent=False):
"""Return a generator function which yields every time the user presses
return."""
def f():
try:
while True:
if silent:
yield input()
else:
sys.stderr.write("<press enter> ")
sys.stderr.flush()
yield input()
if not multiple:
break
except (EOFError, KeyboardInterrupt):
# User Ctrl+D or Ctrl+C'd
if not silent:
# Prevents the user's terminal getting clobbered
sys.stderr.write("\n")
sys.stderr.flush()
return
return f |
def error_response(self, kwargs_lens, kwargs_ps):
"""
returns the 1d array of the error estimate corresponding to the data response
:return: 1d numpy array of response, 2d array of additonal errors (e.g. point source uncertainties)
"""
model_error = self.error_map(kwargs_lens, kwargs_ps)
error_map_1d = self.ImageNumerics.image2array(model_error)
C_D_response = self.ImageNumerics.C_D_response + error_map_1d
return C_D_response, model_error | returns the 1d array of the error estimate corresponding to the data response
:return: 1d numpy array of response, 2d array of additonal errors (e.g. point source uncertainties) | Below is the the instruction that describes the task:
### Input:
returns the 1d array of the error estimate corresponding to the data response
:return: 1d numpy array of response, 2d array of additonal errors (e.g. point source uncertainties)
### Response:
def error_response(self, kwargs_lens, kwargs_ps):
"""
returns the 1d array of the error estimate corresponding to the data response
:return: 1d numpy array of response, 2d array of additonal errors (e.g. point source uncertainties)
"""
model_error = self.error_map(kwargs_lens, kwargs_ps)
error_map_1d = self.ImageNumerics.image2array(model_error)
C_D_response = self.ImageNumerics.C_D_response + error_map_1d
return C_D_response, model_error |
def eval_table(tbl, expression, vm='python', blen=None, storage=None,
               create='array', vm_kwargs=None, **kwargs):
    """Evaluate `expression` against columns of a table.

    :param tbl: table-like object mapping column names to columns
    :param expression: expression string referencing column names as variables
    :param vm: 'python' to use eval(), or 'numexpr' to use the numexpr package
    :param blen: block length (rows evaluated per chunk); resolved via
        _util.get_blen_table when None
    :param storage: storage backend for the output; resolved via
        _util.get_storage when None
    :param create: name of the storage method used to create the output
    :param vm_kwargs: extra keyword arguments forwarded to the vm's evaluate
    :param kwargs: extra keyword arguments forwarded to the storage create call
    :return: storage container holding the evaluated result
    """
    # setup
    storage = _util.get_storage(storage)
    names, columns = _util.check_table_like(tbl)
    length = len(columns[0])
    if vm_kwargs is None:
        vm_kwargs = dict()

    # setup vm
    if vm == 'numexpr':
        import numexpr
        evaluate = numexpr.evaluate
    elif vm == 'python':
        # noinspection PyUnusedLocal
        def evaluate(expr, local_dict=None, **kw):
            # takes no keyword arguments
            return eval(expr, dict(), local_dict)
    else:
        raise ValueError('expected vm either "numexpr" or "python"')

    # compile expression and get required columns
    variables = _get_expression_variables(expression, vm)
    required_columns = {v: columns[names.index(v)] for v in variables}

    # determine block size for evaluation
    blen = _util.get_blen_table(required_columns, blen=blen)

    # build output block by block, appending each evaluated chunk
    out = None
    for i in range(0, length, blen):
        j = min(i+blen, length)
        blocals = {v: c[i:j] for v, c in required_columns.items()}
        res = evaluate(expression, local_dict=blocals, **vm_kwargs)
        if out is None:
            # the first block determines the output container
            out = getattr(storage, create)(res, expectedlen=length, **kwargs)
        else:
            out.append(res)
    return out | Evaluate `expression` against columns of a table.
### Input:
Evaluate `expression` against columns of a table.
### Response:
def eval_table(tbl, expression, vm='python', blen=None, storage=None,
create='array', vm_kwargs=None, **kwargs):
"""Evaluate `expression` against columns of a table."""
# setup
storage = _util.get_storage(storage)
names, columns = _util.check_table_like(tbl)
length = len(columns[0])
if vm_kwargs is None:
vm_kwargs = dict()
# setup vm
if vm == 'numexpr':
import numexpr
evaluate = numexpr.evaluate
elif vm == 'python':
# noinspection PyUnusedLocal
def evaluate(expr, local_dict=None, **kw):
# takes no keyword arguments
return eval(expr, dict(), local_dict)
else:
raise ValueError('expected vm either "numexpr" or "python"')
# compile expression and get required columns
variables = _get_expression_variables(expression, vm)
required_columns = {v: columns[names.index(v)] for v in variables}
# determine block size for evaluation
blen = _util.get_blen_table(required_columns, blen=blen)
# build output
out = None
for i in range(0, length, blen):
j = min(i+blen, length)
blocals = {v: c[i:j] for v, c in required_columns.items()}
res = evaluate(expression, local_dict=blocals, **vm_kwargs)
if out is None:
out = getattr(storage, create)(res, expectedlen=length, **kwargs)
else:
out.append(res)
return out |
def set_model(self, model):
'''
Set the model to fuzz
:type model: :class:`~kitty.model.high_level.base.BaseModel` or a subclass
:param model: Model object to fuzz
'''
self.model = model
if self.model:
self.model.set_notification_handler(self)
self.handle_stage_changed(model)
return self | Set the model to fuzz
:type model: :class:`~kitty.model.high_level.base.BaseModel` or a subclass
:param model: Model object to fuzz | Below is the the instruction that describes the task:
### Input:
Set the model to fuzz
:type model: :class:`~kitty.model.high_level.base.BaseModel` or a subclass
:param model: Model object to fuzz
### Response:
def set_model(self, model):
'''
Set the model to fuzz
:type model: :class:`~kitty.model.high_level.base.BaseModel` or a subclass
:param model: Model object to fuzz
'''
self.model = model
if self.model:
self.model.set_notification_handler(self)
self.handle_stage_changed(model)
return self |
def bitstring_probs_to_z_moments(p):
"""
Convert between bitstring probabilities and joint Z moment expectations.
:param np.array p: An array that enumerates bitstring probabilities. When
flattened out ``p = [p_00...0, p_00...1, ...,p_11...1]``. The total number of elements must
therefore be a power of 2. The canonical shape has a separate axis for each qubit, such that
``p[i,j,...,k]`` gives the estimated probability of bitstring ``ij...k``.
:return: ``z_moments``, an np.array with one length-2 axis per qubit which contains the
expectations of all monomials in ``{I, Z_0, Z_1, ..., Z_{n-1}}``. The expectations of each
monomial can be accessed via::
<Z_0^j_0 Z_1^j_1 ... Z_m^j_m> = z_moments[j_0,j_1,...,j_m]
:rtype: np.array
"""
zmat = np.array([[1, 1],
[1, -1]])
return _apply_local_transforms(p, (zmat for _ in range(p.ndim))) | Convert between bitstring probabilities and joint Z moment expectations.
:param np.array p: An array that enumerates bitstring probabilities. When
flattened out ``p = [p_00...0, p_00...1, ...,p_11...1]``. The total number of elements must
therefore be a power of 2. The canonical shape has a separate axis for each qubit, such that
``p[i,j,...,k]`` gives the estimated probability of bitstring ``ij...k``.
:return: ``z_moments``, an np.array with one length-2 axis per qubit which contains the
expectations of all monomials in ``{I, Z_0, Z_1, ..., Z_{n-1}}``. The expectations of each
monomial can be accessed via::
<Z_0^j_0 Z_1^j_1 ... Z_m^j_m> = z_moments[j_0,j_1,...,j_m]
:rtype: np.array | Below is the the instruction that describes the task:
### Input:
Convert between bitstring probabilities and joint Z moment expectations.
:param np.array p: An array that enumerates bitstring probabilities. When
flattened out ``p = [p_00...0, p_00...1, ...,p_11...1]``. The total number of elements must
therefore be a power of 2. The canonical shape has a separate axis for each qubit, such that
``p[i,j,...,k]`` gives the estimated probability of bitstring ``ij...k``.
:return: ``z_moments``, an np.array with one length-2 axis per qubit which contains the
expectations of all monomials in ``{I, Z_0, Z_1, ..., Z_{n-1}}``. The expectations of each
monomial can be accessed via::
<Z_0^j_0 Z_1^j_1 ... Z_m^j_m> = z_moments[j_0,j_1,...,j_m]
:rtype: np.array
### Response:
def bitstring_probs_to_z_moments(p):
"""
Convert between bitstring probabilities and joint Z moment expectations.
:param np.array p: An array that enumerates bitstring probabilities. When
flattened out ``p = [p_00...0, p_00...1, ...,p_11...1]``. The total number of elements must
therefore be a power of 2. The canonical shape has a separate axis for each qubit, such that
``p[i,j,...,k]`` gives the estimated probability of bitstring ``ij...k``.
:return: ``z_moments``, an np.array with one length-2 axis per qubit which contains the
expectations of all monomials in ``{I, Z_0, Z_1, ..., Z_{n-1}}``. The expectations of each
monomial can be accessed via::
<Z_0^j_0 Z_1^j_1 ... Z_m^j_m> = z_moments[j_0,j_1,...,j_m]
:rtype: np.array
"""
zmat = np.array([[1, 1],
[1, -1]])
return _apply_local_transforms(p, (zmat for _ in range(p.ndim))) |
def parse_deps(orig_doc, options={}):
    """Generate dependency parse in {'words': [], 'arcs': []} format.

    doc (Doc): Document do parse.
    RETURNS (dict): Generated dependency parse keyed by words and arcs.

    NOTE(review): `options={}` is a mutable default argument; it is only read
    (never mutated) here so it is benign, but `options=None` would be safer.
    """
    # Work on a copy so retokenization does not alter the caller's Doc.
    doc = Doc(orig_doc.vocab).from_bytes(orig_doc.to_bytes())
    if not doc.is_parsed:
        user_warning(Warnings.W005)
    if options.get("collapse_phrases", False):
        # Merge each noun chunk into a single token carrying the root's attrs.
        with doc.retokenize() as retokenizer:
            for np in list(doc.noun_chunks):
                attrs = {
                    "tag": np.root.tag_,
                    "lemma": np.root.lemma_,
                    "ent_type": np.root.ent_type_,
                }
                retokenizer.merge(np, attrs=attrs)
    if options.get("collapse_punct", True):
        # Attach runs of trailing punctuation to the preceding word token.
        spans = []
        for word in doc[:-1]:
            if word.is_punct or not word.nbor(1).is_punct:
                continue
            start = word.i
            end = word.i + 1
            while end < len(doc) and doc[end].is_punct:
                end += 1
            span = doc[start:end]
            spans.append((span, word.tag_, word.lemma_, word.ent_type_))
        with doc.retokenize() as retokenizer:
            for span, tag, lemma, ent_type in spans:
                attrs = {"tag": tag, "lemma": lemma, "ent_type": ent_type}
                retokenizer.merge(span, attrs=attrs)
    if options.get("fine_grained"):
        words = [{"text": w.text, "tag": w.tag_} for w in doc]
    else:
        words = [{"text": w.text, "tag": w.pos_} for w in doc]
    arcs = []
    for word in doc:
        # Arcs always run left-to-right; "dir" records which side the head is on.
        if word.i < word.head.i:
            arcs.append(
                {"start": word.i, "end": word.head.i, "label": word.dep_, "dir": "left"}
            )
        elif word.i > word.head.i:
            arcs.append(
                {
                    "start": word.head.i,
                    "end": word.i,
                    "label": word.dep_,
                    "dir": "right",
                }
            )
    return {"words": words, "arcs": arcs, "settings": get_doc_settings(orig_doc)} | Generate dependency parse in {'words': [], 'arcs': []} format.
doc (Doc): Document do parse.
RETURNS (dict): Generated dependency parse keyed by words and arcs. | Below is the the instruction that describes the task:
### Input:
Generate dependency parse in {'words': [], 'arcs': []} format.
doc (Doc): Document do parse.
RETURNS (dict): Generated dependency parse keyed by words and arcs.
### Response:
def parse_deps(orig_doc, options={}):
"""Generate dependency parse in {'words': [], 'arcs': []} format.
doc (Doc): Document do parse.
RETURNS (dict): Generated dependency parse keyed by words and arcs.
"""
doc = Doc(orig_doc.vocab).from_bytes(orig_doc.to_bytes())
if not doc.is_parsed:
user_warning(Warnings.W005)
if options.get("collapse_phrases", False):
with doc.retokenize() as retokenizer:
for np in list(doc.noun_chunks):
attrs = {
"tag": np.root.tag_,
"lemma": np.root.lemma_,
"ent_type": np.root.ent_type_,
}
retokenizer.merge(np, attrs=attrs)
if options.get("collapse_punct", True):
spans = []
for word in doc[:-1]:
if word.is_punct or not word.nbor(1).is_punct:
continue
start = word.i
end = word.i + 1
while end < len(doc) and doc[end].is_punct:
end += 1
span = doc[start:end]
spans.append((span, word.tag_, word.lemma_, word.ent_type_))
with doc.retokenize() as retokenizer:
for span, tag, lemma, ent_type in spans:
attrs = {"tag": tag, "lemma": lemma, "ent_type": ent_type}
retokenizer.merge(span, attrs=attrs)
if options.get("fine_grained"):
words = [{"text": w.text, "tag": w.tag_} for w in doc]
else:
words = [{"text": w.text, "tag": w.pos_} for w in doc]
arcs = []
for word in doc:
if word.i < word.head.i:
arcs.append(
{"start": word.i, "end": word.head.i, "label": word.dep_, "dir": "left"}
)
elif word.i > word.head.i:
arcs.append(
{
"start": word.head.i,
"end": word.i,
"label": word.dep_,
"dir": "right",
}
)
return {"words": words, "arcs": arcs, "settings": get_doc_settings(orig_doc)} |
async def create_guild(self, name, region=None, icon=None):
    """|coro|

    Creates a :class:`.Guild`.

    Bot accounts in more than 10 guilds are not allowed to create guilds.

    Parameters
    ----------
    name: :class:`str`
        The name of the guild.
    region: :class:`VoiceRegion`
        The region for the voice communication server.
        Defaults to :attr:`.VoiceRegion.us_west`.
    icon: :class:`bytes`
        The :term:`py:bytes-like object` representing the icon. See :meth:`.ClientUser.edit`
        for more details on what is expected.

    Raises
    ------
    HTTPException
        Guild creation failed.
    InvalidArgument
        Invalid icon image format given. Must be PNG or JPG.

    Returns
    -------
    :class:`.Guild`
        The guild created. This is not the same guild that is
        added to cache.
    """
    if icon is not None:
        # The HTTP API expects the icon as base64-encoded image data.
        icon = utils._bytes_to_base64_data(icon)
    if region is None:
        region = VoiceRegion.us_west.value
    else:
        region = region.value
    data = await self.http.create_guild(name, region, icon)
    # Built directly from the HTTP response; distinct from the cached guild.
    return Guild(data=data, state=self._connection) | |coro|
Creates a :class:`.Guild`.
Bot accounts in more than 10 guilds are not allowed to create guilds.
Parameters
----------
name: :class:`str`
The name of the guild.
region: :class:`VoiceRegion`
The region for the voice communication server.
Defaults to :attr:`.VoiceRegion.us_west`.
icon: :class:`bytes`
The :term:`py:bytes-like object` representing the icon. See :meth:`.ClientUser.edit`
for more details on what is expected.
Raises
------
HTTPException
Guild creation failed.
InvalidArgument
Invalid icon image format given. Must be PNG or JPG.
Returns
-------
:class:`.Guild`
The guild created. This is not the same guild that is
added to cache. | Below is the the instruction that describes the task:
### Input:
|coro|
Creates a :class:`.Guild`.
Bot accounts in more than 10 guilds are not allowed to create guilds.
Parameters
----------
name: :class:`str`
The name of the guild.
region: :class:`VoiceRegion`
The region for the voice communication server.
Defaults to :attr:`.VoiceRegion.us_west`.
icon: :class:`bytes`
The :term:`py:bytes-like object` representing the icon. See :meth:`.ClientUser.edit`
for more details on what is expected.
Raises
------
HTTPException
Guild creation failed.
InvalidArgument
Invalid icon image format given. Must be PNG or JPG.
Returns
-------
:class:`.Guild`
The guild created. This is not the same guild that is
added to cache.
### Response:
async def create_guild(self, name, region=None, icon=None):
"""|coro|
Creates a :class:`.Guild`.
Bot accounts in more than 10 guilds are not allowed to create guilds.
Parameters
----------
name: :class:`str`
The name of the guild.
region: :class:`VoiceRegion`
The region for the voice communication server.
Defaults to :attr:`.VoiceRegion.us_west`.
icon: :class:`bytes`
The :term:`py:bytes-like object` representing the icon. See :meth:`.ClientUser.edit`
for more details on what is expected.
Raises
------
HTTPException
Guild creation failed.
InvalidArgument
Invalid icon image format given. Must be PNG or JPG.
Returns
-------
:class:`.Guild`
The guild created. This is not the same guild that is
added to cache.
"""
if icon is not None:
icon = utils._bytes_to_base64_data(icon)
if region is None:
region = VoiceRegion.us_west.value
else:
region = region.value
data = await self.http.create_guild(name, region, icon)
return Guild(data=data, state=self._connection) |
def get_object_type_by_name(object_type_name):
"""
:return: type suitable to handle the given object type name.
Use the type to create new instances.
:param object_type_name: Member of TYPES
:raise ValueError: In case object_type_name is unknown"""
if object_type_name == b"commit":
from . import commit
return commit.Commit
elif object_type_name == b"tag":
from . import tag
return tag.TagObject
elif object_type_name == b"blob":
from . import blob
return blob.Blob
elif object_type_name == b"tree":
from . import tree
return tree.Tree
else:
raise ValueError("Cannot handle unknown object type: %s" % object_type_name) | :return: type suitable to handle the given object type name.
Use the type to create new instances.
:param object_type_name: Member of TYPES
:raise ValueError: In case object_type_name is unknown | Below is the the instruction that describes the task:
### Input:
:return: type suitable to handle the given object type name.
Use the type to create new instances.
:param object_type_name: Member of TYPES
:raise ValueError: In case object_type_name is unknown
### Response:
def get_object_type_by_name(object_type_name):
"""
:return: type suitable to handle the given object type name.
Use the type to create new instances.
:param object_type_name: Member of TYPES
:raise ValueError: In case object_type_name is unknown"""
if object_type_name == b"commit":
from . import commit
return commit.Commit
elif object_type_name == b"tag":
from . import tag
return tag.TagObject
elif object_type_name == b"blob":
from . import blob
return blob.Blob
elif object_type_name == b"tree":
from . import tree
return tree.Tree
else:
raise ValueError("Cannot handle unknown object type: %s" % object_type_name) |
def check_order_triggers(self, current_price):
"""
Given an order and a trade event, return a tuple of
(stop_reached, limit_reached).
For market orders, will return (False, False).
For stop orders, limit_reached will always be False.
For limit orders, stop_reached will always be False.
For stop limit orders a Boolean is returned to flag
that the stop has been reached.
Orders that have been triggered already (price targets reached),
the order's current values are returned.
"""
if self.triggered:
return (self.stop_reached, self.limit_reached, False)
stop_reached = False
limit_reached = False
sl_stop_reached = False
order_type = 0
if self.amount > 0:
order_type |= BUY
else:
order_type |= SELL
if self.stop is not None:
order_type |= STOP
if self.limit is not None:
order_type |= LIMIT
if order_type == BUY | STOP | LIMIT:
if current_price >= self.stop:
sl_stop_reached = True
if current_price <= self.limit:
limit_reached = True
elif order_type == SELL | STOP | LIMIT:
if current_price <= self.stop:
sl_stop_reached = True
if current_price >= self.limit:
limit_reached = True
elif order_type == BUY | STOP:
if current_price >= self.stop:
stop_reached = True
elif order_type == SELL | STOP:
if current_price <= self.stop:
stop_reached = True
elif order_type == BUY | LIMIT:
if current_price <= self.limit:
limit_reached = True
elif order_type == SELL | LIMIT:
# This is a SELL LIMIT order
if current_price >= self.limit:
limit_reached = True
return (stop_reached, limit_reached, sl_stop_reached) | Given an order and a trade event, return a tuple of
(stop_reached, limit_reached).
For market orders, will return (False, False).
For stop orders, limit_reached will always be False.
For limit orders, stop_reached will always be False.
For stop limit orders a Boolean is returned to flag
that the stop has been reached.
Orders that have been triggered already (price targets reached),
the order's current values are returned. | Below is the the instruction that describes the task:
### Input:
Given an order and a trade event, return a tuple of
(stop_reached, limit_reached).
For market orders, will return (False, False).
For stop orders, limit_reached will always be False.
For limit orders, stop_reached will always be False.
For stop limit orders a Boolean is returned to flag
that the stop has been reached.
Orders that have been triggered already (price targets reached),
the order's current values are returned.
### Response:
def check_order_triggers(self, current_price):
"""
Given an order and a trade event, return a tuple of
(stop_reached, limit_reached).
For market orders, will return (False, False).
For stop orders, limit_reached will always be False.
For limit orders, stop_reached will always be False.
For stop limit orders a Boolean is returned to flag
that the stop has been reached.
Orders that have been triggered already (price targets reached),
the order's current values are returned.
"""
if self.triggered:
return (self.stop_reached, self.limit_reached, False)
stop_reached = False
limit_reached = False
sl_stop_reached = False
order_type = 0
if self.amount > 0:
order_type |= BUY
else:
order_type |= SELL
if self.stop is not None:
order_type |= STOP
if self.limit is not None:
order_type |= LIMIT
if order_type == BUY | STOP | LIMIT:
if current_price >= self.stop:
sl_stop_reached = True
if current_price <= self.limit:
limit_reached = True
elif order_type == SELL | STOP | LIMIT:
if current_price <= self.stop:
sl_stop_reached = True
if current_price >= self.limit:
limit_reached = True
elif order_type == BUY | STOP:
if current_price >= self.stop:
stop_reached = True
elif order_type == SELL | STOP:
if current_price <= self.stop:
stop_reached = True
elif order_type == BUY | LIMIT:
if current_price <= self.limit:
limit_reached = True
elif order_type == SELL | LIMIT:
# This is a SELL LIMIT order
if current_price >= self.limit:
limit_reached = True
return (stop_reached, limit_reached, sl_stop_reached) |
def _get_prelim_dependencies(command_template, all_templates):
""" Given a command_template determine which other templates it
depends on. This should not be used as the be-all end-all of
dependencies and before calling each command, ensure that it's
requirements are met.
"""
deps = []
for input in command_template.input_parts:
if '.' not in input.alias:
continue
for template in all_templates:
for output in template.output_parts:
if input.fuzzy_match(output):
deps.append(template)
break
return list(set(deps)) | Given a command_template determine which other templates it
depends on. This should not be used as the be-all end-all of
dependencies and before calling each command, ensure that it's
requirements are met. | Below is the the instruction that describes the task:
### Input:
Given a command_template determine which other templates it
depends on. This should not be used as the be-all end-all of
dependencies and before calling each command, ensure that it's
requirements are met.
### Response:
def _get_prelim_dependencies(command_template, all_templates):
""" Given a command_template determine which other templates it
depends on. This should not be used as the be-all end-all of
dependencies and before calling each command, ensure that it's
requirements are met.
"""
deps = []
for input in command_template.input_parts:
if '.' not in input.alias:
continue
for template in all_templates:
for output in template.output_parts:
if input.fuzzy_match(output):
deps.append(template)
break
return list(set(deps)) |
def get_err_msg(self):
"""
Return a multi-line error message for being printed, in the following
format. The text in angle brackets refers to the same-named properties
of the exception instance:
::
Syntax error:<file>:<lineno>: <msg>
<context - MOF line>
<context - position indicator line>
Returns:
:term:`string`: Multi-line error message.
"""
ret_str = 'Syntax error:'
disp_file = 'NoFile' if self.file is None else self.file
if self.lineno is not None:
ret_str += _format("{0}:{1}:{2}",
disp_file, self.lineno, self.column)
if self.msg:
ret_str += _format(" {0}", self.msg)
if self.context is not None:
ret_str += '\n'
ret_str += '\n'.join(self.context)
return ret_str | Return a multi-line error message for being printed, in the following
format. The text in angle brackets refers to the same-named properties
of the exception instance:
::
Syntax error:<file>:<lineno>: <msg>
<context - MOF line>
<context - position indicator line>
Returns:
:term:`string`: Multi-line error message. | Below is the the instruction that describes the task:
### Input:
Return a multi-line error message for being printed, in the following
format. The text in angle brackets refers to the same-named properties
of the exception instance:
::
Syntax error:<file>:<lineno>: <msg>
<context - MOF line>
<context - position indicator line>
Returns:
:term:`string`: Multi-line error message.
### Response:
def get_err_msg(self):
"""
Return a multi-line error message for being printed, in the following
format. The text in angle brackets refers to the same-named properties
of the exception instance:
::
Syntax error:<file>:<lineno>: <msg>
<context - MOF line>
<context - position indicator line>
Returns:
:term:`string`: Multi-line error message.
"""
ret_str = 'Syntax error:'
disp_file = 'NoFile' if self.file is None else self.file
if self.lineno is not None:
ret_str += _format("{0}:{1}:{2}",
disp_file, self.lineno, self.column)
if self.msg:
ret_str += _format(" {0}", self.msg)
if self.context is not None:
ret_str += '\n'
ret_str += '\n'.join(self.context)
return ret_str |
def decorator(caller, _func=None):
"""decorator(caller) converts a caller function into a decorator"""
if _func is not None: # return a decorated function
# this is obsolete behavior; you should use decorate instead
return decorate(_func, caller)
# else return a decorator function
defaultargs, defaults = '', ()
if inspect.isclass(caller):
name = caller.__name__.lower()
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
elif inspect.isfunction(caller):
if caller.__name__ == '<lambda>':
name = '_lambda_'
else:
name = caller.__name__
doc = caller.__doc__
nargs = caller.__code__.co_argcount
ndefs = len(caller.__defaults__ or ())
defaultargs = ', '.join(caller.__code__.co_varnames[nargs-ndefs:nargs])
if defaultargs:
defaultargs += ','
defaults = caller.__defaults__
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
doc = caller.__call__.__doc__
evaldict = dict(_call=caller, _decorate_=decorate)
dec = FunctionMaker.create(
'%s(func, %s)' % (name, defaultargs),
'if func is None: return lambda func: _decorate_(func, _call, (%s))\n'
'return _decorate_(func, _call, (%s))' % (defaultargs, defaultargs),
evaldict, doc=doc, module=caller.__module__, __wrapped__=caller)
if defaults:
dec.__defaults__ = (None,) + defaults
return dec | decorator(caller) converts a caller function into a decorator | Below is the the instruction that describes the task:
### Input:
decorator(caller) converts a caller function into a decorator
### Response:
def decorator(caller, _func=None):
"""decorator(caller) converts a caller function into a decorator"""
if _func is not None: # return a decorated function
# this is obsolete behavior; you should use decorate instead
return decorate(_func, caller)
# else return a decorator function
defaultargs, defaults = '', ()
if inspect.isclass(caller):
name = caller.__name__.lower()
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
elif inspect.isfunction(caller):
if caller.__name__ == '<lambda>':
name = '_lambda_'
else:
name = caller.__name__
doc = caller.__doc__
nargs = caller.__code__.co_argcount
ndefs = len(caller.__defaults__ or ())
defaultargs = ', '.join(caller.__code__.co_varnames[nargs-ndefs:nargs])
if defaultargs:
defaultargs += ','
defaults = caller.__defaults__
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
doc = caller.__call__.__doc__
evaldict = dict(_call=caller, _decorate_=decorate)
dec = FunctionMaker.create(
'%s(func, %s)' % (name, defaultargs),
'if func is None: return lambda func: _decorate_(func, _call, (%s))\n'
'return _decorate_(func, _call, (%s))' % (defaultargs, defaultargs),
evaldict, doc=doc, module=caller.__module__, __wrapped__=caller)
if defaults:
dec.__defaults__ = (None,) + defaults
return dec |
def positionMinError(G, vmini, extension=0.0):
"""
Calculate the minimum position errors from G and (V-I). These correspond to the sky regions with the
smallest astrometric errors.
NOTE! THE ERRORS ARE FOR SKY POSITIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). MAKE SURE YOUR
SIMULATED ASTROMETRY IS ALSO ON THE ICRS.
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The minimum error in alpha* and the error in delta, in that order, in micro-arcsecond.
"""
parallaxError = parallaxErrorSkyAvg(G, vmini, extension=extension)
return _astrometricErrorFactors['alphaStar'].min()*parallaxError, \
_astrometricErrorFactors['delta'].min()*parallaxError | Calculate the minimum position errors from G and (V-I). These correspond to the sky regions with the
smallest astrometric errors.
NOTE! THE ERRORS ARE FOR SKY POSITIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). MAKE SURE YOUR
SIMULATED ASTROMETRY IS ALSO ON THE ICRS.
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The minimum error in alpha* and the error in delta, in that order, in micro-arcsecond. | Below is the the instruction that describes the task:
### Input:
Calculate the minimum position errors from G and (V-I). These correspond to the sky regions with the
smallest astrometric errors.
NOTE! THE ERRORS ARE FOR SKY POSITIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). MAKE SURE YOUR
SIMULATED ASTROMETRY IS ALSO ON THE ICRS.
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The minimum error in alpha* and the error in delta, in that order, in micro-arcsecond.
### Response:
def positionMinError(G, vmini, extension=0.0):
"""
Calculate the minimum position errors from G and (V-I). These correspond to the sky regions with the
smallest astrometric errors.
NOTE! THE ERRORS ARE FOR SKY POSITIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). MAKE SURE YOUR
SIMULATED ASTROMETRY IS ALSO ON THE ICRS.
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The minimum error in alpha* and the error in delta, in that order, in micro-arcsecond.
"""
parallaxError = parallaxErrorSkyAvg(G, vmini, extension=extension)
return _astrometricErrorFactors['alphaStar'].min()*parallaxError, \
_astrometricErrorFactors['delta'].min()*parallaxError |
def docker_list(registry_pass):
# type: (str) -> None
""" List docker images stored in the remote registry.
Args:
registry_pass (str):
Remote docker registry password.
"""
registry = conf.get('docker.registry', None)
if registry is None:
log.err("You must define docker.registry conf variable to list images")
sys.exit(-1)
registry_user = conf.get('docker.registry_user', None)
if registry_user is None:
registry_user = click.prompt("Username")
rc = client.RegistryClient(registry, registry_user, registry_pass)
images = {x: rc.list_tags(x) for x in rc.list_images()}
shell.cprint("<32>Images in <34>{} <32>registry:", registry)
for image, tags in images.items():
shell.cprint(' <92>{}', image)
for tag in tags:
shell.cprint(' <90>{}:<35>{}', image, tag) | List docker images stored in the remote registry.
Args:
registry_pass (str):
Remote docker registry password. | Below is the the instruction that describes the task:
### Input:
List docker images stored in the remote registry.
Args:
registry_pass (str):
Remote docker registry password.
### Response:
def docker_list(registry_pass):
# type: (str) -> None
""" List docker images stored in the remote registry.
Args:
registry_pass (str):
Remote docker registry password.
"""
registry = conf.get('docker.registry', None)
if registry is None:
log.err("You must define docker.registry conf variable to list images")
sys.exit(-1)
registry_user = conf.get('docker.registry_user', None)
if registry_user is None:
registry_user = click.prompt("Username")
rc = client.RegistryClient(registry, registry_user, registry_pass)
images = {x: rc.list_tags(x) for x in rc.list_images()}
shell.cprint("<32>Images in <34>{} <32>registry:", registry)
for image, tags in images.items():
shell.cprint(' <92>{}', image)
for tag in tags:
shell.cprint(' <90>{}:<35>{}', image, tag) |
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone() | Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable | Below is the the instruction that describes the task:
### Input:
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
### Response:
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone() |
def getlist(self, name):
"""
Retrieve given property from class/instance, ensuring it is a list.
Also determine whether the list contains simple text/numeric values or
nested dictionaries (a "complex" list)
"""
value = self.getvalue(name)
complex = {}
def str_value(val):
# TODO: nonlocal complex
if isinstance(val, dict):
complex['complex'] = True
return val
else:
return str(val)
if value is None:
pass
else:
value = [str_value(val) for val in as_list(value)]
return value, bool(complex) | Retrieve given property from class/instance, ensuring it is a list.
Also determine whether the list contains simple text/numeric values or
nested dictionaries (a "complex" list) | Below is the the instruction that describes the task:
### Input:
Retrieve given property from class/instance, ensuring it is a list.
Also determine whether the list contains simple text/numeric values or
nested dictionaries (a "complex" list)
### Response:
def getlist(self, name):
"""
Retrieve given property from class/instance, ensuring it is a list.
Also determine whether the list contains simple text/numeric values or
nested dictionaries (a "complex" list)
"""
value = self.getvalue(name)
complex = {}
def str_value(val):
# TODO: nonlocal complex
if isinstance(val, dict):
complex['complex'] = True
return val
else:
return str(val)
if value is None:
pass
else:
value = [str_value(val) for val in as_list(value)]
return value, bool(complex) |
def set_settings(key, value):
"""Set Hitman internal settings."""
with Database("settings") as settings:
if value in ['0', 'false', 'no', 'off', 'False']:
del settings[key]
print("Disabled setting")
else:
print(value)
settings[key] = value
print("Setting saved") | Set Hitman internal settings. | Below is the the instruction that describes the task:
### Input:
Set Hitman internal settings.
### Response:
def set_settings(key, value):
"""Set Hitman internal settings."""
with Database("settings") as settings:
if value in ['0', 'false', 'no', 'off', 'False']:
del settings[key]
print("Disabled setting")
else:
print(value)
settings[key] = value
print("Setting saved") |
def convert_video(in_file, out_file, print_cmd=False, pre_options='',
**kwargs):
"""Convert a video with ffmpeg.
This provides a general api to ffmpeg, the executed command is::
`ffmpeg -y <pre_options> -i <in_file> <options> <out_file>`
Options(kwargs) are mapped to ffmpeg commands with the following rules:
- key=val: "-key val"
- key=True: "-key"
- key=False: ""
Args:
in_file (str): Input video filename.
out_file (str): Output video filename.
pre_options (str): Options appears before "-i <in_file>".
print_cmd (bool): Whether to print the final ffmpeg command.
"""
options = []
for k, v in kwargs.items():
if isinstance(v, bool):
if v:
options.append('-{}'.format(k))
elif k == 'log_level':
assert v in [
'quiet', 'panic', 'fatal', 'error', 'warning', 'info',
'verbose', 'debug', 'trace'
]
options.append('-loglevel {}'.format(v))
else:
options.append('-{} {}'.format(k, v))
cmd = 'ffmpeg -y {} -i {} {} {}'.format(pre_options, in_file,
' '.join(options), out_file)
if print_cmd:
print(cmd)
subprocess.call(cmd, shell=True) | Convert a video with ffmpeg.
This provides a general api to ffmpeg, the executed command is::
`ffmpeg -y <pre_options> -i <in_file> <options> <out_file>`
Options(kwargs) are mapped to ffmpeg commands with the following rules:
- key=val: "-key val"
- key=True: "-key"
- key=False: ""
Args:
in_file (str): Input video filename.
out_file (str): Output video filename.
pre_options (str): Options appears before "-i <in_file>".
print_cmd (bool): Whether to print the final ffmpeg command. | Below is the the instruction that describes the task:
### Input:
Convert a video with ffmpeg.
This provides a general api to ffmpeg, the executed command is::
`ffmpeg -y <pre_options> -i <in_file> <options> <out_file>`
Options(kwargs) are mapped to ffmpeg commands with the following rules:
- key=val: "-key val"
- key=True: "-key"
- key=False: ""
Args:
in_file (str): Input video filename.
out_file (str): Output video filename.
pre_options (str): Options appears before "-i <in_file>".
print_cmd (bool): Whether to print the final ffmpeg command.
### Response:
def convert_video(in_file, out_file, print_cmd=False, pre_options='',
**kwargs):
"""Convert a video with ffmpeg.
This provides a general api to ffmpeg, the executed command is::
`ffmpeg -y <pre_options> -i <in_file> <options> <out_file>`
Options(kwargs) are mapped to ffmpeg commands with the following rules:
- key=val: "-key val"
- key=True: "-key"
- key=False: ""
Args:
in_file (str): Input video filename.
out_file (str): Output video filename.
pre_options (str): Options appears before "-i <in_file>".
print_cmd (bool): Whether to print the final ffmpeg command.
"""
options = []
for k, v in kwargs.items():
if isinstance(v, bool):
if v:
options.append('-{}'.format(k))
elif k == 'log_level':
assert v in [
'quiet', 'panic', 'fatal', 'error', 'warning', 'info',
'verbose', 'debug', 'trace'
]
options.append('-loglevel {}'.format(v))
else:
options.append('-{} {}'.format(k, v))
cmd = 'ffmpeg -y {} -i {} {} {}'.format(pre_options, in_file,
' '.join(options), out_file)
if print_cmd:
print(cmd)
subprocess.call(cmd, shell=True) |
def make_shared_result(result, key, trajectory, new_class=None):
"""Turns an ordinary data item into a shared one.
Removes the old result from the trajectory and replaces it.
Empties the given result.
:param result: The result containing ordinary data
:param key: Name of ordinary data item
:param trajectory: Trajectory container
:param new_class:
Class of new shared data item.
Leave `None` for automatic detection.
:return: The `result`
"""
data = result.f_get(key)
if new_class is None:
if isinstance(data, ObjectTable):
new_class = SharedTable
elif isinstance(data, pd.DataFrame):
new_class = SharedPandasFrame
elif isinstance(data, (tuple, list)):
new_class = SharedArray
elif isinstance(data, (np.ndarray, np.matrix)):
new_class = SharedCArray
else:
raise RuntimeError('Your data `%s` is not understood.' % key)
shared_data = new_class(result.f_translate_key(key), result, trajectory=trajectory)
result[key] = shared_data
shared_data._request_data('make_shared')
return result | Turns an ordinary data item into a shared one.
Removes the old result from the trajectory and replaces it.
Empties the given result.
:param result: The result containing ordinary data
:param key: Name of ordinary data item
:param trajectory: Trajectory container
:param new_class:
Class of new shared data item.
Leave `None` for automatic detection.
:return: The `result` | Below is the the instruction that describes the task:
### Input:
Turns an ordinary data item into a shared one.
Removes the old result from the trajectory and replaces it.
Empties the given result.
:param result: The result containing ordinary data
:param key: Name of ordinary data item
:param trajectory: Trajectory container
:param new_class:
Class of new shared data item.
Leave `None` for automatic detection.
:return: The `result`
### Response:
def make_shared_result(result, key, trajectory, new_class=None):
"""Turns an ordinary data item into a shared one.
Removes the old result from the trajectory and replaces it.
Empties the given result.
:param result: The result containing ordinary data
:param key: Name of ordinary data item
:param trajectory: Trajectory container
:param new_class:
Class of new shared data item.
Leave `None` for automatic detection.
:return: The `result`
"""
data = result.f_get(key)
if new_class is None:
if isinstance(data, ObjectTable):
new_class = SharedTable
elif isinstance(data, pd.DataFrame):
new_class = SharedPandasFrame
elif isinstance(data, (tuple, list)):
new_class = SharedArray
elif isinstance(data, (np.ndarray, np.matrix)):
new_class = SharedCArray
else:
raise RuntimeError('Your data `%s` is not understood.' % key)
shared_data = new_class(result.f_translate_key(key), result, trajectory=trajectory)
result[key] = shared_data
shared_data._request_data('make_shared')
return result |
def linkHasRel(link_attrs, target_rel):
"""Does this link have target_rel as a relationship?"""
# XXX: TESTME
rel_attr = link_attrs.get('rel')
return rel_attr and relMatches(rel_attr, target_rel) | Does this link have target_rel as a relationship? | Below is the the instruction that describes the task:
### Input:
Does this link have target_rel as a relationship?
### Response:
def linkHasRel(link_attrs, target_rel):
"""Does this link have target_rel as a relationship?"""
# XXX: TESTME
rel_attr = link_attrs.get('rel')
return rel_attr and relMatches(rel_attr, target_rel) |
def watering_time(self):
"""Return watering_time from zone."""
# zone starts with index 0
index = self.id - 1
auto_watering_time =\
self._attributes['rain_delay_mode'][index]['auto_watering_time']
manual_watering_time =\
self._attributes['rain_delay_mode'][index]['manual_watering_time']
if auto_watering_time > manual_watering_time:
watering_time = auto_watering_time
else:
watering_time = manual_watering_time
return watering_time | Return watering_time from zone. | Below is the the instruction that describes the task:
### Input:
Return watering_time from zone.
### Response:
def watering_time(self):
"""Return watering_time from zone."""
# zone starts with index 0
index = self.id - 1
auto_watering_time =\
self._attributes['rain_delay_mode'][index]['auto_watering_time']
manual_watering_time =\
self._attributes['rain_delay_mode'][index]['manual_watering_time']
if auto_watering_time > manual_watering_time:
watering_time = auto_watering_time
else:
watering_time = manual_watering_time
return watering_time |
def _get_calculated_value(self, value):
"""
Get's the final value of the field and runs the lambda functions
recursively until a final value is derived.
:param value: The value to calculate/expand
:return: The final value
"""
if isinstance(value, types.LambdaType):
expanded_value = value(self.structure)
return self._get_calculated_value(expanded_value)
else:
# perform one final parsing of the value in case lambda value
# returned a different type
return self._parse_value(value) | Get's the final value of the field and runs the lambda functions
recursively until a final value is derived.
:param value: The value to calculate/expand
:return: The final value | Below is the the instruction that describes the task:
### Input:
Get's the final value of the field and runs the lambda functions
recursively until a final value is derived.
:param value: The value to calculate/expand
:return: The final value
### Response:
def _get_calculated_value(self, value):
"""
Get's the final value of the field and runs the lambda functions
recursively until a final value is derived.
:param value: The value to calculate/expand
:return: The final value
"""
if isinstance(value, types.LambdaType):
expanded_value = value(self.structure)
return self._get_calculated_value(expanded_value)
else:
# perform one final parsing of the value in case lambda value
# returned a different type
return self._parse_value(value) |
def decode_ay(ay):
"""Convert binary blob from DBus queries to strings."""
if ay is None:
return ''
elif isinstance(ay, str):
return ay
elif isinstance(ay, bytes):
return ay.decode('utf-8')
else:
# dbus.Array([dbus.Byte]) or any similar sequence type:
return bytearray(ay).rstrip(bytearray((0,))).decode('utf-8') | Convert binary blob from DBus queries to strings. | Below is the the instruction that describes the task:
### Input:
Convert binary blob from DBus queries to strings.
### Response:
def decode_ay(ay):
"""Convert binary blob from DBus queries to strings."""
if ay is None:
return ''
elif isinstance(ay, str):
return ay
elif isinstance(ay, bytes):
return ay.decode('utf-8')
else:
# dbus.Array([dbus.Byte]) or any similar sequence type:
return bytearray(ay).rstrip(bytearray((0,))).decode('utf-8') |
def _set_interface_dynamic_bypass_name_prefix(self, v, load=False):
    """
    Setter method for interface_dynamic_bypass_name_prefix, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/interface_dynamic_bypass/mpls_interface_dynamic_bypass_sub_cmds/interface_dynamic_bypass_name_prefix (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_interface_dynamic_bypass_name_prefix is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_interface_dynamic_bypass_name_prefix() directly.
    """
    # Values coming from another YANG-typed object expose a _utype hook;
    # invoke it so the raw value is re-wrapped in its underlying type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # YANGDynClass validates the value against the YANG string type
        # (length restricted to 1..21, default "dbyp") and binds it into
        # the generated model tree with the tailf CLI extensions attached.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..21']}), default=unicode("dbyp"), is_leaf=True, yang_name="interface-dynamic-bypass-name-prefix", rest_name="name-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'interface level dynamic bypass name prefix', u'alt-name': u'name-prefix', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated-code error structure so callers can
        # report the expected YANG type alongside the failing assignment.
        raise ValueError({
            'error-string': """interface_dynamic_bypass_name_prefix must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..21']}), default=unicode("dbyp"), is_leaf=True, yang_name="interface-dynamic-bypass-name-prefix", rest_name="name-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'interface level dynamic bypass name prefix', u'alt-name': u'name-prefix', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)""",
        })
    self.__interface_dynamic_bypass_name_prefix = t
    # Notify the enclosing container of the change if it wires up
    # change tracking via a _set() hook.
    if hasattr(self, '_set'):
        self._set()
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_dynamic_bypass_name_prefix is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_dynamic_bypass_name_prefix() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for interface_dynamic_bypass_name_prefix, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/interface_dynamic_bypass/mpls_interface_dynamic_bypass_sub_cmds/interface_dynamic_bypass_name_prefix (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_dynamic_bypass_name_prefix is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_dynamic_bypass_name_prefix() directly.
### Response:
def _set_interface_dynamic_bypass_name_prefix(self, v, load=False):
"""
Setter method for interface_dynamic_bypass_name_prefix, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/interface_dynamic_bypass/mpls_interface_dynamic_bypass_sub_cmds/interface_dynamic_bypass_name_prefix (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_dynamic_bypass_name_prefix is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_dynamic_bypass_name_prefix() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..21']}), default=unicode("dbyp"), is_leaf=True, yang_name="interface-dynamic-bypass-name-prefix", rest_name="name-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'interface level dynamic bypass name prefix', u'alt-name': u'name-prefix', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_dynamic_bypass_name_prefix must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..21']}), default=unicode("dbyp"), is_leaf=True, yang_name="interface-dynamic-bypass-name-prefix", rest_name="name-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'interface level dynamic bypass name prefix', u'alt-name': u'name-prefix', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)""",
})
self.__interface_dynamic_bypass_name_prefix = t
if hasattr(self, '_set'):
self._set() |
def parse_sidebar(self, user_page):
    """Parses the DOM and returns user attributes in the sidebar.
    :type user_page: :class:`bs4.BeautifulSoup`
    :param user_page: MAL user page's DOM
    :rtype: dict
    :return: User attributes
    :raises: :class:`.InvalidUserError`, :class:`.MalformedUserPageError`
    """
    user_info = {}
    # if MAL says the series doesn't exist, raise an InvalidUserError.
    error_tag = user_page.find(u'div', {u'class': u'badresult'})
    if error_tag:
        raise InvalidUserError(self.username)
    try:
        username_tag = user_page.find(u'div', {u'id': u'contentWrapper'}).find(u'h1')
        if not username_tag.find(u'div'):
            # otherwise, raise a MalformedUserPageError.
            raise MalformedUserPageError(self.username, user_page, message=u"Could not find title div")
    except:
        # parse failures are swallowed unless the session opts into raising
        if not self.session.suppress_parse_exceptions:
            raise
    # the first <td> of the content table is the left-hand sidebar column
    info_panel_first = user_page.find(u'div', {u'id': u'content'}).find(u'table').find(u'td')
    try:
        picture_tag = info_panel_first.find(u'img')
        # NOTE(review): .decode on the src attribute implies Python 2
        # (str -> unicode); under Python 3 this raises AttributeError — confirm runtime
        user_info[u'picture'] = picture_tag.get(u'src').decode('utf-8')
    except:
        if not self.session.suppress_parse_exceptions:
            raise
    try:
        # the user ID is always present in the blogfeed link.
        all_comments_link = info_panel_first.find(u'a', text=u'Blog Feed')
        user_info[u'id'] = int(all_comments_link.get(u'href').split(u'&id=')[1])
    except:
        if not self.session.suppress_parse_exceptions:
            raise
    # favourites appear under successive "normal_header" divs, in fixed
    # order: anime, manga, characters, people
    infobar_headers = info_panel_first.find_all(u'div', {u'class': u'normal_header'})
    if infobar_headers:
        try:
            favorite_anime_header = infobar_headers[0]
            if u'Favorite Anime' in favorite_anime_header.text:
                user_info[u'favorite_anime'] = []
                # the table of favourites is the header's next element sibling
                favorite_anime_table = favorite_anime_header.nextSibling.nextSibling
                if favorite_anime_table.name == u'table':
                    for row in favorite_anime_table.find_all(u'tr'):
                        cols = row.find_all(u'td')
                        anime_link = cols[1].find(u'a')
                        link_parts = anime_link.get(u'href').split(u'/')
                        # of the form /anime/467/Ghost_in_the_Shell:_Stand_Alone_Complex
                        user_info[u'favorite_anime'].append(self.session.anime(int(link_parts[2])).set({u'title': anime_link.text}))
        except:
            if not self.session.suppress_parse_exceptions:
                raise
        try:
            favorite_manga_header = infobar_headers[1]
            if u'Favorite Manga' in favorite_manga_header.text:
                user_info[u'favorite_manga'] = []
                favorite_manga_table = favorite_manga_header.nextSibling.nextSibling
                if favorite_manga_table.name == u'table':
                    for row in favorite_manga_table.find_all(u'tr'):
                        cols = row.find_all(u'td')
                        manga_link = cols[1].find(u'a')
                        link_parts = manga_link.get(u'href').split(u'/')
                        # of the form /manga/467/Ghost_in_the_Shell:_Stand_Alone_Complex
                        user_info[u'favorite_manga'].append(self.session.manga(int(link_parts[2])).set({u'title': manga_link.text}))
        except:
            if not self.session.suppress_parse_exceptions:
                raise
        try:
            favorite_character_header = infobar_headers[2]
            if u'Favorite Characters' in favorite_character_header.text:
                # maps character -> the anime/manga it appears in
                user_info[u'favorite_characters'] = {}
                favorite_character_table = favorite_character_header.nextSibling.nextSibling
                if favorite_character_table.name == u'table':
                    for row in favorite_character_table.find_all(u'tr'):
                        cols = row.find_all(u'td')
                        character_link = cols[1].find(u'a')
                        link_parts = character_link.get(u'href').split(u'/')
                        # of the form /character/467/Ghost_in_the_Shell:_Stand_Alone_Complex
                        character = self.session.character(int(link_parts[2])).set({u'title': character_link.text})
                        media_link = cols[1].find(u'div').find(u'a')
                        link_parts = media_link.get(u'href').split(u'/')
                        # of the form /anime|manga/467
                        anime = getattr(self.session, link_parts[1])(int(link_parts[2])).set({u'title': media_link.text})
                        user_info[u'favorite_characters'][character] = anime
        except:
            if not self.session.suppress_parse_exceptions:
                raise
        try:
            favorite_people_header = infobar_headers[3]
            if u'Favorite People' in favorite_people_header.text:
                user_info[u'favorite_people'] = []
                favorite_person_table = favorite_people_header.nextSibling.nextSibling
                if favorite_person_table.name == u'table':
                    for row in favorite_person_table.find_all(u'tr'):
                        cols = row.find_all(u'td')
                        person_link = cols[1].find(u'a')
                        link_parts = person_link.get(u'href').split(u'/')
                        # of the form /person/467/Ghost_in_the_Shell:_Stand_Alone_Complex
                        user_info[u'favorite_people'].append(self.session.person(int(link_parts[2])).set({u'title': person_link.text}))
        except:
            if not self.session.suppress_parse_exceptions:
                raise
    return user_info
:type user_page: :class:`bs4.BeautifulSoup`
:param user_page: MAL user page's DOM
:rtype: dict
:return: User attributes
:raises: :class:`.InvalidUserError`, :class:`.MalformedUserPageError` | Below is the the instruction that describes the task:
### Input:
Parses the DOM and returns user attributes in the sidebar.
:type user_page: :class:`bs4.BeautifulSoup`
:param user_page: MAL user page's DOM
:rtype: dict
:return: User attributes
:raises: :class:`.InvalidUserError`, :class:`.MalformedUserPageError`
### Response:
def parse_sidebar(self, user_page):
"""Parses the DOM and returns user attributes in the sidebar.
:type user_page: :class:`bs4.BeautifulSoup`
:param user_page: MAL user page's DOM
:rtype: dict
:return: User attributes
:raises: :class:`.InvalidUserError`, :class:`.MalformedUserPageError`
"""
user_info = {}
# if MAL says the series doesn't exist, raise an InvalidUserError.
error_tag = user_page.find(u'div', {u'class': u'badresult'})
if error_tag:
raise InvalidUserError(self.username)
try:
username_tag = user_page.find(u'div', {u'id': u'contentWrapper'}).find(u'h1')
if not username_tag.find(u'div'):
# otherwise, raise a MalformedUserPageError.
raise MalformedUserPageError(self.username, user_page, message=u"Could not find title div")
except:
if not self.session.suppress_parse_exceptions:
raise
info_panel_first = user_page.find(u'div', {u'id': u'content'}).find(u'table').find(u'td')
try:
picture_tag = info_panel_first.find(u'img')
user_info[u'picture'] = picture_tag.get(u'src').decode('utf-8')
except:
if not self.session.suppress_parse_exceptions:
raise
try:
# the user ID is always present in the blogfeed link.
all_comments_link = info_panel_first.find(u'a', text=u'Blog Feed')
user_info[u'id'] = int(all_comments_link.get(u'href').split(u'&id=')[1])
except:
if not self.session.suppress_parse_exceptions:
raise
infobar_headers = info_panel_first.find_all(u'div', {u'class': u'normal_header'})
if infobar_headers:
try:
favorite_anime_header = infobar_headers[0]
if u'Favorite Anime' in favorite_anime_header.text:
user_info[u'favorite_anime'] = []
favorite_anime_table = favorite_anime_header.nextSibling.nextSibling
if favorite_anime_table.name == u'table':
for row in favorite_anime_table.find_all(u'tr'):
cols = row.find_all(u'td')
anime_link = cols[1].find(u'a')
link_parts = anime_link.get(u'href').split(u'/')
# of the form /anime/467/Ghost_in_the_Shell:_Stand_Alone_Complex
user_info[u'favorite_anime'].append(self.session.anime(int(link_parts[2])).set({u'title': anime_link.text}))
except:
if not self.session.suppress_parse_exceptions:
raise
try:
favorite_manga_header = infobar_headers[1]
if u'Favorite Manga' in favorite_manga_header.text:
user_info[u'favorite_manga'] = []
favorite_manga_table = favorite_manga_header.nextSibling.nextSibling
if favorite_manga_table.name == u'table':
for row in favorite_manga_table.find_all(u'tr'):
cols = row.find_all(u'td')
manga_link = cols[1].find(u'a')
link_parts = manga_link.get(u'href').split(u'/')
# of the form /manga/467/Ghost_in_the_Shell:_Stand_Alone_Complex
user_info[u'favorite_manga'].append(self.session.manga(int(link_parts[2])).set({u'title': manga_link.text}))
except:
if not self.session.suppress_parse_exceptions:
raise
try:
favorite_character_header = infobar_headers[2]
if u'Favorite Characters' in favorite_character_header.text:
user_info[u'favorite_characters'] = {}
favorite_character_table = favorite_character_header.nextSibling.nextSibling
if favorite_character_table.name == u'table':
for row in favorite_character_table.find_all(u'tr'):
cols = row.find_all(u'td')
character_link = cols[1].find(u'a')
link_parts = character_link.get(u'href').split(u'/')
# of the form /character/467/Ghost_in_the_Shell:_Stand_Alone_Complex
character = self.session.character(int(link_parts[2])).set({u'title': character_link.text})
media_link = cols[1].find(u'div').find(u'a')
link_parts = media_link.get(u'href').split(u'/')
# of the form /anime|manga/467
anime = getattr(self.session, link_parts[1])(int(link_parts[2])).set({u'title': media_link.text})
user_info[u'favorite_characters'][character] = anime
except:
if not self.session.suppress_parse_exceptions:
raise
try:
favorite_people_header = infobar_headers[3]
if u'Favorite People' in favorite_people_header.text:
user_info[u'favorite_people'] = []
favorite_person_table = favorite_people_header.nextSibling.nextSibling
if favorite_person_table.name == u'table':
for row in favorite_person_table.find_all(u'tr'):
cols = row.find_all(u'td')
person_link = cols[1].find(u'a')
link_parts = person_link.get(u'href').split(u'/')
# of the form /person/467/Ghost_in_the_Shell:_Stand_Alone_Complex
user_info[u'favorite_people'].append(self.session.person(int(link_parts[2])).set({u'title': person_link.text}))
except:
if not self.session.suppress_parse_exceptions:
raise
return user_info |
def as_tuple(ireq):
    """
    Pulls out the (name: str, version:str, extras:(str)) tuple from the pinned InstallRequirement.
    """
    if not is_pinned_requirement(ireq):
        raise TypeError("Expected a pinned InstallRequirement, got {}".format(ireq))
    # build the three components and return them as one tuple
    return (
        key_from_req(ireq.req),
        first(ireq.specifier._specs)._spec[1],
        tuple(sorted(ireq.extras)),
    )
### Input:
Pulls out the (name: str, version:str, extras:(str)) tuple from the pinned InstallRequirement.
### Response:
def as_tuple(ireq):
"""
Pulls out the (name: str, version:str, extras:(str)) tuple from the pinned InstallRequirement.
"""
if not is_pinned_requirement(ireq):
raise TypeError("Expected a pinned InstallRequirement, got {}".format(ireq))
name = key_from_req(ireq.req)
version = first(ireq.specifier._specs)._spec[1]
extras = tuple(sorted(ireq.extras))
return name, version, extras |
def hyperbolic_errors(hyp_axes, xvals,
                      transformation=None, axes=None,
                      means=None, correct_apparent_dip=True,
                      reverse=False):
    """
    Compute a 2-D view of the hyperbolic error envelope along a
    specific direction.
    This creates a hyperbolic quadric and slices it to form a conic
    on a 2d cartesian plane aligned with the requested direction.
    NOTE(review): despite the original description ("returns a
    function"), this returns three coordinate arrays
    ``(nominal, bottom, top)`` for the nominal line and the lower/upper
    error bounds at each x value — confirm against callers.
    kwargs:
    transformation rotation to apply to quadric prior to slicing
    (e.g. transformation into 'world' coordinates
    axes axes on which to slice the data
    correct_apparent_dip NOTE(review): currently unused in the body
    reverse flip the view left-to-right
    """
    if means is None:
        means = N.array([0,0])
    arr = augment_tensor(N.diag(hyp_axes))
    # Transform ellipsoid to dual hyperboloid
    hyp = conic(arr).dual()
    if len(hyp_axes) == 3:
        # Three_dimensional case
        if transformation is None:
            transformation = N.identity(3)
        if axes is None:
            axes = N.array([[0,1,0],[0,0,1]])
        hyp = hyp.transform(augment_tensor(transformation))
        n_ = N.cross(axes[0],axes[1])
        # Create a plane containing the two axes specified
        # in the function call
        p = plane(n_) # no offset (goes through origin)
        h1 = hyp.slice(p, axes=axes)[0]
    else:
        # We have a 2d geometry
        h1 = hyp
    # Major axes of the conic sliced in the requested viewing
    # geometry
    A = N.sqrt(h1.semiaxes())
    # hyperbola branch: y = A1 * cosh(asinh(x / A0)) gives the envelope
    # half-width at each distance x along the nominal line
    yvals = A[1]*N.cosh(N.arcsinh(xvals/A[0]))
    vals = N.array([xvals,yvals]).transpose()
    nom = N.array([xvals,N.zeros(xvals.shape)]).transpose()
    # Rotate the whole result if the PCA axes aren't aligned to the
    # major axes of the view coordinate system
    ax1 = apparent_dip_correction(axes)
    # This is a dirty hack to flip things left to right
    if reverse:
        ax1 = ax1.T
    # Top
    t = dot(vals,ax1).T+means[:,N.newaxis]
    # Btm — negate the width column in place to mirror the envelope
    vals[:,-1] *= -1
    b = dot(vals,ax1).T+means[:,N.newaxis]
    nom = dot(nom,ax1).T+means[:,N.newaxis]
    # top is returned column-reversed so bottom+top trace a closed outline
    return nom, b, t[:,::-1]
hyperbolic error ellipse from a specific direction.
This creates a hyperbolic quadric and slices it to form a conic
on a 2d cartesian plane aligned with the requested direction.
A function is returned that takes x values (distance along nominal
line) and returns y values (width of error hyperbola)
kwargs:
transformation rotation to apply to quadric prior to slicing
(e.g. transformation into 'world' coordinates
axes axes on which to slice the data | Below is the the instruction that describes the task:
### Input:
Returns a function that can be used to create a view of the
hyperbolic error ellipse from a specific direction.
This creates a hyperbolic quadric and slices it to form a conic
on a 2d cartesian plane aligned with the requested direction.
A function is returned that takes x values (distance along nominal
line) and returns y values (width of error hyperbola)
kwargs:
transformation rotation to apply to quadric prior to slicing
(e.g. transformation into 'world' coordinates
axes axes on which to slice the data
### Response:
def hyperbolic_errors(hyp_axes, xvals,
transformation=None, axes=None,
means=None, correct_apparent_dip=True,
reverse=False):
"""
Returns a function that can be used to create a view of the
hyperbolic error ellipse from a specific direction.
This creates a hyperbolic quadric and slices it to form a conic
on a 2d cartesian plane aligned with the requested direction.
A function is returned that takes x values (distance along nominal
line) and returns y values (width of error hyperbola)
kwargs:
transformation rotation to apply to quadric prior to slicing
(e.g. transformation into 'world' coordinates
axes axes on which to slice the data
"""
if means is None:
means = N.array([0,0])
arr = augment_tensor(N.diag(hyp_axes))
# Transform ellipsoid to dual hyperboloid
hyp = conic(arr).dual()
if len(hyp_axes) == 3:
# Three_dimensional case
if transformation is None:
transformation = N.identity(3)
if axes is None:
axes = N.array([[0,1,0],[0,0,1]])
hyp = hyp.transform(augment_tensor(transformation))
n_ = N.cross(axes[0],axes[1])
# Create a plane containing the two axes specified
# in the function call
p = plane(n_) # no offset (goes through origin)
h1 = hyp.slice(p, axes=axes)[0]
else:
# We have a 2d geometry
h1 = hyp
# Major axes of the conic sliced in the requested viewing
# geometry
A = N.sqrt(h1.semiaxes())
yvals = A[1]*N.cosh(N.arcsinh(xvals/A[0]))
vals = N.array([xvals,yvals]).transpose()
nom = N.array([xvals,N.zeros(xvals.shape)]).transpose()
# Rotate the whole result if the PCA axes aren't aligned to the
# major axes of the view coordinate system
ax1 = apparent_dip_correction(axes)
# This is a dirty hack to flip things left to right
if reverse:
ax1 = ax1.T
# Top
t = dot(vals,ax1).T+means[:,N.newaxis]
# Btm
vals[:,-1] *= -1
b = dot(vals,ax1).T+means[:,N.newaxis]
nom = dot(nom,ax1).T+means[:,N.newaxis]
return nom, b, t[:,::-1] |
def from_id(cls, context: InstaloaderContext, profile_id: int):
    """Create a Profile instance from a given userid. If possible, use :meth:`Profile.from_username`
    or constructor directly rather than this method, since it requires more requests.
    :param context: :attr:`Instaloader.context`
    :param profile_id: userid
    :raises: :class:`ProfileNotExistsException`
    """
    # serve repeated lookups for the same id from the per-context cache
    try:
        return context.profile_id_cache[profile_id]
    except KeyError:
        pass
    variables = {'user_id': str(profile_id),
                 'include_chaining': False,
                 'include_reel': True,
                 'include_suggested_users': False,
                 'include_logged_out_extras': False,
                 'include_highlight_reels': False}
    data = context.graphql_query('7c16654f22c819fb63d1183034a5162f', variables,
                                 rhx_gis=context.root_rhx_gis)['data']['user']
    if not data:
        raise ProfileNotExistsException("No profile found, the user may have blocked you (ID: " +
                                        str(profile_id) + ").")
    profile = cls(context, data['reel']['owner'])
    context.profile_id_cache[profile_id] = profile
    return profile
or constructor directly rather than this method, since it requires more requests.
:param context: :attr:`Instaloader.context`
:param profile_id: userid
:raises: :class:`ProfileNotExistsException` | Below is the the instruction that describes the task:
### Input:
Create a Profile instance from a given userid. If possible, use :meth:`Profile.from_username`
or constructor directly rather than this method, since it requires more requests.
:param context: :attr:`Instaloader.context`
:param profile_id: userid
:raises: :class:`ProfileNotExistsException`
### Response:
def from_id(cls, context: InstaloaderContext, profile_id: int):
"""Create a Profile instance from a given userid. If possible, use :meth:`Profile.from_username`
or constructor directly rather than this method, since it requires more requests.
:param context: :attr:`Instaloader.context`
:param profile_id: userid
:raises: :class:`ProfileNotExistsException`
"""
if profile_id in context.profile_id_cache:
return context.profile_id_cache[profile_id]
data = context.graphql_query('7c16654f22c819fb63d1183034a5162f',
{'user_id': str(profile_id),
'include_chaining': False,
'include_reel': True,
'include_suggested_users': False,
'include_logged_out_extras': False,
'include_highlight_reels': False},
rhx_gis=context.root_rhx_gis)['data']['user']
if data:
profile = cls(context, data['reel']['owner'])
else:
raise ProfileNotExistsException("No profile found, the user may have blocked you (ID: " +
str(profile_id) + ").")
context.profile_id_cache[profile_id] = profile
return profile |
def on_demand_annotation(twitter_app_key, twitter_app_secret, user_twitter_id):
    """
    A service that leverages twitter lists for on-demand annotation of popular users.
    TODO: Do this.
    """
    # Log into my application
    twitter = login(twitter_app_key, twitter_app_secret)
    # fetch up to 1000 lists the target user is a member of
    memberships = twitter.get_list_memberships(user_id=user_twitter_id, count=1000)
    for membership in memberships:
        print(membership)
    return memberships
TODO: Do this. | Below is the the instruction that describes the task:
### Input:
A service that leverages twitter lists for on-demand annotation of popular users.
TODO: Do this.
### Response:
def on_demand_annotation(twitter_app_key, twitter_app_secret, user_twitter_id):
"""
A service that leverages twitter lists for on-demand annotation of popular users.
TODO: Do this.
"""
####################################################################################################################
# Log into my application
####################################################################################################################
twitter = login(twitter_app_key, twitter_app_secret)
twitter_lists_list = twitter.get_list_memberships(user_id=user_twitter_id, count=1000)
for twitter_list in twitter_lists_list:
print(twitter_list)
return twitter_lists_list |
def ensure_utf8(image_tag):
    """wrapper for ensuring image_tag returns utf8-encoded str on Python 2"""
    if py3compat.PY3:
        # Python 3 strings are already unicode; return the callable untouched
        return image_tag

    def utf8_image_tag(*args, **kwargs):
        # encode only unicode results; byte strings pass through unchanged
        tag = image_tag(*args, **kwargs)
        return tag.encode('utf8') if isinstance(tag, unicode) else tag

    return utf8_image_tag
### Input:
wrapper for ensuring image_tag returns utf8-encoded str on Python 2
### Response:
def ensure_utf8(image_tag):
"""wrapper for ensuring image_tag returns utf8-encoded str on Python 2"""
if py3compat.PY3:
# nothing to do on Python 3
return image_tag
def utf8_image_tag(*args, **kwargs):
s = image_tag(*args, **kwargs)
if isinstance(s, unicode):
s = s.encode('utf8')
return s
return utf8_image_tag |
def shared_client(ClientType, *args, **kwargs):
    """Return a single shared kubernetes client instance
    A weak reference to the instance is cached,
    so that concurrent calls to shared_client
    will all return the same instance until
    all references to the client are cleared.
    """
    # normalize kwargs into a hashable, order-independent cache key
    normalized_kwargs = tuple((key, kwargs[key]) for key in sorted(kwargs))
    cache_key = (ClientType, args, normalized_kwargs)
    ref = _client_cache.get(cache_key)
    # resolve the weakref if present; the referent may already be collected,
    # in which case client comes back None and we rebuild below
    client = ref() if ref is not None else None
    if client is None:
        client_class = getattr(kubernetes.client, ClientType)
        client = client_class(*args, **kwargs)
        # store only a weak reference so unused clients can be garbage collected
        _client_cache[cache_key] = weakref.ref(client)
    return client
A weak reference to the instance is cached,
so that concurrent calls to shared_client
will all return the same instance until
all references to the client are cleared. | Below is the the instruction that describes the task:
### Input:
Return a single shared kubernetes client instance
A weak reference to the instance is cached,
so that concurrent calls to shared_client
will all return the same instance until
all references to the client are cleared.
### Response:
def shared_client(ClientType, *args, **kwargs):
"""Return a single shared kubernetes client instance
A weak reference to the instance is cached,
so that concurrent calls to shared_client
will all return the same instance until
all references to the client are cleared.
"""
kwarg_key = tuple((key, kwargs[key]) for key in sorted(kwargs))
cache_key = (ClientType, args, kwarg_key)
client = None
if cache_key in _client_cache:
# resolve cached weakref
# client can still be None after this!
client = _client_cache[cache_key]()
if client is None:
Client = getattr(kubernetes.client, ClientType)
client = Client(*args, **kwargs)
# cache weakref so that clients can be garbage collected
_client_cache[cache_key] = weakref.ref(client)
return client |
def pawn_from_dummy(self, dummy):
    """Make a real thing and its pawn from a dummy pawn.
    Create a new :class:`board.Pawn` instance, along with the
    underlying :class:`LiSE.Place` instance, and give it the name,
    location, and imagery of the provided dummy.
    """
    # NOTE(review): the docstring mentions LiSE.Place, but the code calls
    # character.new_thing — likely this creates a LiSE.Thing; confirm.
    # translate the dummy's position into this widget's local coordinates
    dummy.pos = self.to_local(*dummy.pos)
    # find the first spot widget the dummy overlaps; that spot's place
    # becomes the new thing's location
    for spot in self.board.spotlayout.children:
        if spot.collide_widget(dummy):
            whereat = spot
            break
    else:
        # dummy was dropped outside every spot: create nothing
        return
    whereat.add_widget(
        self.board.make_pawn(
            self.board.character.new_thing(
                dummy.name,
                whereat.place.name,
                _image_paths=list(dummy.paths)
            )
        )
    )
    # bump the dummy's counter — presumably so the next dummy gets a
    # distinct name; confirm how `num` feeds into `name`
    dummy.num += 1
Create a new :class:`board.Pawn` instance, along with the
underlying :class:`LiSE.Place` instance, and give it the name,
location, and imagery of the provided dummy. | Below is the the instruction that describes the task:
### Input:
Make a real thing and its pawn from a dummy pawn.
Create a new :class:`board.Pawn` instance, along with the
underlying :class:`LiSE.Place` instance, and give it the name,
location, and imagery of the provided dummy.
### Response:
def pawn_from_dummy(self, dummy):
"""Make a real thing and its pawn from a dummy pawn.
Create a new :class:`board.Pawn` instance, along with the
underlying :class:`LiSE.Place` instance, and give it the name,
location, and imagery of the provided dummy.
"""
dummy.pos = self.to_local(*dummy.pos)
for spot in self.board.spotlayout.children:
if spot.collide_widget(dummy):
whereat = spot
break
else:
return
whereat.add_widget(
self.board.make_pawn(
self.board.character.new_thing(
dummy.name,
whereat.place.name,
_image_paths=list(dummy.paths)
)
)
)
dummy.num += 1 |
def map_(cache: Mapping[Domain, Range]) -> Operator[Map[Domain, Range]]:
    """
    Returns decorator that calls wrapped function
    if nothing was found in cache for its argument.
    Wrapped function arguments should be hashable.
    """
    def wrapper(function: Map[Domain, Range]) -> Map[Domain, Range]:
        @wraps(function)
        def wrapped(argument: Domain) -> Range:
            # cached result wins; otherwise fall through to the function
            if argument in cache:
                return cache[argument]
            return function(argument)
        return wrapped
    return wrapper
if nothing was found in cache for its argument.
Wrapped function arguments should be hashable. | Below is the the instruction that describes the task:
### Input:
Returns decorator that calls wrapped function
if nothing was found in cache for its argument.
Wrapped function arguments should be hashable.
### Response:
def map_(cache: Mapping[Domain, Range]) -> Operator[Map[Domain, Range]]:
"""
Returns decorator that calls wrapped function
if nothing was found in cache for its argument.
Wrapped function arguments should be hashable.
"""
def wrapper(function: Map[Domain, Range]) -> Map[Domain, Range]:
@wraps(function)
def wrapped(argument: Domain) -> Range:
try:
return cache[argument]
except KeyError:
return function(argument)
return wrapped
return wrapper |
def ids2strids(ids: Iterable[int]) -> str:
    """
    Returns a string representation of a sequence of integers.
    :param ids: Sequence of integers.
    :return: String sequence
    """
    # stringify each id and join with the project-wide token separator
    return C.TOKEN_SEPARATOR.join(str(token_id) for token_id in ids)
:param ids: Sequence of integers.
:return: String sequence | Below is the the instruction that describes the task:
### Input:
Returns a string representation of a sequence of integers.
:param ids: Sequence of integers.
:return: String sequence
### Response:
def ids2strids(ids: Iterable[int]) -> str:
"""
Returns a string representation of a sequence of integers.
:param ids: Sequence of integers.
:return: String sequence
"""
return C.TOKEN_SEPARATOR.join(map(str, ids)) |
def _sim_WA(trace, PAZ, seedresp, water_level, velocity=False):
"""
Remove the instrument response from a trace and simulate a Wood-Anderson.
Returns a de-meaned, de-trended, Wood Anderson simulated trace in
its place.
Works in-place on data and will destroy your original data, copy the
trace before giving it to this function!
:type trace: obspy.core.trace.Trace
:param trace:
A standard obspy trace, generally should be given without
pre-filtering, if given with pre-filtering for use with
amplitude determination for magnitudes you will need to
worry about how you cope with the response of this filter
yourself.
:type PAZ: dict
:param PAZ:
Dictionary containing lists of poles and zeros, the gain and
the sensitivity. If unset will expect seedresp.
:type seedresp: dict
:param seedresp: Seed response information - if unset will expect PAZ.
:type water_level: int
:param water_level: Water level for the simulation.
:type velocity: bool
:param velocity:
Whether to return a velocity trace or not - velocity is non-standard
for Wood-Anderson instruments, but institutes that use seiscomp3 or
Antelope require picks in velocity.
:returns: Trace of Wood-Anderson simulated data
:rtype: :class:`obspy.core.trace.Trace`
"""
# Note Wood anderson sensitivity is 2080 as per Uhrhammer & Collins 1990
PAZ_WA = {'poles': [-6.283 + 4.7124j, -6.283 - 4.7124j],
'zeros': [0 + 0j], 'gain': 1.0, 'sensitivity': 2080}
if velocity:
PAZ_WA['zeros'] = [0 + 0j, 0 + 0j]
# De-trend data
trace.detrend('simple')
# Simulate Wood Anderson
if PAZ:
trace.data = seis_sim(trace.data, trace.stats.sampling_rate,
paz_remove=PAZ, paz_simulate=PAZ_WA,
water_level=water_level,
remove_sensitivity=True)
elif seedresp:
trace.data = seis_sim(trace.data, trace.stats.sampling_rate,
paz_remove=None, paz_simulate=PAZ_WA,
water_level=water_level, seedresp=seedresp)
else:
UserWarning('No response given to remove, will just simulate WA')
trace.data = seis_sim(trace.data, trace.stats.sampling_rate,
paz_remove=None, paz_simulate=PAZ_WA,
water_level=water_level)
return trace | Remove the instrument response from a trace and simulate a Wood-Anderson.
Returns a de-meaned, de-trended, Wood Anderson simulated trace in
its place.
Works in-place on data and will destroy your original data, copy the
trace before giving it to this function!
:type trace: obspy.core.trace.Trace
:param trace:
A standard obspy trace, generally should be given without
pre-filtering, if given with pre-filtering for use with
amplitude determination for magnitudes you will need to
worry about how you cope with the response of this filter
yourself.
:type PAZ: dict
:param PAZ:
Dictionary containing lists of poles and zeros, the gain and
the sensitivity. If unset will expect seedresp.
:type seedresp: dict
:param seedresp: Seed response information - if unset will expect PAZ.
:type water_level: int
:param water_level: Water level for the simulation.
:type velocity: bool
:param velocity:
Whether to return a velocity trace or not - velocity is non-standard
for Wood-Anderson instruments, but institutes that use seiscomp3 or
Antelope require picks in velocity.
:returns: Trace of Wood-Anderson simulated data
:rtype: :class:`obspy.core.trace.Trace` | Below is the the instruction that describes the task:
### Input:
Remove the instrument response from a trace and simulate a Wood-Anderson.
Returns a de-meaned, de-trended, Wood Anderson simulated trace in
its place.
Works in-place on data and will destroy your original data, copy the
trace before giving it to this function!
:type trace: obspy.core.trace.Trace
:param trace:
A standard obspy trace, generally should be given without
pre-filtering, if given with pre-filtering for use with
amplitude determination for magnitudes you will need to
worry about how you cope with the response of this filter
yourself.
:type PAZ: dict
:param PAZ:
Dictionary containing lists of poles and zeros, the gain and
the sensitivity. If unset will expect seedresp.
:type seedresp: dict
:param seedresp: Seed response information - if unset will expect PAZ.
:type water_level: int
:param water_level: Water level for the simulation.
:type velocity: bool
:param velocity:
Whether to return a velocity trace or not - velocity is non-standard
for Wood-Anderson instruments, but institutes that use seiscomp3 or
Antelope require picks in velocity.
:returns: Trace of Wood-Anderson simulated data
:rtype: :class:`obspy.core.trace.Trace`
### Response:
def _sim_WA(trace, PAZ, seedresp, water_level, velocity=False):
"""
Remove the instrument response from a trace and simulate a Wood-Anderson.
Returns a de-meaned, de-trended, Wood Anderson simulated trace in
its place.
Works in-place on data and will destroy your original data, copy the
trace before giving it to this function!
:type trace: obspy.core.trace.Trace
:param trace:
A standard obspy trace, generally should be given without
pre-filtering, if given with pre-filtering for use with
amplitude determination for magnitudes you will need to
worry about how you cope with the response of this filter
yourself.
:type PAZ: dict
:param PAZ:
Dictionary containing lists of poles and zeros, the gain and
the sensitivity. If unset will expect seedresp.
:type seedresp: dict
:param seedresp: Seed response information - if unset will expect PAZ.
:type water_level: int
:param water_level: Water level for the simulation.
:type velocity: bool
:param velocity:
Whether to return a velocity trace or not - velocity is non-standard
for Wood-Anderson instruments, but institutes that use seiscomp3 or
Antelope require picks in velocity.
:returns: Trace of Wood-Anderson simulated data
:rtype: :class:`obspy.core.trace.Trace`
"""
# Note Wood anderson sensitivity is 2080 as per Uhrhammer & Collins 1990
PAZ_WA = {'poles': [-6.283 + 4.7124j, -6.283 - 4.7124j],
'zeros': [0 + 0j], 'gain': 1.0, 'sensitivity': 2080}
if velocity:
PAZ_WA['zeros'] = [0 + 0j, 0 + 0j]
# De-trend data
trace.detrend('simple')
# Simulate Wood Anderson
if PAZ:
trace.data = seis_sim(trace.data, trace.stats.sampling_rate,
paz_remove=PAZ, paz_simulate=PAZ_WA,
water_level=water_level,
remove_sensitivity=True)
elif seedresp:
trace.data = seis_sim(trace.data, trace.stats.sampling_rate,
paz_remove=None, paz_simulate=PAZ_WA,
water_level=water_level, seedresp=seedresp)
else:
UserWarning('No response given to remove, will just simulate WA')
trace.data = seis_sim(trace.data, trace.stats.sampling_rate,
paz_remove=None, paz_simulate=PAZ_WA,
water_level=water_level)
return trace |
def parse_args(args):
''' Parse an argument string
http://stackoverflow.com/questions/18160078/
how-do-you-write-tests-for-the-argparse-portion-of-a-python-module
'''
parser = argparse.ArgumentParser()
parser.add_argument('config_file', nargs='?',
help='Configuration yaml file', default=None)
parser.add_argument(
'--log', '-l',
help='Logging level (e.g. DEBUG, INFO, WARNING, ERROR, CRITICAL)',
default='INFO')
args_parsed = parser.parse_args(args)
if not args_parsed.config_file:
parser.error("You have to specify "
"a configuration file") # pragma: no cover, sysexit
return args_parsed | Parse an argument string
http://stackoverflow.com/questions/18160078/
how-do-you-write-tests-for-the-argparse-portion-of-a-python-module | Below is the the instruction that describes the task:
### Input:
Parse an argument string
http://stackoverflow.com/questions/18160078/
how-do-you-write-tests-for-the-argparse-portion-of-a-python-module
### Response:
def parse_args(args):
''' Parse an argument string
http://stackoverflow.com/questions/18160078/
how-do-you-write-tests-for-the-argparse-portion-of-a-python-module
'''
parser = argparse.ArgumentParser()
parser.add_argument('config_file', nargs='?',
help='Configuration yaml file', default=None)
parser.add_argument(
'--log', '-l',
help='Logging level (e.g. DEBUG, INFO, WARNING, ERROR, CRITICAL)',
default='INFO')
args_parsed = parser.parse_args(args)
if not args_parsed.config_file:
parser.error("You have to specify "
"a configuration file") # pragma: no cover, sysexit
return args_parsed |
def uinit(self, ushape):
"""Return initialiser for working variable U."""
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
return (self.Wdf/self.rho)*np.sign(self.Y) | Return initialiser for working variable U. | Below is the the instruction that describes the task:
### Input:
Return initialiser for working variable U.
### Response:
def uinit(self, ushape):
"""Return initialiser for working variable U."""
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
return (self.Wdf/self.rho)*np.sign(self.Y) |
def upload_to_picture(instance, filename):
"""
Uploads a picture for a user to the ``ACCOUNTS_PICTURE_PATH`` and
saving it under unique hash for the image. This is for privacy
reasons so others can't just browse through the picture directory.
"""
extension = filename.split('.')[-1].lower()
salt, hash = generate_sha1(instance.id)
return '%(path)s/%(hash)s.%(extension)s' % {
'path': getattr(defaults,
'ACCOUNTS_PICTURE_PATH','%s/%s' % (
str(instance._meta.app_label),
str(instance._meta.model_name))),
'hash': hash[:10],
'extension': extension} | Uploads a picture for a user to the ``ACCOUNTS_PICTURE_PATH`` and
saving it under unique hash for the image. This is for privacy
reasons so others can't just browse through the picture directory. | Below is the the instruction that describes the task:
### Input:
Uploads a picture for a user to the ``ACCOUNTS_PICTURE_PATH`` and
saving it under unique hash for the image. This is for privacy
reasons so others can't just browse through the picture directory.
### Response:
def upload_to_picture(instance, filename):
"""
Uploads a picture for a user to the ``ACCOUNTS_PICTURE_PATH`` and
saving it under unique hash for the image. This is for privacy
reasons so others can't just browse through the picture directory.
"""
extension = filename.split('.')[-1].lower()
salt, hash = generate_sha1(instance.id)
return '%(path)s/%(hash)s.%(extension)s' % {
'path': getattr(defaults,
'ACCOUNTS_PICTURE_PATH','%s/%s' % (
str(instance._meta.app_label),
str(instance._meta.model_name))),
'hash': hash[:10],
'extension': extension} |
def get(cls, bucket, key, upload_id, with_completed=False):
"""Fetch a specific multipart object."""
q = cls.query.filter_by(
upload_id=upload_id,
bucket_id=as_bucket_id(bucket),
key=key,
)
if not with_completed:
q = q.filter(cls.completed.is_(False))
return q.one_or_none() | Fetch a specific multipart object. | Below is the the instruction that describes the task:
### Input:
Fetch a specific multipart object.
### Response:
def get(cls, bucket, key, upload_id, with_completed=False):
"""Fetch a specific multipart object."""
q = cls.query.filter_by(
upload_id=upload_id,
bucket_id=as_bucket_id(bucket),
key=key,
)
if not with_completed:
q = q.filter(cls.completed.is_(False))
return q.one_or_none() |
def delete(self, **kwds):
"""
Endpoint: /tag/<id>/delete.json
Deletes this tag.
Returns True if successful.
Raises a TroveboxError if not.
"""
result = self._client.tag.delete(self, **kwds)
self._delete_fields()
return result | Endpoint: /tag/<id>/delete.json
Deletes this tag.
Returns True if successful.
Raises a TroveboxError if not. | Below is the the instruction that describes the task:
### Input:
Endpoint: /tag/<id>/delete.json
Deletes this tag.
Returns True if successful.
Raises a TroveboxError if not.
### Response:
def delete(self, **kwds):
"""
Endpoint: /tag/<id>/delete.json
Deletes this tag.
Returns True if successful.
Raises a TroveboxError if not.
"""
result = self._client.tag.delete(self, **kwds)
self._delete_fields()
return result |
def general_eq(a, b, attributes):
"""Return whether two objects are equal up to the given attributes.
If an attribute is called ``'phi'``, it is compared up to |PRECISION|.
If an attribute is called ``'mechanism'`` or ``'purview'``, it is
compared using set equality. All other attributes are compared with
:func:`numpy_aware_eq`.
"""
try:
for attr in attributes:
_a, _b = getattr(a, attr), getattr(b, attr)
if attr in ['phi', 'alpha']:
if not utils.eq(_a, _b):
return False
elif attr in ['mechanism', 'purview']:
if _a is None or _b is None:
if _a != _b:
return False
elif not set(_a) == set(_b):
return False
else:
if not numpy_aware_eq(_a, _b):
return False
return True
except AttributeError:
return False | Return whether two objects are equal up to the given attributes.
If an attribute is called ``'phi'``, it is compared up to |PRECISION|.
If an attribute is called ``'mechanism'`` or ``'purview'``, it is
compared using set equality. All other attributes are compared with
:func:`numpy_aware_eq`. | Below is the the instruction that describes the task:
### Input:
Return whether two objects are equal up to the given attributes.
If an attribute is called ``'phi'``, it is compared up to |PRECISION|.
If an attribute is called ``'mechanism'`` or ``'purview'``, it is
compared using set equality. All other attributes are compared with
:func:`numpy_aware_eq`.
### Response:
def general_eq(a, b, attributes):
"""Return whether two objects are equal up to the given attributes.
If an attribute is called ``'phi'``, it is compared up to |PRECISION|.
If an attribute is called ``'mechanism'`` or ``'purview'``, it is
compared using set equality. All other attributes are compared with
:func:`numpy_aware_eq`.
"""
try:
for attr in attributes:
_a, _b = getattr(a, attr), getattr(b, attr)
if attr in ['phi', 'alpha']:
if not utils.eq(_a, _b):
return False
elif attr in ['mechanism', 'purview']:
if _a is None or _b is None:
if _a != _b:
return False
elif not set(_a) == set(_b):
return False
else:
if not numpy_aware_eq(_a, _b):
return False
return True
except AttributeError:
return False |
def findall(pattern, string, flags=0):
"""Return a list of all non-overlapping matches in the string.
If one or more groups are present in the pattern, return a
list of groups; this will be a list of tuples if the pattern
has more than one group.
Empty matches are included in the result."""
return _compile(pattern, flags).findall(string)
# if sys.hexversion >= 0x02020000:
# __all__.append("finditer")
def finditer(pattern, string, flags=0):
"""Return an iterator over all non-overlapping matches in the
string. For each match, the iterator returns a match object.
Empty matches are included in the result."""
return _compile(pattern, flags).finditer(string) | Return a list of all non-overlapping matches in the string.
If one or more groups are present in the pattern, return a
list of groups; this will be a list of tuples if the pattern
has more than one group.
Empty matches are included in the result. | Below is the the instruction that describes the task:
### Input:
Return a list of all non-overlapping matches in the string.
If one or more groups are present in the pattern, return a
list of groups; this will be a list of tuples if the pattern
has more than one group.
Empty matches are included in the result.
### Response:
def findall(pattern, string, flags=0):
"""Return a list of all non-overlapping matches in the string.
If one or more groups are present in the pattern, return a
list of groups; this will be a list of tuples if the pattern
has more than one group.
Empty matches are included in the result."""
return _compile(pattern, flags).findall(string)
# if sys.hexversion >= 0x02020000:
# __all__.append("finditer")
def finditer(pattern, string, flags=0):
"""Return an iterator over all non-overlapping matches in the
string. For each match, the iterator returns a match object.
Empty matches are included in the result."""
return _compile(pattern, flags).finditer(string) |
def Lookup(self, x):
"""Looks up x and returns the corresponding value of y."""
return self._Bisect(x, self.xs, self.ys) | Looks up x and returns the corresponding value of y. | Below is the the instruction that describes the task:
### Input:
Looks up x and returns the corresponding value of y.
### Response:
def Lookup(self, x):
"""Looks up x and returns the corresponding value of y."""
return self._Bisect(x, self.xs, self.ys) |
def _column_type(strings, has_invisible=True):
"""The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible) for s in strings ]
return reduce(_more_generic, types, int) | The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True | Below is the the instruction that describes the task:
### Input:
The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
### Response:
def _column_type(strings, has_invisible=True):
"""The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible) for s in strings ]
return reduce(_more_generic, types, int) |
def failure_message(description, options):
"""
Returns a expectation failure message for the given query description.
Args:
description (str): A description of the failed query.
options (Dict[str, Any]): The query options.
Returns:
str: A message describing the failure.
"""
message = "expected to find {}".format(description)
if options["count"] is not None:
message += " {count} {times}".format(
count=options["count"],
times=declension("time", "times", options["count"]))
elif options["between"] is not None:
between = options["between"]
if between:
first, last = between[0], between[-1]
else:
first, last = None, None
message += " between {first} and {last} times".format(
first=first,
last=last)
elif options["maximum"] is not None:
message += " at most {maximum} {times}".format(
maximum=options["maximum"],
times=declension("time", "times", options["maximum"]))
elif options["minimum"] is not None:
message += " at least {minimum} {times}".format(
minimum=options["minimum"],
times=declension("time", "times", options["minimum"]))
return message | Returns a expectation failure message for the given query description.
Args:
description (str): A description of the failed query.
options (Dict[str, Any]): The query options.
Returns:
str: A message describing the failure. | Below is the the instruction that describes the task:
### Input:
Returns a expectation failure message for the given query description.
Args:
description (str): A description of the failed query.
options (Dict[str, Any]): The query options.
Returns:
str: A message describing the failure.
### Response:
def failure_message(description, options):
"""
Returns a expectation failure message for the given query description.
Args:
description (str): A description of the failed query.
options (Dict[str, Any]): The query options.
Returns:
str: A message describing the failure.
"""
message = "expected to find {}".format(description)
if options["count"] is not None:
message += " {count} {times}".format(
count=options["count"],
times=declension("time", "times", options["count"]))
elif options["between"] is not None:
between = options["between"]
if between:
first, last = between[0], between[-1]
else:
first, last = None, None
message += " between {first} and {last} times".format(
first=first,
last=last)
elif options["maximum"] is not None:
message += " at most {maximum} {times}".format(
maximum=options["maximum"],
times=declension("time", "times", options["maximum"]))
elif options["minimum"] is not None:
message += " at least {minimum} {times}".format(
minimum=options["minimum"],
times=declension("time", "times", options["minimum"]))
return message |
def killBatchJobs(self, jobIDs):
"""Kills jobs by ID."""
log.debug('Killing jobs: {}'.format(jobIDs))
for jobID in jobIDs:
if jobID in self.runningJobs:
info = self.runningJobs[jobID]
info.killIntended = True
if info.popen != None:
os.kill(info.popen.pid, 9)
else:
# No popen if running in forkless mode currently
assert self.debugWorker
log.critical("Can't kill job: %s in debug mode" % jobID)
while jobID in self.runningJobs:
pass | Kills jobs by ID. | Below is the the instruction that describes the task:
### Input:
Kills jobs by ID.
### Response:
def killBatchJobs(self, jobIDs):
"""Kills jobs by ID."""
log.debug('Killing jobs: {}'.format(jobIDs))
for jobID in jobIDs:
if jobID in self.runningJobs:
info = self.runningJobs[jobID]
info.killIntended = True
if info.popen != None:
os.kill(info.popen.pid, 9)
else:
# No popen if running in forkless mode currently
assert self.debugWorker
log.critical("Can't kill job: %s in debug mode" % jobID)
while jobID in self.runningJobs:
pass |
def t_UINTN(t):
r"uint(?P<size>256|248|240|232|224|216|208|200|192|184|176|168|160|152|144|136|128|120|112|104|96|88|80|72|64|56|48|40|32|24|16|8)"
size = int(t.lexer.lexmatch.group('size'))
t.value = ('uint', size)
return t | r"uint(?P<size>256|248|240|232|224|216|208|200|192|184|176|168|160|152|144|136|128|120|112|104|96|88|80|72|64|56|48|40|32|24|16|8) | Below is the the instruction that describes the task:
### Input:
r"uint(?P<size>256|248|240|232|224|216|208|200|192|184|176|168|160|152|144|136|128|120|112|104|96|88|80|72|64|56|48|40|32|24|16|8)
### Response:
def t_UINTN(t):
r"uint(?P<size>256|248|240|232|224|216|208|200|192|184|176|168|160|152|144|136|128|120|112|104|96|88|80|72|64|56|48|40|32|24|16|8)"
size = int(t.lexer.lexmatch.group('size'))
t.value = ('uint', size)
return t |
def cublasDspr(handle, uplo, n, alpha, x, incx, AP):
"""
Rank-1 operation on real symmetric-packed matrix.
"""
status = _libcublas.cublasDspr_v2(handle,
_CUBLAS_FILL_MODE[uplo], n,
ctypes.byref(ctypes.c_double(alpha)),
int(x), incx, int(AP))
cublasCheckStatus(status) | Rank-1 operation on real symmetric-packed matrix. | Below is the the instruction that describes the task:
### Input:
Rank-1 operation on real symmetric-packed matrix.
### Response:
def cublasDspr(handle, uplo, n, alpha, x, incx, AP):
"""
Rank-1 operation on real symmetric-packed matrix.
"""
status = _libcublas.cublasDspr_v2(handle,
_CUBLAS_FILL_MODE[uplo], n,
ctypes.byref(ctypes.c_double(alpha)),
int(x), incx, int(AP))
cublasCheckStatus(status) |
def update_version(self, service_id, version_number, **kwargs):
"""Update a particular version for a particular service."""
body = self._formdata(kwargs, FastlyVersion.FIELDS)
content = self._fetch("/service/%s/version/%d/" % (service_id, version_number), method="PUT", body=body)
return FastlyVersion(self, content) | Update a particular version for a particular service. | Below is the the instruction that describes the task:
### Input:
Update a particular version for a particular service.
### Response:
def update_version(self, service_id, version_number, **kwargs):
"""Update a particular version for a particular service."""
body = self._formdata(kwargs, FastlyVersion.FIELDS)
content = self._fetch("/service/%s/version/%d/" % (service_id, version_number), method="PUT", body=body)
return FastlyVersion(self, content) |
def populate_from_staging(self, staging_table, from_column_list, output_table):
"""
generate SQL to insert staging table records into
the core table based on column_list (If no column list
then insert sequentially)
"""
self.sql_text += 'INSERT INTO ' + output_table + ' (\n'
for c in self.col_list:
if c != '':
self.sql_text += ' ' + c + ',\n'
self.sql_text += ' ' + self.date_updated_col + ') (\n'
self.sql_text += ' SELECT \n'
for c in from_column_list:
if c != '':
self.sql_text += ' ' + c + ',\n'
self.sql_text += ' SYSDATE \n FROM ' + staging_table
self.sql_text += '\n);\n' | generate SQL to insert staging table records into
the core table based on column_list (If no column list
then insert sequentially) | Below is the the instruction that describes the task:
### Input:
generate SQL to insert staging table records into
the core table based on column_list (If no column list
then insert sequentially)
### Response:
def populate_from_staging(self, staging_table, from_column_list, output_table):
"""
generate SQL to insert staging table records into
the core table based on column_list (If no column list
then insert sequentially)
"""
self.sql_text += 'INSERT INTO ' + output_table + ' (\n'
for c in self.col_list:
if c != '':
self.sql_text += ' ' + c + ',\n'
self.sql_text += ' ' + self.date_updated_col + ') (\n'
self.sql_text += ' SELECT \n'
for c in from_column_list:
if c != '':
self.sql_text += ' ' + c + ',\n'
self.sql_text += ' SYSDATE \n FROM ' + staging_table
self.sql_text += '\n);\n' |
def begin(self):
""" At the start of the run, we want to record the test
execution information in the database. """
exec_payload = ExecutionQueryPayload()
exec_payload.execution_start_time = int(time.time() * 1000)
self.execution_start_time = exec_payload.execution_start_time
exec_payload.guid = self.execution_guid
exec_payload.username = getpass.getuser()
self.testcase_manager.insert_execution_data(exec_payload) | At the start of the run, we want to record the test
execution information in the database. | Below is the the instruction that describes the task:
### Input:
At the start of the run, we want to record the test
execution information in the database.
### Response:
def begin(self):
""" At the start of the run, we want to record the test
execution information in the database. """
exec_payload = ExecutionQueryPayload()
exec_payload.execution_start_time = int(time.time() * 1000)
self.execution_start_time = exec_payload.execution_start_time
exec_payload.guid = self.execution_guid
exec_payload.username = getpass.getuser()
self.testcase_manager.insert_execution_data(exec_payload) |
def update_from_xlsx_blob(self, xlsx_blob):
"""
Replace the Excel spreadsheet in the related |EmbeddedXlsxPart| with
the Excel binary in *xlsx_blob*, adding a new |EmbeddedXlsxPart| if
there isn't one.
"""
xlsx_part = self.xlsx_part
if xlsx_part is None:
self.xlsx_part = EmbeddedXlsxPart.new(xlsx_blob, self._package)
return
xlsx_part.blob = xlsx_blob | Replace the Excel spreadsheet in the related |EmbeddedXlsxPart| with
the Excel binary in *xlsx_blob*, adding a new |EmbeddedXlsxPart| if
there isn't one. | Below is the the instruction that describes the task:
### Input:
Replace the Excel spreadsheet in the related |EmbeddedXlsxPart| with
the Excel binary in *xlsx_blob*, adding a new |EmbeddedXlsxPart| if
there isn't one.
### Response:
def update_from_xlsx_blob(self, xlsx_blob):
"""
Replace the Excel spreadsheet in the related |EmbeddedXlsxPart| with
the Excel binary in *xlsx_blob*, adding a new |EmbeddedXlsxPart| if
there isn't one.
"""
xlsx_part = self.xlsx_part
if xlsx_part is None:
self.xlsx_part = EmbeddedXlsxPart.new(xlsx_blob, self._package)
return
xlsx_part.blob = xlsx_blob |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.