code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def bfill(self, dim, limit=None):
'''Fill NaN values by propogating values backward
*Requires bottleneck.*
Parameters
----------
dim : str
Specifies the dimension along which to propagate values when
filling.
limit : int, default None
The maximum number of consecutive NaN values to backward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit.
Returns
-------
Dataset
'''
from .missing import bfill, _apply_over_vars_with_dim
new = _apply_over_vars_with_dim(bfill, self, dim=dim, limit=limit)
return new | Fill NaN values by propogating values backward
*Requires bottleneck.*
Parameters
----------
dim : str
Specifies the dimension along which to propagate values when
filling.
limit : int, default None
The maximum number of consecutive NaN values to backward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit.
Returns
-------
Dataset | Below is the the instruction that describes the task:
### Input:
Fill NaN values by propogating values backward
*Requires bottleneck.*
Parameters
----------
dim : str
Specifies the dimension along which to propagate values when
filling.
limit : int, default None
The maximum number of consecutive NaN values to backward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit.
Returns
-------
Dataset
### Response:
def bfill(self, dim, limit=None):
'''Fill NaN values by propogating values backward
*Requires bottleneck.*
Parameters
----------
dim : str
Specifies the dimension along which to propagate values when
filling.
limit : int, default None
The maximum number of consecutive NaN values to backward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit.
Returns
-------
Dataset
'''
from .missing import bfill, _apply_over_vars_with_dim
new = _apply_over_vars_with_dim(bfill, self, dim=dim, limit=limit)
return new |
def apply_grad_cartesian_tensor(grad_X, zmat_dist):
"""Apply the gradient for transformation to cartesian space onto zmat_dist.
Args:
grad_X (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array.
The mathematical details of the index layout is explained in
:meth:`~chemcoord.Cartesian.get_grad_zmat()`.
zmat_dist (:class:`~chemcoord.Zmat`):
Distortions in Zmatrix space.
Returns:
:class:`~chemcoord.Cartesian`: Distortions in cartesian space.
"""
columns = ['bond', 'angle', 'dihedral']
C_dist = zmat_dist.loc[:, columns].values.T
try:
C_dist = C_dist.astype('f8')
C_dist[[1, 2], :] = np.radians(C_dist[[1, 2], :])
except (TypeError, AttributeError):
C_dist[[1, 2], :] = sympy.rad(C_dist[[1, 2], :])
cart_dist = np.tensordot(grad_X, C_dist, axes=([3, 2], [0, 1])).T
from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian
return Cartesian(atoms=zmat_dist['atom'],
coords=cart_dist, index=zmat_dist.index) | Apply the gradient for transformation to cartesian space onto zmat_dist.
Args:
grad_X (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array.
The mathematical details of the index layout is explained in
:meth:`~chemcoord.Cartesian.get_grad_zmat()`.
zmat_dist (:class:`~chemcoord.Zmat`):
Distortions in Zmatrix space.
Returns:
:class:`~chemcoord.Cartesian`: Distortions in cartesian space. | Below is the the instruction that describes the task:
### Input:
Apply the gradient for transformation to cartesian space onto zmat_dist.
Args:
grad_X (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array.
The mathematical details of the index layout is explained in
:meth:`~chemcoord.Cartesian.get_grad_zmat()`.
zmat_dist (:class:`~chemcoord.Zmat`):
Distortions in Zmatrix space.
Returns:
:class:`~chemcoord.Cartesian`: Distortions in cartesian space.
### Response:
def apply_grad_cartesian_tensor(grad_X, zmat_dist):
"""Apply the gradient for transformation to cartesian space onto zmat_dist.
Args:
grad_X (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array.
The mathematical details of the index layout is explained in
:meth:`~chemcoord.Cartesian.get_grad_zmat()`.
zmat_dist (:class:`~chemcoord.Zmat`):
Distortions in Zmatrix space.
Returns:
:class:`~chemcoord.Cartesian`: Distortions in cartesian space.
"""
columns = ['bond', 'angle', 'dihedral']
C_dist = zmat_dist.loc[:, columns].values.T
try:
C_dist = C_dist.astype('f8')
C_dist[[1, 2], :] = np.radians(C_dist[[1, 2], :])
except (TypeError, AttributeError):
C_dist[[1, 2], :] = sympy.rad(C_dist[[1, 2], :])
cart_dist = np.tensordot(grad_X, C_dist, axes=([3, 2], [0, 1])).T
from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian
return Cartesian(atoms=zmat_dist['atom'],
coords=cart_dist, index=zmat_dist.index) |
def insertBefore(self, node, refNode):
"""Insert node as a child of the current node, before refNode in the
list of child nodes. Raises ValueError if refNode is not a child of
the current node"""
offset = self.xml_children.index(refNode)
self.xml_insert(node, offset) | Insert node as a child of the current node, before refNode in the
list of child nodes. Raises ValueError if refNode is not a child of
the current node | Below is the the instruction that describes the task:
### Input:
Insert node as a child of the current node, before refNode in the
list of child nodes. Raises ValueError if refNode is not a child of
the current node
### Response:
def insertBefore(self, node, refNode):
"""Insert node as a child of the current node, before refNode in the
list of child nodes. Raises ValueError if refNode is not a child of
the current node"""
offset = self.xml_children.index(refNode)
self.xml_insert(node, offset) |
def setDateTimeStart(self, dtime):
"""
Sets the starting date time for this gantt chart.
:param dtime | <QDateTime>
"""
self._dateStart = dtime.date()
self._timeStart = dtime.time()
self._allDay = False | Sets the starting date time for this gantt chart.
:param dtime | <QDateTime> | Below is the the instruction that describes the task:
### Input:
Sets the starting date time for this gantt chart.
:param dtime | <QDateTime>
### Response:
def setDateTimeStart(self, dtime):
"""
Sets the starting date time for this gantt chart.
:param dtime | <QDateTime>
"""
self._dateStart = dtime.date()
self._timeStart = dtime.time()
self._allDay = False |
def info(self, action=None):
"""
returns cached request info for given action,
or list of cached actions
"""
if action in self.cache:
return self.cache[action]['info']
return self.cache.keys() or None | returns cached request info for given action,
or list of cached actions | Below is the the instruction that describes the task:
### Input:
returns cached request info for given action,
or list of cached actions
### Response:
def info(self, action=None):
"""
returns cached request info for given action,
or list of cached actions
"""
if action in self.cache:
return self.cache[action]['info']
return self.cache.keys() or None |
def write_matrix_to_tsv(net, filename=None, df=None):
'''
This will export the matrix in net.dat or a dataframe (optional df in
arguments) as a tsv file. Row/column categories will be saved as tuples in
tsv, which can be read back into the network object.
'''
import pandas as pd
if df is None:
df = net.dat_to_df()
return df['mat'].to_csv(filename, sep='\t') | This will export the matrix in net.dat or a dataframe (optional df in
arguments) as a tsv file. Row/column categories will be saved as tuples in
tsv, which can be read back into the network object. | Below is the the instruction that describes the task:
### Input:
This will export the matrix in net.dat or a dataframe (optional df in
arguments) as a tsv file. Row/column categories will be saved as tuples in
tsv, which can be read back into the network object.
### Response:
def write_matrix_to_tsv(net, filename=None, df=None):
'''
This will export the matrix in net.dat or a dataframe (optional df in
arguments) as a tsv file. Row/column categories will be saved as tuples in
tsv, which can be read back into the network object.
'''
import pandas as pd
if df is None:
df = net.dat_to_df()
return df['mat'].to_csv(filename, sep='\t') |
def timeSeries(self, tag = None, outputFile = None, giveYears = True, greatestFirst = True, limitTo = False, pandasMode = True):
"""Creates an pandas dict of the ordered list of all the values of _tag_, with and ranked by the year the occurred in, multiple year occurrences will create multiple entries. A list can also be returned with the the counts or years added or it can be written to a file.
If no _tag_ is given the `Records` in the collection will be used
# Parameters
_tag_ : `optional str`
> Default `None`, if provided the tag will be ordered
_outputFile_ : `optional str`
> A file path to write a csv with 2 columns, one the tag values the other their years
_giveYears_ : `optional bool`
> Default `True`, if `True` the retuned list will be composed of tuples the first values being the tag value and the second their years.
_greatestFirst_ : `optional bool`
> Default `True`, if `True` the returned list will be ordered with the highest years first, otherwise the lowest years will be first.
_pandasMode_ : `optional bool`
> Default `True`, if `True` a `dict` ready for pandas will be returned, otherwise a list
_limitTo_ : `optional list[values]`
> Default `None`, if a list is provided only those values in the list will be counted or returned
# Returns
`dict[str:list[value]] or list[str]`
> A `dict` or `list` will be returned depending on if _pandasMode_ is `True`
"""
seriesDict = {}
for R in self:
#This should be faster than using get, since get is a wrapper for __getitem__
try:
year = R['year']
except KeyError:
continue
if tag is None:
seriesDict[R] = {year : 1}
else:
try:
val = R[tag]
except KeyError:
continue
if not isinstance(val, list):
val = [val]
for entry in val:
if limitTo and entry not in limitTo:
continue
if entry in seriesDict:
try:
seriesDict[entry][year] += 1
except KeyError:
seriesDict[entry][year] = 1
else:
seriesDict[entry] = {year : 1}
seriesList = []
for e, yd in seriesDict.items():
seriesList += [(e, y) for y in yd.keys()]
seriesList = sorted(seriesList, key = lambda x: x[1], reverse = greatestFirst)
if outputFile is not None:
with open(outputFile, 'w') as f:
writer = csv.writer(f, dialect = 'excel')
writer.writerow((str(tag), 'years'))
writer.writerows(((k,'|'.join((str(y) for y in v))) for k,v in seriesDict.items()))
if pandasMode:
panDict = {'entry' : [], 'count' : [], 'year' : []}
for entry, year in seriesList:
panDict['entry'].append(entry)
panDict['year'].append(year)
panDict['count'].append(seriesDict[entry][year])
return panDict
elif giveYears:
return seriesList
else:
return [e for e,c in seriesList] | Creates an pandas dict of the ordered list of all the values of _tag_, with and ranked by the year the occurred in, multiple year occurrences will create multiple entries. A list can also be returned with the the counts or years added or it can be written to a file.
If no _tag_ is given the `Records` in the collection will be used
# Parameters
_tag_ : `optional str`
> Default `None`, if provided the tag will be ordered
_outputFile_ : `optional str`
> A file path to write a csv with 2 columns, one the tag values the other their years
_giveYears_ : `optional bool`
> Default `True`, if `True` the retuned list will be composed of tuples the first values being the tag value and the second their years.
_greatestFirst_ : `optional bool`
> Default `True`, if `True` the returned list will be ordered with the highest years first, otherwise the lowest years will be first.
_pandasMode_ : `optional bool`
> Default `True`, if `True` a `dict` ready for pandas will be returned, otherwise a list
_limitTo_ : `optional list[values]`
> Default `None`, if a list is provided only those values in the list will be counted or returned
# Returns
`dict[str:list[value]] or list[str]`
> A `dict` or `list` will be returned depending on if _pandasMode_ is `True` | Below is the the instruction that describes the task:
### Input:
Creates an pandas dict of the ordered list of all the values of _tag_, with and ranked by the year the occurred in, multiple year occurrences will create multiple entries. A list can also be returned with the the counts or years added or it can be written to a file.
If no _tag_ is given the `Records` in the collection will be used
# Parameters
_tag_ : `optional str`
> Default `None`, if provided the tag will be ordered
_outputFile_ : `optional str`
> A file path to write a csv with 2 columns, one the tag values the other their years
_giveYears_ : `optional bool`
> Default `True`, if `True` the retuned list will be composed of tuples the first values being the tag value and the second their years.
_greatestFirst_ : `optional bool`
> Default `True`, if `True` the returned list will be ordered with the highest years first, otherwise the lowest years will be first.
_pandasMode_ : `optional bool`
> Default `True`, if `True` a `dict` ready for pandas will be returned, otherwise a list
_limitTo_ : `optional list[values]`
> Default `None`, if a list is provided only those values in the list will be counted or returned
# Returns
`dict[str:list[value]] or list[str]`
> A `dict` or `list` will be returned depending on if _pandasMode_ is `True`
### Response:
def timeSeries(self, tag = None, outputFile = None, giveYears = True, greatestFirst = True, limitTo = False, pandasMode = True):
"""Creates an pandas dict of the ordered list of all the values of _tag_, with and ranked by the year the occurred in, multiple year occurrences will create multiple entries. A list can also be returned with the the counts or years added or it can be written to a file.
If no _tag_ is given the `Records` in the collection will be used
# Parameters
_tag_ : `optional str`
> Default `None`, if provided the tag will be ordered
_outputFile_ : `optional str`
> A file path to write a csv with 2 columns, one the tag values the other their years
_giveYears_ : `optional bool`
> Default `True`, if `True` the retuned list will be composed of tuples the first values being the tag value and the second their years.
_greatestFirst_ : `optional bool`
> Default `True`, if `True` the returned list will be ordered with the highest years first, otherwise the lowest years will be first.
_pandasMode_ : `optional bool`
> Default `True`, if `True` a `dict` ready for pandas will be returned, otherwise a list
_limitTo_ : `optional list[values]`
> Default `None`, if a list is provided only those values in the list will be counted or returned
# Returns
`dict[str:list[value]] or list[str]`
> A `dict` or `list` will be returned depending on if _pandasMode_ is `True`
"""
seriesDict = {}
for R in self:
#This should be faster than using get, since get is a wrapper for __getitem__
try:
year = R['year']
except KeyError:
continue
if tag is None:
seriesDict[R] = {year : 1}
else:
try:
val = R[tag]
except KeyError:
continue
if not isinstance(val, list):
val = [val]
for entry in val:
if limitTo and entry not in limitTo:
continue
if entry in seriesDict:
try:
seriesDict[entry][year] += 1
except KeyError:
seriesDict[entry][year] = 1
else:
seriesDict[entry] = {year : 1}
seriesList = []
for e, yd in seriesDict.items():
seriesList += [(e, y) for y in yd.keys()]
seriesList = sorted(seriesList, key = lambda x: x[1], reverse = greatestFirst)
if outputFile is not None:
with open(outputFile, 'w') as f:
writer = csv.writer(f, dialect = 'excel')
writer.writerow((str(tag), 'years'))
writer.writerows(((k,'|'.join((str(y) for y in v))) for k,v in seriesDict.items()))
if pandasMode:
panDict = {'entry' : [], 'count' : [], 'year' : []}
for entry, year in seriesList:
panDict['entry'].append(entry)
panDict['year'].append(year)
panDict['count'].append(seriesDict[entry][year])
return panDict
elif giveYears:
return seriesList
else:
return [e for e,c in seriesList] |
def path_wrapper(func):
"""return the given infer function wrapped to handle the path
Used to stop inference if the node has already been looked
at for a given `InferenceContext` to prevent infinite recursion
"""
@functools.wraps(func)
def wrapped(node, context=None, _func=func, **kwargs):
"""wrapper function handling context"""
if context is None:
context = contextmod.InferenceContext()
if context.push(node):
return None
yielded = set()
generator = _func(node, context, **kwargs)
try:
while True:
res = next(generator)
# unproxy only true instance, not const, tuple, dict...
if res.__class__.__name__ == "Instance":
ares = res._proxied
else:
ares = res
if ares not in yielded:
yield res
yielded.add(ares)
except StopIteration as error:
if error.args:
return error.args[0]
return None
return wrapped | return the given infer function wrapped to handle the path
Used to stop inference if the node has already been looked
at for a given `InferenceContext` to prevent infinite recursion | Below is the the instruction that describes the task:
### Input:
return the given infer function wrapped to handle the path
Used to stop inference if the node has already been looked
at for a given `InferenceContext` to prevent infinite recursion
### Response:
def path_wrapper(func):
"""return the given infer function wrapped to handle the path
Used to stop inference if the node has already been looked
at for a given `InferenceContext` to prevent infinite recursion
"""
@functools.wraps(func)
def wrapped(node, context=None, _func=func, **kwargs):
"""wrapper function handling context"""
if context is None:
context = contextmod.InferenceContext()
if context.push(node):
return None
yielded = set()
generator = _func(node, context, **kwargs)
try:
while True:
res = next(generator)
# unproxy only true instance, not const, tuple, dict...
if res.__class__.__name__ == "Instance":
ares = res._proxied
else:
ares = res
if ares not in yielded:
yield res
yielded.add(ares)
except StopIteration as error:
if error.args:
return error.args[0]
return None
return wrapped |
def set_multi(self, mappings, time=0, compress_level=-1):
"""
Set multiple keys with it's values on server.
:param mappings: A dict with keys/values
:type mappings: dict
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True in case of success and False in case of failure
:rtype: bool
"""
returns = []
if mappings:
for server in self.servers:
returns.append(server.set_multi(mappings, time, compress_level=compress_level))
return all(returns) | Set multiple keys with it's values on server.
:param mappings: A dict with keys/values
:type mappings: dict
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True in case of success and False in case of failure
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Set multiple keys with it's values on server.
:param mappings: A dict with keys/values
:type mappings: dict
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True in case of success and False in case of failure
:rtype: bool
### Response:
def set_multi(self, mappings, time=0, compress_level=-1):
"""
Set multiple keys with it's values on server.
:param mappings: A dict with keys/values
:type mappings: dict
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True in case of success and False in case of failure
:rtype: bool
"""
returns = []
if mappings:
for server in self.servers:
returns.append(server.set_multi(mappings, time, compress_level=compress_level))
return all(returns) |
def text(length=None, at_least=10, at_most=15, lowercase=True,
uppercase=True, digits=True, spaces=True, punctuation=False):
"""
Random text.
If `length` is present the text will be exactly this chars long. Else the
text will be something between `at_least` and `at_most` chars long.
"""
base_string = ''
if lowercase:
base_string += string.ascii_lowercase
if uppercase:
base_string += string.ascii_uppercase
if digits:
base_string += string.digits
if spaces:
base_string += ' '
if punctuation:
base_string += string.punctuation
if len(base_string) == 0:
return ''
if not length:
length = random.randint(at_least, at_most)
result = ''
for i in xrange(0, length):
result += random.choice(base_string)
return result | Random text.
If `length` is present the text will be exactly this chars long. Else the
text will be something between `at_least` and `at_most` chars long. | Below is the the instruction that describes the task:
### Input:
Random text.
If `length` is present the text will be exactly this chars long. Else the
text will be something between `at_least` and `at_most` chars long.
### Response:
def text(length=None, at_least=10, at_most=15, lowercase=True,
uppercase=True, digits=True, spaces=True, punctuation=False):
"""
Random text.
If `length` is present the text will be exactly this chars long. Else the
text will be something between `at_least` and `at_most` chars long.
"""
base_string = ''
if lowercase:
base_string += string.ascii_lowercase
if uppercase:
base_string += string.ascii_uppercase
if digits:
base_string += string.digits
if spaces:
base_string += ' '
if punctuation:
base_string += string.punctuation
if len(base_string) == 0:
return ''
if not length:
length = random.randint(at_least, at_most)
result = ''
for i in xrange(0, length):
result += random.choice(base_string)
return result |
def get_reporter_state():
"""Get pep8 reporter state from stack."""
# Stack
# 1. get_reporter_state (i.e. this function)
# 2. putty_ignore_code
# 3. QueueReport.error or pep8.StandardReport.error for flake8 -j 1
# 4. pep8.Checker.check_ast or check_physical or check_logical
# locals contains `tree` (ast) for check_ast
frame = sys._getframe(3)
reporter = frame.f_locals['self']
line_number = frame.f_locals['line_number']
offset = frame.f_locals['offset']
text = frame.f_locals['text']
check = frame.f_locals['check']
return reporter, line_number, offset, text, check | Get pep8 reporter state from stack. | Below is the the instruction that describes the task:
### Input:
Get pep8 reporter state from stack.
### Response:
def get_reporter_state():
"""Get pep8 reporter state from stack."""
# Stack
# 1. get_reporter_state (i.e. this function)
# 2. putty_ignore_code
# 3. QueueReport.error or pep8.StandardReport.error for flake8 -j 1
# 4. pep8.Checker.check_ast or check_physical or check_logical
# locals contains `tree` (ast) for check_ast
frame = sys._getframe(3)
reporter = frame.f_locals['self']
line_number = frame.f_locals['line_number']
offset = frame.f_locals['offset']
text = frame.f_locals['text']
check = frame.f_locals['check']
return reporter, line_number, offset, text, check |
def visible(self, request):
'''
Checks the both, check_visible and apply_visible, against the owned model and it's instance set
'''
return self.apply_visible(self.get_queryset(), request) if self.check_visible(self.model, request) is not False else self.get_queryset().none() | Checks the both, check_visible and apply_visible, against the owned model and it's instance set | Below is the the instruction that describes the task:
### Input:
Checks the both, check_visible and apply_visible, against the owned model and it's instance set
### Response:
def visible(self, request):
'''
Checks the both, check_visible and apply_visible, against the owned model and it's instance set
'''
return self.apply_visible(self.get_queryset(), request) if self.check_visible(self.model, request) is not False else self.get_queryset().none() |
def reset(self):
"""Reset accumulated components and metric values"""
if self.parallel:
from pyannote.metrics import manager_
self.accumulated_ = manager_.dict()
self.results_ = manager_.list()
self.uris_ = manager_.dict()
else:
self.accumulated_ = dict()
self.results_ = list()
self.uris_ = dict()
for value in self.components_:
self.accumulated_[value] = 0. | Reset accumulated components and metric values | Below is the the instruction that describes the task:
### Input:
Reset accumulated components and metric values
### Response:
def reset(self):
"""Reset accumulated components and metric values"""
if self.parallel:
from pyannote.metrics import manager_
self.accumulated_ = manager_.dict()
self.results_ = manager_.list()
self.uris_ = manager_.dict()
else:
self.accumulated_ = dict()
self.results_ = list()
self.uris_ = dict()
for value in self.components_:
self.accumulated_[value] = 0. |
def add_channels_to_list(self, l, add_ref=False):
"""Create list of channels (one for those to plot, one for ref).
Parameters
----------
l : instance of QListWidget
one of the two lists (chan_to_plot or ref_chan)
"""
l.clear()
l.setSelectionMode(QAbstractItemView.ExtendedSelection)
for chan in self.chan_name:
item = QListWidgetItem(chan)
l.addItem(item)
if add_ref:
item = QListWidgetItem('_REF')
l.addItem(item) | Create list of channels (one for those to plot, one for ref).
Parameters
----------
l : instance of QListWidget
one of the two lists (chan_to_plot or ref_chan) | Below is the the instruction that describes the task:
### Input:
Create list of channels (one for those to plot, one for ref).
Parameters
----------
l : instance of QListWidget
one of the two lists (chan_to_plot or ref_chan)
### Response:
def add_channels_to_list(self, l, add_ref=False):
"""Create list of channels (one for those to plot, one for ref).
Parameters
----------
l : instance of QListWidget
one of the two lists (chan_to_plot or ref_chan)
"""
l.clear()
l.setSelectionMode(QAbstractItemView.ExtendedSelection)
for chan in self.chan_name:
item = QListWidgetItem(chan)
l.addItem(item)
if add_ref:
item = QListWidgetItem('_REF')
l.addItem(item) |
def linear_add(self, other, scale_factor=1.0):
"""
Method to do a linear sum of volumetric objects. Used by + and -
operators as well. Returns a VolumetricData object containing the
linear sum.
Args:
other (VolumetricData): Another VolumetricData object
scale_factor (float): Factor to scale the other data by.
Returns:
VolumetricData corresponding to self + scale_factor * other.
"""
if self.structure != other.structure:
raise ValueError("Adding or subtraction operations can only be "
"performed for volumetric data with the exact "
"same structure.")
# To add checks
data = {}
for k in self.data.keys():
data[k] = self.data[k] + scale_factor * other.data[k]
return VolumetricData(self.structure, data, self._distance_matrix) | Method to do a linear sum of volumetric objects. Used by + and -
operators as well. Returns a VolumetricData object containing the
linear sum.
Args:
other (VolumetricData): Another VolumetricData object
scale_factor (float): Factor to scale the other data by.
Returns:
VolumetricData corresponding to self + scale_factor * other. | Below is the the instruction that describes the task:
### Input:
Method to do a linear sum of volumetric objects. Used by + and -
operators as well. Returns a VolumetricData object containing the
linear sum.
Args:
other (VolumetricData): Another VolumetricData object
scale_factor (float): Factor to scale the other data by.
Returns:
VolumetricData corresponding to self + scale_factor * other.
### Response:
def linear_add(self, other, scale_factor=1.0):
"""
Method to do a linear sum of volumetric objects. Used by + and -
operators as well. Returns a VolumetricData object containing the
linear sum.
Args:
other (VolumetricData): Another VolumetricData object
scale_factor (float): Factor to scale the other data by.
Returns:
VolumetricData corresponding to self + scale_factor * other.
"""
if self.structure != other.structure:
raise ValueError("Adding or subtraction operations can only be "
"performed for volumetric data with the exact "
"same structure.")
# To add checks
data = {}
for k in self.data.keys():
data[k] = self.data[k] + scale_factor * other.data[k]
return VolumetricData(self.structure, data, self._distance_matrix) |
def add(client, name, urls, link, relative_to, target, force):
"""Add data to a dataset."""
try:
with client.with_dataset(name=name) as dataset:
target = target if target else None
with progressbar(urls, label='Adding data to dataset') as bar:
for url in bar:
client.add_data_to_dataset(
dataset,
url,
link=link,
target=target,
relative_to=relative_to,
force=force,
)
except FileNotFoundError:
raise BadParameter('Could not process {0}'.format(url)) | Add data to a dataset. | Below is the the instruction that describes the task:
### Input:
Add data to a dataset.
### Response:
def add(client, name, urls, link, relative_to, target, force):
"""Add data to a dataset."""
try:
with client.with_dataset(name=name) as dataset:
target = target if target else None
with progressbar(urls, label='Adding data to dataset') as bar:
for url in bar:
client.add_data_to_dataset(
dataset,
url,
link=link,
target=target,
relative_to=relative_to,
force=force,
)
except FileNotFoundError:
raise BadParameter('Could not process {0}'.format(url)) |
def _read_message(self):
"""
必须启动新的greenlet,否则会有内存泄漏
"""
job = gevent.spawn(super(GConnection, self)._read_message)
job.join() | 必须启动新的greenlet,否则会有内存泄漏 | Below is the the instruction that describes the task:
### Input:
必须启动新的greenlet,否则会有内存泄漏
### Response:
def _read_message(self):
"""
必须启动新的greenlet,否则会有内存泄漏
"""
job = gevent.spawn(super(GConnection, self)._read_message)
job.join() |
def iter_chunks(cls, sock, return_bytes=False, timeout_object=None):
"""Generates chunks from a connected socket until an Exit chunk is sent or a timeout occurs.
:param sock: the socket to read from.
:param bool return_bytes: If False, decode the payload into a utf-8 string.
:param cls.TimeoutProvider timeout_object: If provided, will be checked every iteration for a
possible timeout.
:raises: :class:`cls.ProcessStreamTimeout`
"""
assert(timeout_object is None or isinstance(timeout_object, cls.TimeoutProvider))
orig_timeout_time = None
timeout_interval = None
while 1:
if orig_timeout_time is not None:
remaining_time = time.time() - (orig_timeout_time + timeout_interval)
if remaining_time > 0:
original_timestamp = datetime.datetime.fromtimestamp(orig_timeout_time).isoformat()
raise cls.ProcessStreamTimeout(
"iterating over bytes from nailgun timed out with timeout interval {} starting at {}, "
"overtime seconds: {}"
.format(timeout_interval, original_timestamp, remaining_time))
elif timeout_object is not None:
opts = timeout_object.maybe_timeout_options()
if opts:
orig_timeout_time = opts.start_time
timeout_interval = opts.interval
continue
remaining_time = None
else:
remaining_time = None
with cls._set_socket_timeout(sock, timeout=remaining_time):
chunk_type, payload = cls.read_chunk(sock, return_bytes)
yield chunk_type, payload
if chunk_type == ChunkType.EXIT:
break | Generates chunks from a connected socket until an Exit chunk is sent or a timeout occurs.
:param sock: the socket to read from.
:param bool return_bytes: If False, decode the payload into a utf-8 string.
:param cls.TimeoutProvider timeout_object: If provided, will be checked every iteration for a
possible timeout.
:raises: :class:`cls.ProcessStreamTimeout` | Below is the the instruction that describes the task:
### Input:
Generates chunks from a connected socket until an Exit chunk is sent or a timeout occurs.
:param sock: the socket to read from.
:param bool return_bytes: If False, decode the payload into a utf-8 string.
:param cls.TimeoutProvider timeout_object: If provided, will be checked every iteration for a
possible timeout.
:raises: :class:`cls.ProcessStreamTimeout`
### Response:
def iter_chunks(cls, sock, return_bytes=False, timeout_object=None):
    """Generates chunks from a connected socket until an Exit chunk is sent or a timeout occurs.

    :param sock: the socket to read from.
    :param bool return_bytes: If False, decode the payload into a utf-8 string.
    :param cls.TimeoutProvider timeout_object: If provided, will be checked every iteration for a
      possible timeout.
    :raises: :class:`cls.ProcessStreamTimeout`
    """
    assert(timeout_object is None or isinstance(timeout_object, cls.TimeoutProvider))
    # No deadline is active until the timeout_object hands one out.
    orig_timeout_time = None
    timeout_interval = None
    while 1:
        if orig_timeout_time is not None:
            # A deadline is active.  NOTE(review): despite the name, a positive
            # value here means the deadline has already *passed* (it is used as
            # "overtime seconds" in the error below), and a negative value is
            # what gets passed to _set_socket_timeout -- confirm that helper
            # treats negative timeouts as intended.
            remaining_time = time.time() - (orig_timeout_time + timeout_interval)
            if remaining_time > 0:
                original_timestamp = datetime.datetime.fromtimestamp(orig_timeout_time).isoformat()
                raise cls.ProcessStreamTimeout(
                    "iterating over bytes from nailgun timed out with timeout interval {} starting at {}, "
                    "overtime seconds: {}"
                    .format(timeout_interval, original_timestamp, remaining_time))
        elif timeout_object is not None:
            # No deadline yet: ask the provider whether one should start now.
            opts = timeout_object.maybe_timeout_options()
            if opts:
                orig_timeout_time = opts.start_time
                timeout_interval = opts.interval
                # Re-enter the loop so the freshly-set deadline is checked
                # before the next blocking read.
                continue
            remaining_time = None
        else:
            remaining_time = None
        # Read one chunk under the (possibly unlimited) socket timeout and
        # hand it to the caller; an EXIT chunk terminates the generator.
        with cls._set_socket_timeout(sock, timeout=remaining_time):
            chunk_type, payload = cls.read_chunk(sock, return_bytes)
            yield chunk_type, payload
            if chunk_type == ChunkType.EXIT:
                break
def reconnect(self):
'''Connected the stream if needed.
Coroutine.
'''
if self._connection.closed():
self._connection.reset()
yield from self._connection.connect() | Connected the stream if needed.
Coroutine. | Below is the the instruction that describes the task:
### Input:
Connected the stream if needed.
Coroutine.
### Response:
def reconnect(self):
    """Re-establish the underlying connection if it has been closed.

    Coroutine (generator-based): when the connection is already open this
    yields nothing; otherwise it resets the connection and delegates to its
    ``connect`` coroutine.
    """
    conn = self._connection
    if not conn.closed():
        return
    conn.reset()
    yield from conn.connect()
def makeSequenceRelative(absVSequence):
'''
Puts every value in a list on a continuum between 0 and 1
Also returns the min and max values (to reverse the process)
'''
if len(absVSequence) < 2 or len(set(absVSequence)) == 1:
raise RelativizeSequenceException(absVSequence)
minV = min(absVSequence)
maxV = max(absVSequence)
relativeSeq = [(value - minV) / (maxV - minV) for value in absVSequence]
return relativeSeq, minV, maxV | Puts every value in a list on a continuum between 0 and 1
Also returns the min and max values (to reverse the process) | Below is the instruction that describes the task:
### Input:
Puts every value in a list on a continuum between 0 and 1
Also returns the min and max values (to reverse the process)
### Response:
def makeSequenceRelative(absVSequence):
    """Map every value of a sequence onto the continuum [0, 1].

    Returns the rescaled sequence together with the original minimum and
    maximum, so the transformation can later be reversed.

    Raises RelativizeSequenceException when the sequence has fewer than two
    items or all items are identical (the value range would be zero).
    """
    if len(absVSequence) < 2 or len(set(absVSequence)) == 1:
        raise RelativizeSequenceException(absVSequence)
    low = min(absVSequence)
    high = max(absVSequence)
    span = high - low
    scaled = [(v - low) / span for v in absVSequence]
    return scaled, low, high
def request_xml(url, auth=None):
'''
Returns an etree.XMLRoot object loaded from the url
:param str url: URL for the resource to load as an XML
'''
try:
r = requests.get(url, auth=auth, verify=False)
return r.text.encode('utf-8')
except BaseException:
logger.error("Skipping %s (error parsing the XML)" % url)
return | Returns an etree.XMLRoot object loaded from the url
:param str url: URL for the resource to load as an XML | Below is the instruction that describes the task:
### Input:
Returns an etree.XMLRoot object loaded from the url
:param str url: URL for the resource to load as an XML
### Response:
def request_xml(url, auth=None):
    '''
    Fetch the resource at ``url`` and return its body as UTF-8 encoded bytes.

    Despite the historical name, no XML parsing happens here: callers are
    expected to feed the returned bytes to an XML parser themselves.
    Returns ``None`` (after logging an error) when the request fails.

    :param str url: URL for the resource to load
    :param auth: optional auth object passed through to ``requests.get``
    '''
    try:
        # NOTE(review): verify=False disables TLS certificate validation;
        # preserved for backward compatibility, but worth revisiting for
        # untrusted hosts.
        r = requests.get(url, auth=auth, verify=False)
        return r.text.encode('utf-8')
    except Exception:
        # Was ``except BaseException``, which also swallowed
        # KeyboardInterrupt and SystemExit; narrowed so those propagate.
        logger.error("Skipping %s (error parsing the XML)" % url)
        return
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'label') and self.label is not None:
_dict['label'] = self.label._to_dict()
return _dict | Return a json dictionary representing this model. | Below is the the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'label') and self.label is not None:
_dict['label'] = self.label._to_dict()
return _dict |
def project_update_event(self, proj_info):
"""Process project update event.
There could be change in project name. DCNM doesn't allow change in
project (a.k.a tenant). This event may be received for the DCI update.
If the change is for DCI, update the DCI portion of the project name
and send the update event to the DCNM.
"""
LOG.debug("Processing project_update_event %(proj)s.",
{'proj': proj_info})
proj_id = proj_info.get('resource_info')
try:
proj = self.keystone_event._service.projects.get(proj_id)
except Exception:
LOG.error("Failed to find project %s.", proj_id)
return
new_proj_name, new_dci_id = self._get_dci_id_and_proj_name(proj.name)
# Check if project name and dci_id are the same, there is no change.
orig_proj_name = self.get_project_name(proj_id)
orig_dci_id = self.get_dci_id(proj_id)
if orig_proj_name == new_proj_name and new_dci_id == orig_dci_id:
# This is an invalid update event.
LOG.warning('Project update event for %(proj)s is received '
'without changing in the project name: '
'%(orig_proj)s. Ignoring the event.',
{'proj': proj_id, 'orig_proj': orig_proj_name})
return
if orig_proj_name != new_proj_name:
# Project has new name and in DCNM the name of project cannot be
# modified. It is an invalid update. Do not process the event.
LOG.debug('Update request cannot be processed as name of project'
' is changed: %(proj)s %(orig_name)s %(orig_dci)s to '
'%(new_name)s %(new_dci)s.', (
{'proj': proj_id, 'orig_name': orig_proj_name,
'orig_dci': orig_dci_id, 'new_name': new_proj_name,
'new_dci': new_dci_id}))
return
# Valid update request.
LOG.debug('Changing project DCI id for %(proj)s from %(orig_dci)s to '
'%(new_dci)s.', {'proj': proj_id,
'orig_dci': orig_dci_id,
'new_dci': new_dci_id})
try:
self.dcnm_client.update_project(new_proj_name,
self.cfg.dcnm.
default_partition_name,
dci_id=new_dci_id)
except dexc.DfaClientRequestFailed:
# Failed to update project in DCNM.
# Save the info and mark it as failure and retry it later.
LOG.error("Failed to update project %s on DCNM.",
new_proj_name)
self.update_project_info_cache(proj_id, name=new_proj_name,
dci_id=new_dci_id,
opcode='update',
result=constants.UPDATE_FAIL)
else:
self.update_project_info_cache(proj_id, name=new_proj_name,
dci_id=new_dci_id,
opcode='update')
LOG.debug('Updated project %(proj)s %(name)s.',
{'proj': proj_id, 'name': proj.name}) | Process project update event.
There could be change in project name. DCNM doesn't allow change in
project (a.k.a tenant). This event may be received for the DCI update.
If the change is for DCI, update the DCI portion of the project name
and send the update event to the DCNM. | Below is the the instruction that describes the task:
### Input:
Process project update event.
There could be change in project name. DCNM doesn't allow change in
project (a.k.a tenant). This event may be received for the DCI update.
If the change is for DCI, update the DCI portion of the project name
and send the update event to the DCNM.
### Response:
def project_update_event(self, proj_info):
    """Process project update event.

    There could be change in project name. DCNM doesn't allow change in
    project (a.k.a tenant). This event may be received for the DCI update.
    If the change is for DCI, update the DCI portion of the project name
    and send the update event to the DCNM.

    :param proj_info: event payload; only its 'resource_info' entry (the
        project id) is consumed here.
    """
    LOG.debug("Processing project_update_event %(proj)s.",
              {'proj': proj_info})
    proj_id = proj_info.get('resource_info')
    try:
        proj = self.keystone_event._service.projects.get(proj_id)
    except Exception:
        # Project lookup is best-effort; nothing to update if it is gone.
        LOG.error("Failed to find project %s.", proj_id)
        return
    # Split the (possibly DCI-suffixed) keystone name into its parts.
    new_proj_name, new_dci_id = self._get_dci_id_and_proj_name(proj.name)
    # Check if project name and dci_id are the same, there is no change.
    orig_proj_name = self.get_project_name(proj_id)
    orig_dci_id = self.get_dci_id(proj_id)
    if orig_proj_name == new_proj_name and new_dci_id == orig_dci_id:
        # This is an invalid update event.
        LOG.warning('Project update event for %(proj)s is received '
                    'without changing in the project name: '
                    '%(orig_proj)s. Ignoring the event.',
                    {'proj': proj_id, 'orig_proj': orig_proj_name})
        return
    if orig_proj_name != new_proj_name:
        # Project has new name and in DCNM the name of project cannot be
        # modified. It is an invalid update. Do not process the event.
        LOG.debug('Update request cannot be processed as name of project'
                  ' is changed: %(proj)s %(orig_name)s %(orig_dci)s to '
                  '%(new_name)s %(new_dci)s.', (
                      {'proj': proj_id, 'orig_name': orig_proj_name,
                       'orig_dci': orig_dci_id, 'new_name': new_proj_name,
                       'new_dci': new_dci_id}))
        return
    # Valid update request: only the DCI id changed, push it to DCNM.
    LOG.debug('Changing project DCI id for %(proj)s from %(orig_dci)s to '
              '%(new_dci)s.', {'proj': proj_id,
                               'orig_dci': orig_dci_id,
                               'new_dci': new_dci_id})
    try:
        self.dcnm_client.update_project(new_proj_name,
                                        self.cfg.dcnm.
                                        default_partition_name,
                                        dci_id=new_dci_id)
    except dexc.DfaClientRequestFailed:
        # Failed to update project in DCNM.
        # Save the info and mark it as failure and retry it later.
        LOG.error("Failed to update project %s on DCNM.",
                  new_proj_name)
        self.update_project_info_cache(proj_id, name=new_proj_name,
                                       dci_id=new_dci_id,
                                       opcode='update',
                                       result=constants.UPDATE_FAIL)
    else:
        self.update_project_info_cache(proj_id, name=new_proj_name,
                                       dci_id=new_dci_id,
                                       opcode='update')
        LOG.debug('Updated project %(proj)s %(name)s.',
                  {'proj': proj_id, 'name': proj.name})
def _pfp__restore_snapshot(self, recurse=True):
"""Restore the snapshotted value without triggering any events
"""
super(Struct, self)._pfp__restore_snapshot(recurse=recurse)
if recurse:
for child in self._pfp__children:
child._pfp__restore_snapshot(recurse=recurse) | Restore the snapshotted value without triggering any events | Below is the the instruction that describes the task:
### Input:
Restore the snapshotted value without triggering any events
### Response:
def _pfp__restore_snapshot(self, recurse=True):
    """Restore the snapshotted value without triggering any events.

    :param recurse: when True (the default), also restore the snapshot of
        every child field of this struct.
    """
    # Restore this struct's own snapshot first, then descend into children.
    super(Struct, self)._pfp__restore_snapshot(recurse=recurse)
    if recurse:
        for child in self._pfp__children:
            child._pfp__restore_snapshot(recurse=recurse)
def pandas(self):
"""get a pandas dataframe of prior and posterior for all predictions
Returns:
pandas.DataFrame : pandas.DataFrame
a dataframe with prior and posterior uncertainty estimates
for all forecasts (predictions)
"""
names,prior,posterior = [],[],[]
for iname,name in enumerate(self.posterior_parameter.row_names):
names.append(name)
posterior.append(np.sqrt(float(
self.posterior_parameter[iname, iname]. x)))
iprior = self.parcov.row_names.index(name)
prior.append(np.sqrt(float(self.parcov[iprior, iprior].x)))
for pred_name, pred_var in self.posterior_prediction.items():
names.append(pred_name)
posterior.append(np.sqrt(pred_var))
prior.append(self.prior_prediction[pred_name])
return pd.DataFrame({"posterior": posterior, "prior": prior},
index=names) | get a pandas dataframe of prior and posterior for all predictions
Returns:
pandas.DataFrame : pandas.DataFrame
a dataframe with prior and posterior uncertainty estimates
for all forecasts (predictions) | Below is the the instruction that describes the task:
### Input:
get a pandas dataframe of prior and posterior for all predictions
Returns:
pandas.DataFrame : pandas.DataFrame
a dataframe with prior and posterior uncertainty estimates
for all forecasts (predictions)
### Response:
def pandas(self):
    """get a pandas dataframe of prior and posterior for all predictions

    Returns:
        pandas.DataFrame : pandas.DataFrame
            a dataframe with prior and posterior uncertainty estimates
            for all forecasts (predictions)
    """
    names, prior, posterior = [], [], []
    # Parameter uncertainties: standard deviations are the square roots of
    # the diagonal entries of the posterior/prior covariance matrices.
    for iname, name in enumerate(self.posterior_parameter.row_names):
        names.append(name)
        posterior.append(np.sqrt(float(
            self.posterior_parameter[iname, iname].x)))
        # Row order may differ between the two matrices, so locate the
        # parameter by name in the prior covariance.
        iprior = self.parcov.row_names.index(name)
        prior.append(np.sqrt(float(self.parcov[iprior, iprior].x)))
    # Forecast uncertainties: posterior values are variances (hence sqrt).
    # NOTE(review): the prior values are appended without a sqrt, which
    # assumes self.prior_prediction already holds standard deviations --
    # confirm against how prior_prediction is populated.
    for pred_name, pred_var in self.posterior_prediction.items():
        names.append(pred_name)
        posterior.append(np.sqrt(pred_var))
        prior.append(self.prior_prediction[pred_name])
    return pd.DataFrame({"posterior": posterior, "prior": prior},
                        index=names)
def set_peripheral(self, power=None, pullup=None, aux=None, chip_select=None):
""" Set the peripheral config at runtime.
If a parameter is None then the config will not be changed.
:param power: Set to True to enable the power supply or False to disable
:param pullup: Set to True to enable the internal pull-up resistors. False to disable
:param aux: Set the AUX pin output state
:param chip_select: Set the CS pin output state
"""
if power is not None:
self.power = power
if pullup is not None:
self.pullup = pullup
if aux is not None:
self.aux = aux
if chip_select is not None:
self.chip_select = chip_select
# Set peripheral status
peripheral_byte = 64
if self.chip_select:
peripheral_byte |= 0x01
if self.aux:
peripheral_byte |= 0x02
if self.pullup:
peripheral_byte |= 0x04
if self.power:
peripheral_byte |= 0x08
self.device.write(bytearray([peripheral_byte]))
response = self.device.read(1)
if response != b"\x01":
raise Exception("Setting peripheral failed. Received: {}".format(repr(response))) | Set the peripheral config at runtime.
If a parameter is None then the config will not be changed.
:param power: Set to True to enable the power supply or False to disable
:param pullup: Set to True to enable the internal pull-up resistors. False to disable
:param aux: Set the AUX pin output state
:param chip_select: Set the CS pin output state | Below is the the instruction that describes the task:
### Input:
Set the peripheral config at runtime.
If a parameter is None then the config will not be changed.
:param power: Set to True to enable the power supply or False to disable
:param pullup: Set to True to enable the internal pull-up resistors. False to disable
:param aux: Set the AUX pin output state
:param chip_select: Set the CS pin output state
### Response:
def set_peripheral(self, power=None, pullup=None, aux=None, chip_select=None):
    """Apply peripheral configuration changes at runtime.

    Arguments left as ``None`` keep their current value; the complete
    (merged) state is then sent to the device as a single command byte.

    :param power: True to enable the power supply, False to disable
    :param pullup: True to enable the internal pull-up resistors, False to disable
    :param aux: desired AUX pin output state
    :param chip_select: desired CS pin output state
    """
    # Only overwrite state for arguments the caller actually supplied.
    if power is not None:
        self.power = power
    if pullup is not None:
        self.pullup = pullup
    if aux is not None:
        self.aux = aux
    if chip_select is not None:
        self.chip_select = chip_select
    # Encode the full peripheral state into one command byte:
    # base 0x40, bit0=CS, bit1=AUX, bit2=pull-ups, bit3=power.
    command = 64
    command |= 0x01 if self.chip_select else 0
    command |= 0x02 if self.aux else 0
    command |= 0x04 if self.pullup else 0
    command |= 0x08 if self.power else 0
    self.device.write(bytearray([command]))
    response = self.device.read(1)
    if response != b"\x01":
        raise Exception("Setting peripheral failed. Received: {}".format(repr(response)))
def build_logging_param(logging_uri, util_class=OutputFileParamUtil):
"""Convenience function simplifies construction of the logging uri."""
if not logging_uri:
return job_model.LoggingParam(None, None)
recursive = not logging_uri.endswith('.log')
oututil = util_class('')
_, uri, provider = oututil.parse_uri(logging_uri, recursive)
if '*' in uri.basename:
raise ValueError('Wildcards not allowed in logging URI: %s' % uri)
return job_model.LoggingParam(uri, provider) | Convenience function simplifies construction of the logging uri. | Below is the the instruction that describes the task:
### Input:
Convenience function simplifies construction of the logging uri.
### Response:
def build_logging_param(logging_uri, util_class=OutputFileParamUtil):
    """Build a job_model.LoggingParam from a raw logging URI string.

    An empty/None URI yields an empty LoggingParam.  A URI ending in
    ``.log`` names a single log file; any other URI is treated as a
    directory (recursive).  Wildcards in the basename are rejected.
    """
    if not logging_uri:
        return job_model.LoggingParam(None, None)
    # A trailing ".log" means a concrete file; otherwise log recursively.
    is_recursive = not logging_uri.endswith('.log')
    _, uri, provider = util_class('').parse_uri(logging_uri, is_recursive)
    if '*' in uri.basename:
        raise ValueError('Wildcards not allowed in logging URI: %s' % uri)
    return job_model.LoggingParam(uri, provider)
def serialize_quantity(o):
"""
Serializes an :obj:`astropy.units.Quantity`, for JSONification.
Args:
o (:obj:`astropy.units.Quantity`): :obj:`Quantity` to be serialized.
Returns:
A dictionary that can be passed to :obj:`json.dumps`.
"""
return dict(
_type='astropy.units.Quantity',
value=o.value,
unit=o.unit.to_string()) | Serializes an :obj:`astropy.units.Quantity`, for JSONification.
Args:
o (:obj:`astropy.units.Quantity`): :obj:`Quantity` to be serialized.
Returns:
A dictionary that can be passed to :obj:`json.dumps`. | Below is the the instruction that describes the task:
### Input:
Serializes an :obj:`astropy.units.Quantity`, for JSONification.
Args:
o (:obj:`astropy.units.Quantity`): :obj:`Quantity` to be serialized.
Returns:
A dictionary that can be passed to :obj:`json.dumps`.
### Response:
def serialize_quantity(o):
    """Convert an :obj:`astropy.units.Quantity` into a JSON-friendly dict.

    Args:
        o (:obj:`astropy.units.Quantity`): quantity to serialize.

    Returns:
        A plain dictionary (type tag, numeric value, unit string) that can
        be passed to :obj:`json.dumps`.
    """
    return {
        '_type': 'astropy.units.Quantity',
        'value': o.value,
        'unit': o.unit.to_string(),
    }
def address(address=None, begin=None, end=None):
'''
HTTP REQUEST
GET https://api.nasa.gov/planetary/earth/temperature/address
QUERY PARAMETERS
Parameter Type Default Description
text string n/a Address string
begin int 1880 beginning year for date range, inclusive
end int 2014 end year for date range, inclusive
api_key string DEMO_KEY api.nasa.gov key for expanded usage
EXAMPLE QUERY
https://api.nasa.gov/planetary/earth/temperature/address?text=1800 F Street, NW, Washington DC&begin=1990
'''
base_url = "https://api.nasa.gov/planetary/earth/temperature/address?"
if not address:
raise ValueError(
"address is missing, which is mandatory. example : 1800 F Street, NW, Washington DC")
elif not isinstance(address, str):
try:
address = str(address)
except:
raise ValueError("address has to be type of string")
else:
base_url += "text=" + address + "&"
if not begin:
raise ValueError(
"Begin year is missing, which is mandatory. Format : YYYY")
else:
try:
validate_year(begin)
base_url += "begin=" + begin + "&"
except:
raise ValueError("Incorrect begin year format, should be YYYY")
if end:
try:
validate_year(end)
base_url += "end=" + end + "&"
except:
raise ValueError("Incorrect end year format, should be YYYY")
req_url = base_url + "api_key=" + nasa_api_key()
return dispatch_http_get(req_url) | HTTP REQUEST
GET https://api.nasa.gov/planetary/earth/temperature/address
QUERY PARAMETERS
Parameter Type Default Description
text string n/a Address string
begin int 1880 beginning year for date range, inclusive
end int 2014 end year for date range, inclusive
api_key string DEMO_KEY api.nasa.gov key for expanded usage
EXAMPLE QUERY
https://api.nasa.gov/planetary/earth/temperature/address?text=1800 F Street, NW, Washington DC&begin=1990 | Below is the the instruction that describes the task:
### Input:
HTTP REQUEST
GET https://api.nasa.gov/planetary/earth/temperature/address
QUERY PARAMETERS
Parameter Type Default Description
text string n/a Address string
begin int 1880 beginning year for date range, inclusive
end int 2014 end year for date range, inclusive
api_key string DEMO_KEY api.nasa.gov key for expanded usage
EXAMPLE QUERY
https://api.nasa.gov/planetary/earth/temperature/address?text=1800 F Street, NW, Washington DC&begin=1990
### Response:
def address(address=None, begin=None, end=None):
    '''
    HTTP REQUEST
    GET https://api.nasa.gov/planetary/earth/temperature/address
    QUERY PARAMETERS
    Parameter Type Default Description
    text string n/a Address string
    begin int 1880 beginning year for date range, inclusive
    end int 2014 end year for date range, inclusive
    api_key string DEMO_KEY api.nasa.gov key for expanded usage
    EXAMPLE QUERY
    https://api.nasa.gov/planetary/earth/temperature/address?text=1800 F Street, NW, Washington DC&begin=1990
    '''
    base_url = "https://api.nasa.gov/planetary/earth/temperature/address?"
    if not address:
        raise ValueError(
            "address is missing, which is mandatory. example : 1800 F Street, NW, Washington DC")
    if not isinstance(address, str):
        try:
            address = str(address)
        except Exception:
            raise ValueError("address has to be type of string")
    # BUG FIX: the text parameter was previously appended only in the else
    # branch of the isinstance check, so a coerced (non-str) address was
    # silently dropped from the query string.
    base_url += "text=" + address + "&"
    if not begin:
        raise ValueError(
            "Begin year is missing, which is mandatory. Format : YYYY")
    try:
        validate_year(begin)
    except Exception:
        raise ValueError("Incorrect begin year format, should be YYYY")
    # str() so an integer year doesn't raise TypeError on concatenation
    # (previously the append sat inside the try, so that TypeError was
    # misreported as a bad year format).
    base_url += "begin=" + str(begin) + "&"
    if end:
        try:
            validate_year(end)
        except Exception:
            raise ValueError("Incorrect end year format, should be YYYY")
        base_url += "end=" + str(end) + "&"
    req_url = base_url + "api_key=" + nasa_api_key()
    return dispatch_http_get(req_url)
def _audience_condition_deserializer(obj_dict):
""" Deserializer defining how dict objects need to be decoded for audience conditions.
Args:
obj_dict: Dict representing one audience condition.
Returns:
List consisting of condition key with corresponding value, type and match.
"""
return [
obj_dict.get('name'),
obj_dict.get('value'),
obj_dict.get('type'),
obj_dict.get('match')
] | Deserializer defining how dict objects need to be decoded for audience conditions.
Args:
obj_dict: Dict representing one audience condition.
Returns:
List consisting of condition key with corresponding value, type and match. | Below is the the instruction that describes the task:
### Input:
Deserializer defining how dict objects need to be decoded for audience conditions.
Args:
obj_dict: Dict representing one audience condition.
Returns:
List consisting of condition key with corresponding value, type and match.
### Response:
def _audience_condition_deserializer(obj_dict):
""" Deserializer defining how dict objects need to be decoded for audience conditions.
Args:
obj_dict: Dict representing one audience condition.
Returns:
List consisting of condition key with corresponding value, type and match.
"""
return [
obj_dict.get('name'),
obj_dict.get('value'),
obj_dict.get('type'),
obj_dict.get('match')
] |
def list_nodes_full(conn=None, call=None):
'''
Return a list of VMs with all the information about them
CLI Example
.. code-block:: bash
salt-cloud -f list_nodes_full myopenstack
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
if conn is None:
conn = get_conn()
ret = {}
for node in conn.list_servers(detailed=True):
ret[node.name] = dict(node)
ret[node.name]['id'] = node.id
ret[node.name]['name'] = node.name
ret[node.name]['size'] = node.flavor.name
ret[node.name]['state'] = node.status
ret[node.name]['private_ips'] = _get_ips(node, 'private')
ret[node.name]['public_ips'] = _get_ips(node, 'public')
ret[node.name]['floating_ips'] = _get_ips(node, 'floating')
ret[node.name]['fixed_ips'] = _get_ips(node, 'fixed')
ret[node.name]['image'] = node.image.name
return ret | Return a list of VMs with all the information about them
CLI Example
.. code-block:: bash
salt-cloud -f list_nodes_full myopenstack | Below is the the instruction that describes the task:
### Input:
Return a list of VMs with all the information about them
CLI Example
.. code-block:: bash
salt-cloud -f list_nodes_full myopenstack
### Response:
def list_nodes_full(conn=None, call=None):
    '''
    Return a mapping of VM name to the full set of details about that VM

    CLI Example

    .. code-block:: bash

        salt-cloud -f list_nodes_full myopenstack
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )
    if conn is None:
        conn = get_conn()
    nodes = {}
    for server in conn.list_servers(detailed=True):
        # Start from the raw server record, then layer on the normalized
        # fields salt-cloud consumers expect.
        info = dict(server)
        info.update({
            'id': server.id,
            'name': server.name,
            'size': server.flavor.name,
            'state': server.status,
            'private_ips': _get_ips(server, 'private'),
            'public_ips': _get_ips(server, 'public'),
            'floating_ips': _get_ips(server, 'floating'),
            'fixed_ips': _get_ips(server, 'fixed'),
            'image': server.image.name,
        })
        nodes[server.name] = info
    return nodes
def get_dev_interface(devid, auth, url):
"""
Function takes devid as input to RESTFUL call to HP IMC platform and returns list of device interfaces
:param devid: requires devid as the only input
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: list object which contains a dictionary per interface
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.device import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> dev_interfaces = get_dev_interface('15', auth.creds, auth.url)
>>> assert type(dev_interfaces) is list
>>> assert 'ifAlias' in dev_interfaces[0]
"""
get_dev_interface_url = "/imcrs/plat/res/device/" + str(devid) + \
"/interface?start=0&size=1000&desc=false&total=false"
f_url = url + get_dev_interface_url
# creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=HEADERS)
# r.status_code
try:
if r.status_code == 200:
int_list = (json.loads(r.text))['interface']
return int_list
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_dev_interface: An Error has occured" | Function takes devid as input to RESTFUL call to HP IMC platform and returns list of device interfaces
:param devid: requires devid as the only input
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: list object which contains a dictionary per interface
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.device import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> dev_interfaces = get_dev_interface('15', auth.creds, auth.url)
>>> assert type(dev_interfaces) is list
>>> assert 'ifAlias' in dev_interfaces[0] | Below is the the instruction that describes the task:
### Input:
Function takes devid as input to RESTFUL call to HP IMC platform and returns list of device interfaces
:param devid: requires devid as the only input
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: list object which contains a dictionary per interface
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.device import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> dev_interfaces = get_dev_interface('15', auth.creds, auth.url)
>>> assert type(dev_interfaces) is list
>>> assert 'ifAlias' in dev_interfaces[0]
### Response:
def get_dev_interface(devid, auth, url):
    """
    Function takes devid as input to RESTFUL call to HP IMC platform and returns list of device interfaces
    :param devid: requires devid as the only input
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: list object which contains a dictionary per interface
    :rtype: list
    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.device import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> dev_interfaces = get_dev_interface('15', auth.creds, auth.url)
    >>> assert type(dev_interfaces) is list
    >>> assert 'ifAlias' in dev_interfaces[0]
    """
    get_dev_interface_url = "/imcrs/plat/res/device/" + str(devid) + \
                            "/interface?start=0&size=1000&desc=false&total=false"
    f_url = url + get_dev_interface_url
    try:
        # BUG FIX: the GET itself previously sat outside the try block, so
        # the RequestException handler below could never actually fire.
        r = requests.get(f_url, auth=auth, headers=HEADERS)
        if r.status_code == 200:
            int_list = (json.loads(r.text))['interface']
            return int_list
        # NOTE(review): non-200 responses previously fell off the end of
        # the function and returned None silently; behavior preserved, but
        # made explicit here.
        return None
    except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + " get_dev_interface: An Error has occured"
def extra_decorators(self):
"""The extra decorators that this function can have.
Additional decorators are considered when they are used as
assignments, as in ``method = staticmethod(method)``.
The property will return all the callables that are used for
decoration.
:type: list(NodeNG)
"""
frame = self.parent.frame()
if not isinstance(frame, ClassDef):
return []
decorators = []
for assign in frame._get_assign_nodes():
if isinstance(assign.value, node_classes.Call) and isinstance(
assign.value.func, node_classes.Name
):
for assign_node in assign.targets:
if not isinstance(assign_node, node_classes.AssignName):
# Support only `name = callable(name)`
continue
if assign_node.name != self.name:
# Interested only in the assignment nodes that
# decorates the current method.
continue
try:
meth = frame[self.name]
except KeyError:
continue
else:
# Must be a function and in the same frame as the
# original method.
if (
isinstance(meth, FunctionDef)
and assign_node.frame() == frame
):
decorators.append(assign.value)
return decorators | The extra decorators that this function can have.
Additional decorators are considered when they are used as
assignments, as in ``method = staticmethod(method)``.
The property will return all the callables that are used for
decoration.
:type: list(NodeNG) | Below is the the instruction that describes the task:
### Input:
The extra decorators that this function can have.
Additional decorators are considered when they are used as
assignments, as in ``method = staticmethod(method)``.
The property will return all the callables that are used for
decoration.
:type: list(NodeNG)
### Response:
def extra_decorators(self):
    """The extra decorators that this function can have.

    Additional decorators are considered when they are used as
    assignments, as in ``method = staticmethod(method)``.

    The property will return all the callables that are used for
    decoration.

    :type: list(NodeNG)
    """
    frame = self.parent.frame()
    if not isinstance(frame, ClassDef):
        # Only methods (functions defined directly in a class body) can be
        # re-decorated via an assignment in that class.
        return []
    decorators = []
    for assign in frame._get_assign_nodes():
        # Only assignments of the form ``name = callable(...)`` qualify.
        if isinstance(assign.value, node_classes.Call) and isinstance(
            assign.value.func, node_classes.Name
        ):
            for assign_node in assign.targets:
                if not isinstance(assign_node, node_classes.AssignName):
                    # Support only `name = callable(name)`
                    continue
                if assign_node.name != self.name:
                    # Interested only in the assignment nodes that
                    # decorates the current method.
                    continue
                try:
                    meth = frame[self.name]
                except KeyError:
                    continue
                else:
                    # Must be a function and in the same frame as the
                    # original method.
                    if (
                        isinstance(meth, FunctionDef)
                        and assign_node.frame() == frame
                    ):
                        decorators.append(assign.value)
    return decorators
def _listen(self, protocols, From, description):
"""
Implementation of L{Listen}.
"""
# The peer is coming from a client-side representation of the user
# described by 'From', and talking *to* a server-side representation of
# the user described by 'From'.
self.verifyCertificateAllowed(From, From)
theirCert = Certificate.peerFromTransport(self.transport)
for protocolName in protocols:
if protocolName.startswith('.'):
raise VerifyError(
"Internal protocols are for server-server use _only_: %r" %
protocolName)
key = (From, protocolName)
value = (self, theirCert, description)
log.msg("%r listening for %r" % key)
self.listeningClient.append((key, value))
self.service.listeningClients.setdefault(key, []).append(value)
return {} | Implementation of L{Listen}. | Below is the the instruction that describes the task:
### Input:
Implementation of L{Listen}.
### Response:
def _listen(self, protocols, From, description):
"""
Implementation of L{Listen}.
"""
# The peer is coming from a client-side representation of the user
# described by 'From', and talking *to* a server-side representation of
# the user described by 'From'.
self.verifyCertificateAllowed(From, From)
theirCert = Certificate.peerFromTransport(self.transport)
for protocolName in protocols:
if protocolName.startswith('.'):
raise VerifyError(
"Internal protocols are for server-server use _only_: %r" %
protocolName)
key = (From, protocolName)
value = (self, theirCert, description)
log.msg("%r listening for %r" % key)
self.listeningClient.append((key, value))
self.service.listeningClients.setdefault(key, []).append(value)
return {} |
def get_buy(self, account_id, buy_id, **params):
"""https://developers.coinbase.com/api/v2#show-a-buy"""
response = self._get('v2', 'accounts', account_id, 'buys', buy_id, params=params)
return self._make_api_object(response, Buy) | https://developers.coinbase.com/api/v2#show-a-buy | Below is the the instruction that describes the task:
### Input:
https://developers.coinbase.com/api/v2#show-a-buy
### Response:
def get_buy(self, account_id, buy_id, **params):
"""https://developers.coinbase.com/api/v2#show-a-buy"""
response = self._get('v2', 'accounts', account_id, 'buys', buy_id, params=params)
return self._make_api_object(response, Buy) |
def cluster(self, n, embed_dim=None, algo=mds.CLASSICAL, method=methods.KMEANS):
"""
Cluster the embedded coordinates using multidimensional scaling
Parameters
----------
n: int
The number of clusters to return
embed_dim int
The dimensionality of the underlying coordinates
Defaults to same value as n
method: enum value (methods.KMEANS | methods.GMM)
The clustering method to use
Returns
-------
Partition: Partition object describing the data partition
"""
if n == 1:
return Partition([1] * len(self.get_dm(False)))
if embed_dim is None:
embed_dim = n
if algo == mds.CLASSICAL:
self._coords = self.dm.embedding(embed_dim, 'cmds')
elif algo == mds.METRIC:
self._coords = self.dm.embedding(embed_dim, 'mmds')
else:
raise OptionError(algo, list(mds.reverse.values()))
if method == methods.KMEANS:
p = self.kmeans(n, self._coords.values)
elif method == methods.GMM:
p = self.gmm(n, self._coords.values)
elif method == methods.WARD:
linkmat = fastcluster.linkage(self._coords.values, 'ward')
p = _hclust(linkmat, n)
else:
raise OptionError(method, list(methods.reverse.values()))
#if self._verbosity > 0:
# print('Using clustering method: {}'.format(methods.reverse[method]))
return p | Cluster the embedded coordinates using multidimensional scaling
Parameters
----------
n: int
The number of clusters to return
embed_dim int
The dimensionality of the underlying coordinates
Defaults to same value as n
method: enum value (methods.KMEANS | methods.GMM)
The clustering method to use
Returns
-------
Partition: Partition object describing the data partition | Below is the the instruction that describes the task:
### Input:
Cluster the embedded coordinates using multidimensional scaling
Parameters
----------
n: int
The number of clusters to return
embed_dim int
The dimensionality of the underlying coordinates
Defaults to same value as n
method: enum value (methods.KMEANS | methods.GMM)
The clustering method to use
Returns
-------
Partition: Partition object describing the data partition
### Response:
def cluster(self, n, embed_dim=None, algo=mds.CLASSICAL, method=methods.KMEANS):
"""
Cluster the embedded coordinates using multidimensional scaling
Parameters
----------
n: int
The number of clusters to return
embed_dim int
The dimensionality of the underlying coordinates
Defaults to same value as n
method: enum value (methods.KMEANS | methods.GMM)
The clustering method to use
Returns
-------
Partition: Partition object describing the data partition
"""
if n == 1:
return Partition([1] * len(self.get_dm(False)))
if embed_dim is None:
embed_dim = n
if algo == mds.CLASSICAL:
self._coords = self.dm.embedding(embed_dim, 'cmds')
elif algo == mds.METRIC:
self._coords = self.dm.embedding(embed_dim, 'mmds')
else:
raise OptionError(algo, list(mds.reverse.values()))
if method == methods.KMEANS:
p = self.kmeans(n, self._coords.values)
elif method == methods.GMM:
p = self.gmm(n, self._coords.values)
elif method == methods.WARD:
linkmat = fastcluster.linkage(self._coords.values, 'ward')
p = _hclust(linkmat, n)
else:
raise OptionError(method, list(methods.reverse.values()))
#if self._verbosity > 0:
# print('Using clustering method: {}'.format(methods.reverse[method]))
return p |
def main():
usage = "usage: %(prog)s [options] "
description = "Run gtselect and gtmktime on one or more FT1 files. "
"Note that gtmktime will be skipped if no FT2 file is provided."
parser = argparse.ArgumentParser(usage=usage, description=description)
add_lsf_args(parser)
parser.add_argument('--zmax', default=100., type=float, help='')
parser.add_argument('--dcostheta', default=0.025, type=float, help='')
parser.add_argument('--binsz', default=1.0, type=float, help='')
parser.add_argument('--outdir', default=None, type=str,
help='Path to output directory used when merge=False.')
parser.add_argument('--outfile', default=None, type=str,
help='Path to output file used when merge=True.')
parser.add_argument('--scfile', default=None, type=str, help='',
required=True)
parser.add_argument('--dry_run', default=False, action='store_true')
parser.add_argument('--overwrite', default=False, action='store_true')
parser.add_argument('--merge', default=False, action='store_true',
help='Merge input FT1 files into a single file.')
parser.add_argument('files', nargs='+', default=None,
help='List of directories in which the analysis will '
'be run.')
args = parser.parse_args()
args.outdir = os.path.abspath(args.outdir)
args.scfile = os.path.abspath(args.scfile)
mkdir(args.outdir)
input_files = [[os.path.abspath(x)] for x in args.files]
output_files = [os.path.join(args.outdir, os.path.basename(x))
for x in args.files]
if args.batch:
opts = copy.deepcopy(args.__dict__)
opts.pop('files')
opts.pop('batch')
submit_jobs('python ' + os.path.abspath(__file__.rstrip('cd')),
input_files, output_files, {k: v for k, v in opts.items()})
sys.exit(0)
logger = Logger.get(os.path.basename(__file__), None, logging.INFO)
logger.info('Starting.')
cwd = os.getcwd()
user = os.environ['USER']
tmpdir = tempfile.mkdtemp(prefix=user + '.', dir='/scratch')
os.chdir(tmpdir)
logger.info('tmpdir %s', tmpdir)
logger.info('outdir %s', args.outdir)
logger.info('outfile %s', args.outfile)
for infiles, outfile in zip(input_files, output_files):
logger.info('infiles %s', pprint.pformat(infiles))
logger.info('outfile %s', outfile)
kw = dict(evfile='list.txt',
scfile=args.scfile,
outfile='ltcube.fits',
binsz=args.binsz,
dcostheta=args.dcostheta,
zmax=args.zmax)
create_filelist(infiles, 'list.txt')
staged_outfile = kw['outfile']
run_gtapp('gtltcube', logger, kw)
logger.info('cp %s %s', staged_outfile, outfile)
shutil.copy(staged_outfile, outfile)
os.chdir(cwd)
logger.info('Deleting %s', tmpdir)
shutil.rmtree(tmpdir)
logger.info('Done.') | Note that gtmktime will be skipped if no FT2 file is provided. | Below is the the instruction that describes the task:
### Input:
Note that gtmktime will be skipped if no FT2 file is provided.
### Response:
def main():
usage = "usage: %(prog)s [options] "
description = "Run gtselect and gtmktime on one or more FT1 files. "
"Note that gtmktime will be skipped if no FT2 file is provided."
parser = argparse.ArgumentParser(usage=usage, description=description)
add_lsf_args(parser)
parser.add_argument('--zmax', default=100., type=float, help='')
parser.add_argument('--dcostheta', default=0.025, type=float, help='')
parser.add_argument('--binsz', default=1.0, type=float, help='')
parser.add_argument('--outdir', default=None, type=str,
help='Path to output directory used when merge=False.')
parser.add_argument('--outfile', default=None, type=str,
help='Path to output file used when merge=True.')
parser.add_argument('--scfile', default=None, type=str, help='',
required=True)
parser.add_argument('--dry_run', default=False, action='store_true')
parser.add_argument('--overwrite', default=False, action='store_true')
parser.add_argument('--merge', default=False, action='store_true',
help='Merge input FT1 files into a single file.')
parser.add_argument('files', nargs='+', default=None,
help='List of directories in which the analysis will '
'be run.')
args = parser.parse_args()
args.outdir = os.path.abspath(args.outdir)
args.scfile = os.path.abspath(args.scfile)
mkdir(args.outdir)
input_files = [[os.path.abspath(x)] for x in args.files]
output_files = [os.path.join(args.outdir, os.path.basename(x))
for x in args.files]
if args.batch:
opts = copy.deepcopy(args.__dict__)
opts.pop('files')
opts.pop('batch')
submit_jobs('python ' + os.path.abspath(__file__.rstrip('cd')),
input_files, output_files, {k: v for k, v in opts.items()})
sys.exit(0)
logger = Logger.get(os.path.basename(__file__), None, logging.INFO)
logger.info('Starting.')
cwd = os.getcwd()
user = os.environ['USER']
tmpdir = tempfile.mkdtemp(prefix=user + '.', dir='/scratch')
os.chdir(tmpdir)
logger.info('tmpdir %s', tmpdir)
logger.info('outdir %s', args.outdir)
logger.info('outfile %s', args.outfile)
for infiles, outfile in zip(input_files, output_files):
logger.info('infiles %s', pprint.pformat(infiles))
logger.info('outfile %s', outfile)
kw = dict(evfile='list.txt',
scfile=args.scfile,
outfile='ltcube.fits',
binsz=args.binsz,
dcostheta=args.dcostheta,
zmax=args.zmax)
create_filelist(infiles, 'list.txt')
staged_outfile = kw['outfile']
run_gtapp('gtltcube', logger, kw)
logger.info('cp %s %s', staged_outfile, outfile)
shutil.copy(staged_outfile, outfile)
os.chdir(cwd)
logger.info('Deleting %s', tmpdir)
shutil.rmtree(tmpdir)
logger.info('Done.') |
def parse_configuration(config):
'''
Parse and fix configuration:
- processed file should end up being same as input
- pipelines should contain CLI commands to run
- add missing sections
:param config: raw configuration object
:type config: dict
:return: configuration ready for `diet()`
:rtype: dict
'''
new_config = copy.deepcopy(config)
required_parts = ('commands', 'parameters', 'pipelines')
for part in required_parts:
new_config.setdefault(part, {})
# Parse only if it hasn't been parsed yet so it is safe to run it more than
# once.
if not new_config.get('parsed'):
try:
# Always end up with input file. If app outputs to a different one,
# then replace old one with it
for prog in new_config['parameters']:
if '{output_file}' in new_config['parameters'][prog]:
new_config['parameters'][prog] += " && mv '{output_file}' '{file}'"
# Build pipelines
for label, raw_pipeline in new_config['pipelines'].items():
commands = []
for app in raw_pipeline:
full_command = " ".join([new_config['commands'][app],
new_config['parameters'][app]])
commands.append(full_command)
new_config['pipelines'][label] = " && ".join(commands)
except KeyError as e:
error_msg = "Missing key(s) in configuration: {0}.".format(
",".join(e.args))
raise ConfigurationErrorDietException(error_msg)
new_config['parsed'] = True
return new_config | Parse and fix configuration:
- processed file should end up being same as input
- pipelines should contain CLI commands to run
- add missing sections
:param config: raw configuration object
:type config: dict
:return: configuration ready for `diet()`
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Parse and fix configuration:
- processed file should end up being same as input
- pipelines should contain CLI commands to run
- add missing sections
:param config: raw configuration object
:type config: dict
:return: configuration ready for `diet()`
:rtype: dict
### Response:
def parse_configuration(config):
'''
Parse and fix configuration:
- processed file should end up being same as input
- pipelines should contain CLI commands to run
- add missing sections
:param config: raw configuration object
:type config: dict
:return: configuration ready for `diet()`
:rtype: dict
'''
new_config = copy.deepcopy(config)
required_parts = ('commands', 'parameters', 'pipelines')
for part in required_parts:
new_config.setdefault(part, {})
# Parse only if it hasn't been parsed yet so it is safe to run it more than
# once.
if not new_config.get('parsed'):
try:
# Always end up with input file. If app outputs to a different one,
# then replace old one with it
for prog in new_config['parameters']:
if '{output_file}' in new_config['parameters'][prog]:
new_config['parameters'][prog] += " && mv '{output_file}' '{file}'"
# Build pipelines
for label, raw_pipeline in new_config['pipelines'].items():
commands = []
for app in raw_pipeline:
full_command = " ".join([new_config['commands'][app],
new_config['parameters'][app]])
commands.append(full_command)
new_config['pipelines'][label] = " && ".join(commands)
except KeyError as e:
error_msg = "Missing key(s) in configuration: {0}.".format(
",".join(e.args))
raise ConfigurationErrorDietException(error_msg)
new_config['parsed'] = True
return new_config |
def sum(x, weights=None):
'''
sum(x) yields either a potential-sum object if x is a potential function or the sum of x if x
is not. If x is not a potential-field then it must be a vector.
sum(x, weights=w) uses the given weights to produce a weighted sum.
'''
x = to_potential(x)
if is_const_potential(x): return PotentialConstant(np.sum(x.c))
else: return PotentialSum(x, weights=weights) | sum(x) yields either a potential-sum object if x is a potential function or the sum of x if x
is not. If x is not a potential-field then it must be a vector.
sum(x, weights=w) uses the given weights to produce a weighted sum. | Below is the the instruction that describes the task:
### Input:
sum(x) yields either a potential-sum object if x is a potential function or the sum of x if x
is not. If x is not a potential-field then it must be a vector.
sum(x, weights=w) uses the given weights to produce a weighted sum.
### Response:
def sum(x, weights=None):
'''
sum(x) yields either a potential-sum object if x is a potential function or the sum of x if x
is not. If x is not a potential-field then it must be a vector.
sum(x, weights=w) uses the given weights to produce a weighted sum.
'''
x = to_potential(x)
if is_const_potential(x): return PotentialConstant(np.sum(x.c))
else: return PotentialSum(x, weights=weights) |
def patch():
"""
Patch botocore client so it generates subsegments
when calling AWS services.
"""
if hasattr(botocore.client, '_xray_enabled'):
return
setattr(botocore.client, '_xray_enabled', True)
wrapt.wrap_function_wrapper(
'botocore.client',
'BaseClient._make_api_call',
_xray_traced_botocore,
)
wrapt.wrap_function_wrapper(
'botocore.endpoint',
'Endpoint.prepare_request',
inject_header,
) | Patch botocore client so it generates subsegments
when calling AWS services. | Below is the the instruction that describes the task:
### Input:
Patch botocore client so it generates subsegments
when calling AWS services.
### Response:
def patch():
"""
Patch botocore client so it generates subsegments
when calling AWS services.
"""
if hasattr(botocore.client, '_xray_enabled'):
return
setattr(botocore.client, '_xray_enabled', True)
wrapt.wrap_function_wrapper(
'botocore.client',
'BaseClient._make_api_call',
_xray_traced_botocore,
)
wrapt.wrap_function_wrapper(
'botocore.endpoint',
'Endpoint.prepare_request',
inject_header,
) |
def enableHook(self, msgObj):
"""
Enable yank-pop.
This method is connected to the 'yank-qtmacs_text_edit' hook
(triggered by the yank macro) to ensure that yank-pop only
gets activated afterwards.
"""
self.killListIdx = len(qte_global.kill_list) - 2
self.qteMain.qtesigKeyseqComplete.connect(self.disableHook) | Enable yank-pop.
This method is connected to the 'yank-qtmacs_text_edit' hook
(triggered by the yank macro) to ensure that yank-pop only
gets activated afterwards. | Below is the the instruction that describes the task:
### Input:
Enable yank-pop.
This method is connected to the 'yank-qtmacs_text_edit' hook
(triggered by the yank macro) to ensure that yank-pop only
gets activated afterwards.
### Response:
def enableHook(self, msgObj):
"""
Enable yank-pop.
This method is connected to the 'yank-qtmacs_text_edit' hook
(triggered by the yank macro) to ensure that yank-pop only
gets activated afterwards.
"""
self.killListIdx = len(qte_global.kill_list) - 2
self.qteMain.qtesigKeyseqComplete.connect(self.disableHook) |
def process_tags(inst_tags):
"""Create dict of instance tags as only name:value pairs."""
tag_dict = {}
for k in range(len(inst_tags)):
tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value']
return tag_dict | Create dict of instance tags as only name:value pairs. | Below is the the instruction that describes the task:
### Input:
Create dict of instance tags as only name:value pairs.
### Response:
def process_tags(inst_tags):
"""Create dict of instance tags as only name:value pairs."""
tag_dict = {}
for k in range(len(inst_tags)):
tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value']
return tag_dict |
def normalize_linked_references(
data: List[Dict[str, Any]]
) -> Generator[Tuple[int, str, str], None, None]:
"""
Return a tuple of information representing all insertions of a linked reference.
(offset, type, value)
"""
for deployment in data:
for offset in deployment["offsets"]:
yield offset, deployment["type"], deployment["value"] | Return a tuple of information representing all insertions of a linked reference.
(offset, type, value) | Below is the the instruction that describes the task:
### Input:
Return a tuple of information representing all insertions of a linked reference.
(offset, type, value)
### Response:
def normalize_linked_references(
data: List[Dict[str, Any]]
) -> Generator[Tuple[int, str, str], None, None]:
"""
Return a tuple of information representing all insertions of a linked reference.
(offset, type, value)
"""
for deployment in data:
for offset in deployment["offsets"]:
yield offset, deployment["type"], deployment["value"] |
def filter_nodes(graph: BELGraph, node_predicates: NodePredicates) -> Iterable[BaseEntity]:
"""Apply a set of predicates to the nodes iterator of a BEL graph."""
concatenated_predicate = concatenate_node_predicates(node_predicates=node_predicates)
for node in graph:
if concatenated_predicate(graph, node):
yield node | Apply a set of predicates to the nodes iterator of a BEL graph. | Below is the the instruction that describes the task:
### Input:
Apply a set of predicates to the nodes iterator of a BEL graph.
### Response:
def filter_nodes(graph: BELGraph, node_predicates: NodePredicates) -> Iterable[BaseEntity]:
"""Apply a set of predicates to the nodes iterator of a BEL graph."""
concatenated_predicate = concatenate_node_predicates(node_predicates=node_predicates)
for node in graph:
if concatenated_predicate(graph, node):
yield node |
def purge(self, name=None):
"""
Disconnect from the given database and remove from local cache
:param name: The name of the connection
:type name: str
:rtype: None
"""
self.disconnect(name)
if name in self._connections:
del self._connections[name] | Disconnect from the given database and remove from local cache
:param name: The name of the connection
:type name: str
:rtype: None | Below is the the instruction that describes the task:
### Input:
Disconnect from the given database and remove from local cache
:param name: The name of the connection
:type name: str
:rtype: None
### Response:
def purge(self, name=None):
"""
Disconnect from the given database and remove from local cache
:param name: The name of the connection
:type name: str
:rtype: None
"""
self.disconnect(name)
if name in self._connections:
del self._connections[name] |
def dbprint(*args):
"""print only if app.debug is truthy"""
if app and app.debug:
if USING_WINDOWS:
print("DEBUG: " + " ".join(args))
else:
CYELLOW2 = "\33[93m"
NORMAL = "\033[0m"
print(CYELLOW2 + "DEBUG: " + " ".join(args) + NORMAL) | print only if app.debug is truthy | Below is the the instruction that describes the task:
### Input:
print only if app.debug is truthy
### Response:
def dbprint(*args):
"""print only if app.debug is truthy"""
if app and app.debug:
if USING_WINDOWS:
print("DEBUG: " + " ".join(args))
else:
CYELLOW2 = "\33[93m"
NORMAL = "\033[0m"
print(CYELLOW2 + "DEBUG: " + " ".join(args) + NORMAL) |
def code(self, text):
"""Return the code instead of the comments.
"""
comm = self.nextValidComment(text)
while comm:
text = text[:comm.start()] + text[comm.end():]
comm = self.nextValidComment(text, comm.end(0))
return text | Return the code instead of the comments. | Below is the the instruction that describes the task:
### Input:
Return the code instead of the comments.
### Response:
def code(self, text):
"""Return the code instead of the comments.
"""
comm = self.nextValidComment(text)
while comm:
text = text[:comm.start()] + text[comm.end():]
comm = self.nextValidComment(text, comm.end(0))
return text |
def createSubtitle(self, fps, section):
"""Returns a correct 'Subtitle' object from a text given in 'section'. If 'section' cannot
be parsed, None is returned.
By default 'section' is checked against 'subPattern' regular expression."""
matched = self._pattern.search(section)
if matched is not None:
matchedDict = matched.groupdict()
return Subtitle(
self.frametime(fps, matchedDict.get("time_from")),
self.frametime(fps, matchedDict.get("time_to")),
self.formatSub(matchedDict.get("text"))
)
return None | Returns a correct 'Subtitle' object from a text given in 'section'. If 'section' cannot
be parsed, None is returned.
By default 'section' is checked against 'subPattern' regular expression. | Below is the the instruction that describes the task:
### Input:
Returns a correct 'Subtitle' object from a text given in 'section'. If 'section' cannot
be parsed, None is returned.
By default 'section' is checked against 'subPattern' regular expression.
### Response:
def createSubtitle(self, fps, section):
"""Returns a correct 'Subtitle' object from a text given in 'section'. If 'section' cannot
be parsed, None is returned.
By default 'section' is checked against 'subPattern' regular expression."""
matched = self._pattern.search(section)
if matched is not None:
matchedDict = matched.groupdict()
return Subtitle(
self.frametime(fps, matchedDict.get("time_from")),
self.frametime(fps, matchedDict.get("time_to")),
self.formatSub(matchedDict.get("text"))
)
return None |
def graph(networkx_graph, title='Axial Graph Visualization', scripts_mode="CDN", data_mode="directory",
output_dir=".", filename="graph.html", version=this_version):
"""
Arguments:
networkx_graph (networkx.Graph): any instance of networkx.Graph
title (str): The title of the plot (to be embedded in the html).
scripts_mode (str): Choose from [`"CDN"`, `"directory"`, `"inline"`]:
- `"CDN"` compiles a single HTML page with links to scripts hosted on a CDN,
- `"directory"` compiles a directory with all scripts locally cached,
- `"inline"` compiles a single HTML file with all scripts/styles inlined.
data_mode (str): Choose from ["directory", "inline"]:
- "directory" compiles a directory with all data locally cached,
- "inline" compiles a single HTML file with all data inlined.
output_dir (str): the directory in which to output the file
filename (str): the filename of the output file
version (str): the version of the javascripts to use.
Leave the default to pin the version, or choose "latest" to get updates,
or choose part of the version string to get minor updates.
Returns:
Path: The filepath which the html was outputted to.
"""
output_dir = Path(output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
# Scripts =======================
scripts = third_party_scripts + [CDN_url(version)+"js/cola.min.js", CDN_url(version)+"js/graph.js"]
scripts_block = _scripts_block(scripts, scripts_mode, output_dir)
# Data =======================
graph_json = nx_json.node_link_data(networkx_graph)
for node in graph_json['nodes']:
for attr, val in node.items():
if isinstance(val, numbers.Number):
node[attr] = round(val, 2)
for link in graph_json['links']:
for attr, val in link.items():
if isinstance(val, numbers.Number):
link[attr] = round(val, 2)
graph_json = f"var graph = {json.dumps(graph_json)};"
data_block = _data_block(data_mode, [('graph', graph_json)], output_dir)
html = templateEnv.get_template('graph.html.j2').render(title=title, scripts_block=scripts_block+'\n'+data_block, nodes=networkx_graph.nodes())
(output_dir / filename).write_text(html)
return (output_dir / filename).resolve() | Arguments:
networkx_graph (networkx.Graph): any instance of networkx.Graph
title (str): The title of the plot (to be embedded in the html).
scripts_mode (str): Choose from [`"CDN"`, `"directory"`, `"inline"`]:
- `"CDN"` compiles a single HTML page with links to scripts hosted on a CDN,
- `"directory"` compiles a directory with all scripts locally cached,
- `"inline"` compiles a single HTML file with all scripts/styles inlined.
data_mode (str): Choose from ["directory", "inline"]:
- "directory" compiles a directory with all data locally cached,
- "inline" compiles a single HTML file with all data inlined.
output_dir (str): the directory in which to output the file
filename (str): the filename of the output file
version (str): the version of the javascripts to use.
Leave the default to pin the version, or choose "latest" to get updates,
or choose part of the version string to get minor updates.
Returns:
Path: The filepath which the html was outputted to. | Below is the the instruction that describes the task:
### Input:
Arguments:
networkx_graph (networkx.Graph): any instance of networkx.Graph
title (str): The title of the plot (to be embedded in the html).
scripts_mode (str): Choose from [`"CDN"`, `"directory"`, `"inline"`]:
- `"CDN"` compiles a single HTML page with links to scripts hosted on a CDN,
- `"directory"` compiles a directory with all scripts locally cached,
- `"inline"` compiles a single HTML file with all scripts/styles inlined.
data_mode (str): Choose from ["directory", "inline"]:
- "directory" compiles a directory with all data locally cached,
- "inline" compiles a single HTML file with all data inlined.
output_dir (str): the directory in which to output the file
filename (str): the filename of the output file
version (str): the version of the javascripts to use.
Leave the default to pin the version, or choose "latest" to get updates,
or choose part of the version string to get minor updates.
Returns:
Path: The filepath which the html was outputted to.
### Response:
def graph(networkx_graph, title='Axial Graph Visualization', scripts_mode="CDN", data_mode="directory",
output_dir=".", filename="graph.html", version=this_version):
"""
Arguments:
networkx_graph (networkx.Graph): any instance of networkx.Graph
title (str): The title of the plot (to be embedded in the html).
scripts_mode (str): Choose from [`"CDN"`, `"directory"`, `"inline"`]:
- `"CDN"` compiles a single HTML page with links to scripts hosted on a CDN,
- `"directory"` compiles a directory with all scripts locally cached,
- `"inline"` compiles a single HTML file with all scripts/styles inlined.
data_mode (str): Choose from ["directory", "inline"]:
- "directory" compiles a directory with all data locally cached,
- "inline" compiles a single HTML file with all data inlined.
output_dir (str): the directory in which to output the file
filename (str): the filename of the output file
version (str): the version of the javascripts to use.
Leave the default to pin the version, or choose "latest" to get updates,
or choose part of the version string to get minor updates.
Returns:
Path: The filepath which the html was outputted to.
"""
output_dir = Path(output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
# Scripts =======================
scripts = third_party_scripts + [CDN_url(version)+"js/cola.min.js", CDN_url(version)+"js/graph.js"]
scripts_block = _scripts_block(scripts, scripts_mode, output_dir)
# Data =======================
graph_json = nx_json.node_link_data(networkx_graph)
for node in graph_json['nodes']:
for attr, val in node.items():
if isinstance(val, numbers.Number):
node[attr] = round(val, 2)
for link in graph_json['links']:
for attr, val in link.items():
if isinstance(val, numbers.Number):
link[attr] = round(val, 2)
graph_json = f"var graph = {json.dumps(graph_json)};"
data_block = _data_block(data_mode, [('graph', graph_json)], output_dir)
html = templateEnv.get_template('graph.html.j2').render(title=title, scripts_block=scripts_block+'\n'+data_block, nodes=networkx_graph.nodes())
(output_dir / filename).write_text(html)
return (output_dir / filename).resolve() |
def read(self, output_tile, **kwargs):
"""
Read existing process output.
Parameters
----------
output_tile : ``BufferedTile``
must be member of output ``TilePyramid``
Returns
-------
NumPy array
"""
try:
return read_raster_no_crs(self.get_path(output_tile))
except FileNotFoundError:
return self.empty(output_tile) | Read existing process output.
Parameters
----------
output_tile : ``BufferedTile``
must be member of output ``TilePyramid``
Returns
-------
NumPy array | Below is the the instruction that describes the task:
### Input:
Read existing process output.
Parameters
----------
output_tile : ``BufferedTile``
must be member of output ``TilePyramid``
Returns
-------
NumPy array
### Response:
def read(self, output_tile, **kwargs):
"""
Read existing process output.
Parameters
----------
output_tile : ``BufferedTile``
must be member of output ``TilePyramid``
Returns
-------
NumPy array
"""
try:
return read_raster_no_crs(self.get_path(output_tile))
except FileNotFoundError:
return self.empty(output_tile) |
def set_cdn_log_retention(self, container, enabled):
"""
Enables or disables whether CDN access logs for the specified container
are collected and stored on Cloud Files.
"""
headers = {"X-Log-Retention": "%s" % enabled}
self.api.cdn_request("/%s" % utils.get_name(container), method="PUT",
headers=headers) | Enables or disables whether CDN access logs for the specified container
are collected and stored on Cloud Files. | Below is the the instruction that describes the task:
### Input:
Enables or disables whether CDN access logs for the specified container
are collected and stored on Cloud Files.
### Response:
def set_cdn_log_retention(self, container, enabled):
"""
Enables or disables whether CDN access logs for the specified container
are collected and stored on Cloud Files.
"""
headers = {"X-Log-Retention": "%s" % enabled}
self.api.cdn_request("/%s" % utils.get_name(container), method="PUT",
headers=headers) |
def sonority_from_fts(self, seg):
"""Given a segment as features, returns the sonority on a scale of 1
to 9.
Args:
seg (list): collection of (value, feature) pairs representing
a segment (vowel or consonant)
Returns:
int: sonority of `seg` between 1 and 9
"""
def match(m):
return self.fm.match(fts(m), seg)
minusHi = BoolTree(match('-hi'), 9, 8)
minusNas = BoolTree(match('-nas'), 6, 5)
plusVoi1 = BoolTree(match('+voi'), 4, 3)
plusVoi2 = BoolTree(match('+voi'), 2, 1)
plusCont = BoolTree(match('+cont'), plusVoi1, plusVoi2)
plusSon = BoolTree(match('+son'), minusNas, plusCont)
minusCons = BoolTree(match('-cons'), 7, plusSon)
plusSyl = BoolTree(match('+syl'), minusHi, minusCons)
return plusSyl.get_value() | Given a segment as features, returns the sonority on a scale of 1
to 9.
Args:
seg (list): collection of (value, feature) pairs representing
a segment (vowel or consonant)
Returns:
int: sonority of `seg` between 1 and 9 | Below is the the instruction that describes the task:
### Input:
Given a segment as features, returns the sonority on a scale of 1
to 9.
Args:
seg (list): collection of (value, feature) pairs representing
a segment (vowel or consonant)
Returns:
int: sonority of `seg` between 1 and 9
### Response:
def sonority_from_fts(self, seg):
"""Given a segment as features, returns the sonority on a scale of 1
to 9.
Args:
seg (list): collection of (value, feature) pairs representing
a segment (vowel or consonant)
Returns:
int: sonority of `seg` between 1 and 9
"""
def match(m):
return self.fm.match(fts(m), seg)
minusHi = BoolTree(match('-hi'), 9, 8)
minusNas = BoolTree(match('-nas'), 6, 5)
plusVoi1 = BoolTree(match('+voi'), 4, 3)
plusVoi2 = BoolTree(match('+voi'), 2, 1)
plusCont = BoolTree(match('+cont'), plusVoi1, plusVoi2)
plusSon = BoolTree(match('+son'), minusNas, plusCont)
minusCons = BoolTree(match('-cons'), 7, plusSon)
plusSyl = BoolTree(match('+syl'), minusHi, minusCons)
return plusSyl.get_value() |
def contour_canny(image, radius, mult_coarse=.40, mult_fine=.1,
clip_rmin=.9, clip_rmax=1.1, maxiter=20,
verbose=True):
"""Heuristic Canny edge detection for circular objects
Two Canny-based edge detections with different filter sizes are
performed to find the outmost contour of an object in a phase image
while keeping artifacts at a minimum.
Parameters
----------
image: 2d ndarray
Image containing an approximately spherically symmetric object
radius: float
The approximate object radius in pixels (required for filtering)
mult_coarse: float
The coarse edge detection has a filter size of
``sigma = mult_coarse * radius``
mult_fine: float
The fine edge detection has a filter size of
``sigma = mult_fine * radius``
clip_rmin: float
Removes edge points that are closer than `clip_rmin` times the
average radial edge position from the center of the image.
clip_rmax: float
Removes edge points that are further than `clip_rmin` times the
average radial edge position from the center of the image.
maxiter: int
Maximum number iterations for coarse edge detection, see Notes
verbose: bool
If set to `True`, issues EdgeDetectionWarning where applicable
Returns
-------
edge : 2d boolean ndarray
The detected edge positions of the object.
Notes
-----
If no edge is found using the filter size defined by `mult_coarse`,
then the coarse filter size is reduced by a factor of 2 until an
edge is found or until `maxiter` is reached.
The edge found using the filter size defined by `mult_fine` is
heuristically filtered (parts at the center and at the edge of the
image are removed). This heuristic filtering assumes that the
circular object is centered in the image.
See Also
--------
skimage.feature.canny: Canny edge detection algorithm used
"""
image = (image - image.min()) / (image.max() - image.min())
if radius > image.shape[0] / 2:
msg = "`radius` in pixels exceeds image size: {}".format(radius)
raise RadiusExceedsImageSizeError(msg)
# 1. Perform a coarse Canny edge detection. If the edge found is empty,
# the coarse filter size is reduced by a factor of 2.
for ii in range(maxiter):
fact_coarse = .5**ii
sigma_coarse = radius * mult_coarse * fact_coarse
edge_coarse = feature.canny(image=image,
sigma=sigma_coarse)
if np.sum(edge_coarse) != 0:
break
else:
msg = "Could not find edge! Try to reducing `mult_coarse` " \
+ "or increasing `maxiter`."
raise EdgeDetectionError(msg)
fact_fine = .7**ii
if fact_fine != 1 and verbose:
msg = "The keyword argument `mult_coarse` is too large. " \
+ "If errors occur, adjust `mult_fine` as well.\n" \
+ "Given `mult_coarse`: {}\n".format(mult_coarse) \
+ "New `mult_coarse`: {}\n".format(mult_coarse * fact_coarse) \
+ "Given `mult_fine`: {}\n".format(mult_fine) \
+ "New `mult_fine`: {}".format(mult_fine * fact_fine)
warnings.warn(msg, EdgeDetectionWarning)
# 2. Perform a fine Canny edge detection.
sigma_fine = radius * mult_fine * fact_fine
edge_fine = feature.canny(image, sigma_fine)
# 3. Remove parts from the fine edge
# Assume that the object is centered.
sx, sy = image.shape
x = np.linspace(-sx / 2, sx / 2, sx, endpoint=True).reshape(-1, 1)
y = np.linspace(-sy / 2, sy / 2, sy, endpoint=True).reshape(1, -1)
# 3.a. Remove detected edge parts from the corners of the image
# Radius of this disk is approximately
ellipse = (x / sx)**2 + (y / sy)**2 < .25
edge_fine *= ellipse
edge_coarse *= ellipse
if np.sum(edge_fine):
# 3.b Also filter inside of edge
rad = np.sqrt(x**2 + y**2)
# Filter coarse edge with `clip_rmin` and `clip_rmax`
rad_coarse = rad * edge_coarse
avg_coarse = np.sum(rad_coarse) / np.sum(edge_coarse)
rad_coarse[rad_coarse < avg_coarse * clip_rmin] = 0
rad_coarse[rad_coarse > avg_coarse * clip_rmax] = 0
# Filter inside of fine edge with smallest radius of
# coarse edge, i.e. `rad_coarse.min()`.
rad_fine = rad * edge_fine
if np.sum(rad_coarse):
edge_fine[rad_fine < rad_coarse[rad_coarse != 0].min()] = 0
# Filter outside of fine edge with `clip_rmax` twice
for __ in range(2):
rad_fine = rad * edge_fine
avg_fine = np.sum(rad_fine) / np.sum(edge_fine)
edge_fine[rad_fine > avg_fine * clip_rmax] = 0
elif np.sum(edge_coarse):
# No fine edge detected.
edge_fine = edge_coarse
else:
msg = "Could not find edge! Try reducing `mult_coarse` " \
+ "and `mult_fine`."
raise EdgeDetectionError(msg)
# make sure there are more than 4 points
if np.sum(edge_fine) < 4:
msg = "Detected edge too small! Try increasing `maxiter`, " \
+ "modifying `radius`, or reducing `mult_coarse`."
raise EdgeDetectionError(msg)
return edge_fine | Heuristic Canny edge detection for circular objects
Two Canny-based edge detections with different filter sizes are
performed to find the outmost contour of an object in a phase image
while keeping artifacts at a minimum.
Parameters
----------
image: 2d ndarray
Image containing an approximately spherically symmetric object
radius: float
The approximate object radius in pixels (required for filtering)
mult_coarse: float
The coarse edge detection has a filter size of
``sigma = mult_coarse * radius``
mult_fine: float
The fine edge detection has a filter size of
``sigma = mult_fine * radius``
clip_rmin: float
Removes edge points that are closer than `clip_rmin` times the
average radial edge position from the center of the image.
clip_rmax: float
Removes edge points that are further than `clip_rmin` times the
average radial edge position from the center of the image.
maxiter: int
Maximum number iterations for coarse edge detection, see Notes
verbose: bool
If set to `True`, issues EdgeDetectionWarning where applicable
Returns
-------
edge : 2d boolean ndarray
The detected edge positions of the object.
Notes
-----
If no edge is found using the filter size defined by `mult_coarse`,
then the coarse filter size is reduced by a factor of 2 until an
edge is found or until `maxiter` is reached.
The edge found using the filter size defined by `mult_fine` is
heuristically filtered (parts at the center and at the edge of the
image are removed). This heuristic filtering assumes that the
circular object is centered in the image.
See Also
--------
skimage.feature.canny: Canny edge detection algorithm used | Below is the the instruction that describes the task:
### Input:
Heuristic Canny edge detection for circular objects
Two Canny-based edge detections with different filter sizes are
performed to find the outmost contour of an object in a phase image
while keeping artifacts at a minimum.
Parameters
----------
image: 2d ndarray
Image containing an approximately spherically symmetric object
radius: float
The approximate object radius in pixels (required for filtering)
mult_coarse: float
The coarse edge detection has a filter size of
``sigma = mult_coarse * radius``
mult_fine: float
The fine edge detection has a filter size of
``sigma = mult_fine * radius``
clip_rmin: float
Removes edge points that are closer than `clip_rmin` times the
average radial edge position from the center of the image.
clip_rmax: float
Removes edge points that are further than `clip_rmin` times the
average radial edge position from the center of the image.
maxiter: int
Maximum number iterations for coarse edge detection, see Notes
verbose: bool
If set to `True`, issues EdgeDetectionWarning where applicable
Returns
-------
edge : 2d boolean ndarray
The detected edge positions of the object.
Notes
-----
If no edge is found using the filter size defined by `mult_coarse`,
then the coarse filter size is reduced by a factor of 2 until an
edge is found or until `maxiter` is reached.
The edge found using the filter size defined by `mult_fine` is
heuristically filtered (parts at the center and at the edge of the
image are removed). This heuristic filtering assumes that the
circular object is centered in the image.
See Also
--------
skimage.feature.canny: Canny edge detection algorithm used
### Response:
def contour_canny(image, radius, mult_coarse=.40, mult_fine=.1,
clip_rmin=.9, clip_rmax=1.1, maxiter=20,
verbose=True):
"""Heuristic Canny edge detection for circular objects
Two Canny-based edge detections with different filter sizes are
performed to find the outmost contour of an object in a phase image
while keeping artifacts at a minimum.
Parameters
----------
image: 2d ndarray
Image containing an approximately spherically symmetric object
radius: float
The approximate object radius in pixels (required for filtering)
mult_coarse: float
The coarse edge detection has a filter size of
``sigma = mult_coarse * radius``
mult_fine: float
The fine edge detection has a filter size of
``sigma = mult_fine * radius``
clip_rmin: float
Removes edge points that are closer than `clip_rmin` times the
average radial edge position from the center of the image.
clip_rmax: float
Removes edge points that are further than `clip_rmin` times the
average radial edge position from the center of the image.
maxiter: int
Maximum number iterations for coarse edge detection, see Notes
verbose: bool
If set to `True`, issues EdgeDetectionWarning where applicable
Returns
-------
edge : 2d boolean ndarray
The detected edge positions of the object.
Notes
-----
If no edge is found using the filter size defined by `mult_coarse`,
then the coarse filter size is reduced by a factor of 2 until an
edge is found or until `maxiter` is reached.
The edge found using the filter size defined by `mult_fine` is
heuristically filtered (parts at the center and at the edge of the
image are removed). This heuristic filtering assumes that the
circular object is centered in the image.
See Also
--------
skimage.feature.canny: Canny edge detection algorithm used
"""
image = (image - image.min()) / (image.max() - image.min())
if radius > image.shape[0] / 2:
msg = "`radius` in pixels exceeds image size: {}".format(radius)
raise RadiusExceedsImageSizeError(msg)
# 1. Perform a coarse Canny edge detection. If the edge found is empty,
# the coarse filter size is reduced by a factor of 2.
for ii in range(maxiter):
fact_coarse = .5**ii
sigma_coarse = radius * mult_coarse * fact_coarse
edge_coarse = feature.canny(image=image,
sigma=sigma_coarse)
if np.sum(edge_coarse) != 0:
break
else:
msg = "Could not find edge! Try to reducing `mult_coarse` " \
+ "or increasing `maxiter`."
raise EdgeDetectionError(msg)
fact_fine = .7**ii
if fact_fine != 1 and verbose:
msg = "The keyword argument `mult_coarse` is too large. " \
+ "If errors occur, adjust `mult_fine` as well.\n" \
+ "Given `mult_coarse`: {}\n".format(mult_coarse) \
+ "New `mult_coarse`: {}\n".format(mult_coarse * fact_coarse) \
+ "Given `mult_fine`: {}\n".format(mult_fine) \
+ "New `mult_fine`: {}".format(mult_fine * fact_fine)
warnings.warn(msg, EdgeDetectionWarning)
# 2. Perform a fine Canny edge detection.
sigma_fine = radius * mult_fine * fact_fine
edge_fine = feature.canny(image, sigma_fine)
# 3. Remove parts from the fine edge
# Assume that the object is centered.
sx, sy = image.shape
x = np.linspace(-sx / 2, sx / 2, sx, endpoint=True).reshape(-1, 1)
y = np.linspace(-sy / 2, sy / 2, sy, endpoint=True).reshape(1, -1)
# 3.a. Remove detected edge parts from the corners of the image
# Radius of this disk is approximately
ellipse = (x / sx)**2 + (y / sy)**2 < .25
edge_fine *= ellipse
edge_coarse *= ellipse
if np.sum(edge_fine):
# 3.b Also filter inside of edge
rad = np.sqrt(x**2 + y**2)
# Filter coarse edge with `clip_rmin` and `clip_rmax`
rad_coarse = rad * edge_coarse
avg_coarse = np.sum(rad_coarse) / np.sum(edge_coarse)
rad_coarse[rad_coarse < avg_coarse * clip_rmin] = 0
rad_coarse[rad_coarse > avg_coarse * clip_rmax] = 0
# Filter inside of fine edge with smallest radius of
# coarse edge, i.e. `rad_coarse.min()`.
rad_fine = rad * edge_fine
if np.sum(rad_coarse):
edge_fine[rad_fine < rad_coarse[rad_coarse != 0].min()] = 0
# Filter outside of fine edge with `clip_rmax` twice
for __ in range(2):
rad_fine = rad * edge_fine
avg_fine = np.sum(rad_fine) / np.sum(edge_fine)
edge_fine[rad_fine > avg_fine * clip_rmax] = 0
elif np.sum(edge_coarse):
# No fine edge detected.
edge_fine = edge_coarse
else:
msg = "Could not find edge! Try reducing `mult_coarse` " \
+ "and `mult_fine`."
raise EdgeDetectionError(msg)
# make sure there are more than 4 points
if np.sum(edge_fine) < 4:
msg = "Detected edge too small! Try increasing `maxiter`, " \
+ "modifying `radius`, or reducing `mult_coarse`."
raise EdgeDetectionError(msg)
return edge_fine |
def protege_data(datas_str, sens):
"""
Used to crypt/decrypt data before saving locally.
Override if securit is needed.
bytes -> str when decrypting
str -> bytes when crypting
:param datas_str: When crypting, str. when decrypting bytes
:param sens: True to crypt, False to decrypt
"""
return bytes(datas_str, encoding="utf8") if sens else str(datas_str, encoding="utf8") | Used to crypt/decrypt data before saving locally.
Override if securit is needed.
bytes -> str when decrypting
str -> bytes when crypting
:param datas_str: When crypting, str. when decrypting bytes
:param sens: True to crypt, False to decrypt | Below is the the instruction that describes the task:
### Input:
Used to crypt/decrypt data before saving locally.
Override if securit is needed.
bytes -> str when decrypting
str -> bytes when crypting
:param datas_str: When crypting, str. when decrypting bytes
:param sens: True to crypt, False to decrypt
### Response:
def protege_data(datas_str, sens):
"""
Used to crypt/decrypt data before saving locally.
Override if securit is needed.
bytes -> str when decrypting
str -> bytes when crypting
:param datas_str: When crypting, str. when decrypting bytes
:param sens: True to crypt, False to decrypt
"""
return bytes(datas_str, encoding="utf8") if sens else str(datas_str, encoding="utf8") |
def make(keyvals):
"""
Create new H2OTwoDimTable object from list of (key,value) tuples which are a pre-cursor to JSON dict.
:param keyvals: list of (key, value) tuples
:return: new H2OTwoDimTable object
"""
kwargs = {}
for key, value in keyvals:
if key == "columns":
kwargs["col_formats"] = [c["format"] for c in value]
kwargs["col_types"] = [c["type"] for c in value]
kwargs["col_header"] = [c["name"] for c in value]
kwargs["row_header"] = len(value)
if key == "name": kwargs["table_header"] = value
if key == "description": kwargs["table_description"] = value
if key == "data": kwargs["raw_cell_values"] = value
return H2OTwoDimTable(**kwargs) | Create new H2OTwoDimTable object from list of (key,value) tuples which are a pre-cursor to JSON dict.
:param keyvals: list of (key, value) tuples
:return: new H2OTwoDimTable object | Below is the the instruction that describes the task:
### Input:
Create new H2OTwoDimTable object from list of (key,value) tuples which are a pre-cursor to JSON dict.
:param keyvals: list of (key, value) tuples
:return: new H2OTwoDimTable object
### Response:
def make(keyvals):
"""
Create new H2OTwoDimTable object from list of (key,value) tuples which are a pre-cursor to JSON dict.
:param keyvals: list of (key, value) tuples
:return: new H2OTwoDimTable object
"""
kwargs = {}
for key, value in keyvals:
if key == "columns":
kwargs["col_formats"] = [c["format"] for c in value]
kwargs["col_types"] = [c["type"] for c in value]
kwargs["col_header"] = [c["name"] for c in value]
kwargs["row_header"] = len(value)
if key == "name": kwargs["table_header"] = value
if key == "description": kwargs["table_description"] = value
if key == "data": kwargs["raw_cell_values"] = value
return H2OTwoDimTable(**kwargs) |
def run_from_ufos(self, ufos, output=(), **kwargs):
"""Run toolchain from UFO sources.
Args:
ufos: List of UFO sources, as either paths or opened objects.
output: List of output formats to generate.
kwargs: Arguments passed along to save_otfs.
"""
if set(output) == {"ufo"}:
return
# the `ufos` parameter can be a list of UFO objects
# or it can be a path (string) with a glob syntax
ufo_paths = []
if isinstance(ufos, basestring):
ufo_paths = glob.glob(ufos)
ufos = [Font(x) for x in ufo_paths]
elif isinstance(ufos, list):
# ufos can be either paths or open Font objects, so normalize them
ufos = [Font(x) if isinstance(x, basestring) else x for x in ufos]
ufo_paths = [x.path for x in ufos]
else:
raise FontmakeError(
"UFOs parameter is neither a defcon.Font object, a path or a glob, "
"nor a list of any of these.",
ufos,
)
need_reload = False
if "otf" in output:
self.build_otfs(ufos, **kwargs)
need_reload = True
if "ttf" in output:
if need_reload:
ufos = [Font(path) for path in ufo_paths]
self.build_ttfs(ufos, **kwargs)
need_reload = True | Run toolchain from UFO sources.
Args:
ufos: List of UFO sources, as either paths or opened objects.
output: List of output formats to generate.
kwargs: Arguments passed along to save_otfs. | Below is the the instruction that describes the task:
### Input:
Run toolchain from UFO sources.
Args:
ufos: List of UFO sources, as either paths or opened objects.
output: List of output formats to generate.
kwargs: Arguments passed along to save_otfs.
### Response:
def run_from_ufos(self, ufos, output=(), **kwargs):
"""Run toolchain from UFO sources.
Args:
ufos: List of UFO sources, as either paths or opened objects.
output: List of output formats to generate.
kwargs: Arguments passed along to save_otfs.
"""
if set(output) == {"ufo"}:
return
# the `ufos` parameter can be a list of UFO objects
# or it can be a path (string) with a glob syntax
ufo_paths = []
if isinstance(ufos, basestring):
ufo_paths = glob.glob(ufos)
ufos = [Font(x) for x in ufo_paths]
elif isinstance(ufos, list):
# ufos can be either paths or open Font objects, so normalize them
ufos = [Font(x) if isinstance(x, basestring) else x for x in ufos]
ufo_paths = [x.path for x in ufos]
else:
raise FontmakeError(
"UFOs parameter is neither a defcon.Font object, a path or a glob, "
"nor a list of any of these.",
ufos,
)
need_reload = False
if "otf" in output:
self.build_otfs(ufos, **kwargs)
need_reload = True
if "ttf" in output:
if need_reload:
ufos = [Font(path) for path in ufo_paths]
self.build_ttfs(ufos, **kwargs)
need_reload = True |
def widgets_from_abbreviations(self, seq):
"""Given a sequence of (name, abbrev, default) tuples, return a sequence of Widgets."""
result = []
for name, abbrev, default in seq:
widget = self.widget_from_abbrev(abbrev, default)
if not (isinstance(widget, ValueWidget) or isinstance(widget, fixed)):
if widget is None:
raise ValueError("{!r} cannot be transformed to a widget".format(abbrev))
else:
raise TypeError("{!r} is not a ValueWidget".format(widget))
if not widget.description:
widget.description = name
widget._kwarg = name
result.append(widget)
return result | Given a sequence of (name, abbrev, default) tuples, return a sequence of Widgets. | Below is the the instruction that describes the task:
### Input:
Given a sequence of (name, abbrev, default) tuples, return a sequence of Widgets.
### Response:
def widgets_from_abbreviations(self, seq):
"""Given a sequence of (name, abbrev, default) tuples, return a sequence of Widgets."""
result = []
for name, abbrev, default in seq:
widget = self.widget_from_abbrev(abbrev, default)
if not (isinstance(widget, ValueWidget) or isinstance(widget, fixed)):
if widget is None:
raise ValueError("{!r} cannot be transformed to a widget".format(abbrev))
else:
raise TypeError("{!r} is not a ValueWidget".format(widget))
if not widget.description:
widget.description = name
widget._kwarg = name
result.append(widget)
return result |
def error(args):
"""
%prog error version backup_folder
Find all errors in ../5-consensus/*.err and pull the error unitigs into
backup/ folder.
"""
p = OptionParser(error.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
version, backup_folder = args
mkdir(backup_folder)
fw = open("errors.log", "w")
seen = set()
for g in glob("../5-consensus/*.err"):
if "partitioned" in g:
continue
fp = open(g)
partID = op.basename(g).rsplit(".err", 1)[0]
partID = int(partID.split("_")[-1])
for row in fp:
if row.startswith(working):
unitigID = row.split("(")[0].split()[-1]
continue
if not failed.upper() in row.upper():
continue
uu = (version, partID, unitigID)
if uu in seen:
continue
seen.add(uu)
print("\t".join(str(x) for x in (partID, unitigID)), file=fw)
s = [str(x) for x in uu]
unitigfile = pull(s)
cmd = "mv {0} {1}".format(unitigfile, backup_folder)
sh(cmd)
fp.close()
logging.debug("A total of {0} unitigs saved to {1}.".\
format(len(seen), backup_folder)) | %prog error version backup_folder
Find all errors in ../5-consensus/*.err and pull the error unitigs into
backup/ folder. | Below is the the instruction that describes the task:
### Input:
%prog error version backup_folder
Find all errors in ../5-consensus/*.err and pull the error unitigs into
backup/ folder.
### Response:
def error(args):
"""
%prog error version backup_folder
Find all errors in ../5-consensus/*.err and pull the error unitigs into
backup/ folder.
"""
p = OptionParser(error.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
version, backup_folder = args
mkdir(backup_folder)
fw = open("errors.log", "w")
seen = set()
for g in glob("../5-consensus/*.err"):
if "partitioned" in g:
continue
fp = open(g)
partID = op.basename(g).rsplit(".err", 1)[0]
partID = int(partID.split("_")[-1])
for row in fp:
if row.startswith(working):
unitigID = row.split("(")[0].split()[-1]
continue
if not failed.upper() in row.upper():
continue
uu = (version, partID, unitigID)
if uu in seen:
continue
seen.add(uu)
print("\t".join(str(x) for x in (partID, unitigID)), file=fw)
s = [str(x) for x in uu]
unitigfile = pull(s)
cmd = "mv {0} {1}".format(unitigfile, backup_folder)
sh(cmd)
fp.close()
logging.debug("A total of {0} unitigs saved to {1}.".\
format(len(seen), backup_folder)) |
def __marshal_matches(matched):
"""Convert matches to JSON format.
:param matched: a list of matched identities
:returns json_matches: a list of matches in JSON format
"""
json_matches = []
for m in matched:
identities = [i.uuid for i in m]
if len(identities) == 1:
continue
json_match = {
'identities': identities,
'processed': False
}
json_matches.append(json_match)
return json_matches | Convert matches to JSON format.
:param matched: a list of matched identities
:returns json_matches: a list of matches in JSON format | Below is the the instruction that describes the task:
### Input:
Convert matches to JSON format.
:param matched: a list of matched identities
:returns json_matches: a list of matches in JSON format
### Response:
def __marshal_matches(matched):
"""Convert matches to JSON format.
:param matched: a list of matched identities
:returns json_matches: a list of matches in JSON format
"""
json_matches = []
for m in matched:
identities = [i.uuid for i in m]
if len(identities) == 1:
continue
json_match = {
'identities': identities,
'processed': False
}
json_matches.append(json_match)
return json_matches |
def post_user_contact_lists_contacts(self, id, contact_list_id, **data):
"""
POST /users/:id/contact_lists/:contact_list_id/contacts/
Adds a new contact to the contact list. Returns ``{"created": true}``.
There is no way to update entries in the list; just delete the old one
and add the updated version.
"""
return self.post("/users/{0}/contact_lists/{0}/contacts/".format(id,contact_list_id), data=data) | POST /users/:id/contact_lists/:contact_list_id/contacts/
Adds a new contact to the contact list. Returns ``{"created": true}``.
There is no way to update entries in the list; just delete the old one
and add the updated version. | Below is the the instruction that describes the task:
### Input:
POST /users/:id/contact_lists/:contact_list_id/contacts/
Adds a new contact to the contact list. Returns ``{"created": true}``.
There is no way to update entries in the list; just delete the old one
and add the updated version.
### Response:
def post_user_contact_lists_contacts(self, id, contact_list_id, **data):
"""
POST /users/:id/contact_lists/:contact_list_id/contacts/
Adds a new contact to the contact list. Returns ``{"created": true}``.
There is no way to update entries in the list; just delete the old one
and add the updated version.
"""
return self.post("/users/{0}/contact_lists/{0}/contacts/".format(id,contact_list_id), data=data) |
def help(self, *args):
"""
Can be overridden (and for example _Menu does).
"""
if args:
self.messages.error(
self.messages.command_does_not_accept_arguments)
else:
print(self.helpfull) | Can be overridden (and for example _Menu does). | Below is the the instruction that describes the task:
### Input:
Can be overridden (and for example _Menu does).
### Response:
def help(self, *args):
"""
Can be overridden (and for example _Menu does).
"""
if args:
self.messages.error(
self.messages.command_does_not_accept_arguments)
else:
print(self.helpfull) |
def manual_close(self):
"""
Close the underlying connection without returning it to the pool.
"""
if self.is_closed():
return False
# Obtain reference to the connection in-use by the calling thread.
conn = self.connection()
# A connection will only be re-added to the available list if it is
# marked as "in use" at the time it is closed. We will explicitly
# remove it from the "in use" list, call "close()" for the
# side-effects, and then explicitly close the connection.
self._in_use.pop(self.conn_key(conn), None)
self.close()
self._close(conn, close_conn=True) | Close the underlying connection without returning it to the pool. | Below is the the instruction that describes the task:
### Input:
Close the underlying connection without returning it to the pool.
### Response:
def manual_close(self):
"""
Close the underlying connection without returning it to the pool.
"""
if self.is_closed():
return False
# Obtain reference to the connection in-use by the calling thread.
conn = self.connection()
# A connection will only be re-added to the available list if it is
# marked as "in use" at the time it is closed. We will explicitly
# remove it from the "in use" list, call "close()" for the
# side-effects, and then explicitly close the connection.
self._in_use.pop(self.conn_key(conn), None)
self.close()
self._close(conn, close_conn=True) |
def difference(iterable, func=sub):
"""By default, compute the first difference of *iterable* using
:func:`operator.sub`.
>>> iterable = [0, 1, 3, 6, 10]
>>> list(difference(iterable))
[0, 1, 2, 3, 4]
This is the opposite of :func:`accumulate`'s default behavior:
>>> from itertools import accumulate
>>> iterable = [0, 1, 2, 3, 4]
>>> list(accumulate(iterable))
[0, 1, 3, 6, 10]
>>> list(difference(accumulate(iterable)))
[0, 1, 2, 3, 4]
By default *func* is :func:`operator.sub`, but other functions can be
specified. They will be applied as follows::
A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
For example, to do progressive division:
>>> iterable = [1, 2, 6, 24, 120] # Factorial sequence
>>> func = lambda x, y: x // y
>>> list(difference(iterable, func))
[1, 2, 3, 4, 5]
"""
a, b = tee(iterable)
try:
item = next(b)
except StopIteration:
return iter([])
return chain([item], map(lambda x: func(x[1], x[0]), zip(a, b))) | By default, compute the first difference of *iterable* using
:func:`operator.sub`.
>>> iterable = [0, 1, 3, 6, 10]
>>> list(difference(iterable))
[0, 1, 2, 3, 4]
This is the opposite of :func:`accumulate`'s default behavior:
>>> from itertools import accumulate
>>> iterable = [0, 1, 2, 3, 4]
>>> list(accumulate(iterable))
[0, 1, 3, 6, 10]
>>> list(difference(accumulate(iterable)))
[0, 1, 2, 3, 4]
By default *func* is :func:`operator.sub`, but other functions can be
specified. They will be applied as follows::
A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
For example, to do progressive division:
>>> iterable = [1, 2, 6, 24, 120] # Factorial sequence
>>> func = lambda x, y: x // y
>>> list(difference(iterable, func))
[1, 2, 3, 4, 5] | Below is the the instruction that describes the task:
### Input:
By default, compute the first difference of *iterable* using
:func:`operator.sub`.
>>> iterable = [0, 1, 3, 6, 10]
>>> list(difference(iterable))
[0, 1, 2, 3, 4]
This is the opposite of :func:`accumulate`'s default behavior:
>>> from itertools import accumulate
>>> iterable = [0, 1, 2, 3, 4]
>>> list(accumulate(iterable))
[0, 1, 3, 6, 10]
>>> list(difference(accumulate(iterable)))
[0, 1, 2, 3, 4]
By default *func* is :func:`operator.sub`, but other functions can be
specified. They will be applied as follows::
A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
For example, to do progressive division:
>>> iterable = [1, 2, 6, 24, 120] # Factorial sequence
>>> func = lambda x, y: x // y
>>> list(difference(iterable, func))
[1, 2, 3, 4, 5]
### Response:
def difference(iterable, func=sub):
"""By default, compute the first difference of *iterable* using
:func:`operator.sub`.
>>> iterable = [0, 1, 3, 6, 10]
>>> list(difference(iterable))
[0, 1, 2, 3, 4]
This is the opposite of :func:`accumulate`'s default behavior:
>>> from itertools import accumulate
>>> iterable = [0, 1, 2, 3, 4]
>>> list(accumulate(iterable))
[0, 1, 3, 6, 10]
>>> list(difference(accumulate(iterable)))
[0, 1, 2, 3, 4]
By default *func* is :func:`operator.sub`, but other functions can be
specified. They will be applied as follows::
A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
For example, to do progressive division:
>>> iterable = [1, 2, 6, 24, 120] # Factorial sequence
>>> func = lambda x, y: x // y
>>> list(difference(iterable, func))
[1, 2, 3, 4, 5]
"""
a, b = tee(iterable)
try:
item = next(b)
except StopIteration:
return iter([])
return chain([item], map(lambda x: func(x[1], x[0]), zip(a, b))) |
def start_serialization(self):
"""
Start serialization -- open the XML document and the root element.
"""
if (self.root):
self.xml = XmlPrinter(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
self.xml.startDocument()
self.xml.startElement("django-objects", {"version" : "1.0"}) | Start serialization -- open the XML document and the root element. | Below is the the instruction that describes the task:
### Input:
Start serialization -- open the XML document and the root element.
### Response:
def start_serialization(self):
"""
Start serialization -- open the XML document and the root element.
"""
if (self.root):
self.xml = XmlPrinter(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
self.xml.startDocument()
self.xml.startElement("django-objects", {"version" : "1.0"}) |
def list(self):
"""
List the contents of the directory.
"""
return [File(f, parent=self) for f in os.listdir(self.path)] | List the contents of the directory. | Below is the the instruction that describes the task:
### Input:
List the contents of the directory.
### Response:
def list(self):
"""
List the contents of the directory.
"""
return [File(f, parent=self) for f in os.listdir(self.path)] |
def is_valid_uuid (uuid):
"""
is_valid_uuid (uuid) -> bool
returns True if uuid is a valid 128-bit UUID.
valid UUIDs are always strings taking one of the following forms:
XXXX
XXXXXXXX
XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
where each X is a hexadecimal digit (case insensitive)
"""
try:
if len (uuid) == 4:
if int (uuid, 16) < 0: return False
elif len (uuid) == 8:
if int (uuid, 16) < 0: return False
elif len (uuid) == 36:
pieces = uuid.split ("-")
if len (pieces) != 5 or \
len (pieces[0]) != 8 or \
len (pieces[1]) != 4 or \
len (pieces[2]) != 4 or \
len (pieces[3]) != 4 or \
len (pieces[4]) != 12:
return False
[ int (p, 16) for p in pieces ]
else:
return False
except ValueError:
return False
except TypeError:
return False
return True | is_valid_uuid (uuid) -> bool
returns True if uuid is a valid 128-bit UUID.
valid UUIDs are always strings taking one of the following forms:
XXXX
XXXXXXXX
XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
where each X is a hexadecimal digit (case insensitive) | Below is the the instruction that describes the task:
### Input:
is_valid_uuid (uuid) -> bool
returns True if uuid is a valid 128-bit UUID.
valid UUIDs are always strings taking one of the following forms:
XXXX
XXXXXXXX
XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
where each X is a hexadecimal digit (case insensitive)
### Response:
def is_valid_uuid (uuid):
"""
is_valid_uuid (uuid) -> bool
returns True if uuid is a valid 128-bit UUID.
valid UUIDs are always strings taking one of the following forms:
XXXX
XXXXXXXX
XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
where each X is a hexadecimal digit (case insensitive)
"""
try:
if len (uuid) == 4:
if int (uuid, 16) < 0: return False
elif len (uuid) == 8:
if int (uuid, 16) < 0: return False
elif len (uuid) == 36:
pieces = uuid.split ("-")
if len (pieces) != 5 or \
len (pieces[0]) != 8 or \
len (pieces[1]) != 4 or \
len (pieces[2]) != 4 or \
len (pieces[3]) != 4 or \
len (pieces[4]) != 12:
return False
[ int (p, 16) for p in pieces ]
else:
return False
except ValueError:
return False
except TypeError:
return False
return True |
def load(cls, path: str, password: str = None) -> 'Account':
"""Load an account from a keystore file.
Args:
path: full path to the keyfile
password: the password to decrypt the key file or `None` to leave it encrypted
"""
with open(path) as f:
keystore = json.load(f)
if not check_keystore_json(keystore):
raise ValueError('Invalid keystore file')
return Account(keystore, password, path=path) | Load an account from a keystore file.
Args:
path: full path to the keyfile
password: the password to decrypt the key file or `None` to leave it encrypted | Below is the the instruction that describes the task:
### Input:
Load an account from a keystore file.
Args:
path: full path to the keyfile
password: the password to decrypt the key file or `None` to leave it encrypted
### Response:
def load(cls, path: str, password: str = None) -> 'Account':
"""Load an account from a keystore file.
Args:
path: full path to the keyfile
password: the password to decrypt the key file or `None` to leave it encrypted
"""
with open(path) as f:
keystore = json.load(f)
if not check_keystore_json(keystore):
raise ValueError('Invalid keystore file')
return Account(keystore, password, path=path) |
def exec_request(self, URL):
"""Sends the actual request; returns response."""
## Throttle request, if need be
interval = time.time() - self.__ts_last_req
if (interval < self.__min_req_interval):
time.sleep( self.__min_req_interval - interval )
## Construct and execute request
headers = {
"X-ELS-APIKey" : self.api_key,
"User-Agent" : self.__user_agent,
"Accept" : 'application/json'
}
if self.inst_token:
headers["X-ELS-Insttoken"] = self.inst_token
logger.info('Sending GET request to ' + URL)
r = requests.get(
URL,
headers = headers
)
self.__ts_last_req = time.time()
self._status_code=r.status_code
if r.status_code == 200:
self._status_msg='data retrieved'
return json.loads(r.text)
else:
self._status_msg="HTTP " + str(r.status_code) + " Error from " + URL + " and using headers " + str(headers) + ": " + r.text
raise requests.HTTPError("HTTP " + str(r.status_code) + " Error from " + URL + "\nand using headers " + str(headers) + ":\n" + r.text) | Sends the actual request; returns response. | Below is the the instruction that describes the task:
### Input:
Sends the actual request; returns response.
### Response:
def exec_request(self, URL):
"""Sends the actual request; returns response."""
## Throttle request, if need be
interval = time.time() - self.__ts_last_req
if (interval < self.__min_req_interval):
time.sleep( self.__min_req_interval - interval )
## Construct and execute request
headers = {
"X-ELS-APIKey" : self.api_key,
"User-Agent" : self.__user_agent,
"Accept" : 'application/json'
}
if self.inst_token:
headers["X-ELS-Insttoken"] = self.inst_token
logger.info('Sending GET request to ' + URL)
r = requests.get(
URL,
headers = headers
)
self.__ts_last_req = time.time()
self._status_code=r.status_code
if r.status_code == 200:
self._status_msg='data retrieved'
return json.loads(r.text)
else:
self._status_msg="HTTP " + str(r.status_code) + " Error from " + URL + " and using headers " + str(headers) + ": " + r.text
raise requests.HTTPError("HTTP " + str(r.status_code) + " Error from " + URL + "\nand using headers " + str(headers) + ":\n" + r.text) |
def output(self, _filename):
"""
_filename is not used
Args:
_filename(string)
"""
txt = "Analyze of {}\n".format(self.slither.filename)
txt += self.get_detectors_result()
for contract in self.slither.contracts_derived:
txt += "\nContract {}\n".format(contract.name)
txt += self.is_complex_code(contract)
is_erc20 = contract.is_erc20()
txt += '\tNumber of functions:{}'.format(self._number_functions(contract))
txt += "\tIs ERC20 token: {}\n".format(contract.is_erc20())
if is_erc20:
txt += self.get_summary_erc20(contract)
self.info(txt) | _filename is not used
Args:
_filename(string) | Below is the the instruction that describes the task:
### Input:
_filename is not used
Args:
_filename(string)
### Response:
def output(self, _filename):
"""
_filename is not used
Args:
_filename(string)
"""
txt = "Analyze of {}\n".format(self.slither.filename)
txt += self.get_detectors_result()
for contract in self.slither.contracts_derived:
txt += "\nContract {}\n".format(contract.name)
txt += self.is_complex_code(contract)
is_erc20 = contract.is_erc20()
txt += '\tNumber of functions:{}'.format(self._number_functions(contract))
txt += "\tIs ERC20 token: {}\n".format(contract.is_erc20())
if is_erc20:
txt += self.get_summary_erc20(contract)
self.info(txt) |
def flatten(inputs, scope=None):
"""Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
scope: Optional scope for name_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is wrong.
"""
if len(inputs.get_shape()) < 2:
raise ValueError('Inputs must be have a least 2 dimensions')
dims = inputs.get_shape()[1:]
k = dims.num_elements()
with tf.name_scope(scope, 'Flatten', [inputs]):
return tf.reshape(inputs, [-1, k]) | Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
scope: Optional scope for name_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is wrong. | Below is the the instruction that describes the task:
### Input:
Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
scope: Optional scope for name_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is wrong.
### Response:
def flatten(inputs, scope=None):
"""Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
scope: Optional scope for name_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is wrong.
"""
if len(inputs.get_shape()) < 2:
raise ValueError('Inputs must be have a least 2 dimensions')
dims = inputs.get_shape()[1:]
k = dims.num_elements()
with tf.name_scope(scope, 'Flatten', [inputs]):
return tf.reshape(inputs, [-1, k]) |
def get_dataset(self, key, info, out=None):
"""Get a dataset from the file."""
logger.debug("Reading %s.", key.name)
values = self.file_content[key.name]
selected = np.array(self.selected)
if key.name in ("Latitude", "Longitude"):
values = values / 10000.
if key.name in ('Tsurf', 'CloudTopPres', 'CloudTopTemp'):
goods = values > -9998.
selected = np.array(selected & goods)
if key.name in ('Tsurf', "Alt_surface", "CloudTopTemp"):
values = values / 100.
if key.name in ("CloudTopPres"):
values = values / 10.
else:
selected = self.selected
info.update(self.finfo)
fill_value = np.nan
if key.name == 'ct':
fill_value = 0
info['_FillValue'] = 0
ds = DataArray(values, dims=['y', 'x'], attrs=info).where(selected, fill_value)
# update dataset info with file_info
return ds | Get a dataset from the file. | Below is the the instruction that describes the task:
### Input:
Get a dataset from the file.
### Response:
def get_dataset(self, key, info, out=None):
"""Get a dataset from the file."""
logger.debug("Reading %s.", key.name)
values = self.file_content[key.name]
selected = np.array(self.selected)
if key.name in ("Latitude", "Longitude"):
values = values / 10000.
if key.name in ('Tsurf', 'CloudTopPres', 'CloudTopTemp'):
goods = values > -9998.
selected = np.array(selected & goods)
if key.name in ('Tsurf', "Alt_surface", "CloudTopTemp"):
values = values / 100.
if key.name in ("CloudTopPres"):
values = values / 10.
else:
selected = self.selected
info.update(self.finfo)
fill_value = np.nan
if key.name == 'ct':
fill_value = 0
info['_FillValue'] = 0
ds = DataArray(values, dims=['y', 'x'], attrs=info).where(selected, fill_value)
# update dataset info with file_info
return ds |
def from_path(path):
'''
create from path.
return `None` if path is not exists.
'''
if os.path.isdir(path):
return DirectoryInfo(path)
if os.path.isfile(path):
return FileInfo(path)
return None | create from path.
return `None` if path is not exists. | Below is the the instruction that describes the task:
### Input:
create from path.
return `None` if path is not exists.
### Response:
def from_path(path):
'''
create from path.
return `None` if path is not exists.
'''
if os.path.isdir(path):
return DirectoryInfo(path)
if os.path.isfile(path):
return FileInfo(path)
return None |
def setup(self, *args, **kwargs):
"""
Dynamically reset the interface to expose the services / topics / params whose names are passed as args
The interface class can be specified with a module to be dynamically imported
:param publishers:
:param subscribers:
:param services:
:param topics: BW COMPAT ONLY !
:param params:
:return:
"""
#BWCOMPAT
if kwargs.get('topics'):
if kwargs.get('publishers'):
kwargs['publishers'] = kwargs.get('publishers', []) + kwargs.get('topics', [])
else:
kwargs['publishers'] = kwargs.get('topics', [])
if kwargs.get('subscribers'):
kwargs['subscribers'] = kwargs.get('subscribers', []) + kwargs.get('topics', [])
else:
kwargs['subscribers'] = kwargs.get('topics', [])
kwargs.pop('topics')
# We can now import RosInterface and setup will be done ( we re in another process ).
# TODO : context to make it cleaner (maybe use zmp.Node context ?)
if isinstance(self.interface_class, tuple):
m = None
class_name = self.interface_class[-1] # last element is always the class_name
if len(self.interface_class) >= 3:
# load the relative module, will raise ImportError if module cannot be loaded
m = importlib.import_module(self.interface_class[1], self.interface_class[0])
elif len(self.interface_class) == 2:
# load the relative module, will raise ImportError if module cannot be loaded
m = importlib.import_module(self.interface_class[0])
# get the class, will raise AttributeError if class cannot be found
self.interface_class = getattr(m, class_name)
if not (
# TODO : we should pre check all the used members are present...
hasattr(self.interface_class, 'services')
# TODO : etc.
):
raise PyrosException("The interface class is missing some members to be used as an interface. Aborting Setup. {interface_class}".format(**locals()))
self.interface = self.interface_class(*args, **kwargs)
return self.interface | Dynamically reset the interface to expose the services / topics / params whose names are passed as args
The interface class can be specified with a module to be dynamically imported
:param publishers:
:param subscribers:
:param services:
:param topics: BW COMPAT ONLY !
:param params:
:return: | Below is the the instruction that describes the task:
### Input:
Dynamically reset the interface to expose the services / topics / params whose names are passed as args
The interface class can be specified with a module to be dynamically imported
:param publishers:
:param subscribers:
:param services:
:param topics: BW COMPAT ONLY !
:param params:
:return:
### Response:
def setup(self, *args, **kwargs):
"""
Dynamically reset the interface to expose the services / topics / params whose names are passed as args
The interface class can be specified with a module to be dynamically imported
:param publishers:
:param subscribers:
:param services:
:param topics: BW COMPAT ONLY !
:param params:
:return:
"""
#BWCOMPAT
if kwargs.get('topics'):
if kwargs.get('publishers'):
kwargs['publishers'] = kwargs.get('publishers', []) + kwargs.get('topics', [])
else:
kwargs['publishers'] = kwargs.get('topics', [])
if kwargs.get('subscribers'):
kwargs['subscribers'] = kwargs.get('subscribers', []) + kwargs.get('topics', [])
else:
kwargs['subscribers'] = kwargs.get('topics', [])
kwargs.pop('topics')
# We can now import RosInterface and setup will be done ( we re in another process ).
# TODO : context to make it cleaner (maybe use zmp.Node context ?)
if isinstance(self.interface_class, tuple):
m = None
class_name = self.interface_class[-1] # last element is always the class_name
if len(self.interface_class) >= 3:
# load the relative module, will raise ImportError if module cannot be loaded
m = importlib.import_module(self.interface_class[1], self.interface_class[0])
elif len(self.interface_class) == 2:
# load the relative module, will raise ImportError if module cannot be loaded
m = importlib.import_module(self.interface_class[0])
# get the class, will raise AttributeError if class cannot be found
self.interface_class = getattr(m, class_name)
if not (
# TODO : we should pre check all the used members are present...
hasattr(self.interface_class, 'services')
# TODO : etc.
):
raise PyrosException("The interface class is missing some members to be used as an interface. Aborting Setup. {interface_class}".format(**locals()))
self.interface = self.interface_class(*args, **kwargs)
return self.interface |
def next_line(last_line, next_line_8bit):
"""Compute the next line based on the last line and a 8bit next line.
The behaviour of the function is specified in :ref:`reqline`.
:param int last_line: the last line that was processed
:param int next_line_8bit: the lower 8 bits of the next line
:return: the next line closest to :paramref:`last_line`
.. seealso:: :ref:`reqline`
"""
# compute the line without the lowest byte
base_line = last_line - (last_line & 255)
# compute the three different lines
line = base_line + next_line_8bit
lower_line = line - 256
upper_line = line + 256
# compute the next line
if last_line - lower_line <= line - last_line:
return lower_line
if upper_line - last_line < last_line - line:
return upper_line
return line | Compute the next line based on the last line and a 8bit next line.
The behaviour of the function is specified in :ref:`reqline`.
:param int last_line: the last line that was processed
:param int next_line_8bit: the lower 8 bits of the next line
:return: the next line closest to :paramref:`last_line`
.. seealso:: :ref:`reqline` | Below is the the instruction that describes the task:
### Input:
Compute the next line based on the last line and a 8bit next line.
The behaviour of the function is specified in :ref:`reqline`.
:param int last_line: the last line that was processed
:param int next_line_8bit: the lower 8 bits of the next line
:return: the next line closest to :paramref:`last_line`
.. seealso:: :ref:`reqline`
### Response:
def next_line(last_line, next_line_8bit):
"""Compute the next line based on the last line and a 8bit next line.
The behaviour of the function is specified in :ref:`reqline`.
:param int last_line: the last line that was processed
:param int next_line_8bit: the lower 8 bits of the next line
:return: the next line closest to :paramref:`last_line`
.. seealso:: :ref:`reqline`
"""
# compute the line without the lowest byte
base_line = last_line - (last_line & 255)
# compute the three different lines
line = base_line + next_line_8bit
lower_line = line - 256
upper_line = line + 256
# compute the next line
if last_line - lower_line <= line - last_line:
return lower_line
if upper_line - last_line < last_line - line:
return upper_line
return line |
def anticlockwise_sort_indices(pps):
"""
Returns the indices that would sort a list of 2D points in anticlockwise order
:param pps: List of points to be sorted
:return: Indices of the sorted list of points
"""
angles = np.zeros(len(pps), np.float)
for ipp, pp in enumerate(pps):
angles[ipp] = np.arctan2(pp[1], pp[0])
return np.argsort(angles) | Returns the indices that would sort a list of 2D points in anticlockwise order
:param pps: List of points to be sorted
:return: Indices of the sorted list of points | Below is the the instruction that describes the task:
### Input:
Returns the indices that would sort a list of 2D points in anticlockwise order
:param pps: List of points to be sorted
:return: Indices of the sorted list of points
### Response:
def anticlockwise_sort_indices(pps):
"""
Returns the indices that would sort a list of 2D points in anticlockwise order
:param pps: List of points to be sorted
:return: Indices of the sorted list of points
"""
angles = np.zeros(len(pps), np.float)
for ipp, pp in enumerate(pps):
angles[ipp] = np.arctan2(pp[1], pp[0])
return np.argsort(angles) |
def sort_depth(vals, reverse=False):
"""Sort bids or asks by price
"""
lst = [[float(price), quantity] for price, quantity in vals.items()]
lst = sorted(lst, key=itemgetter(0), reverse=reverse)
return lst | Sort bids or asks by price | Below is the the instruction that describes the task:
### Input:
Sort bids or asks by price
### Response:
def sort_depth(vals, reverse=False):
"""Sort bids or asks by price
"""
lst = [[float(price), quantity] for price, quantity in vals.items()]
lst = sorted(lst, key=itemgetter(0), reverse=reverse)
return lst |
def namespace(self, mid: ModuleId) -> YangIdentifier:
"""Return the namespace corresponding to a module or submodule.
Args:
mid: Module identifier.
Raises:
ModuleNotRegistered: If `mid` is not registered in the data model.
"""
try:
mdata = self.modules[mid]
except KeyError:
raise ModuleNotRegistered(*mid) from None
return mdata.main_module[0] | Return the namespace corresponding to a module or submodule.
Args:
mid: Module identifier.
Raises:
ModuleNotRegistered: If `mid` is not registered in the data model. | Below is the the instruction that describes the task:
### Input:
Return the namespace corresponding to a module or submodule.
Args:
mid: Module identifier.
Raises:
ModuleNotRegistered: If `mid` is not registered in the data model.
### Response:
def namespace(self, mid: ModuleId) -> YangIdentifier:
"""Return the namespace corresponding to a module or submodule.
Args:
mid: Module identifier.
Raises:
ModuleNotRegistered: If `mid` is not registered in the data model.
"""
try:
mdata = self.modules[mid]
except KeyError:
raise ModuleNotRegistered(*mid) from None
return mdata.main_module[0] |
def merge_dicts(*dicts, **kwargs):
"""Merges dicts and kwargs into one dict"""
result = {}
for d in dicts:
result.update(d)
result.update(kwargs)
return result | Merges dicts and kwargs into one dict | Below is the the instruction that describes the task:
### Input:
Merges dicts and kwargs into one dict
### Response:
def merge_dicts(*dicts, **kwargs):
"""Merges dicts and kwargs into one dict"""
result = {}
for d in dicts:
result.update(d)
result.update(kwargs)
return result |
def role_delete(self, role_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/roles#delete-role"
api_path = "/api/v2/roles/{role_id}"
api_path = api_path.format(role_id=role_id)
return self.call(api_path, method="DELETE", **kwargs) | https://developer.zendesk.com/rest_api/docs/chat/roles#delete-role | Below is the the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/chat/roles#delete-role
### Response:
def role_delete(self, role_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/roles#delete-role"
api_path = "/api/v2/roles/{role_id}"
api_path = api_path.format(role_id=role_id)
return self.call(api_path, method="DELETE", **kwargs) |
def is_permitted_collective(self, permission_s, logical_operator=all):
"""
:param permission_s: a List of authz_abcs.Permission objects
:param logical_operator: indicates whether *all* or at least one
permission check is true, *any*
:type: any OR all (functions from python stdlib)
:returns: a Boolean
"""
sm = self.security_manager
if self.authorized:
return sm.is_permitted_collective(self.identifiers,
permission_s,
logical_operator)
msg = 'Cannot check permission when user isn\'t authenticated nor remembered'
raise ValueError(msg) | :param permission_s: a List of authz_abcs.Permission objects
:param logical_operator: indicates whether *all* or at least one
permission check is true, *any*
:type: any OR all (functions from python stdlib)
:returns: a Boolean | Below is the the instruction that describes the task:
### Input:
:param permission_s: a List of authz_abcs.Permission objects
:param logical_operator: indicates whether *all* or at least one
permission check is true, *any*
:type: any OR all (functions from python stdlib)
:returns: a Boolean
### Response:
def is_permitted_collective(self, permission_s, logical_operator=all):
"""
:param permission_s: a List of authz_abcs.Permission objects
:param logical_operator: indicates whether *all* or at least one
permission check is true, *any*
:type: any OR all (functions from python stdlib)
:returns: a Boolean
"""
sm = self.security_manager
if self.authorized:
return sm.is_permitted_collective(self.identifiers,
permission_s,
logical_operator)
msg = 'Cannot check permission when user isn\'t authenticated nor remembered'
raise ValueError(msg) |
def drop_row_range(
self,
name,
row_key_prefix=None,
delete_all_data_from_table=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Permanently drop/delete a row range from a specified table. The request can
specify whether to delete all rows in a table, or only those that match a
particular prefix.
Example:
>>> from google.cloud import bigtable_admin_v2
>>>
>>> client = bigtable_admin_v2.BigtableTableAdminClient()
>>>
>>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> client.drop_row_range(name)
Args:
name (str): The unique name of the table on which to drop a range of rows. Values
are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
row_key_prefix (bytes): Delete all rows that start with this row key prefix. Prefix cannot be
zero length.
delete_all_data_from_table (bool): Delete all rows in the table. Setting this to false is a no-op.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "drop_row_range" not in self._inner_api_calls:
self._inner_api_calls[
"drop_row_range"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.drop_row_range,
default_retry=self._method_configs["DropRowRange"].retry,
default_timeout=self._method_configs["DropRowRange"].timeout,
client_info=self._client_info,
)
# Sanity check: We have some fields which are mutually exclusive;
# raise ValueError if more than one is sent.
google.api_core.protobuf_helpers.check_oneof(
row_key_prefix=row_key_prefix,
delete_all_data_from_table=delete_all_data_from_table,
)
request = bigtable_table_admin_pb2.DropRowRangeRequest(
name=name,
row_key_prefix=row_key_prefix,
delete_all_data_from_table=delete_all_data_from_table,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls["drop_row_range"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Permanently drop/delete a row range from a specified table. The request can
specify whether to delete all rows in a table, or only those that match a
particular prefix.
Example:
>>> from google.cloud import bigtable_admin_v2
>>>
>>> client = bigtable_admin_v2.BigtableTableAdminClient()
>>>
>>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> client.drop_row_range(name)
Args:
name (str): The unique name of the table on which to drop a range of rows. Values
are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
row_key_prefix (bytes): Delete all rows that start with this row key prefix. Prefix cannot be
zero length.
delete_all_data_from_table (bool): Delete all rows in the table. Setting this to false is a no-op.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | Below is the the instruction that describes the task:
### Input:
Permanently drop/delete a row range from a specified table. The request can
specify whether to delete all rows in a table, or only those that match a
particular prefix.
Example:
>>> from google.cloud import bigtable_admin_v2
>>>
>>> client = bigtable_admin_v2.BigtableTableAdminClient()
>>>
>>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> client.drop_row_range(name)
Args:
name (str): The unique name of the table on which to drop a range of rows. Values
are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
row_key_prefix (bytes): Delete all rows that start with this row key prefix. Prefix cannot be
zero length.
delete_all_data_from_table (bool): Delete all rows in the table. Setting this to false is a no-op.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
def drop_row_range(
self,
name,
row_key_prefix=None,
delete_all_data_from_table=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Permanently drop/delete a row range from a specified table. The request can
specify whether to delete all rows in a table, or only those that match a
particular prefix.
Example:
>>> from google.cloud import bigtable_admin_v2
>>>
>>> client = bigtable_admin_v2.BigtableTableAdminClient()
>>>
>>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> client.drop_row_range(name)
Args:
name (str): The unique name of the table on which to drop a range of rows. Values
are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
row_key_prefix (bytes): Delete all rows that start with this row key prefix. Prefix cannot be
zero length.
delete_all_data_from_table (bool): Delete all rows in the table. Setting this to false is a no-op.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "drop_row_range" not in self._inner_api_calls:
self._inner_api_calls[
"drop_row_range"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.drop_row_range,
default_retry=self._method_configs["DropRowRange"].retry,
default_timeout=self._method_configs["DropRowRange"].timeout,
client_info=self._client_info,
)
# Sanity check: We have some fields which are mutually exclusive;
# raise ValueError if more than one is sent.
google.api_core.protobuf_helpers.check_oneof(
row_key_prefix=row_key_prefix,
delete_all_data_from_table=delete_all_data_from_table,
)
request = bigtable_table_admin_pb2.DropRowRangeRequest(
name=name,
row_key_prefix=row_key_prefix,
delete_all_data_from_table=delete_all_data_from_table,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls["drop_row_range"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
def load_key_bindings_for_prompt(**kw):
"""
Create a ``Registry`` object with the defaults key bindings for an input
prompt.
This activates the key bindings for abort/exit (Ctrl-C/Ctrl-D),
incremental search and auto suggestions.
(Not for full screen applications.)
"""
kw.setdefault('enable_abort_and_exit_bindings', True)
kw.setdefault('enable_search', True)
kw.setdefault('enable_auto_suggest_bindings', True)
return load_key_bindings(**kw) | Create a ``Registry`` object with the defaults key bindings for an input
prompt.
This activates the key bindings for abort/exit (Ctrl-C/Ctrl-D),
incremental search and auto suggestions.
(Not for full screen applications.) | Below is the the instruction that describes the task:
### Input:
Create a ``Registry`` object with the defaults key bindings for an input
prompt.
This activates the key bindings for abort/exit (Ctrl-C/Ctrl-D),
incremental search and auto suggestions.
(Not for full screen applications.)
### Response:
def load_key_bindings_for_prompt(**kw):
"""
Create a ``Registry`` object with the defaults key bindings for an input
prompt.
This activates the key bindings for abort/exit (Ctrl-C/Ctrl-D),
incremental search and auto suggestions.
(Not for full screen applications.)
"""
kw.setdefault('enable_abort_and_exit_bindings', True)
kw.setdefault('enable_search', True)
kw.setdefault('enable_auto_suggest_bindings', True)
return load_key_bindings(**kw) |
def _resolve(self, path, migration_file):
"""
Resolve a migration instance from a file.
:param migration_file: The migration file
:type migration_file: str
:rtype: eloquent.migrations.migration.Migration
"""
variables = {}
name = '_'.join(migration_file.split('_')[4:])
migration_file = os.path.join(path, '%s.py' % migration_file)
with open(migration_file) as fh:
exec(fh.read(), {}, variables)
klass = variables[inflection.camelize(name)]
instance = klass()
instance.set_schema_builder(self.get_repository().get_connection().get_schema_builder())
return instance | Resolve a migration instance from a file.
:param migration_file: The migration file
:type migration_file: str
:rtype: eloquent.migrations.migration.Migration | Below is the the instruction that describes the task:
### Input:
Resolve a migration instance from a file.
:param migration_file: The migration file
:type migration_file: str
:rtype: eloquent.migrations.migration.Migration
### Response:
def _resolve(self, path, migration_file):
    """Load and instantiate a migration from a migration file.

    :param path: The directory containing the migration files
    :type path: str
    :param migration_file: The migration file (without extension)
    :type migration_file: str
    :rtype: eloquent.migrations.migration.Migration
    """
    # Migration files are named <date>_<time>_..._<name>; the class name
    # is the camelized form of everything after the fourth underscore.
    name = '_'.join(migration_file.split('_')[4:])
    full_path = os.path.join(path, '%s.py' % migration_file)
    namespace = {}
    # Execute the migration module source and collect its definitions.
    with open(full_path) as source:
        exec(source.read(), {}, namespace)
    migration_class = namespace[inflection.camelize(name)]
    migration = migration_class()
    builder = self.get_repository().get_connection().get_schema_builder()
    migration.set_schema_builder(builder)
    return migration
async def ask(self, body, quick_replies=None, options=None, user=None):
"""
simple ask with predefined quick replies
:param body:
:param quick_replies: (optional) in form of
{'title': <message>, 'payload': <any json>}
:param options:
:param user:
:return:
"""
await self.send_text_message_to_all_interfaces(
recipient=user,
text=body,
quick_replies=quick_replies,
options=options,
)
return any.Any() | simple ask with predefined quick replies
:param body:
:param quick_replies: (optional) in form of
{'title': <message>, 'payload': <any json>}
:param options:
:param user:
:return: | Below is the instruction that describes the task:
### Input:
simple ask with predefined quick replies
:param body:
:param quick_replies: (optional) in form of
{'title': <message>, 'payload': <any json>}
:param options:
:param user:
:return:
### Response:
async def ask(self, body, quick_replies=None, options=None, user=None):
    """Ask the user a question, optionally with predefined quick replies.

    :param body: text of the question
    :param quick_replies: (optional) in form of
        {'title': <message>, 'payload': <any json>}
    :param options:
    :param user:
    :return:
    """
    # Broadcast the question over every registered messaging interface.
    await self.send_text_message_to_all_interfaces(
        text=body,
        recipient=user,
        options=options,
        quick_replies=quick_replies,
    )
    return any.Any()
def cisco_conf_parse_objects(cfg_section, config):
"""
Use CiscoConfParse to find and return a section of Cisco IOS config.
Similar to "show run | section <cfg_section>"
:param cfg_section: The section of the config to return eg. "router bgp"
:param config: The running/startup config of the device to parse
"""
return_config = []
if type(config) is str:
config = config.splitlines()
parse = CiscoConfParse(config)
cfg_obj = parse.find_objects(cfg_section)
for parent in cfg_obj:
return_config.append(parent.text)
for child in parent.all_children:
return_config.append(child.text)
return return_config | Use CiscoConfParse to find and return a section of Cisco IOS config.
Similar to "show run | section <cfg_section>"
:param cfg_section: The section of the config to return eg. "router bgp"
:param config: The running/startup config of the device to parse | Below is the instruction that describes the task:
### Input:
Use CiscoConfParse to find and return a section of Cisco IOS config.
Similar to "show run | section <cfg_section>"
:param cfg_section: The section of the config to return eg. "router bgp"
:param config: The running/startup config of the device to parse
### Response:
def cisco_conf_parse_objects(cfg_section, config):
    """
    Use CiscoConfParse to find and return a section of Cisco IOS config.
    Similar to "show run | section <cfg_section>"

    :param cfg_section: The section of the config to return eg. "router bgp"
    :param config: The running/startup config of the device to parse,
        either as a single string or as a list of lines
    :return: list of matching parent lines, each followed by its child lines
    """
    return_config = []
    # CiscoConfParse expects a list of lines, so split a raw string first.
    # isinstance (not `type(...) is str`) is the idiomatic type check.
    if isinstance(config, str):
        config = config.splitlines()
    parse = CiscoConfParse(config)
    for parent in parse.find_objects(cfg_section):
        return_config.append(parent.text)
        # all_children is already in configuration order.
        return_config.extend(child.text for child in parent.all_children)
    return return_config
def client_key_loader(self, f):
"""Registers a function to be called to find a client key.
Function you set has to take a client id and return a client key::
@hawk.client_key_loader
def get_client_key(client_id):
if client_id == 'Alice':
return 'werxhqb98rpaxn39848xrunpaw3489ruxnpa98w4rxn'
else:
raise LookupError()
:param f: The callback for retrieving a client key.
"""
@wraps(f)
def wrapped_f(client_id):
client_key = f(client_id)
return {
'id': client_id,
'key': client_key,
'algorithm': current_app.config['HAWK_ALGORITHM']
}
self._client_key_loader_func = wrapped_f
return wrapped_f | Registers a function to be called to find a client key.
Function you set has to take a client id and return a client key::
@hawk.client_key_loader
def get_client_key(client_id):
if client_id == 'Alice':
return 'werxhqb98rpaxn39848xrunpaw3489ruxnpa98w4rxn'
else:
raise LookupError()
:param f: The callback for retrieving a client key. | Below is the instruction that describes the task:
### Input:
Registers a function to be called to find a client key.
Function you set has to take a client id and return a client key::
@hawk.client_key_loader
def get_client_key(client_id):
if client_id == 'Alice':
return 'werxhqb98rpaxn39848xrunpaw3489ruxnpa98w4rxn'
else:
raise LookupError()
:param f: The callback for retrieving a client key.
### Response:
def client_key_loader(self, f):
    """Registers a function to be called to find a client key.
    Function you set has to take a client id and return a client key::
        @hawk.client_key_loader
        def get_client_key(client_id):
            if client_id == 'Alice':
                return 'werxhqb98rpaxn39848xrunpaw3489ruxnpa98w4rxn'
            else:
                raise LookupError()
    :param f: The callback for retrieving a client key.
    """
    @wraps(f)
    def wrapped_f(client_id):
        # Wrap the looked-up key together with the id and the configured
        # algorithm, in the credential shape the HAWK library expects.
        return {
            'id': client_id,
            'key': f(client_id),
            'algorithm': current_app.config['HAWK_ALGORITHM'],
        }
    self._client_key_loader_func = wrapped_f
    return wrapped_f
def use_comparative_assessment_part_bank_view(self):
"""Pass through to provider AssessmentPartBankSession.use_comparative_assessment_part_bank_view"""
self._bank_view = COMPARATIVE
# self._get_provider_session('assessment_part_bank_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_comparative_bank_view()
except AttributeError:
pass | Pass through to provider AssessmentPartBankSession.use_comparative_assessment_part_bank_view | Below is the the instruction that describes the task:
### Input:
Pass through to provider AssessmentPartBankSession.use_comparative_assessment_part_bank_view
### Response:
def use_comparative_assessment_part_bank_view(self):
    """Pass through to provider AssessmentPartBankSession.use_comparative_assessment_part_bank_view"""
    self._bank_view = COMPARATIVE
    # self._get_provider_session('assessment_part_bank_session') # To make sure the session is tracked
    for session in self._get_provider_sessions():
        try:
            session.use_comparative_bank_view()
        except AttributeError:
            # Not every provider session exposes the comparative bank
            # view; silently skip those that don't.
            pass
def _download_image(self, imageURL):
"""
Downloads an image file from the given image URL
Arguments:
imageURL {[str]} -- [Image URL]
"""
# If the required count of images have been download,
# refrain from downloading the remainder of the images
if(self._imageCounter >= self._imageCount):
return
try:
imageResponse = requests.get(imageURL)
# Generate image file name as <_imageQuery>_<_imageCounter>.<extension>
imageType, imageEncoding = mimetypes.guess_type(imageURL)
if imageType is not None:
imageExtension = mimetypes.guess_extension(imageType)
else:
imageExtension = mimetypes.guess_extension(
imageResponse.headers['Content-Type'])
imageFileName = self._imageQuery.replace(
' ', '_') + '_' + str(self._imageCounter) + imageExtension
imageFileName = os.path.join(self._storageFolder, imageFileName)
image = Image.open(BytesIO(imageResponse.content))
image.save(imageFileName)
self._imageCounter += 1
self._downloadProgressBar.update(self._imageCounter)
except Exception as exception:
pass | Downloads an image file from the given image URL
Arguments:
imageURL {[str]} -- [Image URL] | Below is the instruction that describes the task:
### Input:
Downloads an image file from the given image URL
Arguments:
imageURL {[str]} -- [Image URL]
### Response:
def _download_image(self, imageURL):
    """
    Downloads an image file from the given image URL.

    Arguments:
        imageURL {[str]} -- [Image URL]
    """
    # If the required count of images has already been downloaded,
    # refrain from downloading the remainder of the images.
    if self._imageCounter >= self._imageCount:
        return
    try:
        imageResponse = requests.get(imageURL)
        # Generate image file name as <_imageQuery>_<_imageCounter>.<extension>
        imageType, imageEncoding = mimetypes.guess_type(imageURL)
        if imageType is not None:
            imageExtension = mimetypes.guess_extension(imageType)
        else:
            # Fall back on the server-reported content type when the URL
            # itself carries no usable extension.
            imageExtension = mimetypes.guess_extension(
                imageResponse.headers['Content-Type'])
        imageFileName = self._imageQuery.replace(
            ' ', '_') + '_' + str(self._imageCounter) + imageExtension
        imageFileName = os.path.join(self._storageFolder, imageFileName)
        image = Image.open(BytesIO(imageResponse.content))
        image.save(imageFileName)
        self._imageCounter += 1
        self._downloadProgressBar.update(self._imageCounter)
    except Exception:
        # Best-effort download: a failure for one image (network error,
        # unknown content type, corrupt data) must not abort the run.
        # NOTE(review): consider logging the exception instead of
        # discarding it silently.
        pass
def generate(self, output_dir, minimum_size):
"""Generates sequence reports and writes them to the output directory.
:param output_dir: directory to output reports to
:type output_dir: `str`
:param minimum_size: minimum size of n-grams to create sequences for
:type minimum_size: `int`
"""
self._output_dir = output_dir
# Get a list of the files in the matches, grouped by label
# (ordered by number of works).
labels = list(self._matches.groupby([constants.LABEL_FIELDNAME])[
constants.WORK_FIELDNAME].nunique().index)
original_ngrams = self._matches[
self._matches[
constants.SIZE_FIELDNAME] >= minimum_size].sort_values(
by=constants.SIZE_FIELDNAME, ascending=False)[
constants.NGRAM_FIELDNAME].unique()
ngrams = []
for original_ngram in original_ngrams:
ngrams.append(self._get_text(Text(original_ngram,
self._tokenizer)))
# Generate sequences for each witness in every combination of
# (different) labels.
for index, primary_label in enumerate(labels):
for secondary_label in labels[index+1:]:
self._generate_sequences(primary_label, secondary_label,
ngrams) | Generates sequence reports and writes them to the output directory.
:param output_dir: directory to output reports to
:type output_dir: `str`
:param minimum_size: minimum size of n-grams to create sequences for
:type minimum_size: `int` | Below is the instruction that describes the task:
### Input:
Generates sequence reports and writes them to the output directory.
:param output_dir: directory to output reports to
:type output_dir: `str`
:param minimum_size: minimum size of n-grams to create sequences for
:type minimum_size: `int`
### Response:
def generate(self, output_dir, minimum_size):
    """Generates sequence reports and writes them to the output directory.

    :param output_dir: directory to output reports to
    :type output_dir: `str`
    :param minimum_size: minimum size of n-grams to create sequences for
    :type minimum_size: `int`
    """
    self._output_dir = output_dir
    # Get a list of the files in the matches, grouped by label
    # (ordered by number of works).
    labels = list(self._matches.groupby([constants.LABEL_FIELDNAME])[
        constants.WORK_FIELDNAME].nunique().index)
    # Distinct n-grams at or above the minimum size, largest first.
    original_ngrams = self._matches[
        self._matches[
            constants.SIZE_FIELDNAME] >= minimum_size].sort_values(
        by=constants.SIZE_FIELDNAME, ascending=False)[
        constants.NGRAM_FIELDNAME].unique()
    ngrams = []
    # Wrap each n-gram string as a Text; _get_text presumably normalises
    # it to the tokenised form used for matching -- TODO confirm.
    for original_ngram in original_ngrams:
        ngrams.append(self._get_text(Text(original_ngram,
                                          self._tokenizer)))
    # Generate sequences for each witness in every combination of
    # (different) labels.
    for index, primary_label in enumerate(labels):
        for secondary_label in labels[index+1:]:
            self._generate_sequences(primary_label, secondary_label,
                                     ngrams)
def to_naf(self):
"""
Converts the object to NAF
"""
if self.type == 'KAF':
self.type = 'NAF'
for node in self.__get_wf_nodes():
node.set('id',node.get('wid'))
del node.attrib['wid'] | Converts the object to NAF | Below is the instruction that describes the task:
### Input:
Converts the object to NAF
### Response:
def to_naf(self):
    """Convert the document from KAF to NAF in place.

    Documents that are already NAF are left untouched.
    """
    if self.type != 'KAF':
        return
    self.type = 'NAF'
    # NAF names the word-form identifier attribute "id" where KAF
    # used "wid"; rename it on every word-form node.
    for wf_node in self.__get_wf_nodes():
        wf_node.set('id', wf_node.get('wid'))
        del wf_node.attrib['wid']
def get_exec_create_kwargs(self, action, container_name, exec_cmd, exec_user, kwargs=None):
"""
Generates keyword arguments for the Docker client to set up the HostConfig or start a container.
:param action: Action configuration.
:type action: ActionConfig
:param container_name: Container name or id.
:type container_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict | NoneType
:param exec_cmd: Command to be executed.
:type exec_cmd: unicode | str
:param exec_user: User to run the command.
:type exec_user: unicode | str
:return: Resulting keyword arguments.
:rtype: dict
"""
c_kwargs = dict(
container=container_name,
cmd=resolve_value(exec_cmd),
)
if exec_user is not None:
c_kwargs['user'] = text_type(resolve_value(exec_user))
elif action.config.user is not NotSet:
c_kwargs['user'] = extract_user(action.config.user)
update_kwargs(c_kwargs, kwargs)
return c_kwargs | Generates keyword arguments for the Docker client to set up the HostConfig or start a container.
:param action: Action configuration.
:type action: ActionConfig
:param container_name: Container name or id.
:type container_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict | NoneType
:param exec_cmd: Command to be executed.
:type exec_cmd: unicode | str
:param exec_user: User to run the command.
:type exec_user: unicode | str
:return: Resulting keyword arguments.
:rtype: dict | Below is the instruction that describes the task:
### Input:
Generates keyword arguments for the Docker client to set up the HostConfig or start a container.
:param action: Action configuration.
:type action: ActionConfig
:param container_name: Container name or id.
:type container_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict | NoneType
:param exec_cmd: Command to be executed.
:type exec_cmd: unicode | str
:param exec_user: User to run the command.
:type exec_user: unicode | str
:return: Resulting keyword arguments.
:rtype: dict
### Response:
def get_exec_create_kwargs(self, action, container_name, exec_cmd, exec_user, kwargs=None):
    """
    Generates keyword arguments for the Docker client to set up the HostConfig or start a container.

    :param action: Action configuration.
    :type action: ActionConfig
    :param container_name: Container name or id.
    :type container_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the configuration-based values.
    :type kwargs: dict | NoneType
    :param exec_cmd: Command to be executed.
    :type exec_cmd: unicode | str
    :param exec_user: User to run the command.
    :type exec_user: unicode | str
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    create_kwargs = {
        'container': container_name,
        'cmd': resolve_value(exec_cmd),
    }
    # An explicit exec user wins; otherwise fall back to the user
    # configured on the action, when one is set.
    if exec_user is not None:
        create_kwargs['user'] = text_type(resolve_value(exec_user))
    elif action.config.user is not NotSet:
        create_kwargs['user'] = extract_user(action.config.user)
    # Caller-supplied kwargs complement or override the defaults.
    update_kwargs(create_kwargs, kwargs)
    return create_kwargs
def inv(x):
'''
inv(x) yields the inverse of x, 1/x.
Note that inv supports sparse matrices, but it is forced to reify them. Additionally, because
inv raises an error on divide-by-zero, they are unlikely to work. For better sparse-matrix
support, see zinv.
'''
if sps.issparse(x): return 1.0 / x.toarray()
else: return 1.0 / np.asarray(x) | inv(x) yields the inverse of x, 1/x.
Note that inv supports sparse matrices, but it is forced to reify them. Additionally, because
inv raises an error on divide-by-zero, they are unlikely to work. For better sparse-matrix
support, see zinv. | Below is the instruction that describes the task:
### Input:
inv(x) yields the inverse of x, 1/x.
Note that inv supports sparse matrices, but it is forced to reify them. Additionally, because
inv raises an error on divide-by-zero, they are unlikely to work. For better sparse-matrix
support, see zinv.
### Response:
def inv(x):
    '''
    inv(x) yields the inverse of x, 1/x.
    Note that inv supports sparse matrices, but it is forced to reify them. Additionally, because
    inv raises an error on divide-by-zero, they are unlikely to work. For better sparse-matrix
    support, see zinv.
    '''
    # Reify sparse input to a dense array; anything else is coerced
    # with asarray, then inverted elementwise.
    dense = x.toarray() if sps.issparse(x) else np.asarray(x)
    return 1.0 / dense
def partitionBy(*cols):
"""
Creates a :class:`WindowSpec` with the partitioning defined.
"""
sc = SparkContext._active_spark_context
jspec = sc._jvm.org.apache.spark.sql.expressions.Window.partitionBy(_to_java_cols(cols))
return WindowSpec(jspec) | Creates a :class:`WindowSpec` with the partitioning defined. | Below is the the instruction that describes the task:
### Input:
Creates a :class:`WindowSpec` with the partitioning defined.
### Response:
def partitionBy(*cols):
    """
    Creates a :class:`WindowSpec` with the partitioning defined.
    """
    sc = SparkContext._active_spark_context
    # Build the JVM-side window spec and wrap it for the Python API.
    jvm_window = sc._jvm.org.apache.spark.sql.expressions.Window
    return WindowSpec(jvm_window.partitionBy(_to_java_cols(cols)))
def checkin_bundle(self, db_path, replace=True, cb=None):
"""Add a bundle, as a Sqlite file, to this library"""
from ambry.orm.exc import NotFoundError
db = Database('sqlite:///{}'.format(db_path))
db.open()
if len(db.datasets) == 0:
raise NotFoundError("Did not get a dataset in the {} bundle".format(db_path))
ds = db.dataset(db.datasets[0].vid) # There should only be one
assert ds is not None
assert ds._database
try:
b = self.bundle(ds.vid)
self.logger.info(
"Removing old bundle before checking in new one of same number: '{}'"
.format(ds.vid))
self.remove(b)
except NotFoundError:
pass
try:
self.dataset(ds.vid) # Skip loading bundles we already have
except NotFoundError:
self.database.copy_dataset(ds, cb=cb)
b = self.bundle(ds.vid) # It had better exist now.
# b.state = Bundle.STATES.INSTALLED
b.commit()
#self.search.index_library_datasets(tick)
self.search.index_bundle(b)
return b | Add a bundle, as a Sqlite file, to this library | Below is the instruction that describes the task:
### Input:
Add a bundle, as a Sqlite file, to this library
### Response:
def checkin_bundle(self, db_path, replace=True, cb=None):
    """Add a bundle, as a Sqlite file, to this library.

    :param db_path: path to the bundle's Sqlite database file
    :param replace: unused in this body -- NOTE(review): any existing
        bundle with the same version id is always removed first; confirm
        whether callers expect ``replace=False`` to be honoured
    :param cb: optional progress callback forwarded to the dataset copy
    """
    from ambry.orm.exc import NotFoundError
    db = Database('sqlite:///{}'.format(db_path))
    db.open()
    if len(db.datasets) == 0:
        raise NotFoundError("Did not get a dataset in the {} bundle".format(db_path))
    ds = db.dataset(db.datasets[0].vid)  # There should only be one
    assert ds is not None
    assert ds._database
    try:
        # Remove any already-installed bundle with the same version id
        # before checking in the new copy.
        b = self.bundle(ds.vid)
        self.logger.info(
            "Removing old bundle before checking in new one of same number: '{}'"
            .format(ds.vid))
        self.remove(b)
    except NotFoundError:
        pass
    try:
        self.dataset(ds.vid)  # Skip loading bundles we already have
    except NotFoundError:
        self.database.copy_dataset(ds, cb=cb)
    b = self.bundle(ds.vid)  # It had better exist now.
    # b.state = Bundle.STATES.INSTALLED
    b.commit()
    #self.search.index_library_datasets(tick)
    self.search.index_bundle(b)
    return b
def copy(self):
"""Copy the this node tree
Note all references to readers are removed. This is meant to avoid
tree copies accessing readers that would return incompatible (Area)
data. Theoretically it should be possible for tree copies to request
compositor or modifier information as long as they don't depend on
any datasets not already existing in the dependency tree.
"""
new_tree = DependencyTree({}, self.compositors, self.modifiers)
for c in self.children:
c = c.copy(node_cache=new_tree._all_nodes)
new_tree.add_child(new_tree, c)
return new_tree | Copy the this node tree
Note all references to readers are removed. This is meant to avoid
tree copies accessing readers that would return incompatible (Area)
data. Theoretically it should be possible for tree copies to request
compositor or modifier information as long as they don't depend on
any datasets not already existing in the dependency tree. | Below is the instruction that describes the task:
### Input:
Copy the this node tree
Note all references to readers are removed. This is meant to avoid
tree copies accessing readers that would return incompatible (Area)
data. Theoretically it should be possible for tree copies to request
compositor or modifier information as long as they don't depend on
any datasets not already existing in the dependency tree.
### Response:
def copy(self):
    """Copy this node tree.

    Note all references to readers are removed. This is meant to avoid
    tree copies accessing readers that would return incompatible (Area)
    data. Theoretically it should be possible for tree copies to request
    compositor or modifier information as long as they don't depend on
    any datasets not already existing in the dependency tree.
    """
    tree_copy = DependencyTree({}, self.compositors, self.modifiers)
    # Copy each child into the new tree, sharing the new tree's node
    # cache so duplicate nodes are reused.
    for child in self.children:
        child_copy = child.copy(node_cache=tree_copy._all_nodes)
        tree_copy.add_child(tree_copy, child_copy)
    return tree_copy
def convert_world_to_phenotype(world):
"""
Converts sets indicating the resources present in a single cell to binary
strings (bit order is based on the order of resources in world.resources).
TODO: Figure out how to handle relationship between resources and tasks
Inputs: world - an EnvironmentFile object with a grid of resource sets
Returns: an EnvironmentFile object with a grid of binary strings
"""
if set(world.resources) != set(world.tasks):
print("Warning: world phenotypes don't correspond to phenotypes")
if set(world.resources).issubset(set(world.tasks)):
conversion_func = function_with_args(res_set_to_phenotype, world.tasks)
else:
conversion_func = \
function_with_args(res_set_to_phenotype, world.resources)
grid = agg_grid(deepcopy(world), conversion_func)
return grid | Converts sets indicating the resources present in a single cell to binary
strings (bit order is based on the order of resources in world.resources).
TODO: Figure out how to handle relationship between resources and tasks
Inputs: world - an EnvironmentFile object with a grid of resource sets
Returns: an EnvironmentFile object with a grid of binary strings | Below is the instruction that describes the task:
### Input:
Converts sets indicating the resources present in a single cell to binary
strings (bit order is based on the order of resources in world.resources).
TODO: Figure out how to handle relationship between resources and tasks
Inputs: world - an EnvironmentFile object with a grid of resource sets
Returns: an EnvironmentFile object with a grid of binary strings
### Response:
def convert_world_to_phenotype(world):
    """
    Converts sets indicating the resources present in a single cell to binary
    strings (bit order is based on the order of resources in world.resources).
    TODO: Figure out how to handle relationship between resources and tasks

    Inputs: world - an EnvironmentFile object with a grid of resource sets
    Returns: an EnvironmentFile object with a grid of binary strings
    """
    resources = set(world.resources)
    tasks = set(world.tasks)
    if resources != tasks:
        print("Warning: world phenotypes don't correspond to phenotypes")
    # Use the task ordering for the bit positions when every resource is
    # also a task; otherwise fall back to the resource ordering.
    if resources.issubset(tasks):
        bit_order = world.tasks
    else:
        bit_order = world.resources
    conversion_func = function_with_args(res_set_to_phenotype, bit_order)
    # Work on a deep copy so the caller's world is left untouched.
    return agg_grid(deepcopy(world), conversion_func)
def read_config(filename):
"""Reads and flattens a configuration file into a single
dictionary for ease of use. Works with both ``.config`` and
``.yaml`` files. Files should look like this::
search_rules:
from-date: 2017-06-01
to-date: 2017-09-01 01:01
pt-rule: kanye
search_params:
results-per-call: 500
max-results: 500
output_params:
save_file: True
filename_prefix: kanye
results_per_file: 10000000
or::
[search_rules]
from_date = 2017-06-01
to_date = 2017-09-01
pt_rule = beyonce has:geo
[search_params]
results_per_call = 500
max_results = 500
[output_params]
save_file = True
filename_prefix = beyonce
results_per_file = 10000000
Args:
filename (str): location of file with extension ('.config' or '.yaml')
Returns:
dict: parsed configuration dictionary.
"""
file_type = "yaml" if filename.endswith(".yaml") else "config"
config = configparser.ConfigParser()
if file_type == "yaml":
with open(os.path.expanduser(filename)) as f:
config_dict = yaml.load(f)
config_dict = merge_dicts(*[dict(config_dict[s]) for s
in config_dict.keys()])
elif file_type == "config":
with open(filename) as f:
config.read_file(f)
config_dict = merge_dicts(*[dict(config[s]) for s
in config.sections()])
else:
logger.error("Config files must be either in YAML or Config style.")
raise TypeError
# ensure args are renamed correctly:
config_dict = {k.replace('-', '_'): v for k, v in config_dict.items()}
# YAML will parse datestrings as datetimes; we'll convert them here if they
# exist
if config_dict.get("to_date") is not None:
config_dict["to_date"] = str(config_dict["to_date"])
if config_dict.get("from_date") is not None:
config_dict["from_date"] = str(config_dict["from_date"])
return config_dict | Reads and flattens a configuration file into a single
dictionary for ease of use. Works with both ``.config`` and
``.yaml`` files. Files should look like this::
search_rules:
from-date: 2017-06-01
to-date: 2017-09-01 01:01
pt-rule: kanye
search_params:
results-per-call: 500
max-results: 500
output_params:
save_file: True
filename_prefix: kanye
results_per_file: 10000000
or::
[search_rules]
from_date = 2017-06-01
to_date = 2017-09-01
pt_rule = beyonce has:geo
[search_params]
results_per_call = 500
max_results = 500
[output_params]
save_file = True
filename_prefix = beyonce
results_per_file = 10000000
Args:
filename (str): location of file with extension ('.config' or '.yaml')
Returns:
dict: parsed configuration dictionary. | Below is the instruction that describes the task:
### Input:
Reads and flattens a configuration file into a single
dictionary for ease of use. Works with both ``.config`` and
``.yaml`` files. Files should look like this::
search_rules:
from-date: 2017-06-01
to-date: 2017-09-01 01:01
pt-rule: kanye
search_params:
results-per-call: 500
max-results: 500
output_params:
save_file: True
filename_prefix: kanye
results_per_file: 10000000
or::
[search_rules]
from_date = 2017-06-01
to_date = 2017-09-01
pt_rule = beyonce has:geo
[search_params]
results_per_call = 500
max_results = 500
[output_params]
save_file = True
filename_prefix = beyonce
results_per_file = 10000000
Args:
filename (str): location of file with extension ('.config' or '.yaml')
Returns:
dict: parsed configuration dictionary.
### Response:
def read_config(filename):
    """Reads and flattens a configuration file into a single
    dictionary for ease of use. Works with both ``.config`` and
    ``.yaml`` files. Files should look like this::
        search_rules:
            from-date: 2017-06-01
            to-date: 2017-09-01 01:01
            pt-rule: kanye
        search_params:
            results-per-call: 500
            max-results: 500
        output_params:
            save_file: True
            filename_prefix: kanye
            results_per_file: 10000000
    or::
        [search_rules]
        from_date = 2017-06-01
        to_date = 2017-09-01
        pt_rule = beyonce has:geo
        [search_params]
        results_per_call = 500
        max_results = 500
        [output_params]
        save_file = True
        filename_prefix = beyonce
        results_per_file = 10000000

    Args:
        filename (str): location of file with extension ('.config' or '.yaml')

    Returns:
        dict: parsed configuration dictionary.
    """
    file_type = "yaml" if filename.endswith(".yaml") else "config"
    config = configparser.ConfigParser()
    if file_type == "yaml":
        with open(os.path.expanduser(filename)) as f:
            # safe_load: config files never need arbitrary YAML tags, and
            # plain yaml.load without a Loader is unsafe and deprecated.
            config_dict = yaml.safe_load(f)
        # Flatten the per-section mappings into one dictionary.
        config_dict = merge_dicts(*[dict(config_dict[s]) for s
                                    in config_dict.keys()])
    elif file_type == "config":
        with open(filename) as f:
            config.read_file(f)
        config_dict = merge_dicts(*[dict(config[s]) for s
                                    in config.sections()])
    else:
        # Unreachable with the current two-valued file_type; kept as a
        # guard in case more formats are added.
        logger.error("Config files must be either in YAML or Config style.")
        raise TypeError
    # ensure args are renamed correctly:
    config_dict = {k.replace('-', '_'): v for k, v in config_dict.items()}
    # YAML will parse datestrings as datetimes; we'll convert them here if they
    # exist
    if config_dict.get("to_date") is not None:
        config_dict["to_date"] = str(config_dict["to_date"])
    if config_dict.get("from_date") is not None:
        config_dict["from_date"] = str(config_dict["from_date"])
    return config_dict
def azureContainerSAS(self, *args, **kwargs):
"""
Get Shared-Access-Signature for Azure Container
Get a shared access signature (SAS) string for use with a specific Azure
Blob Storage container.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
container if it doesn't already exist.
This method gives output: ``v1/azure-container-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureContainerSAS"], *args, **kwargs) | Get Shared-Access-Signature for Azure Container
Get a shared access signature (SAS) string for use with a specific Azure
Blob Storage container.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
container if it doesn't already exist.
This method gives output: ``v1/azure-container-response.json#``
This method is ``stable`` | Below is the instruction that describes the task:
### Input:
Get Shared-Access-Signature for Azure Container
Get a shared access signature (SAS) string for use with a specific Azure
Blob Storage container.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
container if it doesn't already exist.
This method gives output: ``v1/azure-container-response.json#``
This method is ``stable``
### Response:
def azureContainerSAS(self, *args, **kwargs):
    """
    Get Shared-Access-Signature for Azure Container

    Get a shared access signature (SAS) string for use with a specific Azure
    Blob Storage container.
    The `level` parameter can be `read-write` or `read-only` and determines
    which type of credentials are returned. If level is read-write, it will create the
    container if it doesn't already exist.
    This method gives output: ``v1/azure-container-response.json#``
    This method is ``stable``
    """
    # Thin generated wrapper: delegate to the shared API-call machinery
    # using the endpoint metadata registered under "azureContainerSAS".
    return self._makeApiCall(self.funcinfo["azureContainerSAS"], *args, **kwargs)
def save_image(imager, grid_data, grid_norm, output_file):
"""Makes an image from gridded visibilities and saves it to a FITS file.
Args:
imager (oskar.Imager): Handle to configured imager.
grid_data (numpy.ndarray): Final visibility grid.
grid_norm (float): Grid normalisation to apply.
output_file (str): Name of output FITS file to write.
"""
# Make the image (take the FFT, normalise, and apply grid correction).
imager.finalise_plane(grid_data, grid_norm)
grid_data = numpy.real(grid_data)
# Trim the image if required.
border = (imager.plane_size - imager.image_size) // 2
if border > 0:
end = border + imager.image_size
grid_data = grid_data[border:end, border:end]
# Write the FITS file.
hdr = fits.header.Header()
fits.writeto(output_file, grid_data, hdr, clobber=True) | Makes an image from gridded visibilities and saves it to a FITS file.
Args:
imager (oskar.Imager): Handle to configured imager.
grid_data (numpy.ndarray): Final visibility grid.
grid_norm (float): Grid normalisation to apply.
output_file (str): Name of output FITS file to write. | Below is the instruction that describes the task:
### Input:
Makes an image from gridded visibilities and saves it to a FITS file.
Args:
imager (oskar.Imager): Handle to configured imager.
grid_data (numpy.ndarray): Final visibility grid.
grid_norm (float): Grid normalisation to apply.
output_file (str): Name of output FITS file to write.
### Response:
def save_image(imager, grid_data, grid_norm, output_file):
"""Makes an image from gridded visibilities and saves it to a FITS file.
Args:
imager (oskar.Imager): Handle to configured imager.
grid_data (numpy.ndarray): Final visibility grid.
grid_norm (float): Grid normalisation to apply.
output_file (str): Name of output FITS file to write.
"""
# Make the image (take the FFT, normalise, and apply grid correction).
imager.finalise_plane(grid_data, grid_norm)
grid_data = numpy.real(grid_data)
# Trim the image if required.
border = (imager.plane_size - imager.image_size) // 2
if border > 0:
end = border + imager.image_size
grid_data = grid_data[border:end, border:end]
# Write the FITS file.
hdr = fits.header.Header()
fits.writeto(output_file, grid_data, hdr, clobber=True) |
def process_tags(self, user, msg, reply, st=[], bst=[], depth=0, ignore_object_errors=True):
"""Post process tags in a message.
:param str user: The user ID.
:param str msg: The user's formatted message.
:param str reply: The raw RiveScript reply for the message.
:param []str st: The array of ``<star>`` matches from the trigger.
:param []str bst: The array of ``<botstar>`` matches from a
``%Previous`` command.
:param int depth: The recursion depth counter.
:param bool ignore_object_errors: Whether to ignore errors in Python
object macros instead of raising an ``ObjectError`` exception.
:return str: The final reply after tags have been processed.
"""
stars = ['']
stars.extend(st)
botstars = ['']
botstars.extend(bst)
if len(stars) == 1:
stars.append("undefined")
if len(botstars) == 1:
botstars.append("undefined")
matcher = re.findall(RE.reply_array, reply)
for match in matcher:
name = match
if name in self.master._array:
result = "{random}" + "|".join(self.master._array[name]) + "{/random}"
else:
result = "\x00@" + name + "\x00"
reply = reply.replace("(@"+name+")", result)
reply = re.sub(RE.ph_array, r'(@\1)', reply)
# Tag shortcuts.
reply = reply.replace('<person>', '{person}<star>{/person}')
reply = reply.replace('<@>', '{@<star>}')
reply = reply.replace('<formal>', '{formal}<star>{/formal}')
reply = reply.replace('<sentence>', '{sentence}<star>{/sentence}')
reply = reply.replace('<uppercase>', '{uppercase}<star>{/uppercase}')
reply = reply.replace('<lowercase>', '{lowercase}<star>{/lowercase}')
# Weight and <star> tags.
reply = re.sub(RE.weight, '', reply) # Leftover {weight}s
if len(stars) > 0:
reply = reply.replace('<star>', text_type(stars[1]))
reStars = re.findall(RE.star_tags, reply)
for match in reStars:
if int(match) < len(stars):
reply = reply.replace('<star{match}>'.format(match=match), text_type(stars[int(match)]))
if len(botstars) > 0:
reply = reply.replace('<botstar>', botstars[1])
reStars = re.findall(RE.botstars, reply)
for match in reStars:
if int(match) < len(botstars):
reply = reply.replace('<botstar{match}>'.format(match=match), text_type(botstars[int(match)]))
# <input> and <reply>
history = self.master.get_uservar(user, "__history__")
if type(history) is not dict:
history = self.default_history()
reply = reply.replace('<input>', history['input'][0])
reply = reply.replace('<reply>', history['reply'][0])
reInput = re.findall(RE.input_tags, reply)
for match in reInput:
reply = reply.replace('<input{match}>'.format(match=match),
history['input'][int(match) - 1])
reReply = re.findall(RE.reply_tags, reply)
for match in reReply:
reply = reply.replace('<reply{match}>'.format(match=match),
history['reply'][int(match) - 1])
# <id> and escape codes.
reply = reply.replace('<id>', user)
reply = reply.replace('\\s', ' ')
reply = reply.replace('\\n', "\n")
reply = reply.replace('\\#', '#')
# Random bits.
reRandom = re.findall(RE.random_tags, reply)
for match in reRandom:
output = ''
if '|' in match:
output = utils.random_choice(match.split('|'))
else:
output = utils.random_choice(match.split(' '))
reply = reply.replace('{{random}}{match}{{/random}}'.format(match=match), output, 1) # Replace 1st match
# Person Substitutions and String Formatting.
for item in ['person', 'formal', 'sentence', 'uppercase', 'lowercase']:
matcher = re.findall(r'\{' + item + r'\}(.+?)\{/' + item + r'\}', reply)
for match in matcher:
output = None
if item == 'person':
# Person substitutions.
output = self.substitute(match, "person")
else:
output = utils.string_format(match, item)
reply = reply.replace('{{{item}}}{match}{{/{item}}}'.format(item=item, match=match), output)
# Handle all variable-related tags with an iterative regex approach,
# to allow for nesting of tags in arbitrary ways (think <set a=<get b>>)
# Dummy out the <call> tags first, because we don't handle them right
# here.
reply = reply.replace("<call>", "{__call__}")
reply = reply.replace("</call>", "{/__call__}")
while True:
# This regex will match a <tag> which contains no other tag inside
# it, i.e. in the case of <set a=<get b>> it will match <get b> but
# not the <set> tag, on the first pass. The second pass will get the
# <set> tag, and so on.
match = re.search(RE.tag_search, reply)
if not match: break # No remaining tags!
match = match.group(1)
parts = match.split(" ", 1)
tag = parts[0].lower()
data = parts[1] if len(parts) > 1 else ""
insert = "" # Result of the tag evaluation
# Handle the tags.
if tag == "bot" or tag == "env":
# <bot> and <env> tags are similar.
target = self.master._var if tag == "bot" else self.master._global
if "=" in data:
# Setting a bot/env variable.
parts = data.split("=")
self.say("Set " + tag + " variable " + text_type(parts[0]) + "=" + text_type(parts[1]))
target[parts[0]] = parts[1]
else:
# Getting a bot/env variable.
insert = target.get(data, "undefined")
elif tag == "set":
# <set> user vars.
parts = data.split("=")
self.say("Set uservar " + text_type(parts[0]) + "=" + text_type(parts[1]))
self.master.set_uservar(user, parts[0], parts[1])
elif tag in ["add", "sub", "mult", "div"]:
# Math operator tags.
parts = data.split("=")
var = parts[0]
value = parts[1]
curv = self.master.get_uservar(user, var)
# Sanity check the value.
try:
value = int(value)
if curv in [None, "undefined"]:
# Initialize it.
curv = 0
except:
insert = "[ERR: Math can't '{}' non-numeric value '{}']".format(tag, value)
# Attempt the operation.
try:
orig = int(curv)
new = 0
if tag == "add":
new = orig + value
elif tag == "sub":
new = orig - value
elif tag == "mult":
new = orig * value
elif tag == "div":
new = orig // value
self.master.set_uservar(user, var, new)
except:
insert = "[ERR: Math couldn't '{}' to value '{}']".format(tag, curv)
elif tag == "get":
insert = self.master.get_uservar(user, data)
else:
# Unrecognized tag.
insert = "\x00{}\x01".format(match)
reply = reply.replace("<{}>".format(match), text_type(insert))
# Restore unrecognized tags.
reply = reply.replace("\x00", "<").replace("\x01", ">")
# Streaming code. DEPRECATED!
if '{!' in reply:
self._warn("Use of the {!...} tag is deprecated and not supported here.")
# Topic setter.
reTopic = re.findall(RE.topic_tag, reply)
for match in reTopic:
self.say("Setting user's topic to " + match)
self.master.set_uservar(user, "topic", match)
reply = reply.replace('{{topic={match}}}'.format(match=match), '')
# Inline redirecter.
reRedir = re.findall(RE.redir_tag, reply)
for match in reRedir:
self.say("Redirect to " + match)
at = match.strip()
subreply = self._getreply(user, at, step=(depth + 1))
reply = reply.replace('{{@{match}}}'.format(match=match), subreply)
# Object caller.
reply = reply.replace("{__call__}", "<call>")
reply = reply.replace("{/__call__}", "</call>")
reCall = re.findall(r'<call>(.+?)</call>', reply)
for match in reCall:
parts = re.split(RE.ws, match)
output = ''
obj = parts[0]
args = []
if len(parts) > 1:
args = parts[1:]
# Do we know this object?
if obj in self.master._objlangs:
# We do, but do we have a handler for that language?
lang = self.master._objlangs[obj]
if lang in self.master._handlers:
# We do.
try:
output = self.master._handlers[lang].call(self.master, obj, user, args)
except python.PythonObjectError as e:
self.warn(str(e))
if not ignore_object_errors:
raise ObjectError(str(e))
output = RS_ERR_OBJECT
else:
if not ignore_object_errors:
raise ObjectError(RS_ERR_OBJECT_HANDLER)
output = RS_ERR_OBJECT_HANDLER
else:
if not ignore_object_errors:
raise ObjectError(RS_ERR_OBJECT_MISSING)
output = RS_ERR_OBJECT_MISSING
reply = reply.replace('<call>{match}</call>'.format(match=match), output)
return reply | Post process tags in a message.
:param str user: The user ID.
:param str msg: The user's formatted message.
:param str reply: The raw RiveScript reply for the message.
:param []str st: The array of ``<star>`` matches from the trigger.
:param []str bst: The array of ``<botstar>`` matches from a
``%Previous`` command.
:param int depth: The recursion depth counter.
:param bool ignore_object_errors: Whether to ignore errors in Python
object macros instead of raising an ``ObjectError`` exception.
:return str: The final reply after tags have been processed. | Below is the the instruction that describes the task:
### Input:
Post process tags in a message.
:param str user: The user ID.
:param str msg: The user's formatted message.
:param str reply: The raw RiveScript reply for the message.
:param []str st: The array of ``<star>`` matches from the trigger.
:param []str bst: The array of ``<botstar>`` matches from a
``%Previous`` command.
:param int depth: The recursion depth counter.
:param bool ignore_object_errors: Whether to ignore errors in Python
object macros instead of raising an ``ObjectError`` exception.
:return str: The final reply after tags have been processed.
### Response:
def process_tags(self, user, msg, reply, st=[], bst=[], depth=0, ignore_object_errors=True):
"""Post process tags in a message.
:param str user: The user ID.
:param str msg: The user's formatted message.
:param str reply: The raw RiveScript reply for the message.
:param []str st: The array of ``<star>`` matches from the trigger.
:param []str bst: The array of ``<botstar>`` matches from a
``%Previous`` command.
:param int depth: The recursion depth counter.
:param bool ignore_object_errors: Whether to ignore errors in Python
object macros instead of raising an ``ObjectError`` exception.
:return str: The final reply after tags have been processed.
"""
stars = ['']
stars.extend(st)
botstars = ['']
botstars.extend(bst)
if len(stars) == 1:
stars.append("undefined")
if len(botstars) == 1:
botstars.append("undefined")
matcher = re.findall(RE.reply_array, reply)
for match in matcher:
name = match
if name in self.master._array:
result = "{random}" + "|".join(self.master._array[name]) + "{/random}"
else:
result = "\x00@" + name + "\x00"
reply = reply.replace("(@"+name+")", result)
reply = re.sub(RE.ph_array, r'(@\1)', reply)
# Tag shortcuts.
reply = reply.replace('<person>', '{person}<star>{/person}')
reply = reply.replace('<@>', '{@<star>}')
reply = reply.replace('<formal>', '{formal}<star>{/formal}')
reply = reply.replace('<sentence>', '{sentence}<star>{/sentence}')
reply = reply.replace('<uppercase>', '{uppercase}<star>{/uppercase}')
reply = reply.replace('<lowercase>', '{lowercase}<star>{/lowercase}')
# Weight and <star> tags.
reply = re.sub(RE.weight, '', reply) # Leftover {weight}s
if len(stars) > 0:
reply = reply.replace('<star>', text_type(stars[1]))
reStars = re.findall(RE.star_tags, reply)
for match in reStars:
if int(match) < len(stars):
reply = reply.replace('<star{match}>'.format(match=match), text_type(stars[int(match)]))
if len(botstars) > 0:
reply = reply.replace('<botstar>', botstars[1])
reStars = re.findall(RE.botstars, reply)
for match in reStars:
if int(match) < len(botstars):
reply = reply.replace('<botstar{match}>'.format(match=match), text_type(botstars[int(match)]))
# <input> and <reply>
history = self.master.get_uservar(user, "__history__")
if type(history) is not dict:
history = self.default_history()
reply = reply.replace('<input>', history['input'][0])
reply = reply.replace('<reply>', history['reply'][0])
reInput = re.findall(RE.input_tags, reply)
for match in reInput:
reply = reply.replace('<input{match}>'.format(match=match),
history['input'][int(match) - 1])
reReply = re.findall(RE.reply_tags, reply)
for match in reReply:
reply = reply.replace('<reply{match}>'.format(match=match),
history['reply'][int(match) - 1])
# <id> and escape codes.
reply = reply.replace('<id>', user)
reply = reply.replace('\\s', ' ')
reply = reply.replace('\\n', "\n")
reply = reply.replace('\\#', '#')
# Random bits.
reRandom = re.findall(RE.random_tags, reply)
for match in reRandom:
output = ''
if '|' in match:
output = utils.random_choice(match.split('|'))
else:
output = utils.random_choice(match.split(' '))
reply = reply.replace('{{random}}{match}{{/random}}'.format(match=match), output, 1) # Replace 1st match
# Person Substitutions and String Formatting.
for item in ['person', 'formal', 'sentence', 'uppercase', 'lowercase']:
matcher = re.findall(r'\{' + item + r'\}(.+?)\{/' + item + r'\}', reply)
for match in matcher:
output = None
if item == 'person':
# Person substitutions.
output = self.substitute(match, "person")
else:
output = utils.string_format(match, item)
reply = reply.replace('{{{item}}}{match}{{/{item}}}'.format(item=item, match=match), output)
# Handle all variable-related tags with an iterative regex approach,
# to allow for nesting of tags in arbitrary ways (think <set a=<get b>>)
# Dummy out the <call> tags first, because we don't handle them right
# here.
reply = reply.replace("<call>", "{__call__}")
reply = reply.replace("</call>", "{/__call__}")
while True:
# This regex will match a <tag> which contains no other tag inside
# it, i.e. in the case of <set a=<get b>> it will match <get b> but
# not the <set> tag, on the first pass. The second pass will get the
# <set> tag, and so on.
match = re.search(RE.tag_search, reply)
if not match: break # No remaining tags!
match = match.group(1)
parts = match.split(" ", 1)
tag = parts[0].lower()
data = parts[1] if len(parts) > 1 else ""
insert = "" # Result of the tag evaluation
# Handle the tags.
if tag == "bot" or tag == "env":
# <bot> and <env> tags are similar.
target = self.master._var if tag == "bot" else self.master._global
if "=" in data:
# Setting a bot/env variable.
parts = data.split("=")
self.say("Set " + tag + " variable " + text_type(parts[0]) + "=" + text_type(parts[1]))
target[parts[0]] = parts[1]
else:
# Getting a bot/env variable.
insert = target.get(data, "undefined")
elif tag == "set":
# <set> user vars.
parts = data.split("=")
self.say("Set uservar " + text_type(parts[0]) + "=" + text_type(parts[1]))
self.master.set_uservar(user, parts[0], parts[1])
elif tag in ["add", "sub", "mult", "div"]:
# Math operator tags.
parts = data.split("=")
var = parts[0]
value = parts[1]
curv = self.master.get_uservar(user, var)
# Sanity check the value.
try:
value = int(value)
if curv in [None, "undefined"]:
# Initialize it.
curv = 0
except:
insert = "[ERR: Math can't '{}' non-numeric value '{}']".format(tag, value)
# Attempt the operation.
try:
orig = int(curv)
new = 0
if tag == "add":
new = orig + value
elif tag == "sub":
new = orig - value
elif tag == "mult":
new = orig * value
elif tag == "div":
new = orig // value
self.master.set_uservar(user, var, new)
except:
insert = "[ERR: Math couldn't '{}' to value '{}']".format(tag, curv)
elif tag == "get":
insert = self.master.get_uservar(user, data)
else:
# Unrecognized tag.
insert = "\x00{}\x01".format(match)
reply = reply.replace("<{}>".format(match), text_type(insert))
# Restore unrecognized tags.
reply = reply.replace("\x00", "<").replace("\x01", ">")
# Streaming code. DEPRECATED!
if '{!' in reply:
self._warn("Use of the {!...} tag is deprecated and not supported here.")
# Topic setter.
reTopic = re.findall(RE.topic_tag, reply)
for match in reTopic:
self.say("Setting user's topic to " + match)
self.master.set_uservar(user, "topic", match)
reply = reply.replace('{{topic={match}}}'.format(match=match), '')
# Inline redirecter.
reRedir = re.findall(RE.redir_tag, reply)
for match in reRedir:
self.say("Redirect to " + match)
at = match.strip()
subreply = self._getreply(user, at, step=(depth + 1))
reply = reply.replace('{{@{match}}}'.format(match=match), subreply)
# Object caller.
reply = reply.replace("{__call__}", "<call>")
reply = reply.replace("{/__call__}", "</call>")
reCall = re.findall(r'<call>(.+?)</call>', reply)
for match in reCall:
parts = re.split(RE.ws, match)
output = ''
obj = parts[0]
args = []
if len(parts) > 1:
args = parts[1:]
# Do we know this object?
if obj in self.master._objlangs:
# We do, but do we have a handler for that language?
lang = self.master._objlangs[obj]
if lang in self.master._handlers:
# We do.
try:
output = self.master._handlers[lang].call(self.master, obj, user, args)
except python.PythonObjectError as e:
self.warn(str(e))
if not ignore_object_errors:
raise ObjectError(str(e))
output = RS_ERR_OBJECT
else:
if not ignore_object_errors:
raise ObjectError(RS_ERR_OBJECT_HANDLER)
output = RS_ERR_OBJECT_HANDLER
else:
if not ignore_object_errors:
raise ObjectError(RS_ERR_OBJECT_MISSING)
output = RS_ERR_OBJECT_MISSING
reply = reply.replace('<call>{match}</call>'.format(match=match), output)
return reply |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.