code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def get_class(context, field, obj=None):
"""
Looks up the class for this field
"""
view = context['view']
    return view.lookup_field_class(field, obj, "field_" + field) | Looks up the class for this field | Below is the instruction that describes the task:
### Input:
Looks up the class for this field
### Response:
def get_class(context, field, obj=None):
"""
Looks up the class for this field
"""
view = context['view']
return view.lookup_field_class(field, obj, "field_" + field) |
def convert(self, obj):
"""Takes a dict corresponding to the honeybadgerfish JSON blob of the 1.0.* type and
converts it to BY_ID_HONEY_BADGERFISH version. The object is modified in place
and returned.
"""
if self.pristine_if_invalid:
raise NotImplementedError('pristine_if_invalid option is not supported yet')
nex = get_nexml_el(obj)
assert nex
self._recursive_convert_dict(nex)
nex['@nexml2json'] = str(BADGER_FISH_NEXSON_VERSION)
self._single_el_list_to_dicts(nex, 'otus')
self._single_el_list_to_dicts(nex, 'trees')
#
# otu and tree are always arrays in phylografter
emulate_phylografter_pluralization = True
if not emulate_phylografter_pluralization:
self._single_el_list_to_dicts(nex, 'otus', 'otu')
self._single_el_list_to_dicts(nex, 'trees', 'tree')
self._single_el_list_to_dicts(nex, 'trees', 'tree', 'node')
self._single_el_list_to_dicts(nex, 'trees', 'tree', 'edge')
return obj | Takes a dict corresponding to the honeybadgerfish JSON blob of the 1.0.* type and
converts it to BY_ID_HONEY_BADGERFISH version. The object is modified in place
    and returned. | Below is the instruction that describes the task:
### Input:
Takes a dict corresponding to the honeybadgerfish JSON blob of the 1.0.* type and
converts it to BY_ID_HONEY_BADGERFISH version. The object is modified in place
and returned.
### Response:
def convert(self, obj):
"""Takes a dict corresponding to the honeybadgerfish JSON blob of the 1.0.* type and
converts it to BY_ID_HONEY_BADGERFISH version. The object is modified in place
and returned.
"""
if self.pristine_if_invalid:
raise NotImplementedError('pristine_if_invalid option is not supported yet')
nex = get_nexml_el(obj)
assert nex
self._recursive_convert_dict(nex)
nex['@nexml2json'] = str(BADGER_FISH_NEXSON_VERSION)
self._single_el_list_to_dicts(nex, 'otus')
self._single_el_list_to_dicts(nex, 'trees')
#
# otu and tree are always arrays in phylografter
emulate_phylografter_pluralization = True
if not emulate_phylografter_pluralization:
self._single_el_list_to_dicts(nex, 'otus', 'otu')
self._single_el_list_to_dicts(nex, 'trees', 'tree')
self._single_el_list_to_dicts(nex, 'trees', 'tree', 'node')
self._single_el_list_to_dicts(nex, 'trees', 'tree', 'edge')
return obj |
def gini(self):
"""
geo.gini()
Return computed Gini coefficient.
"""
if self.count()>1:
xsort = sorted(self.raster.data[self.raster.mask == False].flatten()) # increasing order
y = np.cumsum(xsort)
B = sum(y) / (y[-1] * len(xsort))
return 1 + 1./len(xsort) - 2*B
else:
return 1 | geo.gini()
    Return computed Gini coefficient. | Below is the instruction that describes the task:
### Input:
geo.gini()
Return computed Gini coefficient.
### Response:
def gini(self):
"""
geo.gini()
Return computed Gini coefficient.
"""
if self.count()>1:
xsort = sorted(self.raster.data[self.raster.mask == False].flatten()) # increasing order
y = np.cumsum(xsort)
B = sum(y) / (y[-1] * len(xsort))
return 1 + 1./len(xsort) - 2*B
else:
return 1 |
def axis_to_data(ax, width):
"""For a width in axis coordinates, return the corresponding in data
coordinates.
Parameters
----------
ax : matplotlib.axis
Axis object from matplotlib.
width : float
Width in xaxis coordinates.
"""
xlim = ax.get_xlim()
widthx = width*(xlim[1] - xlim[0])
ylim = ax.get_ylim()
widthy = width*(ylim[1] - ylim[0])
return 0.5*(widthx + widthy) | For a width in axis coordinates, return the corresponding in data
coordinates.
Parameters
----------
ax : matplotlib.axis
Axis object from matplotlib.
width : float
        Width in xaxis coordinates. | Below is the instruction that describes the task:
### Input:
For a width in axis coordinates, return the corresponding in data
coordinates.
Parameters
----------
ax : matplotlib.axis
Axis object from matplotlib.
width : float
Width in xaxis coordinates.
### Response:
def axis_to_data(ax, width):
"""For a width in axis coordinates, return the corresponding in data
coordinates.
Parameters
----------
ax : matplotlib.axis
Axis object from matplotlib.
width : float
Width in xaxis coordinates.
"""
xlim = ax.get_xlim()
widthx = width*(xlim[1] - xlim[0])
ylim = ax.get_ylim()
widthy = width*(ylim[1] - ylim[0])
return 0.5*(widthx + widthy) |
def is_cross_origin(request):
"""Compare headers HOST and ORIGIN. Remove protocol prefix from ORIGIN, then
compare. Return true if they are not equal
example HTTP_HOST: '127.0.0.1:5000'
example HTTP_ORIGIN: 'http://127.0.0.1:5000'
"""
origin = request.environ.get("HTTP_ORIGIN")
host = request.environ.get("HTTP_HOST")
if origin is None:
# origin is sometimes omitted by the browser when origin and host are equal
return False
if origin.startswith("http://"):
origin = origin.replace("http://", "")
elif origin.startswith("https://"):
origin = origin.replace("https://", "")
return host != origin | Compare headers HOST and ORIGIN. Remove protocol prefix from ORIGIN, then
compare. Return true if they are not equal
example HTTP_HOST: '127.0.0.1:5000'
    example HTTP_ORIGIN: 'http://127.0.0.1:5000' | Below is the instruction that describes the task:
### Input:
Compare headers HOST and ORIGIN. Remove protocol prefix from ORIGIN, then
compare. Return true if they are not equal
example HTTP_HOST: '127.0.0.1:5000'
example HTTP_ORIGIN: 'http://127.0.0.1:5000'
### Response:
def is_cross_origin(request):
"""Compare headers HOST and ORIGIN. Remove protocol prefix from ORIGIN, then
compare. Return true if they are not equal
example HTTP_HOST: '127.0.0.1:5000'
example HTTP_ORIGIN: 'http://127.0.0.1:5000'
"""
origin = request.environ.get("HTTP_ORIGIN")
host = request.environ.get("HTTP_HOST")
if origin is None:
# origin is sometimes omitted by the browser when origin and host are equal
return False
if origin.startswith("http://"):
origin = origin.replace("http://", "")
elif origin.startswith("https://"):
origin = origin.replace("https://", "")
return host != origin |
def align(self, scale, center, angle, height, width):
""" Create a thumbnail from the original image that
is scaled by the given factor, centered on the center pixel, oriented along the grasp angle, and cropped to the desired height and width.
Parameters
----------
scale : float
scale factor to apply
center : 2D array
array containing the row and column index of the pixel to center on
angle : float
angle to align the image to
height : int
height of the final image
width : int
width of the final image
"""
# rescale
scaled_im = self.resize(scale)
# transform
cx = scaled_im.center[1]
cy = scaled_im.center[0]
dx = cx - center[0] * scale
dy = cy - center[1] * scale
translation = np.array([dy, dx])
tf_im = scaled_im.transform(translation, angle)
# crop
aligned_im = tf_im.crop(height, width)
return aligned_im | Create a thumbnail from the original image that
is scaled by the given factor, centered on the center pixel, oriented along the grasp angle, and cropped to the desired height and width.
Parameters
----------
scale : float
scale factor to apply
center : 2D array
array containing the row and column index of the pixel to center on
angle : float
angle to align the image to
height : int
height of the final image
width : int
            width of the final image | Below is the instruction that describes the task:
### Input:
Create a thumbnail from the original image that
is scaled by the given factor, centered on the center pixel, oriented along the grasp angle, and cropped to the desired height and width.
Parameters
----------
scale : float
scale factor to apply
center : 2D array
array containing the row and column index of the pixel to center on
angle : float
angle to align the image to
height : int
height of the final image
width : int
width of the final image
### Response:
def align(self, scale, center, angle, height, width):
""" Create a thumbnail from the original image that
is scaled by the given factor, centered on the center pixel, oriented along the grasp angle, and cropped to the desired height and width.
Parameters
----------
scale : float
scale factor to apply
center : 2D array
array containing the row and column index of the pixel to center on
angle : float
angle to align the image to
height : int
height of the final image
width : int
width of the final image
"""
# rescale
scaled_im = self.resize(scale)
# transform
cx = scaled_im.center[1]
cy = scaled_im.center[0]
dx = cx - center[0] * scale
dy = cy - center[1] * scale
translation = np.array([dy, dx])
tf_im = scaled_im.transform(translation, angle)
# crop
aligned_im = tf_im.crop(height, width)
return aligned_im |
def cal_gpa(grades):
"""
根据成绩数组计算课程平均绩点和 gpa, 算法不一定与学校一致, 结果仅供参考
:param grades: :meth:`models.StudentSession.get_my_achievements` 返回的成绩数组
:return: 包含了课程平均绩点和 gpa 的元组
"""
# 课程总数
courses_sum = len(grades)
# 课程绩点和
points_sum = 0
# 学分和
credit_sum = 0
# 课程学分 x 课程绩点之和
gpa_points_sum = 0
for grade in grades:
point = get_point(grade.get('补考成绩') or grade['成绩'])
credit = float(grade['学分'])
points_sum += point
credit_sum += credit
gpa_points_sum += credit * point
ave_point = points_sum / courses_sum
gpa = gpa_points_sum / credit_sum
return round(ave_point, 5), round(gpa, 5) | 根据成绩数组计算课程平均绩点和 gpa, 算法不一定与学校一致, 结果仅供参考
:param grades: :meth:`models.StudentSession.get_my_achievements` 返回的成绩数组
    :return: 包含了课程平均绩点和 gpa 的元组 | Below is the instruction that describes the task:
### Input:
根据成绩数组计算课程平均绩点和 gpa, 算法不一定与学校一致, 结果仅供参考
:param grades: :meth:`models.StudentSession.get_my_achievements` 返回的成绩数组
:return: 包含了课程平均绩点和 gpa 的元组
### Response:
def cal_gpa(grades):
"""
根据成绩数组计算课程平均绩点和 gpa, 算法不一定与学校一致, 结果仅供参考
:param grades: :meth:`models.StudentSession.get_my_achievements` 返回的成绩数组
:return: 包含了课程平均绩点和 gpa 的元组
"""
# 课程总数
courses_sum = len(grades)
# 课程绩点和
points_sum = 0
# 学分和
credit_sum = 0
# 课程学分 x 课程绩点之和
gpa_points_sum = 0
for grade in grades:
point = get_point(grade.get('补考成绩') or grade['成绩'])
credit = float(grade['学分'])
points_sum += point
credit_sum += credit
gpa_points_sum += credit * point
ave_point = points_sum / courses_sum
gpa = gpa_points_sum / credit_sum
return round(ave_point, 5), round(gpa, 5) |
def get_statepostal(self, obj):
"""State postal abbreviation if county or state else ``None``."""
if obj.division.level.name == DivisionLevel.STATE:
return us.states.lookup(obj.division.code).abbr
elif obj.division.level.name == DivisionLevel.COUNTY:
return us.states.lookup(obj.division.parent.code).abbr
        return None | State postal abbreviation if county or state else ``None``. | Below is the instruction that describes the task:
### Input:
State postal abbreviation if county or state else ``None``.
### Response:
def get_statepostal(self, obj):
"""State postal abbreviation if county or state else ``None``."""
if obj.division.level.name == DivisionLevel.STATE:
return us.states.lookup(obj.division.code).abbr
elif obj.division.level.name == DivisionLevel.COUNTY:
return us.states.lookup(obj.division.parent.code).abbr
return None |
def no_duplicates(function, *args, **kwargs):
"""
Makes sure that no duplicated tasks are enqueued.
"""
@wraps(function)
def wrapper(self, *args, **kwargs):
key = generate_key(function, *args, **kwargs)
try:
function(self, *args, **kwargs)
finally:
logging.info('Removing key %s', key)
cache.delete(key)
    return wrapper | Makes sure that no duplicated tasks are enqueued. | Below is the instruction that describes the task:
### Input:
Makes sure that no duplicated tasks are enqueued.
### Response:
def no_duplicates(function, *args, **kwargs):
"""
Makes sure that no duplicated tasks are enqueued.
"""
@wraps(function)
def wrapper(self, *args, **kwargs):
key = generate_key(function, *args, **kwargs)
try:
function(self, *args, **kwargs)
finally:
logging.info('Removing key %s', key)
cache.delete(key)
return wrapper |
def __get_language_match(self, languageCode, languageIds):
"""Compares ``languageCode`` to the provided ``languageIds`` to find
the closest match and returns it, if a match is not found returns
``None``.
e.g. if ``languageCode`` is ``en_CA`` and ``languageIds`` contains
``en``, the return value will be ``en``
"""
# special case
if languageCode == 'zh':
return 'zh-Hans'
# this will take care of cases such as mapping en_CA to en
if '-' in languageCode:
match = negotiate_locale([languageCode], languageIds, sep='-')
else:
match = negotiate_locale([languageCode], languageIds)
if match:
return match
# handle other cases
if '-' in languageCode:
locale = Locale.parse(languageCode, sep='-')
else:
locale = Locale.parse(languageCode)
for languageId in languageIds:
if '-' not in languageId:
continue
# normalize the languageId
nLanguageId = Locale.parse(languageId, sep='-')
# 1. lang subtag must match
# 2. either script or territory subtag must match AND
# one of them must not be None, i.e. do not allow None == None
if locale.language == nLanguageId.language and \
(((locale.script or nLanguageId.script) and
(locale.script == nLanguageId.script)) or \
(locale.territory or nLanguageId.territory) and
(locale.territory == nLanguageId.territory)):
return languageId
return None | Compares ``languageCode`` to the provided ``languageIds`` to find
the closest match and returns it, if a match is not found returns
``None``.
e.g. if ``languageCode`` is ``en_CA`` and ``languageIds`` contains
        ``en``, the return value will be ``en`` | Below is the instruction that describes the task:
### Input:
Compares ``languageCode`` to the provided ``languageIds`` to find
the closest match and returns it, if a match is not found returns
``None``.
e.g. if ``languageCode`` is ``en_CA`` and ``languageIds`` contains
``en``, the return value will be ``en``
### Response:
def __get_language_match(self, languageCode, languageIds):
"""Compares ``languageCode`` to the provided ``languageIds`` to find
the closest match and returns it, if a match is not found returns
``None``.
e.g. if ``languageCode`` is ``en_CA`` and ``languageIds`` contains
``en``, the return value will be ``en``
"""
# special case
if languageCode == 'zh':
return 'zh-Hans'
# this will take care of cases such as mapping en_CA to en
if '-' in languageCode:
match = negotiate_locale([languageCode], languageIds, sep='-')
else:
match = negotiate_locale([languageCode], languageIds)
if match:
return match
# handle other cases
if '-' in languageCode:
locale = Locale.parse(languageCode, sep='-')
else:
locale = Locale.parse(languageCode)
for languageId in languageIds:
if '-' not in languageId:
continue
# normalize the languageId
nLanguageId = Locale.parse(languageId, sep='-')
# 1. lang subtag must match
# 2. either script or territory subtag must match AND
# one of them must not be None, i.e. do not allow None == None
if locale.language == nLanguageId.language and \
(((locale.script or nLanguageId.script) and
(locale.script == nLanguageId.script)) or \
(locale.territory or nLanguageId.territory) and
(locale.territory == nLanguageId.territory)):
return languageId
return None |
def qseries(fseries, Q, f0, return_complex=False):
"""Calculate the energy 'TimeSeries' for the given fseries
Parameters
----------
fseries: 'pycbc FrequencySeries'
frequency-series data set
Q:
q value
f0:
central frequency
return_complex: {False, bool}
Return the raw complex series instead of the normalized power.
Returns
-------
energy: '~pycbc.types.TimeSeries'
A 'TimeSeries' of the normalized energy from the Q-transform of
this tile against the data.
"""
# normalize and generate bi-square window
qprime = Q / 11**(1/2.)
norm = numpy.sqrt(315. * qprime / (128. * f0))
window_size = 2 * int(f0 / qprime * fseries.duration) + 1
xfrequencies = numpy.linspace(-1., 1., window_size)
start = int((f0 - (f0 / qprime)) * fseries.duration)
end = int(start + window_size)
center = (start + end) / 2
windowed = fseries[start:end] * (1 - xfrequencies ** 2) ** 2 * norm
tlen = (len(fseries)-1) * 2
windowed.resize(tlen)
windowed.roll(-center)
# calculate the time series for this q -value
windowed = FrequencySeries(windowed, delta_f=fseries.delta_f,
epoch=fseries.start_time)
ctseries = TimeSeries(zeros(tlen, dtype=numpy.complex128),
delta_t=fseries.delta_t)
ifft(windowed, ctseries)
if return_complex:
return ctseries
else:
energy = ctseries.squared_norm()
medianenergy = numpy.median(energy.numpy())
return energy / float(medianenergy) | Calculate the energy 'TimeSeries' for the given fseries
Parameters
----------
fseries: 'pycbc FrequencySeries'
frequency-series data set
Q:
q value
f0:
central frequency
return_complex: {False, bool}
Return the raw complex series instead of the normalized power.
Returns
-------
energy: '~pycbc.types.TimeSeries'
A 'TimeSeries' of the normalized energy from the Q-transform of
        this tile against the data. | Below is the instruction that describes the task:
### Input:
Calculate the energy 'TimeSeries' for the given fseries
Parameters
----------
fseries: 'pycbc FrequencySeries'
frequency-series data set
Q:
q value
f0:
central frequency
return_complex: {False, bool}
Return the raw complex series instead of the normalized power.
Returns
-------
energy: '~pycbc.types.TimeSeries'
A 'TimeSeries' of the normalized energy from the Q-transform of
this tile against the data.
### Response:
def qseries(fseries, Q, f0, return_complex=False):
"""Calculate the energy 'TimeSeries' for the given fseries
Parameters
----------
fseries: 'pycbc FrequencySeries'
frequency-series data set
Q:
q value
f0:
central frequency
return_complex: {False, bool}
Return the raw complex series instead of the normalized power.
Returns
-------
energy: '~pycbc.types.TimeSeries'
A 'TimeSeries' of the normalized energy from the Q-transform of
this tile against the data.
"""
# normalize and generate bi-square window
qprime = Q / 11**(1/2.)
norm = numpy.sqrt(315. * qprime / (128. * f0))
window_size = 2 * int(f0 / qprime * fseries.duration) + 1
xfrequencies = numpy.linspace(-1., 1., window_size)
start = int((f0 - (f0 / qprime)) * fseries.duration)
end = int(start + window_size)
center = (start + end) / 2
windowed = fseries[start:end] * (1 - xfrequencies ** 2) ** 2 * norm
tlen = (len(fseries)-1) * 2
windowed.resize(tlen)
windowed.roll(-center)
# calculate the time series for this q -value
windowed = FrequencySeries(windowed, delta_f=fseries.delta_f,
epoch=fseries.start_time)
ctseries = TimeSeries(zeros(tlen, dtype=numpy.complex128),
delta_t=fseries.delta_t)
ifft(windowed, ctseries)
if return_complex:
return ctseries
else:
energy = ctseries.squared_norm()
medianenergy = numpy.median(energy.numpy())
return energy / float(medianenergy) |
def p_variable_declaration_list_noin(self, p):
"""
variable_declaration_list_noin \
: variable_declaration_noin
| variable_declaration_list_noin COMMA variable_declaration_noin
"""
if len(p) == 2:
p[0] = [p[1]]
else:
p[1].append(p[3])
p[0] = p[1] | variable_declaration_list_noin \
: variable_declaration_noin
        | variable_declaration_list_noin COMMA variable_declaration_noin | Below is the instruction that describes the task:
### Input:
variable_declaration_list_noin \
: variable_declaration_noin
| variable_declaration_list_noin COMMA variable_declaration_noin
### Response:
def p_variable_declaration_list_noin(self, p):
"""
variable_declaration_list_noin \
: variable_declaration_noin
| variable_declaration_list_noin COMMA variable_declaration_noin
"""
if len(p) == 2:
p[0] = [p[1]]
else:
p[1].append(p[3])
p[0] = p[1] |
def make_imagehdu(data, wcs=None):
"""
Create a FITS `~astropy.io.fits.ImageHDU` containing the input 2D
image.
Parameters
----------
data : 2D array-like
The input 2D data.
wcs : `~astropy.wcs.WCS`, optional
The world coordinate system (WCS) transformation to include in
the output FITS header.
Returns
-------
image_hdu : `~astropy.io.fits.ImageHDU`
The FITS `~astropy.io.fits.ImageHDU`.
See Also
--------
make_wcs
Examples
--------
>>> from photutils.datasets import make_imagehdu, make_wcs
>>> shape = (100, 100)
>>> data = np.ones(shape)
>>> wcs = make_wcs(shape)
>>> hdu = make_imagehdu(data, wcs=wcs)
>>> print(hdu.data.shape)
(100, 100)
"""
data = np.asanyarray(data)
if data.ndim != 2:
raise ValueError('data must be a 2D array')
if wcs is not None:
header = wcs.to_header()
else:
header = None
return fits.ImageHDU(data, header=header) | Create a FITS `~astropy.io.fits.ImageHDU` containing the input 2D
image.
Parameters
----------
data : 2D array-like
The input 2D data.
wcs : `~astropy.wcs.WCS`, optional
The world coordinate system (WCS) transformation to include in
the output FITS header.
Returns
-------
image_hdu : `~astropy.io.fits.ImageHDU`
The FITS `~astropy.io.fits.ImageHDU`.
See Also
--------
make_wcs
Examples
--------
>>> from photutils.datasets import make_imagehdu, make_wcs
>>> shape = (100, 100)
>>> data = np.ones(shape)
>>> wcs = make_wcs(shape)
>>> hdu = make_imagehdu(data, wcs=wcs)
>>> print(hdu.data.shape)
    (100, 100) | Below is the instruction that describes the task:
### Input:
Create a FITS `~astropy.io.fits.ImageHDU` containing the input 2D
image.
Parameters
----------
data : 2D array-like
The input 2D data.
wcs : `~astropy.wcs.WCS`, optional
The world coordinate system (WCS) transformation to include in
the output FITS header.
Returns
-------
image_hdu : `~astropy.io.fits.ImageHDU`
The FITS `~astropy.io.fits.ImageHDU`.
See Also
--------
make_wcs
Examples
--------
>>> from photutils.datasets import make_imagehdu, make_wcs
>>> shape = (100, 100)
>>> data = np.ones(shape)
>>> wcs = make_wcs(shape)
>>> hdu = make_imagehdu(data, wcs=wcs)
>>> print(hdu.data.shape)
(100, 100)
### Response:
def make_imagehdu(data, wcs=None):
"""
Create a FITS `~astropy.io.fits.ImageHDU` containing the input 2D
image.
Parameters
----------
data : 2D array-like
The input 2D data.
wcs : `~astropy.wcs.WCS`, optional
The world coordinate system (WCS) transformation to include in
the output FITS header.
Returns
-------
image_hdu : `~astropy.io.fits.ImageHDU`
The FITS `~astropy.io.fits.ImageHDU`.
See Also
--------
make_wcs
Examples
--------
>>> from photutils.datasets import make_imagehdu, make_wcs
>>> shape = (100, 100)
>>> data = np.ones(shape)
>>> wcs = make_wcs(shape)
>>> hdu = make_imagehdu(data, wcs=wcs)
>>> print(hdu.data.shape)
(100, 100)
"""
data = np.asanyarray(data)
if data.ndim != 2:
raise ValueError('data must be a 2D array')
if wcs is not None:
header = wcs.to_header()
else:
header = None
return fits.ImageHDU(data, header=header) |
def obs(self):
"""
return the number of observations for your SASdata object
"""
code = "proc sql;select count(*) format best32. into :lastobs from " + self.libref + '.' + self.table + self._dsopts() + ";%put lastobs=&lastobs tom;quit;"
if self.sas.nosub:
print(code)
return
le = self._is_valid()
if not le:
ll = self.sas.submit(code, "text")
lastobs = ll['LOG'].rpartition("lastobs=")
lastobs = lastobs[2].partition(" tom")
lastobs = int(lastobs[0])
else:
print("The SASdata object is not valid. The table doesn't exist in this SAS session at this time.")
lastobs = None
        return lastobs | return the number of observations for your SASdata object | Below is the instruction that describes the task:
### Input:
return the number of observations for your SASdata object
### Response:
def obs(self):
"""
return the number of observations for your SASdata object
"""
code = "proc sql;select count(*) format best32. into :lastobs from " + self.libref + '.' + self.table + self._dsopts() + ";%put lastobs=&lastobs tom;quit;"
if self.sas.nosub:
print(code)
return
le = self._is_valid()
if not le:
ll = self.sas.submit(code, "text")
lastobs = ll['LOG'].rpartition("lastobs=")
lastobs = lastobs[2].partition(" tom")
lastobs = int(lastobs[0])
else:
print("The SASdata object is not valid. The table doesn't exist in this SAS session at this time.")
lastobs = None
return lastobs |
def which(name):
""" Returns the full path to executable in path matching provided name.
`name`
String value.
Returns string or ``None``.
"""
# we were given a filename, return it if it's executable
if os.path.dirname(name) != '':
if not os.path.isdir(name) and os.access(name, os.X_OK):
return name
else:
return None
# fetch PATH env var and split
path_val = os.environ.get('PATH', None) or os.defpath
# return the first match in the paths
for path in path_val.split(os.pathsep):
filename = os.path.join(path, name)
if os.access(filename, os.X_OK):
return filename
return None | Returns the full path to executable in path matching provided name.
`name`
String value.
        Returns string or ``None``. | Below is the instruction that describes the task:
### Input:
Returns the full path to executable in path matching provided name.
`name`
String value.
Returns string or ``None``.
### Response:
def which(name):
""" Returns the full path to executable in path matching provided name.
`name`
String value.
Returns string or ``None``.
"""
# we were given a filename, return it if it's executable
if os.path.dirname(name) != '':
if not os.path.isdir(name) and os.access(name, os.X_OK):
return name
else:
return None
# fetch PATH env var and split
path_val = os.environ.get('PATH', None) or os.defpath
# return the first match in the paths
for path in path_val.split(os.pathsep):
filename = os.path.join(path, name)
if os.access(filename, os.X_OK):
return filename
return None |
def start(self, blocking=False):
"""
Start the interface
:param blocking: Should the call block until stop() is called
(default: False)
:type blocking: bool
:rtype: None
"""
self.debug("()")
# Init GPIO
# Enable warnings
GPIO.setwarnings(True)
# Careful - numbering between different pi version might differ
if self.gpio_mode == "BOARD":
GPIO.setmode(GPIO.BOARD)
else:
GPIO.setmode(GPIO.BCM)
# Register people
try:
self.changer.on_person_new(self.people)
except:
self.exception("Failed to add people to audience")
raise StartException("Adding people failed")
# Setup pins
for gpio in self.gpios:
GPIO.setup(gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(
gpio, GPIO.BOTH,
callback=self._gpio_callback, bouncetime=self.gpio_bouncetime
)
super(GPIODetector, self).start(blocking) | Start the interface
:param blocking: Should the call block until stop() is called
(default: False)
:type blocking: bool
        :rtype: None | Below is the instruction that describes the task:
### Input:
Start the interface
:param blocking: Should the call block until stop() is called
(default: False)
:type blocking: bool
:rtype: None
### Response:
def start(self, blocking=False):
"""
Start the interface
:param blocking: Should the call block until stop() is called
(default: False)
:type blocking: bool
:rtype: None
"""
self.debug("()")
# Init GPIO
# Enable warnings
GPIO.setwarnings(True)
# Careful - numbering between different pi version might differ
if self.gpio_mode == "BOARD":
GPIO.setmode(GPIO.BOARD)
else:
GPIO.setmode(GPIO.BCM)
# Register people
try:
self.changer.on_person_new(self.people)
except:
self.exception("Failed to add people to audience")
raise StartException("Adding people failed")
# Setup pins
for gpio in self.gpios:
GPIO.setup(gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(
gpio, GPIO.BOTH,
callback=self._gpio_callback, bouncetime=self.gpio_bouncetime
)
super(GPIODetector, self).start(blocking) |
def correlation_matvec(P, obs1, obs2=None, times=[1]):
r"""Time-correlation for equilibrium experiment - via matrix vector products.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : list of int (optional)
List of times (in tau) at which to compute correlation
Returns
-------
correlations : ndarray
Correlation values at given times
"""
if obs2 is None:
obs2 = obs1
"""Compute stationary vector"""
mu = statdist(P)
obs1mu = mu * obs1
times = np.asarray(times)
"""Sort in increasing order"""
ind = np.argsort(times)
times = times[ind]
if times[0] < 0:
raise ValueError("Times can not be negative")
dt = times[1:] - times[0:-1]
nt = len(times)
correlations = np.zeros(nt)
"""Propagate obs2 to initial time"""
obs2_t = 1.0 * obs2
obs2_t = propagate(P, obs2_t, times[0])
correlations[0] = np.dot(obs1mu, obs2_t)
for i in range(nt - 1):
obs2_t = propagate(P, obs2_t, dt[i])
correlations[i + 1] = np.dot(obs1mu, obs2_t)
"""Cast back to original order of time points"""
correlations = correlations[ind]
return correlations | r"""Time-correlation for equilibrium experiment - via matrix vector products.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : list of int (optional)
List of times (in tau) at which to compute correlation
Returns
-------
correlations : ndarray
        Correlation values at given times | Below is the instruction that describes the task:
### Input:
r"""Time-correlation for equilibrium experiment - via matrix vector products.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : list of int (optional)
List of times (in tau) at which to compute correlation
Returns
-------
correlations : ndarray
Correlation values at given times
### Response:
def correlation_matvec(P, obs1, obs2=None, times=[1]):
r"""Time-correlation for equilibrium experiment - via matrix vector products.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : list of int (optional)
List of times (in tau) at which to compute correlation
Returns
-------
correlations : ndarray
Correlation values at given times
"""
if obs2 is None:
obs2 = obs1
"""Compute stationary vector"""
mu = statdist(P)
obs1mu = mu * obs1
times = np.asarray(times)
"""Sort in increasing order"""
ind = np.argsort(times)
times = times[ind]
if times[0] < 0:
raise ValueError("Times can not be negative")
dt = times[1:] - times[0:-1]
nt = len(times)
correlations = np.zeros(nt)
"""Propagate obs2 to initial time"""
obs2_t = 1.0 * obs2
obs2_t = propagate(P, obs2_t, times[0])
correlations[0] = np.dot(obs1mu, obs2_t)
for i in range(nt - 1):
obs2_t = propagate(P, obs2_t, dt[i])
correlations[i + 1] = np.dot(obs1mu, obs2_t)
"""Cast back to original order of time points"""
correlations = correlations[ind]
return correlations |
def delete_component(self, instance_name):
'''Delete a component.
Deletes the component specified by @ref instance_name from the manager.
This will invalidate any objects that are children of this node.
@param instance_name The instance name of the component to delete.
@raises FailedToDeleteComponentError
'''
with self._mutex:
if self._obj.delete_component(instance_name) != RTC.RTC_OK:
raise exceptions.FailedToDeleteComponentError(instance_name)
# The list of child components will have changed now, so it must be
# reparsed.
self._parse_component_children() | Delete a component.
Deletes the component specified by @ref instance_name from the manager.
This will invalidate any objects that are children of this node.
@param instance_name The instance name of the component to delete.
        @raises FailedToDeleteComponentError | Below is the instruction that describes the task:
### Input:
Delete a component.
Deletes the component specified by @ref instance_name from the manager.
This will invalidate any objects that are children of this node.
@param instance_name The instance name of the component to delete.
@raises FailedToDeleteComponentError
### Response:
def delete_component(self, instance_name):
'''Delete a component.
Deletes the component specified by @ref instance_name from the manager.
This will invalidate any objects that are children of this node.
@param instance_name The instance name of the component to delete.
@raises FailedToDeleteComponentError
'''
with self._mutex:
if self._obj.delete_component(instance_name) != RTC.RTC_OK:
raise exceptions.FailedToDeleteComponentError(instance_name)
# The list of child components will have changed now, so it must be
# reparsed.
self._parse_component_children() |
def SXTB(self, params):
    """
    SXTB Ra, Rb

    Sign extend the byte in Rb and store the result in Ra.

    Returns a zero-argument function that performs the register update
    when called, i.e. execution of the instruction is deferred.
    """
    Ra, Rb = self.get_two_parameters(r'\s*([^\s,]*),\s*([^\s,]*)(,\s*[^\s,]*)*\s*', params)
    self.check_arguments(low_registers=(Ra, Rb))

    def SXTB_func():
        # If bit 7 (the byte's sign bit) is set, fill bits 31..8 with
        # ones; otherwise keep only the low byte (zero upper bits).
        if self.register[Rb] & (1 << 7):
            self.register[Ra] = 0xFFFFFF00 + (self.register[Rb] & 0xFF)
        else:
            self.register[Ra] = (self.register[Rb] & 0xFF)

    return SXTB_func
Sign extend the byte in Rb and store the result in Ra | Below is the the instruction that describes the task:
### Input:
STXB Ra, Rb
Sign extend the byte in Rb and store the result in Ra
### Response:
def SXTB(self, params):
"""
STXB Ra, Rb
Sign extend the byte in Rb and store the result in Ra
"""
Ra, Rb = self.get_two_parameters(r'\s*([^\s,]*),\s*([^\s,]*)(,\s*[^\s,]*)*\s*', params)
self.check_arguments(low_registers=(Ra, Rb))
def SXTB_func():
if self.register[Rb] & (1 << 7):
self.register[Ra] = 0xFFFFFF00 + (self.register[Rb] & 0xFF)
else:
self.register[Ra] = (self.register[Rb] & 0xFF)
return SXTB_func |
def _download_audio_files(self, records, target_path):
    """
    Download all audio files based on the given records.
    """
    # Each record carries the sentence id at index 0 and the language
    # code at index 2; files are grouped on disk by language.
    for record in records:
        sentence_id, lang = record[0], record[2]
        lang_folder = os.path.join(target_path, 'audio', lang)
        os.makedirs(lang_folder, exist_ok=True)
        target_file = os.path.join(lang_folder, '{}.mp3'.format(sentence_id))
        source_url = 'https://audio.tatoeba.org/sentences/{}/{}.mp3'.format(lang, sentence_id)
        download.download_file(source_url, target_file)
### Input:
Download all audio files based on the given records.
### Response:
def _download_audio_files(self, records, target_path):
"""
Download all audio files based on the given records.
"""
for record in records:
audio_folder = os.path.join(target_path, 'audio', record[2])
audio_file = os.path.join(audio_folder, '{}.mp3'.format(record[0]))
os.makedirs(audio_folder, exist_ok=True)
download_url = 'https://audio.tatoeba.org/sentences/{}/{}.mp3'.format(record[2], record[0])
download.download_file(download_url, audio_file) |
def formfield(self, **kwargs):
    """Get the form for field."""
    # Field-level defaults; any caller-supplied kwargs take precedence.
    options = dict(form_class=RichTextFormField, config=self.config)
    options.update(kwargs)
    return super(RichTextField, self).formfield(**options)
### Input:
Get the form for field.
### Response:
def formfield(self, **kwargs):
"""Get the form for field."""
defaults = {
'form_class': RichTextFormField,
'config': self.config,
}
defaults.update(kwargs)
return super(RichTextField, self).formfield(**defaults) |
def pipe_reverse(context=None, _INPUT=None, conf=None, **kwargs):
    """An operator that reverses the order of source items. Not loopable. Not
    lazy.

    Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : pipe2py.modules pipe like object (iterable of items)
    conf : unused

    Yields
    ------
    _OUTPUT : items
    """
    # Materialize the whole input (not lazy), then drain it from the tail.
    buffered = list(_INPUT)
    while buffered:
        yield buffered.pop()
lazy.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : unused
Yields
------
_OUTPUT : items | Below is the the instruction that describes the task:
### Input:
An operator that reverses the order of source items. Not loopable. Not
lazy.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : unused
Yields
------
_OUTPUT : items
### Response:
def pipe_reverse(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that reverses the order of source items. Not loopable. Not
lazy.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : unused
Yields
------
_OUTPUT : items
"""
for item in reversed(list(_INPUT)):
yield item |
def get_project_info(configs, heartbeat, data):
    """Find the current project and branch.

    First looks for a .wakatime-project file. Second, uses the --project arg.
    Third, uses the folder name from a revision control repository. Last, uses
    the --alternate-project arg.

    Returns a project, branch tuple; either element may be None.
    """
    project_name, branch_name = heartbeat.project, heartbeat.branch

    # Non-file heartbeats have no folder to inspect, so the project can
    # only come from the command-line arguments.
    if heartbeat.type != 'file':
        project_name = project_name or heartbeat.args.project or heartbeat.args.alternate_project
        return project_name, branch_name

    # Stage 1: config-file based detection (CONFIG_PLUGINS).
    if project_name is None or branch_name is None:
        for plugin_cls in CONFIG_PLUGINS:
            plugin_name = plugin_cls.__name__.lower()
            plugin_configs = get_configs_for_plugin(plugin_name, configs)
            project = plugin_cls(heartbeat.entity, configs=plugin_configs)
            if project.process():
                project_name = project_name or project.name()
                # NOTE: the config plugin's branch replaces any prior value.
                branch_name = project.branch()
                break

    # Stage 2: explicit project from the heartbeat data / --project arg.
    if project_name is None:
        project_name = data.get('project') or heartbeat.args.project

    hide_project = heartbeat.should_obfuscate_project()
    # When obfuscating, an explicitly named project is kept but the branch
    # is always dropped.
    if hide_project and project_name is not None:
        return project_name, None

    # Stage 3: revision-control detection (repo folder name + branch).
    if project_name is None or branch_name is None:
        for plugin_cls in REV_CONTROL_PLUGINS:
            plugin_name = plugin_cls.__name__.lower()
            plugin_configs = get_configs_for_plugin(plugin_name, configs)
            project = plugin_cls(heartbeat.entity, configs=plugin_configs)
            if project.process():
                project_name = project_name or project.name()
                branch_name = branch_name or project.branch()
                if hide_project:
                    branch_name = None
                    # Replace the real repo name with a generated one and
                    # write it to .wakatime-project (presumably so later
                    # heartbeats reuse the same obfuscated name — see the
                    # config-plugin stage above).
                    project_name = generate_project_name()
                    project_file = os.path.join(project.folder(), '.wakatime-project')
                    try:
                        with open(project_file, 'w') as fh:
                            fh.write(project_name)
                    except IOError:
                        # Could not persist the generated name; send no
                        # project rather than an unstable random one.
                        project_name = None
                break

    # Stage 4: fall back to --alternate-project, but never when obfuscating.
    if project_name is None and not hide_project:
        project_name = data.get('alternate_project') or heartbeat.args.alternate_project

    return project_name, branch_name
First looks for a .wakatime-project file. Second, uses the --project arg.
Third, uses the folder name from a revision control repository. Last, uses
the --alternate-project arg.
Returns a project, branch tuple. | Below is the the instruction that describes the task:
### Input:
Find the current project and branch.
First looks for a .wakatime-project file. Second, uses the --project arg.
Third, uses the folder name from a revision control repository. Last, uses
the --alternate-project arg.
Returns a project, branch tuple.
### Response:
def get_project_info(configs, heartbeat, data):
"""Find the current project and branch.
First looks for a .wakatime-project file. Second, uses the --project arg.
Third, uses the folder name from a revision control repository. Last, uses
the --alternate-project arg.
Returns a project, branch tuple.
"""
project_name, branch_name = heartbeat.project, heartbeat.branch
if heartbeat.type != 'file':
project_name = project_name or heartbeat.args.project or heartbeat.args.alternate_project
return project_name, branch_name
if project_name is None or branch_name is None:
for plugin_cls in CONFIG_PLUGINS:
plugin_name = plugin_cls.__name__.lower()
plugin_configs = get_configs_for_plugin(plugin_name, configs)
project = plugin_cls(heartbeat.entity, configs=plugin_configs)
if project.process():
project_name = project_name or project.name()
branch_name = project.branch()
break
if project_name is None:
project_name = data.get('project') or heartbeat.args.project
hide_project = heartbeat.should_obfuscate_project()
if hide_project and project_name is not None:
return project_name, None
if project_name is None or branch_name is None:
for plugin_cls in REV_CONTROL_PLUGINS:
plugin_name = plugin_cls.__name__.lower()
plugin_configs = get_configs_for_plugin(plugin_name, configs)
project = plugin_cls(heartbeat.entity, configs=plugin_configs)
if project.process():
project_name = project_name or project.name()
branch_name = branch_name or project.branch()
if hide_project:
branch_name = None
project_name = generate_project_name()
project_file = os.path.join(project.folder(), '.wakatime-project')
try:
with open(project_file, 'w') as fh:
fh.write(project_name)
except IOError:
project_name = None
break
if project_name is None and not hide_project:
project_name = data.get('alternate_project') or heartbeat.args.alternate_project
return project_name, branch_name |
def parallel_epd_lclist(lclist,
                        externalparams,
                        timecols=None,
                        magcols=None,
                        errcols=None,
                        lcformat='hat-sql',
                        lcformatdir=None,
                        epdsmooth_sigclip=3.0,
                        epdsmooth_windowsize=21,
                        epdsmooth_func=smooth_magseries_savgol,
                        epdsmooth_extraparams=None,
                        nworkers=NCPUS,
                        maxworkertasks=1000):
    '''This applies EPD in parallel to all LCs in the input list.

    Parameters
    ----------

    lclist : list of str
        This is the list of light curve files to run EPD on.

    externalparams : dict or None
        This is a dict that indicates which keys in the lcdict obtained from the
        lcfile correspond to the required external parameters. As with timecol,
        magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound
        keys ('magaperture1.mags'). The dict should look something like::

          {'fsv':'<lcdict key>' array: S values for each observation,
           'fdv':'<lcdict key>' array: D values for each observation,
           'fkv':'<lcdict key>' array: K values for each observation,
           'xcc':'<lcdict key>' array: x coords for each observation,
           'ycc':'<lcdict key>' array: y coords for each observation,
           'bgv':'<lcdict key>' array: sky background for each observation,
           'bge':'<lcdict key>' array: sky background err for each observation,
           'iha':'<lcdict key>' array: hour angle for each observation,
           'izd':'<lcdict key>' array: zenith distance for each observation}

        Alternatively, if these exact keys are already present in the lcdict,
        indicate this by setting externalparams to None.

    timecols,magcols,errcols : lists of str
        The keys in the lcdict produced by your light curve reader function that
        correspond to the times, mags/fluxes, and associated measurement errors
        that will be used as inputs to the EPD process. If these are None, the
        default values for `timecols`, `magcols`, and `errcols` for your light
        curve format will be used here.

    lcformat : str
        This is the `formatkey` associated with your light curve format, which
        you previously passed in to the `lcproc.register_lcformat`
        function. This will be used to look up how to find and read the light
        curve files.

    lcformatdir : str or None
        If this is provided, gives the path to a directory when you've stored
        your lcformat description JSONs, other than the usual directories lcproc
        knows to search for them in. Use this along with `lcformat` to specify
        an LC format JSON file that's not currently registered with lcproc.

    epdsmooth_sigclip : float or int or sequence of two floats/ints or None
        This specifies how to sigma-clip the input LC before fitting the EPD
        function to it.

        If a single float or int, a symmetric sigma-clip will be performed using
        the number provided as the sigma-multiplier to cut out from the input
        time-series.

        If a list of two ints/floats is provided, the function will perform an
        'asymmetric' sigma-clip. The first element in this list is the sigma
        value to use for fainter flux/mag values; the second element in this
        list is the sigma value to use for brighter flux/mag values. For
        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
        dimmings and greater than 3-sigma brightenings. Here the meaning of
        "dimming" and "brightening" is set by *physics* (not the magnitude
        system), which is why the `magsarefluxes` kwarg must be correctly set.

        If `sigclip` is None, no sigma-clipping will be performed, and the
        time-series (with non-finite elems removed) will be passed through to
        the output.

    epdsmooth_windowsize : int
        This is the number of LC points to smooth over to generate a smoothed
        light curve that will be used to fit the EPD function.

    epdsmooth_func : Python function
        This sets the smoothing filter function to use. A Savitsky-Golay filter
        is used to smooth the light curve by default. The functions that can be
        used with this kwarg are listed in `varbase.trends`. If you want to use
        your own function, it MUST have the following signature::

            def smoothfunc(mags_array, window_size, **extraparams)

        and return a numpy array of the same size as `mags_array` with the
        smoothed time-series. Any extra params can be provided using the
        `extraparams` dict.

    epdsmooth_extraparams : dict
        This is a dict of any extra filter params to supply to the smoothing
        function.

    nworkers : int
        The number of parallel workers to launch when processing the LCs.

    maxworkertasks : int
        The maximum number of tasks a parallel worker will complete before it is
        replaced with a new one (sometimes helps with memory-leaks).

    Returns
    -------

    dict
        Returns a dict organized by all the keys in the input `magcols` list,
        containing lists of EPD pickle light curves for that `magcol`.

    Notes
    -----

    - S -> measure of PSF sharpness (~1/sigma^2; so smaller S = wider PSF)
    - D -> measure of PSF ellipticity in xy direction
    - K -> measure of PSF ellipticity in cross direction

    S, D, K are related to the PSF's variance and covariance, see eqn 30-33 in
    A. Pal's thesis: https://arxiv.org/abs/0906.3486

    '''
    # Resolve the registered light curve format into its reader function and
    # default column keys; bail out with None if the format is unknown.
    try:
        formatinfo = get_lcformat(lcformat,
                                  use_lcformat_dir=lcformatdir)
        if formatinfo:
            (fileglob, readerfunc,
             dtimecols, dmagcols, derrcols,
             magsarefluxes, normfunc) = formatinfo
        else:
            LOGERROR("can't figure out the light curve format")
            return None
    except Exception as e:
        LOGEXCEPTION("can't figure out the light curve format")
        return None

    # override the default timecols, magcols, and errcols
    # using the ones provided to the function
    if timecols is None:
        timecols = dtimecols
    if magcols is None:
        magcols = dmagcols
    if errcols is None:
        errcols = derrcols

    outdict = {}

    # run by magcol: each (time, mag, err) column triple gets its own
    # worker pool pass over the whole LC list
    for t, m, e in zip(timecols, magcols, errcols):

        tasks = [(x, t, m, e, externalparams, lcformat, lcformatdir,
                  epdsmooth_sigclip, epdsmooth_windowsize,
                  epdsmooth_func, epdsmooth_extraparams) for
                 x in lclist]

        # maxtasksperchild recycles workers periodically to contain any
        # per-task memory growth
        pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)
        results = pool.map(parallel_epd_worker, tasks)
        pool.close()
        pool.join()

        outdict[m] = results

    return outdict
Parameters
----------
lclist : list of str
This is the list of light curve files to run EPD on.
externalparams : dict or None
This is a dict that indicates which keys in the lcdict obtained from the
lcfile correspond to the required external parameters. As with timecol,
magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound
keys ('magaperture1.mags'). The dict should look something like::
{'fsv':'<lcdict key>' array: S values for each observation,
'fdv':'<lcdict key>' array: D values for each observation,
'fkv':'<lcdict key>' array: K values for each observation,
'xcc':'<lcdict key>' array: x coords for each observation,
'ycc':'<lcdict key>' array: y coords for each observation,
'bgv':'<lcdict key>' array: sky background for each observation,
'bge':'<lcdict key>' array: sky background err for each observation,
'iha':'<lcdict key>' array: hour angle for each observation,
'izd':'<lcdict key>' array: zenith distance for each observation}
Alternatively, if these exact keys are already present in the lcdict,
indicate this by setting externalparams to None.
timecols,magcols,errcols : lists of str
The keys in the lcdict produced by your light curve reader function that
correspond to the times, mags/fluxes, and associated measurement errors
that will be used as inputs to the EPD process. If these are None, the
default values for `timecols`, `magcols`, and `errcols` for your light
curve format will be used here.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curve files.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before fitting the EPD
function to it.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
This sets the smoothing filter function to use. A Savitsky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
nworkers : int
The number of parallel workers to launch when processing the LCs.
maxworkertasks : int
The maximum number of tasks a parallel worker will complete before it is
replaced with a new one (sometimes helps with memory-leaks).
Returns
-------
dict
Returns a dict organized by all the keys in the input `magcols` list,
containing lists of EPD pickle light curves for that `magcol`.
Notes
-----
- S -> measure of PSF sharpness (~1/sigma^2 sosmaller S = wider PSF)
- D -> measure of PSF ellipticity in xy direction
- K -> measure of PSF ellipticity in cross direction
S, D, K are related to the PSF's variance and covariance, see eqn 30-33 in
A. Pal's thesis: https://arxiv.org/abs/0906.3486 | Below is the the instruction that describes the task:
### Input:
This applies EPD in parallel to all LCs in the input list.
Parameters
----------
lclist : list of str
This is the list of light curve files to run EPD on.
externalparams : dict or None
This is a dict that indicates which keys in the lcdict obtained from the
lcfile correspond to the required external parameters. As with timecol,
magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound
keys ('magaperture1.mags'). The dict should look something like::
{'fsv':'<lcdict key>' array: S values for each observation,
'fdv':'<lcdict key>' array: D values for each observation,
'fkv':'<lcdict key>' array: K values for each observation,
'xcc':'<lcdict key>' array: x coords for each observation,
'ycc':'<lcdict key>' array: y coords for each observation,
'bgv':'<lcdict key>' array: sky background for each observation,
'bge':'<lcdict key>' array: sky background err for each observation,
'iha':'<lcdict key>' array: hour angle for each observation,
'izd':'<lcdict key>' array: zenith distance for each observation}
Alternatively, if these exact keys are already present in the lcdict,
indicate this by setting externalparams to None.
timecols,magcols,errcols : lists of str
The keys in the lcdict produced by your light curve reader function that
correspond to the times, mags/fluxes, and associated measurement errors
that will be used as inputs to the EPD process. If these are None, the
default values for `timecols`, `magcols`, and `errcols` for your light
curve format will be used here.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curve files.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before fitting the EPD
function to it.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
This sets the smoothing filter function to use. A Savitsky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
nworkers : int
The number of parallel workers to launch when processing the LCs.
maxworkertasks : int
The maximum number of tasks a parallel worker will complete before it is
replaced with a new one (sometimes helps with memory-leaks).
Returns
-------
dict
Returns a dict organized by all the keys in the input `magcols` list,
containing lists of EPD pickle light curves for that `magcol`.
Notes
-----
- S -> measure of PSF sharpness (~1/sigma^2 sosmaller S = wider PSF)
- D -> measure of PSF ellipticity in xy direction
- K -> measure of PSF ellipticity in cross direction
S, D, K are related to the PSF's variance and covariance, see eqn 30-33 in
A. Pal's thesis: https://arxiv.org/abs/0906.3486
### Response:
def parallel_epd_lclist(lclist,
externalparams,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
epdsmooth_sigclip=3.0,
epdsmooth_windowsize=21,
epdsmooth_func=smooth_magseries_savgol,
epdsmooth_extraparams=None,
nworkers=NCPUS,
maxworkertasks=1000):
'''This applies EPD in parallel to all LCs in the input list.
Parameters
----------
lclist : list of str
This is the list of light curve files to run EPD on.
externalparams : dict or None
This is a dict that indicates which keys in the lcdict obtained from the
lcfile correspond to the required external parameters. As with timecol,
magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound
keys ('magaperture1.mags'). The dict should look something like::
{'fsv':'<lcdict key>' array: S values for each observation,
'fdv':'<lcdict key>' array: D values for each observation,
'fkv':'<lcdict key>' array: K values for each observation,
'xcc':'<lcdict key>' array: x coords for each observation,
'ycc':'<lcdict key>' array: y coords for each observation,
'bgv':'<lcdict key>' array: sky background for each observation,
'bge':'<lcdict key>' array: sky background err for each observation,
'iha':'<lcdict key>' array: hour angle for each observation,
'izd':'<lcdict key>' array: zenith distance for each observation}
Alternatively, if these exact keys are already present in the lcdict,
indicate this by setting externalparams to None.
timecols,magcols,errcols : lists of str
The keys in the lcdict produced by your light curve reader function that
correspond to the times, mags/fluxes, and associated measurement errors
that will be used as inputs to the EPD process. If these are None, the
default values for `timecols`, `magcols`, and `errcols` for your light
curve format will be used here.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curve files.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before fitting the EPD
function to it.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
This sets the smoothing filter function to use. A Savitsky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
nworkers : int
The number of parallel workers to launch when processing the LCs.
maxworkertasks : int
The maximum number of tasks a parallel worker will complete before it is
replaced with a new one (sometimes helps with memory-leaks).
Returns
-------
dict
Returns a dict organized by all the keys in the input `magcols` list,
containing lists of EPD pickle light curves for that `magcol`.
Notes
-----
- S -> measure of PSF sharpness (~1/sigma^2 sosmaller S = wider PSF)
- D -> measure of PSF ellipticity in xy direction
- K -> measure of PSF ellipticity in cross direction
S, D, K are related to the PSF's variance and covariance, see eqn 30-33 in
A. Pal's thesis: https://arxiv.org/abs/0906.3486
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(fileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols
if magcols is None:
magcols = dmagcols
if errcols is None:
errcols = derrcols
outdict = {}
# run by magcol
for t, m, e in zip(timecols, magcols, errcols):
tasks = [(x, t, m, e, externalparams, lcformat, lcformatdir,
epdsmooth_sigclip, epdsmooth_windowsize,
epdsmooth_func, epdsmooth_extraparams) for
x in lclist]
pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)
results = pool.map(parallel_epd_worker, tasks)
pool.close()
pool.join()
outdict[m] = results
return outdict |
async def variant(self, elem=None, elem_type=None, params=None):
    """
    Loads/dumps variant type.

    :param elem: value to serialize when writing, or an optional
        pre-built destination passed to the loader when reading
    :param elem_type: declared variant type; defaults to
        ``elem.__class__`` when not given
    :param params: extra (de)serialization parameters forwarded to the
        dump/load routines
    :return: the (tracked) deserialized object when reading, or the
        result of the dump when writing
    """
    elem_type = elem_type if elem_type else elem.__class__
    version = await self.version(elem_type, params, elem=elem)

    # Already-processed objects are reused from the tracker instead of
    # being (de)serialized again.
    if self.is_tracked():
        return self.get_tracked()

    # A type can take over variant handling entirely by providing a
    # boost_serialize() coroutine.
    if hasattr(elem_type, 'boost_serialize'):
        elem = elem_type() if elem is None else elem
        self.pop_track()
        return await elem.boost_serialize(self, elem=elem, elem_type=elem_type, params=params, version=version)

    if self.writing:
        self.pop_track()
        return await self.dump_variant(elem=elem,
                                       elem_type=elem_type if elem_type else elem.__class__, params=params)
    else:
        obj = await self.load_variant(elem_type=elem_type if elem_type else elem.__class__,
                                      params=params, elem=elem)
        # Register the loaded object so later references resolve to it.
        return self.track_obj(obj)
:param elem:
:param elem_type:
:param params:
:return: | Below is the the instruction that describes the task:
### Input:
Loads/dumps variant type
:param elem:
:param elem_type:
:param params:
:return:
### Response:
async def variant(self, elem=None, elem_type=None, params=None):
"""
Loads/dumps variant type
:param elem:
:param elem_type:
:param params:
:return:
"""
elem_type = elem_type if elem_type else elem.__class__
version = await self.version(elem_type, params, elem=elem)
if self.is_tracked():
return self.get_tracked()
if hasattr(elem_type, 'boost_serialize'):
elem = elem_type() if elem is None else elem
self.pop_track()
return await elem.boost_serialize(self, elem=elem, elem_type=elem_type, params=params, version=version)
if self.writing:
self.pop_track()
return await self.dump_variant(elem=elem,
elem_type=elem_type if elem_type else elem.__class__, params=params)
else:
obj = await self.load_variant(elem_type=elem_type if elem_type else elem.__class__,
params=params, elem=elem)
return self.track_obj(obj) |
def all(self, instance):
    """Get all ACLs associated with the instance specified by name.

    :param str instance: The name of the instance from which to fetch ACLs.
    :returns: A list of :py:class:`Acl` objects associated with the specified instance.
    :rtype: list
    """
    # Build the instance-specific endpoint, fetch it, then turn the
    # response payload into concrete Acl objects.
    endpoint = self._url.format(instance=instance)
    raw_response = requests.get(endpoint, **self._default_request_kwargs)
    acl_data = self._get_response_data(raw_response)
    return self._concrete_acl_list(acl_data)
:param str instance: The name of the instance from which to fetch ACLs.
:returns: A list of :py:class:`Acl` objects associated with the specified instance.
:rtype: list | Below is the the instruction that describes the task:
### Input:
Get all ACLs associated with the instance specified by name.
:param str instance: The name of the instance from which to fetch ACLs.
:returns: A list of :py:class:`Acl` objects associated with the specified instance.
:rtype: list
### Response:
def all(self, instance):
"""Get all ACLs associated with the instance specified by name.
:param str instance: The name of the instance from which to fetch ACLs.
:returns: A list of :py:class:`Acl` objects associated with the specified instance.
:rtype: list
"""
url = self._url.format(instance=instance)
response = requests.get(url, **self._default_request_kwargs)
data = self._get_response_data(response)
return self._concrete_acl_list(data) |
def show_rsa(minion_id, dns_name):
'''
Show a private RSA key
CLI Example:
.. code-block:: bash
salt-run digicert.show_rsa myminion domain.example.com
'''
cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR)
bank = 'digicert/domains'
data = cache.fetch(
bank, dns_name
)
return data['private_key'] | Show a private RSA key
CLI Example:
.. code-block:: bash
salt-run digicert.show_rsa myminion domain.example.com | Below is the instruction that describes the task:
### Input:
Show a private RSA key
CLI Example:
.. code-block:: bash
salt-run digicert.show_rsa myminion domain.example.com
### Response:
def show_rsa(minion_id, dns_name):
'''
Show a private RSA key
CLI Example:
.. code-block:: bash
salt-run digicert.show_rsa myminion domain.example.com
'''
cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR)
bank = 'digicert/domains'
data = cache.fetch(
bank, dns_name
)
return data['private_key'] |
def get_all_users(**kwargs):
"""
Get the username & ID of all users.
Use the the filter if it has been provided
The filter has to be a list of values
"""
users_qry = db.DBSession.query(User)
filter_type = kwargs.get('filter_type')
filter_value = kwargs.get('filter_value')
if filter_type is not None:
# Filtering the search of users
if filter_type == "id":
if isinstance(filter_value, str):
# Trying to read a csv string
log.info("[HB.users] Getting user by Filter ID : %s", filter_value)
filter_value = eval(filter_value)
if type(filter_value) is int:
users_qry = users_qry.filter(User.id==filter_value)
else:
users_qry = users_qry.filter(User.id.in_(filter_value))
elif filter_type == "username":
if isinstance(filter_value, str):
# Trying to read a csv string
log.info("[HB.users] Getting user by Filter Username : %s", filter_value)
filter_value = filter_value.split(",")
for i, em in enumerate(filter_value):
log.info("[HB.users] >>> Getting user by single Username : %s", em)
filter_value[i] = em.strip()
if isinstance(filter_value, str):
users_qry = users_qry.filter(User.username==filter_value)
else:
users_qry = users_qry.filter(User.username.in_(filter_value))
else:
raise Exception("Filter type '{}' not allowed".format(filter_type))
else:
log.info('[HB.users] Getting All Users')
rs = users_qry.all()
return rs | Get the username & ID of all users.
Use the the filter if it has been provided
The filter has to be a list of values | Below is the instruction that describes the task:
### Input:
Get the username & ID of all users.
Use the the filter if it has been provided
The filter has to be a list of values
### Response:
def get_all_users(**kwargs):
"""
Get the username & ID of all users.
Use the the filter if it has been provided
The filter has to be a list of values
"""
users_qry = db.DBSession.query(User)
filter_type = kwargs.get('filter_type')
filter_value = kwargs.get('filter_value')
if filter_type is not None:
# Filtering the search of users
if filter_type == "id":
if isinstance(filter_value, str):
# Trying to read a csv string
log.info("[HB.users] Getting user by Filter ID : %s", filter_value)
filter_value = eval(filter_value)
if type(filter_value) is int:
users_qry = users_qry.filter(User.id==filter_value)
else:
users_qry = users_qry.filter(User.id.in_(filter_value))
elif filter_type == "username":
if isinstance(filter_value, str):
# Trying to read a csv string
log.info("[HB.users] Getting user by Filter Username : %s", filter_value)
filter_value = filter_value.split(",")
for i, em in enumerate(filter_value):
log.info("[HB.users] >>> Getting user by single Username : %s", em)
filter_value[i] = em.strip()
if isinstance(filter_value, str):
users_qry = users_qry.filter(User.username==filter_value)
else:
users_qry = users_qry.filter(User.username.in_(filter_value))
else:
raise Exception("Filter type '{}' not allowed".format(filter_type))
else:
log.info('[HB.users] Getting All Users')
rs = users_qry.all()
return rs |
def add_resource(self, ref, **properties):
"""Add one or more resources entities, from a url and property values,
possibly adding multiple entries for an excel spreadsheet or ZIP file"""
raise NotImplementedError("Still uses decompose_url")
du = Bunch(decompose_url(ref))
added = []
if du.proto == 'file' and isdir(ref):
for f in self.find_files(ref, ['csv']):
if f.endswith(DEFAULT_METATAB_FILE):
continue
if self._doc.find_first('Root.Datafile', value=f):
self.prt("Datafile exists for '{}', ignoring".format(f))
else:
added.extend(self.add_resource(f, **properties))
else:
self.prt("Enumerating '{}'".format(ref))
for c in enumerate_contents(ref, self._cache):
added.append(self.add_single_resource(c.rebuild_url(), **properties))
return added | Add one or more resources entities, from a url and property values,
possibly adding multiple entries for an excel spreadsheet or ZIP file | Below is the instruction that describes the task:
### Input:
Add one or more resources entities, from a url and property values,
possibly adding multiple entries for an excel spreadsheet or ZIP file
### Response:
def add_resource(self, ref, **properties):
"""Add one or more resources entities, from a url and property values,
possibly adding multiple entries for an excel spreadsheet or ZIP file"""
raise NotImplementedError("Still uses decompose_url")
du = Bunch(decompose_url(ref))
added = []
if du.proto == 'file' and isdir(ref):
for f in self.find_files(ref, ['csv']):
if f.endswith(DEFAULT_METATAB_FILE):
continue
if self._doc.find_first('Root.Datafile', value=f):
self.prt("Datafile exists for '{}', ignoring".format(f))
else:
added.extend(self.add_resource(f, **properties))
else:
self.prt("Enumerating '{}'".format(ref))
for c in enumerate_contents(ref, self._cache):
added.append(self.add_single_resource(c.rebuild_url(), **properties))
return added |
def workflow_states_column(self, obj):
""" Return text description of workflow states assigned to object """
workflow_states = models.WorkflowState.objects.filter(
content_type=self._get_obj_ct(obj),
object_id=obj.pk,
)
return ', '.join([unicode(wfs) for wfs in workflow_states]) | Return text description of workflow states assigned to object | Below is the the instruction that describes the task:
### Input:
Return text description of workflow states assigned to object
### Response:
def workflow_states_column(self, obj):
""" Return text description of workflow states assigned to object """
workflow_states = models.WorkflowState.objects.filter(
content_type=self._get_obj_ct(obj),
object_id=obj.pk,
)
return ', '.join([unicode(wfs) for wfs in workflow_states]) |
def _append_comment(ret, comment):
'''
append ``comment`` to ``ret['comment']``
'''
if ret['comment']:
ret['comment'] = ret['comment'].rstrip() + '\n' + comment
else:
ret['comment'] = comment
return ret | append ``comment`` to ``ret['comment']`` | Below is the the instruction that describes the task:
### Input:
append ``comment`` to ``ret['comment']``
### Response:
def _append_comment(ret, comment):
'''
append ``comment`` to ``ret['comment']``
'''
if ret['comment']:
ret['comment'] = ret['comment'].rstrip() + '\n' + comment
else:
ret['comment'] = comment
return ret |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'results') and self.results is not None:
_dict['results'] = [x._to_dict() for x in self.results]
if hasattr(self, 'result_index') and self.result_index is not None:
_dict['result_index'] = self.result_index
if hasattr(self, 'speaker_labels') and self.speaker_labels is not None:
_dict['speaker_labels'] = [
x._to_dict() for x in self.speaker_labels
]
if hasattr(self, 'warnings') and self.warnings is not None:
_dict['warnings'] = self.warnings
return _dict | Return a json dictionary representing this model. | Below is the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'results') and self.results is not None:
_dict['results'] = [x._to_dict() for x in self.results]
if hasattr(self, 'result_index') and self.result_index is not None:
_dict['result_index'] = self.result_index
if hasattr(self, 'speaker_labels') and self.speaker_labels is not None:
_dict['speaker_labels'] = [
x._to_dict() for x in self.speaker_labels
]
if hasattr(self, 'warnings') and self.warnings is not None:
_dict['warnings'] = self.warnings
return _dict |
def _cast_to_type(self, value):
""" Convert the value to an int and raise error on failures"""
try:
return int(value)
except (ValueError, TypeError):
self.fail('invalid', value=value) | Convert the value to an int and raise error on failures | Below is the instruction that describes the task:
### Input:
Convert the value to an int and raise error on failures
### Response:
def _cast_to_type(self, value):
""" Convert the value to an int and raise error on failures"""
try:
return int(value)
except (ValueError, TypeError):
self.fail('invalid', value=value) |
def _clean(self, value):
"""Perform a standardized pipline of operations across an iterable."""
value = (str(v) for v in value)
if self.strip:
value = (v.strip() for v in value)
if not self.empty:
value = (v for v in value if v)
return value | Perform a standardized pipline of operations across an iterable. | Below is the instruction that describes the task:
### Input:
Perform a standardized pipline of operations across an iterable.
### Response:
def _clean(self, value):
"""Perform a standardized pipline of operations across an iterable."""
value = (str(v) for v in value)
if self.strip:
value = (v.strip() for v in value)
if not self.empty:
value = (v for v in value if v)
return value |
def get_for_control_var_and_eval_expr(comm_type, kwargs):
"""Returns tuple that consists of control variable name and iterable that is result
of evaluated expression of given for loop.
For example:
- given 'for $i in $(echo "foo bar")' it returns (['i'], ['foo', 'bar'])
- given 'for $i, $j in $foo' it returns (['i', 'j'], [('foo', 'bar')])
"""
# let possible exceptions bubble up
control_vars, iter_type, expression = parse_for(comm_type)
eval_expression = evaluate_expression(expression, kwargs)[1]
iterval = []
if len(control_vars) == 2:
if not isinstance(eval_expression, dict):
raise exceptions.YamlSyntaxError('Can\'t expand {t} to two control variables.'.
format(t=type(eval_expression)))
else:
iterval = list(eval_expression.items())
elif isinstance(eval_expression, six.string_types):
if iter_type == 'word_in':
iterval = eval_expression.split()
else:
iterval = eval_expression
else:
iterval = eval_expression
return control_vars, iterval | Returns tuple that consists of control variable name and iterable that is result
of evaluated expression of given for loop.
For example:
- given 'for $i in $(echo "foo bar")' it returns (['i'], ['foo', 'bar'])
- given 'for $i, $j in $foo' it returns (['i', 'j'], [('foo', 'bar')]) | Below is the instruction that describes the task:
### Input:
Returns tuple that consists of control variable name and iterable that is result
of evaluated expression of given for loop.
For example:
- given 'for $i in $(echo "foo bar")' it returns (['i'], ['foo', 'bar'])
- given 'for $i, $j in $foo' it returns (['i', 'j'], [('foo', 'bar')])
### Response:
def get_for_control_var_and_eval_expr(comm_type, kwargs):
"""Returns tuple that consists of control variable name and iterable that is result
of evaluated expression of given for loop.
For example:
- given 'for $i in $(echo "foo bar")' it returns (['i'], ['foo', 'bar'])
- given 'for $i, $j in $foo' it returns (['i', 'j'], [('foo', 'bar')])
"""
# let possible exceptions bubble up
control_vars, iter_type, expression = parse_for(comm_type)
eval_expression = evaluate_expression(expression, kwargs)[1]
iterval = []
if len(control_vars) == 2:
if not isinstance(eval_expression, dict):
raise exceptions.YamlSyntaxError('Can\'t expand {t} to two control variables.'.
format(t=type(eval_expression)))
else:
iterval = list(eval_expression.items())
elif isinstance(eval_expression, six.string_types):
if iter_type == 'word_in':
iterval = eval_expression.split()
else:
iterval = eval_expression
else:
iterval = eval_expression
return control_vars, iterval |
def build_sourcemap(sources):
"""
Similar to build_headermap(), but builds a dictionary of includes from
the "source" files (i.e. ".c/.cc" files).
"""
sourcemap = {}
for sfile in sources:
inc = find_includes(sfile)
sourcemap[sfile] = set(inc)
return sourcemap | Similar to build_headermap(), but builds a dictionary of includes from
the "source" files (i.e. ".c/.cc" files). | Below is the instruction that describes the task:
### Input:
Similar to build_headermap(), but builds a dictionary of includes from
the "source" files (i.e. ".c/.cc" files).
### Response:
def build_sourcemap(sources):
"""
Similar to build_headermap(), but builds a dictionary of includes from
the "source" files (i.e. ".c/.cc" files).
"""
sourcemap = {}
for sfile in sources:
inc = find_includes(sfile)
sourcemap[sfile] = set(inc)
return sourcemap |
def makeEndOfPrdvPfuncCond(self):
'''
Construct the end-of-period marginal value function conditional on next
period's state.
Parameters
----------
None
Returns
-------
EndofPrdvPfunc_cond : MargValueFunc
The end-of-period marginal value function conditional on a particular
state occuring in the succeeding period.
'''
# Get data to construct the end-of-period marginal value function (conditional on next state)
self.aNrm_cond = self.prepareToCalcEndOfPrdvP()
self.EndOfPrdvP_cond= self.calcEndOfPrdvPcond()
EndOfPrdvPnvrs_cond = self.uPinv(self.EndOfPrdvP_cond) # "decurved" marginal value
if self.CubicBool:
EndOfPrdvPP_cond = self.calcEndOfPrdvPP()
EndOfPrdvPnvrsP_cond = EndOfPrdvPP_cond*self.uPinvP(self.EndOfPrdvP_cond) # "decurved" marginal marginal value
# Construct the end-of-period marginal value function conditional on the next state.
if self.CubicBool:
EndOfPrdvPnvrsFunc_cond = CubicInterp(self.aNrm_cond,EndOfPrdvPnvrs_cond,
EndOfPrdvPnvrsP_cond,lower_extrap=True)
else:
EndOfPrdvPnvrsFunc_cond = LinearInterp(self.aNrm_cond,EndOfPrdvPnvrs_cond,
lower_extrap=True)
EndofPrdvPfunc_cond = MargValueFunc(EndOfPrdvPnvrsFunc_cond,self.CRRA) # "recurve" the interpolated marginal value function
return EndofPrdvPfunc_cond | Construct the end-of-period marginal value function conditional on next
period's state.
Parameters
----------
None
Returns
-------
EndofPrdvPfunc_cond : MargValueFunc
The end-of-period marginal value function conditional on a particular
state occuring in the succeeding period. | Below is the instruction that describes the task:
### Input:
Construct the end-of-period marginal value function conditional on next
period's state.
Parameters
----------
None
Returns
-------
EndofPrdvPfunc_cond : MargValueFunc
The end-of-period marginal value function conditional on a particular
state occuring in the succeeding period.
### Response:
def makeEndOfPrdvPfuncCond(self):
'''
Construct the end-of-period marginal value function conditional on next
period's state.
Parameters
----------
None
Returns
-------
EndofPrdvPfunc_cond : MargValueFunc
The end-of-period marginal value function conditional on a particular
state occuring in the succeeding period.
'''
# Get data to construct the end-of-period marginal value function (conditional on next state)
self.aNrm_cond = self.prepareToCalcEndOfPrdvP()
self.EndOfPrdvP_cond= self.calcEndOfPrdvPcond()
EndOfPrdvPnvrs_cond = self.uPinv(self.EndOfPrdvP_cond) # "decurved" marginal value
if self.CubicBool:
EndOfPrdvPP_cond = self.calcEndOfPrdvPP()
EndOfPrdvPnvrsP_cond = EndOfPrdvPP_cond*self.uPinvP(self.EndOfPrdvP_cond) # "decurved" marginal marginal value
# Construct the end-of-period marginal value function conditional on the next state.
if self.CubicBool:
EndOfPrdvPnvrsFunc_cond = CubicInterp(self.aNrm_cond,EndOfPrdvPnvrs_cond,
EndOfPrdvPnvrsP_cond,lower_extrap=True)
else:
EndOfPrdvPnvrsFunc_cond = LinearInterp(self.aNrm_cond,EndOfPrdvPnvrs_cond,
lower_extrap=True)
EndofPrdvPfunc_cond = MargValueFunc(EndOfPrdvPnvrsFunc_cond,self.CRRA) # "recurve" the interpolated marginal value function
return EndofPrdvPfunc_cond |
def set_disk_timeout(timeout, power='ac', scheme=None):
'''
Set the disk timeout in minutes for the given power scheme
Args:
timeout (int):
The amount of time in minutes before the disk will timeout
power (str):
Set the value for AC or DC power. Default is ``ac``. Valid options
are:
- ``ac`` (AC Power)
- ``dc`` (Battery)
scheme (str):
The scheme to use, leave as ``None`` to use the current. Default is
``None``. This can be the GUID or the Alias for the Scheme. Known
Aliases are:
- ``SCHEME_BALANCED`` - Balanced
- ``SCHEME_MAX`` - Power saver
- ``SCHEME_MIN`` - High performance
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
# Sets the disk timeout to 30 minutes on battery
salt '*' powercfg.set_disk_timeout 30 power=dc
'''
return _set_powercfg_value(
scheme=scheme,
sub_group='SUB_DISK',
setting_guid='DISKIDLE',
power=power,
value=timeout) | Set the disk timeout in minutes for the given power scheme
Args:
timeout (int):
The amount of time in minutes before the disk will timeout
power (str):
Set the value for AC or DC power. Default is ``ac``. Valid options
are:
- ``ac`` (AC Power)
- ``dc`` (Battery)
scheme (str):
The scheme to use, leave as ``None`` to use the current. Default is
``None``. This can be the GUID or the Alias for the Scheme. Known
Aliases are:
- ``SCHEME_BALANCED`` - Balanced
- ``SCHEME_MAX`` - Power saver
- ``SCHEME_MIN`` - High performance
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
# Sets the disk timeout to 30 minutes on battery
salt '*' powercfg.set_disk_timeout 30 power=dc | Below is the instruction that describes the task:
### Input:
Set the disk timeout in minutes for the given power scheme
Args:
timeout (int):
The amount of time in minutes before the disk will timeout
power (str):
Set the value for AC or DC power. Default is ``ac``. Valid options
are:
- ``ac`` (AC Power)
- ``dc`` (Battery)
scheme (str):
The scheme to use, leave as ``None`` to use the current. Default is
``None``. This can be the GUID or the Alias for the Scheme. Known
Aliases are:
- ``SCHEME_BALANCED`` - Balanced
- ``SCHEME_MAX`` - Power saver
- ``SCHEME_MIN`` - High performance
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
# Sets the disk timeout to 30 minutes on battery
salt '*' powercfg.set_disk_timeout 30 power=dc
### Response:
def set_disk_timeout(timeout, power='ac', scheme=None):
'''
Set the disk timeout in minutes for the given power scheme
Args:
timeout (int):
The amount of time in minutes before the disk will timeout
power (str):
Set the value for AC or DC power. Default is ``ac``. Valid options
are:
- ``ac`` (AC Power)
- ``dc`` (Battery)
scheme (str):
The scheme to use, leave as ``None`` to use the current. Default is
``None``. This can be the GUID or the Alias for the Scheme. Known
Aliases are:
- ``SCHEME_BALANCED`` - Balanced
- ``SCHEME_MAX`` - Power saver
- ``SCHEME_MIN`` - High performance
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
# Sets the disk timeout to 30 minutes on battery
salt '*' powercfg.set_disk_timeout 30 power=dc
'''
return _set_powercfg_value(
scheme=scheme,
sub_group='SUB_DISK',
setting_guid='DISKIDLE',
power=power,
value=timeout) |
def _paload8(ins):
''' Loads an 8 bit value from a memory address
If 2nd arg. start with '*', it is always treated as
an indirect value.
'''
output = _paddr(ins.quad[2])
output.append('ld a, (hl)')
output.append('push af')
return output | Loads an 8 bit value from a memory address
If 2nd arg. start with '*', it is always treated as
an indirect value. | Below is the instruction that describes the task:
### Input:
Loads an 8 bit value from a memory address
If 2nd arg. start with '*', it is always treated as
an indirect value.
### Response:
def _paload8(ins):
''' Loads an 8 bit value from a memory address
If 2nd arg. start with '*', it is always treated as
an indirect value.
'''
output = _paddr(ins.quad[2])
output.append('ld a, (hl)')
output.append('push af')
return output |
def _check_action(action):
"""check for invalid actions"""
if isinstance(action, types.StringTypes):
action = action.lower()
if action not in ['learn', 'forget', 'report', 'revoke']:
raise SpamCError('The action option is invalid')
return action | check for invalid actions | Below is the instruction that describes the task:
### Input:
check for invalid actions
### Response:
def _check_action(action):
"""check for invalid actions"""
if isinstance(action, types.StringTypes):
action = action.lower()
if action not in ['learn', 'forget', 'report', 'revoke']:
raise SpamCError('The action option is invalid')
return action |
def tweets_default(*args):
"""
Tweets for the default settings.
"""
query_type = settings.TWITTER_DEFAULT_QUERY_TYPE
args = (settings.TWITTER_DEFAULT_QUERY,
settings.TWITTER_DEFAULT_NUM_TWEETS)
per_user = None
if query_type == QUERY_TYPE_LIST:
per_user = 1
return tweets_for(query_type, args, per_user=per_user) | Tweets for the default settings. | Below is the instruction that describes the task:
### Input:
Tweets for the default settings.
### Response:
def tweets_default(*args):
"""
Tweets for the default settings.
"""
query_type = settings.TWITTER_DEFAULT_QUERY_TYPE
args = (settings.TWITTER_DEFAULT_QUERY,
settings.TWITTER_DEFAULT_NUM_TWEETS)
per_user = None
if query_type == QUERY_TYPE_LIST:
per_user = 1
return tweets_for(query_type, args, per_user=per_user) |
def write_def_finish(self, node, buffered, filtered, cached,
callstack=True):
"""write the end section of a rendering function, either outermost or
inline.
this takes into account if the rendering function was filtered,
buffered, etc. and closes the corresponding try: block if any, and
writes code to retrieve captured content, apply filters, send proper
return value."""
if not buffered and not cached and not filtered:
self.printer.writeline("return ''")
if callstack:
self.printer.writelines(
"finally:",
"context.caller_stack._pop_frame()",
None
)
if buffered or filtered or cached:
if buffered or cached:
# in a caching scenario, don't try to get a writer
# from the context after popping; assume the caching
# implemenation might be using a context with no
# extra buffers
self.printer.writelines(
"finally:",
"__M_buf = context._pop_buffer()"
)
else:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()"
)
if callstack:
self.printer.writeline("context.caller_stack._pop_frame()")
s = "__M_buf.getvalue()"
if filtered:
s = self.create_filter_callable(node.filter_args.args, s,
False)
self.printer.writeline(None)
if buffered and not cached:
s = self.create_filter_callable(self.compiler.buffer_filters,
s, False)
if buffered or cached:
self.printer.writeline("return %s" % s)
else:
self.printer.writelines(
"__M_writer(%s)" % s,
"return ''"
) | write the end section of a rendering function, either outermost or
inline.
this takes into account if the rendering function was filtered,
buffered, etc. and closes the corresponding try: block if any, and
writes code to retrieve captured content, apply filters, send proper
return value. | Below is the instruction that describes the task:
### Input:
write the end section of a rendering function, either outermost or
inline.
this takes into account if the rendering function was filtered,
buffered, etc. and closes the corresponding try: block if any, and
writes code to retrieve captured content, apply filters, send proper
return value.
### Response:
def write_def_finish(self, node, buffered, filtered, cached,
callstack=True):
"""write the end section of a rendering function, either outermost or
inline.
this takes into account if the rendering function was filtered,
buffered, etc. and closes the corresponding try: block if any, and
writes code to retrieve captured content, apply filters, send proper
return value."""
if not buffered and not cached and not filtered:
self.printer.writeline("return ''")
if callstack:
self.printer.writelines(
"finally:",
"context.caller_stack._pop_frame()",
None
)
if buffered or filtered or cached:
if buffered or cached:
# in a caching scenario, don't try to get a writer
# from the context after popping; assume the caching
# implemenation might be using a context with no
# extra buffers
self.printer.writelines(
"finally:",
"__M_buf = context._pop_buffer()"
)
else:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()"
)
if callstack:
self.printer.writeline("context.caller_stack._pop_frame()")
s = "__M_buf.getvalue()"
if filtered:
s = self.create_filter_callable(node.filter_args.args, s,
False)
self.printer.writeline(None)
if buffered and not cached:
s = self.create_filter_callable(self.compiler.buffer_filters,
s, False)
if buffered or cached:
self.printer.writeline("return %s" % s)
else:
self.printer.writelines(
"__M_writer(%s)" % s,
"return ''"
) |
def setCovariance(self,cov):
""" set hyperparameters from given covariance """
chol = LA.cholesky(cov,lower=True)
params = chol[sp.tril_indices(self.dim)]
self.setParams(params) | set hyperparameters from given covariance | Below is the instruction that describes the task:
### Input:
set hyperparameters from given covariance
### Response:
def setCovariance(self,cov):
""" set hyperparameters from given covariance """
chol = LA.cholesky(cov,lower=True)
params = chol[sp.tril_indices(self.dim)]
self.setParams(params) |
def execute(self):
"""Run selected module generator."""
if self._cli_arguments['cfn']:
generate_sample_cfn_module(self.env_root)
elif self._cli_arguments['sls']:
generate_sample_sls_module(self.env_root)
elif self._cli_arguments['sls-tsc']:
generate_sample_sls_tsc_module(self.env_root)
elif self._cli_arguments['stacker']:
generate_sample_stacker_module(self.env_root)
elif self._cli_arguments['tf']:
generate_sample_tf_module(self.env_root)
elif self._cli_arguments['cdk-tsc']:
generate_sample_cdk_tsc_module(self.env_root)
elif self._cli_arguments['cdk-py']:
generate_sample_cdk_py_module(self.env_root)
elif self._cli_arguments['cdk-csharp']:
generate_sample_cdk_cs_module(self.env_root) | Run selected module generator. | Below is the instruction that describes the task:
### Input:
Run selected module generator.
### Response:
def execute(self):
"""Run selected module generator."""
if self._cli_arguments['cfn']:
generate_sample_cfn_module(self.env_root)
elif self._cli_arguments['sls']:
generate_sample_sls_module(self.env_root)
elif self._cli_arguments['sls-tsc']:
generate_sample_sls_tsc_module(self.env_root)
elif self._cli_arguments['stacker']:
generate_sample_stacker_module(self.env_root)
elif self._cli_arguments['tf']:
generate_sample_tf_module(self.env_root)
elif self._cli_arguments['cdk-tsc']:
generate_sample_cdk_tsc_module(self.env_root)
elif self._cli_arguments['cdk-py']:
generate_sample_cdk_py_module(self.env_root)
elif self._cli_arguments['cdk-csharp']:
generate_sample_cdk_cs_module(self.env_root) |
def source_range_slices(start, end, nr_var_dict):
"""
Given a range of source numbers, as well as a dictionary
containing the numbers of each source, returns a dictionary
containing slices for each source variable type.
"""
return OrderedDict((k, slice(s,e,1))
for k, (s, e)
in source_range_tuple(start, end, nr_var_dict).iteritems()) | Given a range of source numbers, as well as a dictionary
containing the numbers of each source, returns a dictionary
containing slices for each source variable type. | Below is the instruction that describes the task:
### Input:
Given a range of source numbers, as well as a dictionary
containing the numbers of each source, returns a dictionary
containing slices for each source variable type.
### Response:
def source_range_slices(start, end, nr_var_dict):
"""
Given a range of source numbers, as well as a dictionary
containing the numbers of each source, returns a dictionary
containing slices for each source variable type.
"""
return OrderedDict((k, slice(s,e,1))
for k, (s, e)
in source_range_tuple(start, end, nr_var_dict).iteritems()) |
def _parse_expr(text, ldelim="(", rdelim=")"):
"""Parse mathematical expression using PyParsing."""
var = pyparsing.Word(pyparsing.alphas + "_", pyparsing.alphanums + "_")
point = pyparsing.Literal(".")
exp = pyparsing.CaselessLiteral("E")
number = pyparsing.Combine(
pyparsing.Word("+-" + pyparsing.nums, pyparsing.nums)
+ pyparsing.Optional(point + pyparsing.Optional(pyparsing.Word(pyparsing.nums)))
+ pyparsing.Optional(
exp + pyparsing.Word("+-" + pyparsing.nums, pyparsing.nums)
)
)
atom = var | number
oplist = [
(pyparsing.Literal("**"), 2, pyparsing.opAssoc.RIGHT),
(pyparsing.oneOf("+ - ~"), 1, pyparsing.opAssoc.RIGHT),
(pyparsing.oneOf("* / // %"), 2, pyparsing.opAssoc.LEFT),
(pyparsing.oneOf("+ -"), 2, pyparsing.opAssoc.LEFT),
(pyparsing.oneOf("<< >>"), 2, pyparsing.opAssoc.LEFT),
(pyparsing.Literal("&"), 2, pyparsing.opAssoc.LEFT),
(pyparsing.Literal("^"), 2, pyparsing.opAssoc.LEFT),
(pyparsing.Literal("|"), 2, pyparsing.opAssoc.LEFT),
]
# Get functions
expr = pyparsing.infixNotation(
atom, oplist, lpar=pyparsing.Suppress(ldelim), rpar=pyparsing.Suppress(rdelim)
)
return expr.parseString(text)[0] | Parse mathematical expression using PyParsing. | Below is the instruction that describes the task:
### Input:
Parse mathematical expression using PyParsing.
### Response:
def _parse_expr(text, ldelim="(", rdelim=")"):
"""Parse mathematical expression using PyParsing."""
var = pyparsing.Word(pyparsing.alphas + "_", pyparsing.alphanums + "_")
point = pyparsing.Literal(".")
exp = pyparsing.CaselessLiteral("E")
number = pyparsing.Combine(
pyparsing.Word("+-" + pyparsing.nums, pyparsing.nums)
+ pyparsing.Optional(point + pyparsing.Optional(pyparsing.Word(pyparsing.nums)))
+ pyparsing.Optional(
exp + pyparsing.Word("+-" + pyparsing.nums, pyparsing.nums)
)
)
atom = var | number
oplist = [
(pyparsing.Literal("**"), 2, pyparsing.opAssoc.RIGHT),
(pyparsing.oneOf("+ - ~"), 1, pyparsing.opAssoc.RIGHT),
(pyparsing.oneOf("* / // %"), 2, pyparsing.opAssoc.LEFT),
(pyparsing.oneOf("+ -"), 2, pyparsing.opAssoc.LEFT),
(pyparsing.oneOf("<< >>"), 2, pyparsing.opAssoc.LEFT),
(pyparsing.Literal("&"), 2, pyparsing.opAssoc.LEFT),
(pyparsing.Literal("^"), 2, pyparsing.opAssoc.LEFT),
(pyparsing.Literal("|"), 2, pyparsing.opAssoc.LEFT),
]
# Get functions
expr = pyparsing.infixNotation(
atom, oplist, lpar=pyparsing.Suppress(ldelim), rpar=pyparsing.Suppress(rdelim)
)
return expr.parseString(text)[0] |
def apply_units_to_cache(self, data):
"""
Apply units to :class:`ParameterizedXLS` data reader.
"""
# parameter
parameter_name = self.parameters['parameter']['name']
parameter_units = str(self.parameters['parameter']['units'])
data[parameter_name] *= UREG(parameter_units)
# data
self.parameters.pop('parameter')
return super(ParameterizedXLS, self).apply_units_to_cache(data) | Apply units to :class:`ParameterizedXLS` data reader. | Below is the instruction that describes the task:
### Input:
Apply units to :class:`ParameterizedXLS` data reader.
### Response:
def apply_units_to_cache(self, data):
"""
Apply units to :class:`ParameterizedXLS` data reader.
"""
# parameter
parameter_name = self.parameters['parameter']['name']
parameter_units = str(self.parameters['parameter']['units'])
data[parameter_name] *= UREG(parameter_units)
# data
self.parameters.pop('parameter')
return super(ParameterizedXLS, self).apply_units_to_cache(data) |
def job_from_resource(self, resource):
"""Detect correct job type from resource and instantiate.
:type resource: dict
:param resource: one job resource from API response
:rtype: One of:
:class:`google.cloud.bigquery.job.LoadJob`,
:class:`google.cloud.bigquery.job.CopyJob`,
:class:`google.cloud.bigquery.job.ExtractJob`,
or :class:`google.cloud.bigquery.job.QueryJob`
:returns: the job instance, constructed via the resource
"""
config = resource.get("configuration", {})
if "load" in config:
return job.LoadJob.from_api_repr(resource, self)
elif "copy" in config:
return job.CopyJob.from_api_repr(resource, self)
elif "extract" in config:
return job.ExtractJob.from_api_repr(resource, self)
elif "query" in config:
return job.QueryJob.from_api_repr(resource, self)
return job.UnknownJob.from_api_repr(resource, self) | Detect correct job type from resource and instantiate.
:type resource: dict
:param resource: one job resource from API response
:rtype: One of:
:class:`google.cloud.bigquery.job.LoadJob`,
:class:`google.cloud.bigquery.job.CopyJob`,
:class:`google.cloud.bigquery.job.ExtractJob`,
or :class:`google.cloud.bigquery.job.QueryJob`
:returns: the job instance, constructed via the resource | Below is the instruction that describes the task:
### Input:
Detect correct job type from resource and instantiate.
:type resource: dict
:param resource: one job resource from API response
:rtype: One of:
:class:`google.cloud.bigquery.job.LoadJob`,
:class:`google.cloud.bigquery.job.CopyJob`,
:class:`google.cloud.bigquery.job.ExtractJob`,
or :class:`google.cloud.bigquery.job.QueryJob`
:returns: the job instance, constructed via the resource
### Response:
def job_from_resource(self, resource):
"""Detect correct job type from resource and instantiate.
:type resource: dict
:param resource: one job resource from API response
:rtype: One of:
:class:`google.cloud.bigquery.job.LoadJob`,
:class:`google.cloud.bigquery.job.CopyJob`,
:class:`google.cloud.bigquery.job.ExtractJob`,
or :class:`google.cloud.bigquery.job.QueryJob`
:returns: the job instance, constructed via the resource
"""
config = resource.get("configuration", {})
if "load" in config:
return job.LoadJob.from_api_repr(resource, self)
elif "copy" in config:
return job.CopyJob.from_api_repr(resource, self)
elif "extract" in config:
return job.ExtractJob.from_api_repr(resource, self)
elif "query" in config:
return job.QueryJob.from_api_repr(resource, self)
return job.UnknownJob.from_api_repr(resource, self) |
def upsert(cls, name, **fields):
"""Insert or update an instance"""
instance = cls.get(name)
if instance:
instance._set_fields(fields)
else:
instance = cls(name=name, **fields)
instance = failsafe_add(cls.query.session, instance, name=name)
return instance | Insert or update an instance | Below is the instruction that describes the task:
### Input:
Insert or update an instance
### Response:
def upsert(cls, name, **fields):
"""Insert or update an instance"""
instance = cls.get(name)
if instance:
instance._set_fields(fields)
else:
instance = cls(name=name, **fields)
instance = failsafe_add(cls.query.session, instance, name=name)
return instance |
def initialize(self, request):
"""Store the data we'll need to make the postback from the request object."""
if request.method == 'GET':
# PDT only - this data is currently unused
self.query = request.META.get('QUERY_STRING', '')
elif request.method == 'POST':
# The following works if paypal sends an ASCII bytestring, which it does.
self.query = request.body.decode('ascii')
self.ipaddress = request.META.get('REMOTE_ADDR', '') | Store the data we'll need to make the postback from the request object. | Below is the instruction that describes the task:
### Input:
Store the data we'll need to make the postback from the request object.
### Response:
def initialize(self, request):
"""Store the data we'll need to make the postback from the request object."""
if request.method == 'GET':
# PDT only - this data is currently unused
self.query = request.META.get('QUERY_STRING', '')
elif request.method == 'POST':
# The following works if paypal sends an ASCII bytestring, which it does.
self.query = request.body.decode('ascii')
self.ipaddress = request.META.get('REMOTE_ADDR', '') |
def calculate_checksum_on_bytes(
b, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM
):
"""Calculate the checksum of ``bytes``.
Warning: This method requires the entire object to be buffered in (virtual) memory,
which should normally be avoided in production code.
Args:
b: bytes
Raw bytes
algorithm: str
Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.
Returns:
str : Checksum as a hexadecimal string, with length decided by the algorithm.
"""
checksum_calc = get_checksum_calculator_by_dataone_designator(algorithm)
checksum_calc.update(b)
return checksum_calc.hexdigest() | Calculate the checksum of ``bytes``.
Warning: This method requires the entire object to be buffered in (virtual) memory,
which should normally be avoided in production code.
Args:
b: bytes
Raw bytes
algorithm: str
Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.
Returns:
str : Checksum as a hexadecimal string, with length decided by the algorithm. | Below is the instruction that describes the task:
### Input:
Calculate the checksum of ``bytes``.
Warning: This method requires the entire object to be buffered in (virtual) memory,
which should normally be avoided in production code.
Args:
b: bytes
Raw bytes
algorithm: str
Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.
Returns:
str : Checksum as a hexadecimal string, with length decided by the algorithm.
### Response:
def calculate_checksum_on_bytes(
b, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM
):
"""Calculate the checksum of ``bytes``.
Warning: This method requires the entire object to be buffered in (virtual) memory,
which should normally be avoided in production code.
Args:
b: bytes
Raw bytes
algorithm: str
Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.
Returns:
str : Checksum as a hexadecimal string, with length decided by the algorithm.
"""
checksum_calc = get_checksum_calculator_by_dataone_designator(algorithm)
checksum_calc.update(b)
return checksum_calc.hexdigest() |
def stash(self, path):
"""Stashes the directory or file and returns its new location.
"""
if os.path.isdir(path):
new_path = self._get_directory_stash(path)
else:
new_path = self._get_file_stash(path)
self._moves.append((path, new_path))
if os.path.isdir(path) and os.path.isdir(new_path):
# If we're moving a directory, we need to
# remove the destination first or else it will be
# moved to inside the existing directory.
# We just created new_path ourselves, so it will
# be removable.
os.rmdir(new_path)
renames(path, new_path)
return new_path | Stashes the directory or file and returns its new location. | Below is the instruction that describes the task:
### Input:
Stashes the directory or file and returns its new location.
### Response:
def stash(self, path):
"""Stashes the directory or file and returns its new location.
"""
if os.path.isdir(path):
new_path = self._get_directory_stash(path)
else:
new_path = self._get_file_stash(path)
self._moves.append((path, new_path))
if os.path.isdir(path) and os.path.isdir(new_path):
# If we're moving a directory, we need to
# remove the destination first or else it will be
# moved to inside the existing directory.
# We just created new_path ourselves, so it will
# be removable.
os.rmdir(new_path)
renames(path, new_path)
return new_path |
def getOperationNameForId(i: int):
""" Convert an operation id into the corresponding string
"""
assert isinstance(i, (int)), "This method expects an integer argument"
for key in operations:
if int(operations[key]) is int(i):
return key
raise ValueError("Unknown Operation ID %d" % i) | Convert an operation id into the corresponding string | Below is the instruction that describes the task:
### Input:
Convert an operation id into the corresponding string
### Response:
def getOperationNameForId(i: int):
""" Convert an operation id into the corresponding string
"""
assert isinstance(i, (int)), "This method expects an integer argument"
for key in operations:
if int(operations[key]) is int(i):
return key
raise ValueError("Unknown Operation ID %d" % i) |
def unpack_to_nibbles(bindata):
"""unpack packed binary data to nibbles
:param bindata: binary packed from nibbles
:return: nibbles sequence, may have a terminator
"""
o = bin_to_nibbles(bindata)
flags = o[0]
if flags & 2:
o.append(NIBBLE_TERMINATOR)
if flags & 1 == 1:
o = o[1:]
else:
o = o[2:]
return o | unpack packed binary data to nibbles
:param bindata: binary packed from nibbles
:return: nibbles sequence, may have a terminator | Below is the instruction that describes the task:
### Input:
unpack packed binary data to nibbles
:param bindata: binary packed from nibbles
:return: nibbles sequence, may have a terminator
### Response:
def unpack_to_nibbles(bindata):
"""unpack packed binary data to nibbles
:param bindata: binary packed from nibbles
:return: nibbles sequence, may have a terminator
"""
o = bin_to_nibbles(bindata)
flags = o[0]
if flags & 2:
o.append(NIBBLE_TERMINATOR)
if flags & 1 == 1:
o = o[1:]
else:
o = o[2:]
return o |
def _copy(self):
"""Creates a deep copy of this request."""
copied_uri = Uri(self.uri.scheme, self.uri.host, self.uri.port,
self.uri.path, self.uri.query.copy())
new_request = HttpRequest(uri=copied_uri, method=self.method,
headers=self.headers.copy())
new_request._body_parts = self._body_parts[:]
return new_request | Creates a deep copy of this request. | Below is the instruction that describes the task:
### Input:
Creates a deep copy of this request.
### Response:
def _copy(self):
"""Creates a deep copy of this request."""
copied_uri = Uri(self.uri.scheme, self.uri.host, self.uri.port,
self.uri.path, self.uri.query.copy())
new_request = HttpRequest(uri=copied_uri, method=self.method,
headers=self.headers.copy())
new_request._body_parts = self._body_parts[:]
return new_request |
def Draw(self, *args, **kwargs):
"""
Parameters
----------
args : positional arguments
Positional arguments are passed directly to ROOT's Draw
kwargs : keyword arguments
If keyword arguments are present, then a clone is drawn instead
with DrawCopy, where the name, title, and style attributes are
taken from ``kwargs``.
Returns
-------
If ``kwargs`` is not empty and a clone is drawn, then the clone is
returned, otherwise None is returned.
"""
if kwargs:
return self.DrawCopy(*args, **kwargs)
pad = ROOT.gPad
own_pad = False
if not pad:
# avoid circular import by delaying import until needed here
from .canvas import Canvas
pad = Canvas()
own_pad = True
if self.visible:
if self.drawstyle:
self.__class__.__bases__[-1].Draw(self,
" ".join((self.drawstyle, ) + args))
else:
self.__class__.__bases__[-1].Draw(self, " ".join(args))
pad.Modified()
pad.Update()
if own_pad:
keepalive(self, pad) | Parameters
----------
args : positional arguments
Positional arguments are passed directly to ROOT's Draw
kwargs : keyword arguments
If keyword arguments are present, then a clone is drawn instead
with DrawCopy, where the name, title, and style attributes are
taken from ``kwargs``.
Returns
-------
If ``kwargs`` is not empty and a clone is drawn, then the clone is
returned, otherwise None is returned. | Below is the instruction that describes the task:
### Input:
Parameters
----------
args : positional arguments
Positional arguments are passed directly to ROOT's Draw
kwargs : keyword arguments
If keyword arguments are present, then a clone is drawn instead
with DrawCopy, where the name, title, and style attributes are
taken from ``kwargs``.
Returns
-------
If ``kwargs`` is not empty and a clone is drawn, then the clone is
returned, otherwise None is returned.
### Response:
def Draw(self, *args, **kwargs):
"""
Parameters
----------
args : positional arguments
Positional arguments are passed directly to ROOT's Draw
kwargs : keyword arguments
If keyword arguments are present, then a clone is drawn instead
with DrawCopy, where the name, title, and style attributes are
taken from ``kwargs``.
Returns
-------
If ``kwargs`` is not empty and a clone is drawn, then the clone is
returned, otherwise None is returned.
"""
if kwargs:
return self.DrawCopy(*args, **kwargs)
pad = ROOT.gPad
own_pad = False
if not pad:
# avoid circular import by delaying import until needed here
from .canvas import Canvas
pad = Canvas()
own_pad = True
if self.visible:
if self.drawstyle:
self.__class__.__bases__[-1].Draw(self,
" ".join((self.drawstyle, ) + args))
else:
self.__class__.__bases__[-1].Draw(self, " ".join(args))
pad.Modified()
pad.Update()
if own_pad:
keepalive(self, pad) |
def load_characters(self):
"""Fetches the MAL media characters page and sets the current media's character attributes.
:rtype: :class:`.Media`
:return: current media object.
"""
characters_page = self.session.session.get(u'http://myanimelist.net/' + self.__class__.__name__.lower() + u'/' + str(self.id) + u'/' + utilities.urlencode(self.title) + u'/characters').text
self.set(self.parse_characters(utilities.get_clean_dom(characters_page)))
return self | Fetches the MAL media characters page and sets the current media's character attributes.
:rtype: :class:`.Media`
:return: current media object. | Below is the instruction that describes the task:
### Input:
Fetches the MAL media characters page and sets the current media's character attributes.
:rtype: :class:`.Media`
:return: current media object.
### Response:
def load_characters(self):
"""Fetches the MAL media characters page and sets the current media's character attributes.
:rtype: :class:`.Media`
:return: current media object.
"""
characters_page = self.session.session.get(u'http://myanimelist.net/' + self.__class__.__name__.lower() + u'/' + str(self.id) + u'/' + utilities.urlencode(self.title) + u'/characters').text
self.set(self.parse_characters(utilities.get_clean_dom(characters_page)))
return self |
def write(self, text, flush=False, error=False, prompt=False):
"""Simulate stdout and stderr"""
if prompt:
self.flush()
if not is_string(text):
# This test is useful to discriminate QStrings from decoded str
text = to_text_string(text)
self.__buffer.append(text)
ts = time.time()
if flush or prompt:
self.flush(error=error, prompt=prompt)
elif ts - self.__timestamp > 0.05:
self.flush(error=error)
self.__timestamp = ts
# Timer to flush strings cached by last write() operation in series
self.__flushtimer.start(50) | Simulate stdout and stderr | Below is the instruction that describes the task:
### Input:
Simulate stdout and stderr
### Response:
def write(self, text, flush=False, error=False, prompt=False):
"""Simulate stdout and stderr"""
if prompt:
self.flush()
if not is_string(text):
# This test is useful to discriminate QStrings from decoded str
text = to_text_string(text)
self.__buffer.append(text)
ts = time.time()
if flush or prompt:
self.flush(error=error, prompt=prompt)
elif ts - self.__timestamp > 0.05:
self.flush(error=error)
self.__timestamp = ts
# Timer to flush strings cached by last write() operation in series
self.__flushtimer.start(50) |
def up(tgt='*', tgt_type='glob', timeout=None, gather_job_timeout=None): # pylint: disable=C0103
'''
.. versionchanged:: 2017.7.0
The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
releases must use ``expr_form``.
Print a list of all of the minions that are up
CLI Example:
.. code-block:: bash
salt-run manage.up
salt-run manage.up tgt="webservers" tgt_type="nodegroup"
salt-run manage.up timeout=5 gather_job_timeout=10
'''
ret = status(
output=False,
tgt=tgt,
tgt_type=tgt_type,
timeout=timeout,
gather_job_timeout=gather_job_timeout
).get('up', [])
return ret | .. versionchanged:: 2017.7.0
The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
releases must use ``expr_form``.
Print a list of all of the minions that are up
CLI Example:
.. code-block:: bash
salt-run manage.up
salt-run manage.up tgt="webservers" tgt_type="nodegroup"
salt-run manage.up timeout=5 gather_job_timeout=10 | Below is the instruction that describes the task:
### Input:
.. versionchanged:: 2017.7.0
The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
releases must use ``expr_form``.
Print a list of all of the minions that are up
CLI Example:
.. code-block:: bash
salt-run manage.up
salt-run manage.up tgt="webservers" tgt_type="nodegroup"
salt-run manage.up timeout=5 gather_job_timeout=10
### Response:
def up(tgt='*', tgt_type='glob', timeout=None, gather_job_timeout=None): # pylint: disable=C0103
'''
.. versionchanged:: 2017.7.0
The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
releases must use ``expr_form``.
Print a list of all of the minions that are up
CLI Example:
.. code-block:: bash
salt-run manage.up
salt-run manage.up tgt="webservers" tgt_type="nodegroup"
salt-run manage.up timeout=5 gather_job_timeout=10
'''
ret = status(
output=False,
tgt=tgt,
tgt_type=tgt_type,
timeout=timeout,
gather_job_timeout=gather_job_timeout
).get('up', [])
return ret |
def download_icon_font(icon_font, directory):
"""Download given (implemented) icon font into passed directory"""
try:
downloader = AVAILABLE_ICON_FONTS[icon_font]['downloader'](directory)
downloader.download_files()
return downloader
except KeyError: # pragma: no cover
raise Exception("We don't support downloading font '{name}'".format(
name=icon_font)
) | Download given (implemented) icon font into passed directory | Below is the instruction that describes the task:
### Input:
Download given (implemented) icon font into passed directory
### Response:
def download_icon_font(icon_font, directory):
"""Download given (implemented) icon font into passed directory"""
try:
downloader = AVAILABLE_ICON_FONTS[icon_font]['downloader'](directory)
downloader.download_files()
return downloader
except KeyError: # pragma: no cover
raise Exception("We don't support downloading font '{name}'".format(
name=icon_font)
) |
def current_kv_names(self):
"""Return set of string names of current available Splunk KV collections"""
return current_kv_names(self.sci, self.username, self.appname, request=self._request) | Return set of string names of current available Splunk KV collections | Below is the instruction that describes the task:
### Input:
Return set of string names of current available Splunk KV collections
### Response:
def current_kv_names(self):
"""Return set of string names of current available Splunk KV collections"""
return current_kv_names(self.sci, self.username, self.appname, request=self._request) |
def personal_unlockAccount(self, address, passphrase=None, duration=None):
"""https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_unlockaccount
:param address: Account address
:type address: str
:param passphrase: Passphrase of account (optional)
:type passphrase: str
:param duration: Duration to be unlocked (optional)
:type duration: int
:rtype: bool
"""
return (yield from self.rpc_call('personal_unlockAccount',
[address, passphrase, duration])) | https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_unlockaccount
:param address: Account address
:type address: str
:param passphrase: Passphrase of account (optional)
:type passphrase: str
:param duration: Duration to be unlocked (optional)
:type duration: int
:rtype: bool | Below is the instruction that describes the task:
### Input:
https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_unlockaccount
:param address: Account address
:type address: str
:param passphrase: Passphrase of account (optional)
:type passphrase: str
:param duration: Duration to be unlocked (optional)
:type duration: int
:rtype: bool
### Response:
def personal_unlockAccount(self, address, passphrase=None, duration=None):
"""https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_unlockaccount
:param address: Account address
:type address: str
:param passphrase: Passphrase of account (optional)
:type passphrase: str
:param duration: Duration to be unlocked (optional)
:type duration: int
:rtype: bool
"""
return (yield from self.rpc_call('personal_unlockAccount',
[address, passphrase, duration])) |
def sort_by_name(names):
"""Sort by last name, uniquely."""
def last_name_key(full_name):
parts = full_name.split(' ')
if len(parts) == 1:
return full_name.upper()
last_first = parts[-1] + ' ' + ' '.join(parts[:-1])
return last_first.upper()
return sorted(set(names), key=last_name_key) | Sort by last name, uniquely. | Below is the instruction that describes the task:
### Input:
Sort by last name, uniquely.
### Response:
def sort_by_name(names):
"""Sort by last name, uniquely."""
def last_name_key(full_name):
parts = full_name.split(' ')
if len(parts) == 1:
return full_name.upper()
last_first = parts[-1] + ' ' + ' '.join(parts[:-1])
return last_first.upper()
return sorted(set(names), key=last_name_key) |
def viewport(self) -> Tuple[int, int, int, int]:
'''
tuple: The viewport of the framebuffer.
'''
return self.mglo.viewport | tuple: The viewport of the framebuffer. | Below is the instruction that describes the task:
### Input:
tuple: The viewport of the framebuffer.
### Response:
def viewport(self) -> Tuple[int, int, int, int]:
'''
tuple: The viewport of the framebuffer.
'''
return self.mglo.viewport |
def map(self, callable):
""" Apply 'callable' function over all values. """
for k,v in self.iteritems():
self[k] = callable(v) | Apply 'callable' function over all values. | Below is the instruction that describes the task:
### Input:
Apply 'callable' function over all values.
### Response:
def map(self, callable):
""" Apply 'callable' function over all values. """
for k,v in self.iteritems():
self[k] = callable(v) |
def get_correctness(self, question_id):
"""get measure of correctness for the question"""
response = self.get_response(question_id)
if response.is_answered():
item = self._get_item(response.get_item_id())
return item.get_correctness_for_response(response)
raise errors.IllegalState() | get measure of correctness for the question | Below is the instruction that describes the task:
### Input:
get measure of correctness for the question
### Response:
def get_correctness(self, question_id):
"""get measure of correctness for the question"""
response = self.get_response(question_id)
if response.is_answered():
item = self._get_item(response.get_item_id())
return item.get_correctness_for_response(response)
raise errors.IllegalState() |
def _exists(self, path):
"""S3 directory is not S3Ojbect.
"""
if path.endswith('/'):
return True
return self.storage.exists(path) | S3 directory is not S3Ojbect. | Below is the instruction that describes the task:
### Input:
S3 directory is not S3Ojbect.
### Response:
def _exists(self, path):
"""S3 directory is not S3Ojbect.
"""
if path.endswith('/'):
return True
return self.storage.exists(path) |
def repositories(self):
"""Get dependencies by repositories
"""
if self.repo == "sbo":
self.sbo_case_insensitive()
self.find_pkg = sbo_search_pkg(self.name)
if self.find_pkg:
self.dependencies_list = Requires(self.flag).sbo(self.name)
else:
PACKAGES_TXT = Utils().read_file(
self.meta.lib_path + "{0}_repo/PACKAGES.TXT".format(self.repo))
self.names = Utils().package_name(PACKAGES_TXT)
self.bin_case_insensitive()
self.find_pkg = search_pkg(self.name, self.repo)
if self.find_pkg:
self.black = BlackList().packages(self.names, self.repo)
self.dependencies_list = Dependencies(
self.repo, self.black).binary(self.name, self.flag) | Get dependencies by repositories | Below is the instruction that describes the task:
### Input:
Get dependencies by repositories
### Response:
def repositories(self):
"""Get dependencies by repositories
"""
if self.repo == "sbo":
self.sbo_case_insensitive()
self.find_pkg = sbo_search_pkg(self.name)
if self.find_pkg:
self.dependencies_list = Requires(self.flag).sbo(self.name)
else:
PACKAGES_TXT = Utils().read_file(
self.meta.lib_path + "{0}_repo/PACKAGES.TXT".format(self.repo))
self.names = Utils().package_name(PACKAGES_TXT)
self.bin_case_insensitive()
self.find_pkg = search_pkg(self.name, self.repo)
if self.find_pkg:
self.black = BlackList().packages(self.names, self.repo)
self.dependencies_list = Dependencies(
self.repo, self.black).binary(self.name, self.flag) |
def close(self):
"""
Stop the server.
"""
logger.info("Stop server")
self.stopped.set()
for event in self.to_be_stopped:
event.set()
self._socket.close() | Stop the server. | Below is the instruction that describes the task:
### Input:
Stop the server.
### Response:
def close(self):
"""
Stop the server.
"""
logger.info("Stop server")
self.stopped.set()
for event in self.to_be_stopped:
event.set()
self._socket.close() |
def prefilter_lines(self, lines, continue_prompt=False):
"""Prefilter multiple input lines of text.
This is the main entry point for prefiltering multiple lines of
input. This simply calls :meth:`prefilter_line` for each line of
input.
This covers cases where there are multiple lines in the user entry,
which is the case when the user goes back to a multiline history
entry and presses enter.
"""
llines = lines.rstrip('\n').split('\n')
# We can get multiple lines in one shot, where multiline input 'blends'
# into one line, in cases like recalling from the readline history
# buffer. We need to make sure that in such cases, we correctly
# communicate downstream which line is first and which are continuation
# ones.
if len(llines) > 1:
out = '\n'.join([self.prefilter_line(line, lnum>0)
for lnum, line in enumerate(llines) ])
else:
out = self.prefilter_line(llines[0], continue_prompt)
return out | Prefilter multiple input lines of text.
This is the main entry point for prefiltering multiple lines of
input. This simply calls :meth:`prefilter_line` for each line of
input.
This covers cases where there are multiple lines in the user entry,
which is the case when the user goes back to a multiline history
entry and presses enter. | Below is the instruction that describes the task:
### Input:
Prefilter multiple input lines of text.
This is the main entry point for prefiltering multiple lines of
input. This simply calls :meth:`prefilter_line` for each line of
input.
This covers cases where there are multiple lines in the user entry,
which is the case when the user goes back to a multiline history
entry and presses enter.
### Response:
def prefilter_lines(self, lines, continue_prompt=False):
"""Prefilter multiple input lines of text.
This is the main entry point for prefiltering multiple lines of
input. This simply calls :meth:`prefilter_line` for each line of
input.
This covers cases where there are multiple lines in the user entry,
which is the case when the user goes back to a multiline history
entry and presses enter.
"""
llines = lines.rstrip('\n').split('\n')
# We can get multiple lines in one shot, where multiline input 'blends'
# into one line, in cases like recalling from the readline history
# buffer. We need to make sure that in such cases, we correctly
# communicate downstream which line is first and which are continuation
# ones.
if len(llines) > 1:
out = '\n'.join([self.prefilter_line(line, lnum>0)
for lnum, line in enumerate(llines) ])
else:
out = self.prefilter_line(llines[0], continue_prompt)
return out |
def order_modified_volume(self, volume_id, new_size=None, new_iops=None, new_tier_level=None):
"""Places an order for modifying an existing block volume.
:param volume_id: The ID of the volume to be modified
:param new_size: The new size/capacity for the volume
:param new_iops: The new IOPS for the volume
:param new_tier_level: The new tier level for the volume
:return: Returns a SoftLayer_Container_Product_Order_Receipt
"""
mask_items = [
'id',
'billingItem',
'storageType[keyName]',
'capacityGb',
'provisionedIops',
'storageTierLevel',
'staasVersion',
'hasEncryptionAtRest',
]
block_mask = ','.join(mask_items)
volume = self.get_block_volume_details(volume_id, mask=block_mask)
order = storage_utils.prepare_modify_order_object(
self, volume, new_iops, new_tier_level, new_size
)
return self.client.call('Product_Order', 'placeOrder', order) | Places an order for modifying an existing block volume.
:param volume_id: The ID of the volume to be modified
:param new_size: The new size/capacity for the volume
:param new_iops: The new IOPS for the volume
:param new_tier_level: The new tier level for the volume
:return: Returns a SoftLayer_Container_Product_Order_Receipt | Below is the instruction that describes the task:
### Input:
Places an order for modifying an existing block volume.
:param volume_id: The ID of the volume to be modified
:param new_size: The new size/capacity for the volume
:param new_iops: The new IOPS for the volume
:param new_tier_level: The new tier level for the volume
:return: Returns a SoftLayer_Container_Product_Order_Receipt
### Response:
def order_modified_volume(self, volume_id, new_size=None, new_iops=None, new_tier_level=None):
"""Places an order for modifying an existing block volume.
:param volume_id: The ID of the volume to be modified
:param new_size: The new size/capacity for the volume
:param new_iops: The new IOPS for the volume
:param new_tier_level: The new tier level for the volume
:return: Returns a SoftLayer_Container_Product_Order_Receipt
"""
mask_items = [
'id',
'billingItem',
'storageType[keyName]',
'capacityGb',
'provisionedIops',
'storageTierLevel',
'staasVersion',
'hasEncryptionAtRest',
]
block_mask = ','.join(mask_items)
volume = self.get_block_volume_details(volume_id, mask=block_mask)
order = storage_utils.prepare_modify_order_object(
self, volume, new_iops, new_tier_level, new_size
)
return self.client.call('Product_Order', 'placeOrder', order) |
def export_throw_event_info(node_params, output_element):
"""
Adds EndEvent or IntermediateThrowingEvent attributes to exported XML element
:param node_params: dictionary with given intermediate throw event parameters,
:param output_element: object representing BPMN XML 'intermediateThrowEvent' element.
"""
definitions = node_params[consts.Consts.event_definitions]
for definition in definitions:
definition_id = definition[consts.Consts.id]
definition_type = definition[consts.Consts.definition_type]
output_definition = eTree.SubElement(output_element, definition_type)
if definition_id != "":
output_definition.set(consts.Consts.id, definition_id) | Adds EndEvent or IntermediateThrowingEvent attributes to exported XML element
:param node_params: dictionary with given intermediate throw event parameters,
:param output_element: object representing BPMN XML 'intermediateThrowEvent' element. | Below is the instruction that describes the task:
### Input:
Adds EndEvent or IntermediateThrowingEvent attributes to exported XML element
:param node_params: dictionary with given intermediate throw event parameters,
:param output_element: object representing BPMN XML 'intermediateThrowEvent' element.
### Response:
def export_throw_event_info(node_params, output_element):
"""
Adds EndEvent or IntermediateThrowingEvent attributes to exported XML element
:param node_params: dictionary with given intermediate throw event parameters,
:param output_element: object representing BPMN XML 'intermediateThrowEvent' element.
"""
definitions = node_params[consts.Consts.event_definitions]
for definition in definitions:
definition_id = definition[consts.Consts.id]
definition_type = definition[consts.Consts.definition_type]
output_definition = eTree.SubElement(output_element, definition_type)
if definition_id != "":
output_definition.set(consts.Consts.id, definition_id) |
def estimate_shift(x, y, smoother=None, w=None, index_and_value=False, ignore_edge=1/3.,
                   method='valid'):
    """Estimate the time shift between two signals based on their cross correlation
    Arguements:
    smoother:  Smoothing function applied to correlation values before finding peak
    w:  Window. Sequence of values between 0 and 1 for wind centered on 0-shift
    to weight correlation by before finding peak. Zero-padded to match width of
    larger of x and y. Default = hanning(max(len(x, y)))
    Returns:
    int: number to subtract from an x index to compute a corresponding y index
    >>> x, y = np.asarray(np.matrix([[0.5, 0.01], [0.01, 1.0]]) * np.random.randn(50,2).T)
    >>> x[:30-8] = y[8:30]
    >> estimate_shift(x, y, 'full')
    -8
    >> estimate_shift(x, y, 'valid')
    -8
    >> estimate_shift(y, x, 'full') in [8, 9]
    True
    >> estimate_shift(y, x, 'full') in [8, 9]
    True
    >> estimate_shift(y, x, 'full') in [8, 9]
    True
    """
    # FIX: the original *returned* the NotImplementedError instance, so callers
    # silently received an exception object instead of an error.  Raise it so the
    # unfinished state (FIXME on line 965) is explicit.  The implementation below
    # is intentionally left unreachable until that FIXME is resolved.
    raise NotImplementedError("On Line 965, FIXME: TypeError: object of type 'NoneType' has no len()")
    method = method or 'valid'
    try:
        x = x.dropna()
        x = x.values
    except:
        pass
    try:
        y = y.dropna()
        y = y.values
    except:
        pass
    if len(x) < len(y):
        swap, x, y = -1, y, x
    else:
        swap = +1
    Nx, Ny = len(x), len(y)
    if ignore_edge > 0:
        yi0 = int(max(Ny * ignore_edge, 1))
        yi1 = max(Ny - yi0 - 1, 0)
        # ignore a large portion of the data in the shorter vector
        y = y[yi0:yi1]
    x, y = x - x.mean(), y - y.mean()
    x, y = x / x.std(), y / y.std()
    c = np.correlate(x, y, mode=method)
    print(len(x))
    print(len(y))
    print(len(w))
    print(len(c))
    if w is not None:
        wc = int(np.ceil(len(w) / 2.)) - 1
        cc = int(np.ceil(len(c) / 2.)) - 1
        w0 = cc - wc
        print(w0)
        if w0 > 0:
            c[:w0], c[-w0:] = 0, 0
            c[w0:-w0] = w[:len(c[w0:-w0])] * c[w0:-w0]
        elif w0 == 0:
            if len(w) < len(c):
                w = np.append(w, 0)
            c = c * w[:len(c)]
        elif w0 < 0:
            w0 = abs(w0)
            w = w[w0:-w0]
            c[w0:-w0] = w[:len(c[w0:-w0])] * c[w0:-w0]
    try:
        c = smoother(c)
    except:
        pass
    offset = imax = c.argmax()
    offset = offset - yi0
    if method == 'full':
        offset = imax - Nx + 1
    # elif method == 'valid':
    #     offset = imax - yi0
    elif method == 'same':
        raise NotImplementedError("Unsure what index value to report for a correlation maximum at i = {}"
                                  .format(imax))
    offset *= swap
    if index_and_value:
        return offset, c[imax]
    else:
        return offset
Arguements:
smoother: Smoothing function applied to correlation values before finding peak
w: Window. Sequence of values between 0 and 1 for wind centered on 0-shift
to weight correlation by before finding peak. Zero-padded to match width of
larger of x and y. Default = hanning(max(len(x, y)))
Returns:
int: number to subtract from an x index to compute a corresponding y index
>>> x, y = np.asarray(np.matrix([[0.5, 0.01], [0.01, 1.0]]) * np.random.randn(50,2).T)
>>> x[:30-8] = y[8:30]
>> estimate_shift(x, y, 'full')
-8
>> estimate_shift(x, y, 'valid')
-8
>> estimate_shift(y, x, 'full') in [8, 9]
True
>> estimate_shift(y, x, 'full') in [8, 9]
True
>> estimate_shift(y, x, 'full') in [8, 9]
True | Below is the the instruction that describes the task:
### Input:
Estimate the time shift between two signals based on their cross correlation
Arguements:
smoother: Smoothing function applied to correlation values before finding peak
w: Window. Sequence of values between 0 and 1 for wind centered on 0-shift
to weight correlation by before finding peak. Zero-padded to match width of
larger of x and y. Default = hanning(max(len(x, y)))
Returns:
int: number to subtract from an x index to compute a corresponding y index
>>> x, y = np.asarray(np.matrix([[0.5, 0.01], [0.01, 1.0]]) * np.random.randn(50,2).T)
>>> x[:30-8] = y[8:30]
>> estimate_shift(x, y, 'full')
-8
>> estimate_shift(x, y, 'valid')
-8
>> estimate_shift(y, x, 'full') in [8, 9]
True
>> estimate_shift(y, x, 'full') in [8, 9]
True
>> estimate_shift(y, x, 'full') in [8, 9]
True
### Response:
def estimate_shift(x, y, smoother=None, w=None, index_and_value=False, ignore_edge=1/3.,
method='valid'):
"""Estimate the time shift between two signals based on their cross correlation
Arguements:
smoother: Smoothing function applied to correlation values before finding peak
w: Window. Sequence of values between 0 and 1 for wind centered on 0-shift
to weight correlation by before finding peak. Zero-padded to match width of
larger of x and y. Default = hanning(max(len(x, y)))
Returns:
int: number to subtract from an x index to compute a corresponding y index
>>> x, y = np.asarray(np.matrix([[0.5, 0.01], [0.01, 1.0]]) * np.random.randn(50,2).T)
>>> x[:30-8] = y[8:30]
>> estimate_shift(x, y, 'full')
-8
>> estimate_shift(x, y, 'valid')
-8
>> estimate_shift(y, x, 'full') in [8, 9]
True
>> estimate_shift(y, x, 'full') in [8, 9]
True
>> estimate_shift(y, x, 'full') in [8, 9]
True
"""
return NotImplementedError("On Line 965, FIXME: TypeError: object of type 'NoneType' has no len()")
method = method or 'valid'
try:
x = x.dropna()
x = x.values
except:
pass
try:
y = y.dropna()
y = y.values
except:
pass
if len(x) < len(y):
swap, x, y = -1, y, x
else:
swap = +1
Nx, Ny = len(x), len(y)
if ignore_edge > 0:
yi0 = int(max(Ny * ignore_edge, 1))
yi1 = max(Ny - yi0 - 1, 0)
# ignore a large portion of the data in the shorter vector
y = y[yi0:yi1]
x, y = x - x.mean(), y - y.mean()
x, y = x / x.std(), y / y.std()
c = np.correlate(x, y, mode=method)
print(len(x))
print(len(y))
print(len(w))
print(len(c))
if w is not None:
wc = int(np.ceil(len(w) / 2.)) - 1
cc = int(np.ceil(len(c) / 2.)) - 1
w0 = cc - wc
print(w0)
if w0 > 0:
c[:w0], c[-w0:] = 0, 0
c[w0:-w0] = w[:len(c[w0:-w0])] * c[w0:-w0]
elif w0 == 0:
if len(w) < len(c):
w = np.append(w, 0)
c = c * w[:len(c)]
elif w0 < 0:
w0 = abs(w0)
w = w[w0:-w0]
c[w0:-w0] = w[:len(c[w0:-w0])] * c[w0:-w0]
try:
c = smoother(c)
except:
pass
offset = imax = c.argmax()
offset = offset - yi0
if method == 'full':
offset = imax - Nx + 1
# elif method == 'valid':
# offset = imax - yi0
elif method == 'same':
raise NotImplementedError("Unsure what index value to report for a correlation maximum at i = {}"
.format(imax))
offset *= swap
if index_and_value:
return offset, c[imax]
else:
return offset |
def sample(self, n_samples):
    """Generate specified `n_samples` of new data from model. `v~U[0,1],v~C^-1(u|v)`
    Args:
        n_samples: `int`, amount of samples to create.
    Returns:
        np.ndarray: Array of length `n_samples` with generated data from the model.
    """
    # Kendall's tau must lie in [-1, 1] for the copula to be well defined.
    if abs(self.tau) > 1:
        raise ValueError("The range for correlation measure is [-1,1].")
    # Draw v first, then c, to keep the RNG consumption order stable.
    uniform_v = np.random.uniform(0, 1, n_samples)
    uniform_c = np.random.uniform(0, 1, n_samples)
    sampled_u = self.percent_point(uniform_c, uniform_v)
    return np.column_stack((sampled_u, uniform_v))
Args:
n_samples: `int`, amount of samples to create.
Returns:
np.ndarray: Array of length `n_samples` with generated data from the model. | Below is the instruction that describes the task:
### Input:
Generate specified `n_samples` of new data from model. `v~U[0,1],v~C^-1(u|v)`
Args:
n_samples: `int`, amount of samples to create.
Returns:
np.ndarray: Array of length `n_samples` with generated data from the model.
### Response:
def sample(self, n_samples):
"""Generate specified `n_samples` of new data from model. `v~U[0,1],v~C^-1(u|v)`
Args:
n_samples: `int`, amount of samples to create.
Returns:
np.ndarray: Array of length `n_samples` with generated data from the model.
"""
if self.tau > 1 or self.tau < -1:
raise ValueError("The range for correlation measure is [-1,1].")
v = np.random.uniform(0, 1, n_samples)
c = np.random.uniform(0, 1, n_samples)
u = self.percent_point(c, v)
return np.column_stack((u, v)) |
def createStatus(self,
                 project_id, branch, sha, state, target_url=None,
                 description=None, context=None):
    """
    Post a commit status to GitLab.
    :param project_id: Project ID from GitLab
    :param branch: Branch name to create the status for.
    :param sha: Full sha to create the status for.
    :param state: one of the following 'pending', 'success', 'failed'
        or 'cancelled'.
    :param target_url: Target url to associate with this status.
    :param description: Short description of the status.
    :param context: Context of the result
    :return: A deferred with the result from GitLab.
    """
    payload = {'state': state, 'ref': branch}
    # Only include the optional fields the caller actually supplied.
    for key, value in (('description', description),
                       ('target_url', target_url),
                       ('name', context)):
        if value is not None:
            payload[key] = value
    endpoint = '/api/v4/projects/%d/statuses/%s' % (project_id, sha)
    return self._http.post(endpoint, json=payload)
:param branch: Branch name to create the status for.
:param sha: Full sha to create the status for.
:param state: one of the following 'pending', 'success', 'failed'
or 'cancelled'.
:param target_url: Target url to associate with this status.
:param description: Short description of the status.
:param context: Context of the result
:return: A deferred with the result from GitLab. | Below is the instruction that describes the task:
### Input:
:param project_id: Project ID from GitLab
:param branch: Branch name to create the status for.
:param sha: Full sha to create the status for.
:param state: one of the following 'pending', 'success', 'failed'
or 'cancelled'.
:param target_url: Target url to associate with this status.
:param description: Short description of the status.
:param context: Context of the result
:return: A deferred with the result from GitLab.
### Response:
def createStatus(self,
project_id, branch, sha, state, target_url=None,
description=None, context=None):
"""
:param project_id: Project ID from GitLab
:param branch: Branch name to create the status for.
:param sha: Full sha to create the status for.
:param state: one of the following 'pending', 'success', 'failed'
or 'cancelled'.
:param target_url: Target url to associate with this status.
:param description: Short description of the status.
:param context: Context of the result
:return: A deferred with the result from GitLab.
"""
payload = {'state': state, 'ref': branch}
if description is not None:
payload['description'] = description
if target_url is not None:
payload['target_url'] = target_url
if context is not None:
payload['name'] = context
return self._http.post('/api/v4/projects/%d/statuses/%s' % (
project_id, sha),
json=payload) |
def nonlocal_packages_path(self):
    """Returns package search paths with local path removed."""
    search_paths = list(self.packages_path)
    try:
        # Drops only the first occurrence, matching the original list.remove().
        search_paths.remove(self.local_packages_path)
    except ValueError:
        pass  # local path not in the search paths; nothing to strip
    return search_paths
### Input:
Returns package search paths with local path removed.
### Response:
def nonlocal_packages_path(self):
"""Returns package search paths with local path removed."""
paths = self.packages_path[:]
if self.local_packages_path in paths:
paths.remove(self.local_packages_path)
return paths |
def to_currency(self, val, currency='EUR', cents=True, separator=',',
                adjective=False):
    """
    Render a numeric value as spelled-out currency text.
    Args:
        val: Numeric value
        currency (str): Currency code
        cents (bool): Verbose cents
        separator (str): Cent separator
        adjective (bool): Prefix currency name with adjective
    Returns:
        str: Formatted string
    """
    integer_part, fraction_part, is_negative = parse_currency_parts(val)
    try:
        unit_form, cent_form = self.CURRENCY_FORMS[currency]
    except KeyError:
        raise NotImplementedError(
            'Currency code "%s" not implemented for "%s"' %
            (currency, self.__class__.__name__))
    if adjective and currency in self.CURRENCY_ADJECTIVES:
        unit_form = prefix_currency(self.CURRENCY_ADJECTIVES[currency],
                                    unit_form)
    sign = "%s " % self.negword if is_negative else ""
    # Cents are either fully spelled out or given in terse/numeric form.
    if cents:
        cents_text = self._cents_verbose(fraction_part, currency)
    else:
        cents_text = self._cents_terse(fraction_part, currency)
    return u'%s%s %s%s %s %s' % (
        sign,
        self.to_cardinal(integer_part),
        self.pluralize(integer_part, unit_form),
        separator,
        cents_text,
        self.pluralize(fraction_part, cent_form)
    )
val: Numeric value
currency (str): Currency code
cents (bool): Verbose cents
separator (str): Cent separator
adjective (bool): Prefix currency name with adjective
Returns:
str: Formatted string | Below is the the instruction that describes the task:
### Input:
Args:
val: Numeric value
currency (str): Currency code
cents (bool): Verbose cents
separator (str): Cent separator
adjective (bool): Prefix currency name with adjective
Returns:
str: Formatted string
### Response:
def to_currency(self, val, currency='EUR', cents=True, separator=',',
adjective=False):
"""
Args:
val: Numeric value
currency (str): Currency code
cents (bool): Verbose cents
separator (str): Cent separator
adjective (bool): Prefix currency name with adjective
Returns:
str: Formatted string
"""
left, right, is_negative = parse_currency_parts(val)
try:
cr1, cr2 = self.CURRENCY_FORMS[currency]
except KeyError:
raise NotImplementedError(
'Currency code "%s" not implemented for "%s"' %
(currency, self.__class__.__name__))
if adjective and currency in self.CURRENCY_ADJECTIVES:
cr1 = prefix_currency(self.CURRENCY_ADJECTIVES[currency], cr1)
minus_str = "%s " % self.negword if is_negative else ""
cents_str = self._cents_verbose(right, currency) \
if cents else self._cents_terse(right, currency)
return u'%s%s %s%s %s %s' % (
minus_str,
self.to_cardinal(left),
self.pluralize(left, cr1),
separator,
cents_str,
self.pluralize(right, cr2)
) |
def decode_body(body, content_type):
    """Decode event body.

    Dicts are returned untouched.  Strings are base64-decoded when possible;
    if the content type is JSON the decoded bytes are parsed as JSON.  On any
    decode failure the input (or the raw decoded bytes) is returned as-is.
    :param body: dict, or str holding (possibly base64-encoded) payload.
    :param content_type: MIME type of the payload, e.g. 'application/json'.
    """
    if isinstance(body, dict):
        return body
    try:
        decoded_body = base64.b64decode(body)
    except (ValueError, TypeError):
        # Not valid base64 (binascii.Error subclasses ValueError); the original
        # bare `except:` also swallowed SystemExit/KeyboardInterrupt.
        return body
    if content_type == 'application/json':
        try:
            return json.loads(decoded_body)
        except ValueError:
            # Decoded bytes are not valid JSON; fall through to raw bytes.
            pass
    return decoded_body
### Input:
Decode event body
### Response:
def decode_body(body, content_type):
"""Decode event body"""
if isinstance(body, dict):
return body
else:
try:
decoded_body = base64.b64decode(body)
except:
return body
if content_type == 'application/json':
try:
return json.loads(decoded_body)
except:
pass
return decoded_body |
def named(self, name):
    '''Returns .get_by('name', name)'''
    # Serialize first so get_by receives the canonical representation.
    return self.get_by('name', self.serialize(name))
### Input:
Returns .get_by('name', name)
### Response:
def named(self, name):
'''Returns .get_by('name', name)'''
name = self.serialize(name)
return self.get_by('name', name) |
def cache_py2_modules():
    """
    Currently this function is unneeded, as we are not attempting to provide import hooks
    for modules with ambiguous names: email, urllib, pickle.
    """
    # Already populated -- the cache only needs to be filled once.
    # NOTE(review): `sys.py2_modules` is presumably a dict attached to the sys
    # module elsewhere in this package -- confirm before relying on it.
    if len(sys.py2_modules) != 0:
        return
    # Caching must happen before any import hooks are installed, otherwise the
    # hooked (Py3-style) modules would be cached instead of the originals.
    assert not detect_hooks()
    import urllib
    sys.py2_modules['urllib'] = urllib
    import email
    sys.py2_modules['email'] = email
    import pickle
    sys.py2_modules['pickle'] = pickle
for modules with ambiguous names: email, urllib, pickle. | Below is the the instruction that describes the task:
### Input:
Currently this function is unneeded, as we are not attempting to provide import hooks
for modules with ambiguous names: email, urllib, pickle.
### Response:
def cache_py2_modules():
"""
Currently this function is unneeded, as we are not attempting to provide import hooks
for modules with ambiguous names: email, urllib, pickle.
"""
if len(sys.py2_modules) != 0:
return
assert not detect_hooks()
import urllib
sys.py2_modules['urllib'] = urllib
import email
sys.py2_modules['email'] = email
import pickle
sys.py2_modules['pickle'] = pickle |
def delete_plate(self, plate_id, delete_meta_data=False):
    """
    Delete a plate from the database
    :param plate_id: The plate id
    :param delete_meta_data: Optionally delete all meta data associated with this plate as well
    :return: None
    """
    if plate_id not in self.plates:
        logging.info("Plate {} not found for deletion".format(plate_id))
        return
    plate = self.plates[plate_id]
    if delete_meta_data:
        # Meta-data identifiers are dotted paths built from the plate values,
        # e.g. (("house", "1"),) -> "house_1".
        for pv in plate.values:
            identifier = ".".join(map(lambda x: "_".join(x), pv))
            self.meta_data_manager.delete(identifier=identifier)
    with switch_db(PlateDefinitionModel, "hyperstream"):
        try:
            p = PlateDefinitionModel.objects.get(plate_id=plate_id)
            p.delete()
            del self.plates[plate_id]
        except DoesNotExist as e:
            # FIX: logging.warn is a deprecated alias of logging.warning.
            logging.warning(e)
    logging.info("Plate {} deleted".format(plate_id))
:param plate_id: The plate id
:param delete_meta_data: Optionally delete all meta data associated with this plate as well
:return: None | Below is the the instruction that describes the task:
### Input:
Delete a plate from the database
:param plate_id: The plate id
:param delete_meta_data: Optionally delete all meta data associated with this plate as well
:return: None
### Response:
def delete_plate(self, plate_id, delete_meta_data=False):
"""
Delete a plate from the database
:param plate_id: The plate id
:param delete_meta_data: Optionally delete all meta data associated with this plate as well
:return: None
"""
if plate_id not in self.plates:
logging.info("Plate {} not found for deletion".format(plate_id))
return
plate = self.plates[plate_id]
if delete_meta_data:
for pv in plate.values:
identifier = ".".join(map(lambda x: "_".join(x), pv))
self.meta_data_manager.delete(identifier=identifier)
with switch_db(PlateDefinitionModel, "hyperstream"):
try:
p = PlateDefinitionModel.objects.get(plate_id=plate_id)
p.delete()
del self.plates[plate_id]
except DoesNotExist as e:
logging.warn(e)
logging.info("Plate {} deleted".format(plate_id)) |
def list_math_addition(a, b):
    """!
    @brief Addition of two lists.
    @details Each element from list 'a' is added to the element from list 'b'
             at the same position.  'a' and 'b' are expected to have equal length.
    @param[in] a (list): List of elements that supports mathematic addition.
    @param[in] b (list): List of elements that supports mathematic addition.
    @return (list) Results of addition of two lists.
    """
    # FIX: idiomatic pairwise iteration via zip instead of C-style
    # indexing with range(len(a)); stray trailing semicolon removed.
    return [item_a + item_b for item_a, item_b in zip(a, b)]
@brief Addition of two lists.
@details Each element from list 'a' is added to element from list 'b' accordingly.
@param[in] a (list): List of elements that supports mathematic addition..
@param[in] b (list): List of elements that supports mathematic addition..
@return (list) Results of addtion of two lists. | Below is the the instruction that describes the task:
### Input:
!
@brief Addition of two lists.
@details Each element from list 'a' is added to element from list 'b' accordingly.
@param[in] a (list): List of elements that supports mathematic addition..
@param[in] b (list): List of elements that supports mathematic addition..
@return (list) Results of addtion of two lists.
### Response:
def list_math_addition(a, b):
"""!
@brief Addition of two lists.
@details Each element from list 'a' is added to element from list 'b' accordingly.
@param[in] a (list): List of elements that supports mathematic addition..
@param[in] b (list): List of elements that supports mathematic addition..
@return (list) Results of addtion of two lists.
"""
return [a[i] + b[i] for i in range(len(a))]; |
def detect_file_triggers(trigger_patterns):
    """The existence of files matching configured globs will trigger a version bump"""
    triggered = set()
    for name, file_glob in trigger_patterns.items():
        hits = glob.glob(file_glob)
        if not hits:
            _LOG.debug("trigger: no match on %r", file_glob)
            continue
        _LOG.debug("trigger: %s bump from %r\n\t%s", name, file_glob, hits)
        triggered.add(name)
    return triggered
### Input:
The existence of files matching configured globs will trigger a version bump
### Response:
def detect_file_triggers(trigger_patterns):
"""The existence of files matching configured globs will trigger a version bump"""
triggers = set()
for trigger, pattern in trigger_patterns.items():
matches = glob.glob(pattern)
if matches:
_LOG.debug("trigger: %s bump from %r\n\t%s", trigger, pattern, matches)
triggers.add(trigger)
else:
_LOG.debug("trigger: no match on %r", pattern)
return triggers |
def security_group_get(auth=None, **kwargs):
    '''
    Get a single security group. This will create a default security group
    if one does not exist yet for a particular project id.
    filters
        A Python dictionary of filter conditions to push down
    CLI Example:
    .. code-block:: bash
        salt '*' neutronng.security_group_get \
          name=1dcac318a83b4610b7a7f7ba01465548
        salt '*' neutronng.security_group_get \
          name=default\
          filters='{"tenant_id":"2e778bb64ca64a199eb526b5958d8710"}'
    '''
    cloud = get_operator_cloud(auth)
    # Strip salt-internal keys before handing the kwargs to shade.
    return cloud.get_security_group(**_clean_kwargs(**kwargs))
if one does not exist yet for a particular project id.
filters
A Python dictionary of filter conditions to push down
CLI Example:
.. code-block:: bash
salt '*' neutronng.security_group_get \
name=1dcac318a83b4610b7a7f7ba01465548
salt '*' neutronng.security_group_get \
name=default\
filters='{"tenant_id":"2e778bb64ca64a199eb526b5958d8710"}' | Below is the the instruction that describes the task:
### Input:
Get a single security group. This will create a default security group
if one does not exist yet for a particular project id.
filters
A Python dictionary of filter conditions to push down
CLI Example:
.. code-block:: bash
salt '*' neutronng.security_group_get \
name=1dcac318a83b4610b7a7f7ba01465548
salt '*' neutronng.security_group_get \
name=default\
filters='{"tenant_id":"2e778bb64ca64a199eb526b5958d8710"}'
### Response:
def security_group_get(auth=None, **kwargs):
'''
Get a single security group. This will create a default security group
if one does not exist yet for a particular project id.
filters
A Python dictionary of filter conditions to push down
CLI Example:
.. code-block:: bash
salt '*' neutronng.security_group_get \
name=1dcac318a83b4610b7a7f7ba01465548
salt '*' neutronng.security_group_get \
name=default\
filters='{"tenant_id":"2e778bb64ca64a199eb526b5958d8710"}'
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.get_security_group(**kwargs) |
def remove_uuid_id_like_indexes(app_name, database=None):
    """
    Remove all of varchar_pattern_ops indexes that django created for uuid
    columns.
    A search is never done with a filter of the style (uuid__like='1ae3c%'), so
    all such indexes can be removed from Versionable models.
    This will only try to remove indexes if they exist in the database, so it
    should be safe to run in a post_migrate signal handler. Running it several
    times should leave the database in the same state as running it once.
    :param str app_name: application name whose Versionable models will be
        acted on.
    :param str database: database alias to use. If None, use default
        connection.
    :return: number of indexes removed
    :rtype: int
    """
    dropped = 0
    with database_connection(database).cursor() as cursor:
        for model in versionable_models(app_name, include_auto_created=True):
            found = select_uuid_like_indexes_on_table(model, cursor)
            if not found:
                continue
            # Quote each index name and drop them all in one statement.
            names = ','.join('"%s"' % row[0] for row in found)
            cursor.execute("DROP INDEX %s" % names)
            dropped += len(found)
    return dropped
columns.
A search is never done with a filter of the style (uuid__like='1ae3c%'), so
all such indexes can be removed from Versionable models.
This will only try to remove indexes if they exist in the database, so it
should be safe to run in a post_migrate signal handler. Running it several
times should leave the database in the same state as running it once.
:param str app_name: application name whose Versionable models will be
acted on.
:param str database: database alias to use. If None, use default
connection.
:return: number of indexes removed
:rtype: int | Below is the the instruction that describes the task:
### Input:
Remove all of varchar_pattern_ops indexes that django created for uuid
columns.
A search is never done with a filter of the style (uuid__like='1ae3c%'), so
all such indexes can be removed from Versionable models.
This will only try to remove indexes if they exist in the database, so it
should be safe to run in a post_migrate signal handler. Running it several
times should leave the database in the same state as running it once.
:param str app_name: application name whose Versionable models will be
acted on.
:param str database: database alias to use. If None, use default
connection.
:return: number of indexes removed
:rtype: int
### Response:
def remove_uuid_id_like_indexes(app_name, database=None):
"""
Remove all of varchar_pattern_ops indexes that django created for uuid
columns.
A search is never done with a filter of the style (uuid__like='1ae3c%'), so
all such indexes can be removed from Versionable models.
This will only try to remove indexes if they exist in the database, so it
should be safe to run in a post_migrate signal handler. Running it several
times should leave the database in the same state as running it once.
:param str app_name: application name whose Versionable models will be
acted on.
:param str database: database alias to use. If None, use default
connection.
:return: number of indexes removed
:rtype: int
"""
removed_indexes = 0
with database_connection(database).cursor() as cursor:
for model in versionable_models(app_name, include_auto_created=True):
indexes = select_uuid_like_indexes_on_table(model, cursor)
if indexes:
index_list = ','.join(['"%s"' % r[0] for r in indexes])
cursor.execute("DROP INDEX %s" % index_list)
removed_indexes += len(indexes)
return removed_indexes |
def _startProgramsNode(self, name, attrs):
"""Process the start of a node under xtvd/programs"""
if name == 'program':
self._programId = attrs.get('id')
self._series = None
self._title = None
self._subtitle = None
self._description = None
self._mpaaRating = None
self._starRating = None
self._runTime = None
self._year = None
self._showType = None
self._colorCode = None
self._originalAirDate = None
self._syndicatedEpisodeNumber = None
self._advisories = [] | Process the start of a node under xtvd/programs | Below is the the instruction that describes the task:
### Input:
Process the start of a node under xtvd/programs
### Response:
def _startProgramsNode(self, name, attrs):
"""Process the start of a node under xtvd/programs"""
if name == 'program':
self._programId = attrs.get('id')
self._series = None
self._title = None
self._subtitle = None
self._description = None
self._mpaaRating = None
self._starRating = None
self._runTime = None
self._year = None
self._showType = None
self._colorCode = None
self._originalAirDate = None
self._syndicatedEpisodeNumber = None
self._advisories = [] |
def _create_child(self, tag):
    """Create a new child element with the given tag."""
    qualified_tag = self._get_namespace_tag(tag)
    return etree.SubElement(self._root, qualified_tag)
### Input:
Create a new child element with the given tag.
### Response:
def _create_child(self, tag):
"""Create a new child element with the given tag."""
return etree.SubElement(self._root, self._get_namespace_tag(tag)) |
def to_dict(self):
    """
    Get a dictionary of the attributes of this Language object, which
    can be useful for constructing a similar object.
    """
    if self._dict is None:
        # Build once and memoize; only truthy attributes are included.
        self._dict = {key: getattr(self, key)
                      for key in self.ATTRIBUTES if getattr(self, key)}
    return self._dict
can be useful for constructing a similar object. | Below is the the instruction that describes the task:
### Input:
Get a dictionary of the attributes of this Language object, which
can be useful for constructing a similar object.
### Response:
def to_dict(self):
"""
Get a dictionary of the attributes of this Language object, which
can be useful for constructing a similar object.
"""
if self._dict is not None:
return self._dict
result = {}
for key in self.ATTRIBUTES:
value = getattr(self, key)
if value:
result[key] = value
self._dict = result
return result |
def colorstart(fgcolor, bgcolor, weight):
    ''' Begin a text style. '''
    style = bold if weight else norm
    # Background color is optional; emit the shorter escape when absent.
    if bgcolor:
        out('\x1b[%s;%s;%sm' % (style, fgcolor, bgcolor))
    else:
        out('\x1b[%s;%sm' % (style, fgcolor))
### Input:
Begin a text style.
### Response:
def colorstart(fgcolor, bgcolor, weight):
''' Begin a text style. '''
if weight:
weight = bold
else:
weight = norm
if bgcolor:
out('\x1b[%s;%s;%sm' % (weight, fgcolor, bgcolor))
else:
out('\x1b[%s;%sm' % (weight, fgcolor)) |
def get_identifier(self):
    """Validate and returns disqus_identifier option value.
    :returns: disqus_identifier config value.
    :rtype: str
    """
    # An explicit option always wins; otherwise derive from the first title.
    try:
        return self.options['disqus_identifier']
    except KeyError:
        pass
    title_nodes = self.state.document.traverse(nodes.title)
    if not title_nodes:
        raise DisqusError('No title nodes found in document, cannot derive disqus_identifier config value.')
    return title_nodes[0].astext()
:returns: disqus_identifier config value.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Validate and returns disqus_identifier option value.
:returns: disqus_identifier config value.
:rtype: str
### Response:
def get_identifier(self):
"""Validate and returns disqus_identifier option value.
:returns: disqus_identifier config value.
:rtype: str
"""
if 'disqus_identifier' in self.options:
return self.options['disqus_identifier']
title_nodes = self.state.document.traverse(nodes.title)
if not title_nodes:
raise DisqusError('No title nodes found in document, cannot derive disqus_identifier config value.')
return title_nodes[0].astext() |
def get_value(self, label, takeable=False):
    """
    Retrieve single value at passed index label
    .. deprecated:: 0.21.0
        Please use .at[] or .iat[] accessors.
    Parameters
    ----------
    index : label
    takeable : interpret the index as indexers, default False
    Returns
    -------
    value : scalar value
    """
    msg = ("get_value is deprecated and will be removed "
           "in a future release. Please use "
           ".at[] or .iat[] accessors instead")
    warnings.warn(msg, FutureWarning, stacklevel=2)
    return self._get_value(label, takeable=takeable)
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : label
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value | Below is the the instruction that describes the task:
### Input:
Retrieve single value at passed index label
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : label
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
### Response:
def get_value(self, label, takeable=False):
"""
Retrieve single value at passed index label
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : label
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(label, takeable=takeable) |
def k_depth(d, depth, _counter=1):
    """Iterate keys on specific depth.
    depth has to be greater equal than 0.
    Usage reference see :meth:`DictTree.kv_depth()<DictTree.kv_depth>`
    """
    if depth == 0:
        # Depth zero is the root node itself.
        yield d[_meta]["_rootname"]
    elif _counter == depth:
        # Reached the requested depth: emit this node's direct keys.
        for key in DictTree.k(d):
            yield key
    else:
        # Recurse one level deeper into every child node.
        for child in DictTree.v(d):
            for key in DictTree.k_depth(child, depth, _counter + 1):
                yield key
depth has to be greater equal than 0.
Usage reference see :meth:`DictTree.kv_depth()<DictTree.kv_depth>` | Below is the the instruction that describes the task:
### Input:
Iterate keys on specific depth.
depth has to be greater equal than 0.
Usage reference see :meth:`DictTree.kv_depth()<DictTree.kv_depth>`
### Response:
def k_depth(d, depth, _counter=1):
"""Iterate keys on specific depth.
depth has to be greater equal than 0.
Usage reference see :meth:`DictTree.kv_depth()<DictTree.kv_depth>`
"""
if depth == 0:
yield d[_meta]["_rootname"]
else:
if _counter == depth:
for key in DictTree.k(d):
yield key
else:
_counter += 1
for node in DictTree.v(d):
for key in DictTree.k_depth(node, depth, _counter):
yield key |
def _compute_asset_lifetimes(self, country_codes):
"""
Compute and cache a recarray of asset lifetimes.
"""
equities_cols = self.equities.c
if country_codes:
buf = np.array(
tuple(
sa.select((
equities_cols.sid,
equities_cols.start_date,
equities_cols.end_date,
)).where(
(self.exchanges.c.exchange == equities_cols.exchange) &
(self.exchanges.c.country_code.in_(country_codes))
).execute(),
),
dtype='f8', # use doubles so we get NaNs
)
else:
buf = np.array([], dtype='f8')
lifetimes = np.recarray(
buf=buf,
shape=(len(buf),),
dtype=[
('sid', 'f8'),
('start', 'f8'),
('end', 'f8')
],
)
start = lifetimes.start
end = lifetimes.end
start[np.isnan(start)] = 0 # convert missing starts to 0
end[np.isnan(end)] = np.iinfo(int).max # convert missing end to INTMAX
# Cast the results back down to int.
return lifetimes.astype([
('sid', 'i8'),
('start', 'i8'),
('end', 'i8'),
]) | Compute and cache a recarray of asset lifetimes. | Below is the the instruction that describes the task:
### Input:
Compute and cache a recarray of asset lifetimes.
### Response:
def _compute_asset_lifetimes(self, country_codes):
"""
Compute and cache a recarray of asset lifetimes.
"""
equities_cols = self.equities.c
if country_codes:
buf = np.array(
tuple(
sa.select((
equities_cols.sid,
equities_cols.start_date,
equities_cols.end_date,
)).where(
(self.exchanges.c.exchange == equities_cols.exchange) &
(self.exchanges.c.country_code.in_(country_codes))
).execute(),
),
dtype='f8', # use doubles so we get NaNs
)
else:
buf = np.array([], dtype='f8')
lifetimes = np.recarray(
buf=buf,
shape=(len(buf),),
dtype=[
('sid', 'f8'),
('start', 'f8'),
('end', 'f8')
],
)
start = lifetimes.start
end = lifetimes.end
start[np.isnan(start)] = 0 # convert missing starts to 0
end[np.isnan(end)] = np.iinfo(int).max # convert missing end to INTMAX
# Cast the results back down to int.
return lifetimes.astype([
('sid', 'i8'),
('start', 'i8'),
('end', 'i8'),
]) |
def get_file_descriptor(self):
"""Return the file descriptor for the given websocket"""
try:
return uwsgi.connection_fd()
except IOError as e:
self.close()
raise WebSocketError(e) | Return the file descriptor for the given websocket | Below is the the instruction that describes the task:
### Input:
Return the file descriptor for the given websocket
### Response:
def get_file_descriptor(self):
"""Return the file descriptor for the given websocket"""
try:
return uwsgi.connection_fd()
except IOError as e:
self.close()
raise WebSocketError(e) |
def setup_versioned_routes(routes, version=None):
"""Set up routes with a version prefix."""
prefix = '/' + version if version else ""
for r in routes:
path, method = r
route(prefix + path, method, routes[r]) | Set up routes with a version prefix. | Below is the the instruction that describes the task:
### Input:
Set up routes with a version prefix.
### Response:
def setup_versioned_routes(routes, version=None):
"""Set up routes with a version prefix."""
prefix = '/' + version if version else ""
for r in routes:
path, method = r
route(prefix + path, method, routes[r]) |
def install_requirements(self, requires):
""" Install the listed requirements
"""
# Temporarily install dependencies required by setup.py before trying to import them.
sys.path[0:0] = ['setup-requires']
pkg_resources.working_set.add_entry('setup-requires')
to_install = list(self.missing_requirements(requires))
if to_install:
cmd = [sys.executable, "-m", "pip", "install",
"-t", "setup-requires"] + to_install
subprocess.call(cmd) | Install the listed requirements | Below is the the instruction that describes the task:
### Input:
Install the listed requirements
### Response:
def install_requirements(self, requires):
""" Install the listed requirements
"""
# Temporarily install dependencies required by setup.py before trying to import them.
sys.path[0:0] = ['setup-requires']
pkg_resources.working_set.add_entry('setup-requires')
to_install = list(self.missing_requirements(requires))
if to_install:
cmd = [sys.executable, "-m", "pip", "install",
"-t", "setup-requires"] + to_install
subprocess.call(cmd) |
def enable_logging( level='WARNING' ):
"""Enable sending logs to stderr. Useful for shell sessions.
level
Logging threshold, as defined in the logging module of the Python
standard library. Defaults to 'WARNING'.
"""
log = logging.getLogger( 'mrcrowbar' )
log.setLevel( level )
out = logging.StreamHandler()
out.setLevel( level )
form = logging.Formatter( '[%(levelname)s] %(name)s - %(message)s' )
out.setFormatter( form )
log.addHandler( out ) | Enable sending logs to stderr. Useful for shell sessions.
level
Logging threshold, as defined in the logging module of the Python
standard library. Defaults to 'WARNING'. | Below is the the instruction that describes the task:
### Input:
Enable sending logs to stderr. Useful for shell sessions.
level
Logging threshold, as defined in the logging module of the Python
standard library. Defaults to 'WARNING'.
### Response:
def enable_logging( level='WARNING' ):
"""Enable sending logs to stderr. Useful for shell sessions.
level
Logging threshold, as defined in the logging module of the Python
standard library. Defaults to 'WARNING'.
"""
log = logging.getLogger( 'mrcrowbar' )
log.setLevel( level )
out = logging.StreamHandler()
out.setLevel( level )
form = logging.Formatter( '[%(levelname)s] %(name)s - %(message)s' )
out.setFormatter( form )
log.addHandler( out ) |
def MT2Axes(mt):
"""
Calculates the principal axes of a given moment tensor.
:param mt: :class:`~MomentTensor`
:return: tuple of :class:`~PrincipalAxis` T, N and P
Adapted from ps_tensor / utilmeca.c /
`Generic Mapping Tools (GMT) <http://gmt.soest.hawaii.edu>`_.
"""
(D, V) = np.linalg.eigh(mt.mt)
pl = np.arcsin(-V[0])
az = np.arctan2(V[2], -V[1])
for i in range(0, 3):
if pl[i] <= 0:
pl[i] = -pl[i]
az[i] += np.pi
if az[i] < 0:
az[i] += 2 * np.pi
if az[i] > 2 * np.pi:
az[i] -= 2 * np.pi
pl *= R2D
az *= R2D
T = PrincipalAxis(D[2], az[2], pl[2])
N = PrincipalAxis(D[1], az[1], pl[1])
P = PrincipalAxis(D[0], az[0], pl[0])
return (T, N, P) | Calculates the principal axes of a given moment tensor.
:param mt: :class:`~MomentTensor`
:return: tuple of :class:`~PrincipalAxis` T, N and P
Adapted from ps_tensor / utilmeca.c /
`Generic Mapping Tools (GMT) <http://gmt.soest.hawaii.edu>`_. | Below is the the instruction that describes the task:
### Input:
Calculates the principal axes of a given moment tensor.
:param mt: :class:`~MomentTensor`
:return: tuple of :class:`~PrincipalAxis` T, N and P
Adapted from ps_tensor / utilmeca.c /
`Generic Mapping Tools (GMT) <http://gmt.soest.hawaii.edu>`_.
### Response:
def MT2Axes(mt):
"""
Calculates the principal axes of a given moment tensor.
:param mt: :class:`~MomentTensor`
:return: tuple of :class:`~PrincipalAxis` T, N and P
Adapted from ps_tensor / utilmeca.c /
`Generic Mapping Tools (GMT) <http://gmt.soest.hawaii.edu>`_.
"""
(D, V) = np.linalg.eigh(mt.mt)
pl = np.arcsin(-V[0])
az = np.arctan2(V[2], -V[1])
for i in range(0, 3):
if pl[i] <= 0:
pl[i] = -pl[i]
az[i] += np.pi
if az[i] < 0:
az[i] += 2 * np.pi
if az[i] > 2 * np.pi:
az[i] -= 2 * np.pi
pl *= R2D
az *= R2D
T = PrincipalAxis(D[2], az[2], pl[2])
N = PrincipalAxis(D[1], az[1], pl[1])
P = PrincipalAxis(D[0], az[0], pl[0])
return (T, N, P) |
def _build_full_list(self):
"""Build a full list of pages.
Examples:
>>> _SlicedPaginator(1, 7, 5)._build_full_list()
[1, 2, 3, 4, 5]
>>> _SlicedPaginator(6, 7, 5)._build_full_list()
[3, 4, 5, 6, 7]
>>> _SlicedPaginator(6, 7, 5)._build_full_list()
[3, 4, 5, 6, 7]
>>> import itertools
>>> combinations = itertools.combinations(range(100), 2)
>>> combinations = filter(lambda (x,y): x<y, combinations)
>>> for page, maxpages in combinations:
... a = _SlicedPaginator(page + 1, maxpages, 7)
... b = a._build_full_list()
>>> _SlicedPaginator(2, 5, 7)._build_full_list()
[1, 2, 3, 4, 5]
>>> _SlicedPaginator(5, 5, 7)._build_full_list()
[1, 2, 3, 4, 5]
"""
if self.npages <= self.maxpages_items:
return range(1, self.npages + 1)
else:
l = range(self.curpage - self.max_prev_items,
self.curpage + self.max_next_items + 1)
while l and l[0] < 1:
l.append(l[-1] + 1)
del l[0]
while l and l[-1] > self.npages:
l.insert(0, l[0] - 1)
del l[-1]
return l | Build a full list of pages.
Examples:
>>> _SlicedPaginator(1, 7, 5)._build_full_list()
[1, 2, 3, 4, 5]
>>> _SlicedPaginator(6, 7, 5)._build_full_list()
[3, 4, 5, 6, 7]
>>> _SlicedPaginator(6, 7, 5)._build_full_list()
[3, 4, 5, 6, 7]
>>> import itertools
>>> combinations = itertools.combinations(range(100), 2)
>>> combinations = filter(lambda (x,y): x<y, combinations)
>>> for page, maxpages in combinations:
... a = _SlicedPaginator(page + 1, maxpages, 7)
... b = a._build_full_list()
>>> _SlicedPaginator(2, 5, 7)._build_full_list()
[1, 2, 3, 4, 5]
>>> _SlicedPaginator(5, 5, 7)._build_full_list()
[1, 2, 3, 4, 5] | Below is the the instruction that describes the task:
### Input:
Build a full list of pages.
Examples:
>>> _SlicedPaginator(1, 7, 5)._build_full_list()
[1, 2, 3, 4, 5]
>>> _SlicedPaginator(6, 7, 5)._build_full_list()
[3, 4, 5, 6, 7]
>>> _SlicedPaginator(6, 7, 5)._build_full_list()
[3, 4, 5, 6, 7]
>>> import itertools
>>> combinations = itertools.combinations(range(100), 2)
>>> combinations = filter(lambda (x,y): x<y, combinations)
>>> for page, maxpages in combinations:
... a = _SlicedPaginator(page + 1, maxpages, 7)
... b = a._build_full_list()
>>> _SlicedPaginator(2, 5, 7)._build_full_list()
[1, 2, 3, 4, 5]
>>> _SlicedPaginator(5, 5, 7)._build_full_list()
[1, 2, 3, 4, 5]
### Response:
def _build_full_list(self):
"""Build a full list of pages.
Examples:
>>> _SlicedPaginator(1, 7, 5)._build_full_list()
[1, 2, 3, 4, 5]
>>> _SlicedPaginator(6, 7, 5)._build_full_list()
[3, 4, 5, 6, 7]
>>> _SlicedPaginator(6, 7, 5)._build_full_list()
[3, 4, 5, 6, 7]
>>> import itertools
>>> combinations = itertools.combinations(range(100), 2)
>>> combinations = filter(lambda (x,y): x<y, combinations)
>>> for page, maxpages in combinations:
... a = _SlicedPaginator(page + 1, maxpages, 7)
... b = a._build_full_list()
>>> _SlicedPaginator(2, 5, 7)._build_full_list()
[1, 2, 3, 4, 5]
>>> _SlicedPaginator(5, 5, 7)._build_full_list()
[1, 2, 3, 4, 5]
"""
if self.npages <= self.maxpages_items:
return range(1, self.npages + 1)
else:
l = range(self.curpage - self.max_prev_items,
self.curpage + self.max_next_items + 1)
while l and l[0] < 1:
l.append(l[-1] + 1)
del l[0]
while l and l[-1] > self.npages:
l.insert(0, l[0] - 1)
del l[-1]
return l |
def get_doc(func):
"""Extract and dedent the __doc__ of a function.
Unlike `textwrap.dedent()` it also works when the first line
is not indented.
"""
doc = func.__doc__
if not doc:
return ""
# doc has only one line
if "\n" not in doc:
return doc
# Only Python core devs write __doc__ like this
if doc.startswith(("\n", "\\\n")):
return textwrap.dedent(doc)
# First line is not indented
first, rest = doc.split("\n", 1)
return first + "\n" + textwrap.dedent(rest) | Extract and dedent the __doc__ of a function.
Unlike `textwrap.dedent()` it also works when the first line
is not indented. | Below is the the instruction that describes the task:
### Input:
Extract and dedent the __doc__ of a function.
Unlike `textwrap.dedent()` it also works when the first line
is not indented.
### Response:
def get_doc(func):
"""Extract and dedent the __doc__ of a function.
Unlike `textwrap.dedent()` it also works when the first line
is not indented.
"""
doc = func.__doc__
if not doc:
return ""
# doc has only one line
if "\n" not in doc:
return doc
# Only Python core devs write __doc__ like this
if doc.startswith(("\n", "\\\n")):
return textwrap.dedent(doc)
# First line is not indented
first, rest = doc.split("\n", 1)
return first + "\n" + textwrap.dedent(rest) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.