code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def cache_result(cache_key, timeout):
"""A decorator for caching the result of a function."""
def decorator(f):
cache_name = settings.WAFER_CACHE
@functools.wraps(f)
def wrapper(*args, **kw):
cache = caches[cache_name]
result = cache.get(cache_key)
if result is None:
result = f(*args, **kw)
cache.set(cache_key, result, timeout)
return result
def invalidate():
cache = caches[cache_name]
cache.delete(cache_key)
wrapper.invalidate = invalidate
return wrapper
return decorator | A decorator for caching the result of a function. | Below is the instruction that describes the task:
### Input:
A decorator for caching the result of a function.
### Response:
def cache_result(cache_key, timeout):
"""A decorator for caching the result of a function."""
def decorator(f):
cache_name = settings.WAFER_CACHE
@functools.wraps(f)
def wrapper(*args, **kw):
cache = caches[cache_name]
result = cache.get(cache_key)
if result is None:
result = f(*args, **kw)
cache.set(cache_key, result, timeout)
return result
def invalidate():
cache = caches[cache_name]
cache.delete(cache_key)
wrapper.invalidate = invalidate
return wrapper
return decorator |
def get_interface(iface):
'''
Returns details about given interface.
CLI Example:
.. code-block:: bash
salt '*' ip.get_interface eth0
'''
_interfaces = get_interfaces_details()
for _interface in _interfaces['interfaces']:
if _interface['connectionid'] == iface:
return _dict_to_string(_interface)
return None | Returns details about given interface.
CLI Example:
.. code-block:: bash
salt '*' ip.get_interface eth0 | Below is the instruction that describes the task:
### Input:
Returns details about given interface.
CLI Example:
.. code-block:: bash
salt '*' ip.get_interface eth0
### Response:
def get_interface(iface):
'''
Returns details about given interface.
CLI Example:
.. code-block:: bash
salt '*' ip.get_interface eth0
'''
_interfaces = get_interfaces_details()
for _interface in _interfaces['interfaces']:
if _interface['connectionid'] == iface:
return _dict_to_string(_interface)
return None |
def filter(self, dataset):
"""Mark unusual trips for all the routes in the dataset."""
self.info('Going to filter infrequent routes in the dataset')
for route in dataset.routes.values():
self.filter_line(route) | Mark unusual trips for all the routes in the dataset. | Below is the instruction that describes the task:
### Input:
Mark unusual trips for all the routes in the dataset.
### Response:
def filter(self, dataset):
"""Mark unusual trips for all the routes in the dataset."""
self.info('Going to filter infrequent routes in the dataset')
for route in dataset.routes.values():
self.filter_line(route) |
def gradient_rgb(
self, text=None, fore=None, back=None, style=None,
start=None, stop=None, step=1, linemode=True, movefactor=0):
""" Return a black and white gradient.
Arguments:
text : String to colorize.
fore : Foreground color, background will be gradient.
back : Background color, foreground will be gradient.
style : Name of style to use for the gradient.
start : Starting rgb value.
stop : Stopping rgb value.
step : Number of characters to colorize per color.
This allows a "wider" gradient.
This will always be greater than 0.
linemode : Colorize each line in the input.
Default: True
movefactor : Amount to shift gradient for each line when
`linemode` is set.
"""
gradargs = {
'step': step,
'fore': fore,
'back': back,
'style': style,
}
start = start or (0, 0, 0)
stop = stop or (255, 255, 255)
if linemode:
method = self._gradient_rgb_lines
gradargs['movefactor'] = movefactor
else:
method = self._gradient_rgb_line
if text:
return self.__class__(
''.join((
self.data or '',
method(
text,
start,
stop,
**gradargs
),
))
)
# Operating on self.data.
return self.__class__(
method(
self.stripped(),
start,
stop,
**gradargs
)
) | Return a black and white gradient.
Arguments:
text : String to colorize.
fore : Foreground color, background will be gradient.
back : Background color, foreground will be gradient.
style : Name of style to use for the gradient.
start : Starting rgb value.
stop : Stopping rgb value.
step : Number of characters to colorize per color.
This allows a "wider" gradient.
This will always be greater than 0.
linemode : Colorize each line in the input.
Default: True
movefactor : Amount to shift gradient for each line when
`linemode` is set. | Below is the instruction that describes the task:
### Input:
Return a black and white gradient.
Arguments:
text : String to colorize.
fore : Foreground color, background will be gradient.
back : Background color, foreground will be gradient.
style : Name of style to use for the gradient.
start : Starting rgb value.
stop : Stopping rgb value.
step : Number of characters to colorize per color.
This allows a "wider" gradient.
This will always be greater than 0.
linemode : Colorize each line in the input.
Default: True
movefactor : Amount to shift gradient for each line when
`linemode` is set.
### Response:
def gradient_rgb(
self, text=None, fore=None, back=None, style=None,
start=None, stop=None, step=1, linemode=True, movefactor=0):
""" Return a black and white gradient.
Arguments:
text : String to colorize.
fore : Foreground color, background will be gradient.
back : Background color, foreground will be gradient.
style : Name of style to use for the gradient.
start : Starting rgb value.
stop : Stopping rgb value.
step : Number of characters to colorize per color.
This allows a "wider" gradient.
This will always be greater than 0.
linemode : Colorize each line in the input.
Default: True
movefactor : Amount to shift gradient for each line when
`linemode` is set.
"""
gradargs = {
'step': step,
'fore': fore,
'back': back,
'style': style,
}
start = start or (0, 0, 0)
stop = stop or (255, 255, 255)
if linemode:
method = self._gradient_rgb_lines
gradargs['movefactor'] = movefactor
else:
method = self._gradient_rgb_line
if text:
return self.__class__(
''.join((
self.data or '',
method(
text,
start,
stop,
**gradargs
),
))
)
# Operating on self.data.
return self.__class__(
method(
self.stripped(),
start,
stop,
**gradargs
)
) |
def options(self, url, **kwargs):
r"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return self.request('OPTIONS', url, **kwargs) | r"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response | Below is the instruction that describes the task:
### Input:
r"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
### Response:
def options(self, url, **kwargs):
r"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return self.request('OPTIONS', url, **kwargs) |
def getFileSecurity(
self,
fileName,
securityInformation,
securityDescriptor,
lengthSecurityDescriptorBuffer,
lengthNeeded,
dokanFileInfo,
):
"""Get security attributes of a file.
:param fileName: name of file to get security for
:type fileName: ctypes.c_wchar_p
:param securityInformation: buffer for security information
:type securityInformation: PSECURITY_INFORMATION
:param securityDescriptor: buffer for security descriptor
:type securityDescriptor: PSECURITY_DESCRIPTOR
:param lengthSecurityDescriptorBuffer: length of descriptor buffer
:type lengthSecurityDescriptorBuffer: ctypes.c_ulong
:param lengthNeeded: length needed for the buffer
:type lengthNeeded: ctypes.POINTER(ctypes.c_ulong)
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int
"""
return self.operations('getFileSecurity', fileName) | Get security attributes of a file.
:param fileName: name of file to get security for
:type fileName: ctypes.c_wchar_p
:param securityInformation: buffer for security information
:type securityInformation: PSECURITY_INFORMATION
:param securityDescriptor: buffer for security descriptor
:type securityDescriptor: PSECURITY_DESCRIPTOR
:param lengthSecurityDescriptorBuffer: length of descriptor buffer
:type lengthSecurityDescriptorBuffer: ctypes.c_ulong
:param lengthNeeded: length needed for the buffer
:type lengthNeeded: ctypes.POINTER(ctypes.c_ulong)
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int | Below is the instruction that describes the task:
### Input:
Get security attributes of a file.
:param fileName: name of file to get security for
:type fileName: ctypes.c_wchar_p
:param securityInformation: buffer for security information
:type securityInformation: PSECURITY_INFORMATION
:param securityDescriptor: buffer for security descriptor
:type securityDescriptor: PSECURITY_DESCRIPTOR
:param lengthSecurityDescriptorBuffer: length of descriptor buffer
:type lengthSecurityDescriptorBuffer: ctypes.c_ulong
:param lengthNeeded: length needed for the buffer
:type lengthNeeded: ctypes.POINTER(ctypes.c_ulong)
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int
### Response:
def getFileSecurity(
self,
fileName,
securityInformation,
securityDescriptor,
lengthSecurityDescriptorBuffer,
lengthNeeded,
dokanFileInfo,
):
"""Get security attributes of a file.
:param fileName: name of file to get security for
:type fileName: ctypes.c_wchar_p
:param securityInformation: buffer for security information
:type securityInformation: PSECURITY_INFORMATION
:param securityDescriptor: buffer for security descriptor
:type securityDescriptor: PSECURITY_DESCRIPTOR
:param lengthSecurityDescriptorBuffer: length of descriptor buffer
:type lengthSecurityDescriptorBuffer: ctypes.c_ulong
:param lengthNeeded: length needed for the buffer
:type lengthNeeded: ctypes.POINTER(ctypes.c_ulong)
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int
"""
return self.operations('getFileSecurity', fileName) |
def _histogram(self, which, mu, sigma, data):
"""plot a histogram. For internal use only"""
weights = np.ones_like(data)/len(data) # make bar heights sum to 100%
n, bins, patches = plt.hist(data, bins=25, weights=weights, facecolor='blue', alpha=0.5)
plt.title(r'%s %s: $\mu=%.2f$, $\sigma=%.2f$' % (self.name, which.capitalize(), mu, sigma))
plt.xlabel('Items' if which == 'count' else 'Seconds')
plt.ylabel('Frequency')
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda y, position: "{:.1f}%".format(y*100))) | plot a histogram. For internal use only | Below is the instruction that describes the task:
### Input:
plot a histogram. For internal use only
### Response:
def _histogram(self, which, mu, sigma, data):
"""plot a histogram. For internal use only"""
weights = np.ones_like(data)/len(data) # make bar heights sum to 100%
n, bins, patches = plt.hist(data, bins=25, weights=weights, facecolor='blue', alpha=0.5)
plt.title(r'%s %s: $\mu=%.2f$, $\sigma=%.2f$' % (self.name, which.capitalize(), mu, sigma))
plt.xlabel('Items' if which == 'count' else 'Seconds')
plt.ylabel('Frequency')
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda y, position: "{:.1f}%".format(y*100))) |
def set_distributed_assembled(self, irn_loc, jcn_loc, a_loc):
"""Set the distributed assembled matrix.
Distributed assembled matrices require setting icntl(18) != 0.
"""
self.set_distributed_assembled_rows_cols(irn_loc, jcn_loc)
self.set_distributed_assembled_values(a_loc) | Set the distributed assembled matrix.
Distributed assembled matrices require setting icntl(18) != 0. | Below is the instruction that describes the task:
### Input:
Set the distributed assembled matrix.
Distributed assembled matrices require setting icntl(18) != 0.
### Response:
def set_distributed_assembled(self, irn_loc, jcn_loc, a_loc):
"""Set the distributed assembled matrix.
Distributed assembled matrices require setting icntl(18) != 0.
"""
self.set_distributed_assembled_rows_cols(irn_loc, jcn_loc)
self.set_distributed_assembled_values(a_loc) |
def put_pixel(self, x: int, y: int, color: Tuple[int, int, int]) -> None:
"""Change a pixel on this Image.
Args:
x (int): X pixel of the Image. Starting from the left at 0.
y (int): Y pixel of the Image. Starting from the top at 0.
color (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
"""
lib.TCOD_image_put_pixel(self.image_c, x, y, color) | Change a pixel on this Image.
Args:
x (int): X pixel of the Image. Starting from the left at 0.
y (int): Y pixel of the Image. Starting from the top at 0.
color (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance. | Below is the instruction that describes the task:
### Input:
Change a pixel on this Image.
Args:
x (int): X pixel of the Image. Starting from the left at 0.
y (int): Y pixel of the Image. Starting from the top at 0.
color (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
### Response:
def put_pixel(self, x: int, y: int, color: Tuple[int, int, int]) -> None:
"""Change a pixel on this Image.
Args:
x (int): X pixel of the Image. Starting from the left at 0.
y (int): Y pixel of the Image. Starting from the top at 0.
color (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
"""
lib.TCOD_image_put_pixel(self.image_c, x, y, color) |
def head(self, url: StrOrURL, *, allow_redirects: bool=False,
**kwargs: Any) -> '_RequestContextManager':
"""Perform HTTP HEAD request."""
return _RequestContextManager(
self._request(hdrs.METH_HEAD, url,
allow_redirects=allow_redirects,
**kwargs)) | Perform HTTP HEAD request. | Below is the instruction that describes the task:
### Input:
Perform HTTP HEAD request.
### Response:
def head(self, url: StrOrURL, *, allow_redirects: bool=False,
**kwargs: Any) -> '_RequestContextManager':
"""Perform HTTP HEAD request."""
return _RequestContextManager(
self._request(hdrs.METH_HEAD, url,
allow_redirects=allow_redirects,
**kwargs)) |
def confusion_matrix(exp, obs):
"""Create a confusion matrix
In each axis of the resulting confusion matrix the negative case is
0-index and the positive case 1-index. The labels get sorted, in a
True/False scenario true positives will occur at (1,1). The first dimension
(rows) of the resulting matrix is the expected class and the second
dimension (columns) is the observed class.
:param exp: expected values
:type exp: list of float
:param obs: observed values
:type obs: list of float
:rtype: tuple of square matrix and sorted labels
"""
assert len(exp) == len(obs)
# Expected in the first dimension (0;rows), observed in the second (1;cols)
lbls = sorted(set(exp))
res = numpy.zeros(shape=(len(lbls), len(lbls)))
for i in range(len(exp)):
res[lbls.index(exp[i]), lbls.index(obs[i])] += 1
return res, lbls | Create a confusion matrix
In each axis of the resulting confusion matrix the negative case is
0-index and the positive case 1-index. The labels get sorted, in a
True/False scenario true positives will occur at (1,1). The first dimension
(rows) of the resulting matrix is the expected class and the second
dimension (columns) is the observed class.
:param exp: expected values
:type exp: list of float
:param obs: observed values
:type obs: list of float
:rtype: tuple of square matrix and sorted labels | Below is the instruction that describes the task:
### Input:
Create a confusion matrix
In each axis of the resulting confusion matrix the negative case is
0-index and the positive case 1-index. The labels get sorted, in a
True/False scenario true positives will occur at (1,1). The first dimension
(rows) of the resulting matrix is the expected class and the second
dimension (columns) is the observed class.
:param exp: expected values
:type exp: list of float
:param obs: observed values
:type obs: list of float
:rtype: tuple of square matrix and sorted labels
### Response:
def confusion_matrix(exp, obs):
"""Create a confusion matrix
In each axis of the resulting confusion matrix the negative case is
0-index and the positive case 1-index. The labels get sorted, in a
True/False scenario true positives will occur at (1,1). The first dimension
(rows) of the resulting matrix is the expected class and the second
dimension (columns) is the observed class.
:param exp: expected values
:type exp: list of float
:param obs: observed values
:type obs: list of float
:rtype: tuple of square matrix and sorted labels
"""
assert len(exp) == len(obs)
# Expected in the first dimension (0;rows), observed in the second (1;cols)
lbls = sorted(set(exp))
res = numpy.zeros(shape=(len(lbls), len(lbls)))
for i in range(len(exp)):
res[lbls.index(exp[i]), lbls.index(obs[i])] += 1
return res, lbls |
def attachment_both(self, files, parentid=None):
"""
Add child attachments using title, filename
Arguments:
One or more lists or tuples containing title, file path
An optional Item ID, which will create child attachments
"""
orig = self._attachment_template("imported_file")
to_add = [orig.copy() for f in files]
for idx, tmplt in enumerate(to_add):
tmplt["title"] = files[idx][0]
tmplt["filename"] = files[idx][1]
if parentid:
return self._attachment(to_add, parentid)
else:
return self._attachment(to_add) | Add child attachments using title, filename
Arguments:
One or more lists or tuples containing title, file path
An optional Item ID, which will create child attachments | Below is the instruction that describes the task:
### Input:
Add child attachments using title, filename
Arguments:
One or more lists or tuples containing title, file path
An optional Item ID, which will create child attachments
### Response:
def attachment_both(self, files, parentid=None):
"""
Add child attachments using title, filename
Arguments:
One or more lists or tuples containing title, file path
An optional Item ID, which will create child attachments
"""
orig = self._attachment_template("imported_file")
to_add = [orig.copy() for f in files]
for idx, tmplt in enumerate(to_add):
tmplt["title"] = files[idx][0]
tmplt["filename"] = files[idx][1]
if parentid:
return self._attachment(to_add, parentid)
else:
return self._attachment(to_add) |
def terminal_cfg_line_sessionid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
terminal_cfg = ET.SubElement(config, "terminal-cfg", xmlns="urn:brocade.com:mgmt:brocade-terminal")
line = ET.SubElement(terminal_cfg, "line")
sessionid = ET.SubElement(line, "sessionid")
sessionid.text = kwargs.pop('sessionid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def terminal_cfg_line_sessionid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
terminal_cfg = ET.SubElement(config, "terminal-cfg", xmlns="urn:brocade.com:mgmt:brocade-terminal")
line = ET.SubElement(terminal_cfg, "line")
sessionid = ET.SubElement(line, "sessionid")
sessionid.text = kwargs.pop('sessionid')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def make_headers(context: TraceContext) -> Headers:
"""Creates dict with zipkin headers from supplied trace context.
"""
headers = {
TRACE_ID_HEADER: context.trace_id,
SPAN_ID_HEADER: context.span_id,
FLAGS_HEADER: '0',
SAMPLED_ID_HEADER: '1' if context.sampled else '0',
}
if context.parent_id is not None:
headers[PARENT_ID_HEADER] = context.parent_id
return headers | Creates dict with zipkin headers from supplied trace context. | Below is the the instruction that describes the task:
### Input:
Creates dict with zipkin headers from supplied trace context.
### Response:
def make_headers(context: TraceContext) -> Headers:
"""Creates dict with zipkin headers from supplied trace context.
"""
headers = {
TRACE_ID_HEADER: context.trace_id,
SPAN_ID_HEADER: context.span_id,
FLAGS_HEADER: '0',
SAMPLED_ID_HEADER: '1' if context.sampled else '0',
}
if context.parent_id is not None:
headers[PARENT_ID_HEADER] = context.parent_id
return headers |
def get_class_info(self, xqdm, kcdm, jxbh):
"""
获取教学班详情, 包括上课时间地点, 考查方式, 老师, 选中人数, 课程容量等等信息
@structure {'校区': str,'开课单位': str,'考核类型': str,'课程类型': str,'课程名称': str,'教学班号': str,'起止周': str,
'时间地点': str,'学分': float,'性别限制': str,'优选范围': str,'禁选范围': str,'选中人数': int,'备注': str}
:param xqdm: 学期代码
:param kcdm: 课程代码
:param jxbh: 教学班号
"""
return self.query(GetClassInfo(xqdm, kcdm, jxbh)) | 获取教学班详情, 包括上课时间地点, 考查方式, 老师, 选中人数, 课程容量等等信息
@structure {'校区': str,'开课单位': str,'考核类型': str,'课程类型': str,'课程名称': str,'教学班号': str,'起止周': str,
'时间地点': str,'学分': float,'性别限制': str,'优选范围': str,'禁选范围': str,'选中人数': int,'备注': str}
:param xqdm: 学期代码
:param kcdm: 课程代码
:param jxbh: 教学班号 | Below is the instruction that describes the task:
### Input:
获取教学班详情, 包括上课时间地点, 考查方式, 老师, 选中人数, 课程容量等等信息
@structure {'校区': str,'开课单位': str,'考核类型': str,'课程类型': str,'课程名称': str,'教学班号': str,'起止周': str,
'时间地点': str,'学分': float,'性别限制': str,'优选范围': str,'禁选范围': str,'选中人数': int,'备注': str}
:param xqdm: 学期代码
:param kcdm: 课程代码
:param jxbh: 教学班号
### Response:
def get_class_info(self, xqdm, kcdm, jxbh):
"""
获取教学班详情, 包括上课时间地点, 考查方式, 老师, 选中人数, 课程容量等等信息
@structure {'校区': str,'开课单位': str,'考核类型': str,'课程类型': str,'课程名称': str,'教学班号': str,'起止周': str,
'时间地点': str,'学分': float,'性别限制': str,'优选范围': str,'禁选范围': str,'选中人数': int,'备注': str}
:param xqdm: 学期代码
:param kcdm: 课程代码
:param jxbh: 教学班号
"""
return self.query(GetClassInfo(xqdm, kcdm, jxbh)) |
def encode_pb_list(pb_spans):
"""Encode list of protobuf Spans to binary.
:param pb_spans: list of protobuf Spans.
:type pb_spans: list of zipkin_pb2.Span
:return: encoded list.
:rtype: bytes
"""
pb_list = zipkin_pb2.ListOfSpans()
pb_list.spans.extend(pb_spans)
return pb_list.SerializeToString() | Encode list of protobuf Spans to binary.
:param pb_spans: list of protobuf Spans.
:type pb_spans: list of zipkin_pb2.Span
:return: encoded list.
:rtype: bytes | Below is the instruction that describes the task:
### Input:
Encode list of protobuf Spans to binary.
:param pb_spans: list of protobuf Spans.
:type pb_spans: list of zipkin_pb2.Span
:return: encoded list.
:rtype: bytes
### Response:
def encode_pb_list(pb_spans):
"""Encode list of protobuf Spans to binary.
:param pb_spans: list of protobuf Spans.
:type pb_spans: list of zipkin_pb2.Span
:return: encoded list.
:rtype: bytes
"""
pb_list = zipkin_pb2.ListOfSpans()
pb_list.spans.extend(pb_spans)
return pb_list.SerializeToString() |
def uinit(self, ushape):
"""Return initialiser for working variable U."""
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
Yss = np.sqrt(np.sum(self.Y[..., 0:-1]**2, axis=self.S.ndim,
keepdims=True))
U0 = (self.lmbda/self.rho)*sl.zdivide(self.Y[..., 0:-1], Yss)
U1 = (1.0 / self.rho)*np.sign(self.Y[..., -1:])
return np.concatenate((U0, U1), axis=self.S.ndim) | Return initialiser for working variable U. | Below is the instruction that describes the task:
### Input:
Return initialiser for working variable U.
### Response:
def uinit(self, ushape):
"""Return initialiser for working variable U."""
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
Yss = np.sqrt(np.sum(self.Y[..., 0:-1]**2, axis=self.S.ndim,
keepdims=True))
U0 = (self.lmbda/self.rho)*sl.zdivide(self.Y[..., 0:-1], Yss)
U1 = (1.0 / self.rho)*np.sign(self.Y[..., -1:])
return np.concatenate((U0, U1), axis=self.S.ndim) |
def atomic_sa(self, i):
r"""Calculate atomic surface area.
:type i: int
:param i: atom index
:rtype: float
"""
sa = 4.0 * np.pi * self.rads2[i]
neighbors = self.neighbors.get(i)
if neighbors is None:
return sa
XYZi = self.xyzs[i, np.newaxis].T
sphere = self.sphere * self.rads[i] + XYZi
N = sphere.shape[1]
for j, _ in neighbors:
XYZj = self.xyzs[j, np.newaxis].T
d2 = (sphere - XYZj) ** 2
mask = (d2[0] + d2[1] + d2[2]) > self.rads2[j]
sphere = np.compress(mask, sphere, axis=1)
return sa * sphere.shape[1] / N | r"""Calculate atomic surface area.
:type i: int
:param i: atom index
:rtype: float | Below is the instruction that describes the task:
### Input:
r"""Calculate atomic surface area.
:type i: int
:param i: atom index
:rtype: float
### Response:
def atomic_sa(self, i):
r"""Calculate atomic surface area.
:type i: int
:param i: atom index
:rtype: float
"""
sa = 4.0 * np.pi * self.rads2[i]
neighbors = self.neighbors.get(i)
if neighbors is None:
return sa
XYZi = self.xyzs[i, np.newaxis].T
sphere = self.sphere * self.rads[i] + XYZi
N = sphere.shape[1]
for j, _ in neighbors:
XYZj = self.xyzs[j, np.newaxis].T
d2 = (sphere - XYZj) ** 2
mask = (d2[0] + d2[1] + d2[2]) > self.rads2[j]
sphere = np.compress(mask, sphere, axis=1)
return sa * sphere.shape[1] / N |
def toHierarchy(self, classView, level, stream, lastChild=False):
'''
**Parameters**
``classView`` (bool)
``True`` if generating the Class Hierarchy, ``False`` for File Hierarchy.
``level`` (int)
Recursion level used to determine indentation.
``stream`` (StringIO)
The stream to write the contents to.
``lastChild`` (bool)
When :data:`~exhale.configs.createTreeView` is ``True`` and
:data:`~exhale.configs.treeViewIsBootstrap` is ``False``, the generated
HTML ``li`` elements need to add a ``class="lastChild"`` to use the
appropriate styling.
.. todo:: add thorough documentation of this
'''
if self.inHierarchy(classView):
# For the Tree Views, we need to know if there are nested children before
# writing anything. If there are, we need to open a new list
nested_children = self.hierarchySortedDirectDescendants(classView)
############################################################################
# Write out this node. #
############################################################################
# Easy case: just write another bullet point
if not configs.createTreeView:
stream.write("{indent}- :ref:`{link}`\n".format(
indent=' ' * level,
link=self.link_name
))
# Otherwise, we're generating some raw HTML and/or JavaScript depending on
# whether we are using bootstrap or not
else:
# Declare the relevant links needed for the Tree Views
indent = " " * (level * 2)
next_indent = " {0}".format(indent)
# turn double underscores into underscores, then underscores into hyphens
html_link = self.link_name.replace("__", "_").replace("_", "-")
href = "{file}.html#{anchor}".format(
file=self.file_name.rsplit(".rst", 1)[0],
anchor=html_link
)
# should always have at least two parts (templates will have more)
title_as_link_parts = self.title.split(" ")
if self.template_params:
# E.g. 'Template Class Foo'
q_start = 0
q_end = 2
else:
# E.g. 'Class Foo'
q_start = 0
q_end = 1
# the qualifier will not be part of the hyperlink (for clarity of
# navigation), the link_title will be
qualifier = " ".join(title_as_link_parts[q_start:q_end])
link_title = " ".join(title_as_link_parts[q_end:])
link_title = link_title.replace("&", "&").replace("<", "<").replace(">", ">")
# the actual text / link inside of the list item
li_text = '{qualifier} <a href="{href}">{link_title}</a>'.format(
qualifier=qualifier,
href=href,
link_title=link_title
)
if configs.treeViewIsBootstrap:
text = "text: \"<span class=\\\"{span_cls}\\\">{qualifier}</span> {link_title}\"".format(
span_cls=configs.treeViewBootstrapTextSpanClass,
qualifier=qualifier,
link_title=link_title
)
link = "href: \"{href}\"".format(href=href)
# write some json data, something like
# {
# text: "<span class=\\\"text-muted\\\"> some text",
# href: "link to actual item",
# selectable: false,
stream.write("{indent}{{\n{next_indent}{text},\n".format(
indent=indent,
next_indent=next_indent,
text=text
))
stream.write("{next_indent}{link},\n{next_indent}selectable: false,\n".format(
next_indent=next_indent,
link=link
))
# if requested, add the badge indicating how many children there are
# only add this if there are children
if configs.treeViewBootstrapUseBadgeTags and nested_children:
stream.write("{next_indent}tags: ['{num_children}'],\n".format(
next_indent=next_indent,
num_children=len(nested_children)
))
if nested_children:
# If there are children then `nodes: [ ... ]` will be next
stream.write("\n{next_indent}nodes: [\n".format(next_indent=next_indent))
else:
# Otherwise, this element is ending. JavaScript doesn't care
# about trailing commas :)
stream.write("{indent}}},\n".format(indent=indent))
else:
if lastChild:
opening_li = '<li class="lastChild">'
else:
opening_li = "<li>"
if nested_children:
# write this list element and begin the next list
# writes something like
# <li>
# some text with an href
# <ul>
#
# the <ul> started here gets closed below
stream.write("{indent}{li}\n{next_indent}{li_text}\n{next_indent}<ul>\n".format(
indent=indent,
li=opening_li,
next_indent=next_indent,
li_text=li_text
))
else:
# write this list element and end it now (since no children)
# writes something like
# <li>
# some text with an href
# </li>
stream.write("{indent}{li}{li_text}</li>\n".format(
indent=indent,
li=opening_li,
li_text=li_text
))
############################################################################
# Write out all of the children (if there are any). #
############################################################################
last_child_index = len(nested_children) - 1
child_idx = 0
for child in nested_children:
child.toHierarchy(classView, level + 1, stream, child_idx == last_child_index)
child_idx += 1
############################################################################
# If there were children, close the lists we started above. #
############################################################################
if configs.createTreeView and nested_children:
if configs.treeViewIsBootstrap:
# close the `nodes: [ ... ]` and final } for element
# the final comma IS necessary, and extra commas don't matter in javascript
stream.write("{next_indent}]\n{indent}}},\n".format(
next_indent=next_indent,
indent=indent
))
else:
stream.write("{next_indent}</ul>\n{indent}</li>\n".format(
next_indent=next_indent,
indent=indent
)) | **Parameters**
``classView`` (bool)
``True`` if generating the Class Hierarchy, ``False`` for File Hierarchy.
``level`` (int)
Recursion level used to determine indentation.
``stream`` (StringIO)
The stream to write the contents to.
``lastChild`` (bool)
When :data:`~exhale.configs.createTreeView` is ``True`` and
:data:`~exhale.configs.treeViewIsBootstrap` is ``False``, the generated
HTML ``li`` elements need to add a ``class="lastChild"`` to use the
appropriate styling.
.. todo:: add thorough documentation of this | Below is the instruction that describes the task:
### Input:
**Parameters**
``classView`` (bool)
``True`` if generating the Class Hierarchy, ``False`` for File Hierarchy.
``level`` (int)
Recursion level used to determine indentation.
``stream`` (StringIO)
The stream to write the contents to.
``lastChild`` (bool)
When :data:`~exhale.configs.createTreeView` is ``True`` and
:data:`~exhale.configs.treeViewIsBootstrap` is ``False``, the generated
HTML ``li`` elements need to add a ``class="lastChild"`` to use the
appropriate styling.
.. todo:: add thorough documentation of this
### Response:
def toHierarchy(self, classView, level, stream, lastChild=False):
'''
**Parameters**
``classView`` (bool)
``True`` if generating the Class Hierarchy, ``False`` for File Hierarchy.
``level`` (int)
Recursion level used to determine indentation.
``stream`` (StringIO)
The stream to write the contents to.
``lastChild`` (bool)
When :data:`~exhale.configs.createTreeView` is ``True`` and
:data:`~exhale.configs.treeViewIsBootstrap` is ``False``, the generated
HTML ``li`` elements need to add a ``class="lastChild"`` to use the
appropriate styling.
.. todo:: add thorough documentation of this
'''
if self.inHierarchy(classView):
# For the Tree Views, we need to know if there are nested children before
# writing anything. If there are, we need to open a new list
nested_children = self.hierarchySortedDirectDescendants(classView)
############################################################################
# Write out this node. #
############################################################################
# Easy case: just write another bullet point
if not configs.createTreeView:
stream.write("{indent}- :ref:`{link}`\n".format(
indent=' ' * level,
link=self.link_name
))
# Otherwise, we're generating some raw HTML and/or JavaScript depending on
# whether we are using bootstrap or not
else:
# Declare the relevant links needed for the Tree Views
indent = " " * (level * 2)
next_indent = " {0}".format(indent)
# turn double underscores into underscores, then underscores into hyphens
html_link = self.link_name.replace("__", "_").replace("_", "-")
href = "{file}.html#{anchor}".format(
file=self.file_name.rsplit(".rst", 1)[0],
anchor=html_link
)
# should always have at least two parts (templates will have more)
title_as_link_parts = self.title.split(" ")
if self.template_params:
# E.g. 'Template Class Foo'
q_start = 0
q_end = 2
else:
# E.g. 'Class Foo'
q_start = 0
q_end = 1
# the qualifier will not be part of the hyperlink (for clarity of
# navigation), the link_title will be
qualifier = " ".join(title_as_link_parts[q_start:q_end])
link_title = " ".join(title_as_link_parts[q_end:])
link_title = link_title.replace("&", "&").replace("<", "<").replace(">", ">")
# the actual text / link inside of the list item
li_text = '{qualifier} <a href="{href}">{link_title}</a>'.format(
qualifier=qualifier,
href=href,
link_title=link_title
)
if configs.treeViewIsBootstrap:
text = "text: \"<span class=\\\"{span_cls}\\\">{qualifier}</span> {link_title}\"".format(
span_cls=configs.treeViewBootstrapTextSpanClass,
qualifier=qualifier,
link_title=link_title
)
link = "href: \"{href}\"".format(href=href)
# write some json data, something like
# {
# text: "<span class=\\\"text-muted\\\"> some text",
# href: "link to actual item",
# selectable: false,
stream.write("{indent}{{\n{next_indent}{text},\n".format(
indent=indent,
next_indent=next_indent,
text=text
))
stream.write("{next_indent}{link},\n{next_indent}selectable: false,\n".format(
next_indent=next_indent,
link=link
))
# if requested, add the badge indicating how many children there are
# only add this if there are children
if configs.treeViewBootstrapUseBadgeTags and nested_children:
stream.write("{next_indent}tags: ['{num_children}'],\n".format(
next_indent=next_indent,
num_children=len(nested_children)
))
if nested_children:
# If there are children then `nodes: [ ... ]` will be next
stream.write("\n{next_indent}nodes: [\n".format(next_indent=next_indent))
else:
# Otherwise, this element is ending. JavaScript doesn't care
# about trailing commas :)
stream.write("{indent}}},\n".format(indent=indent))
else:
if lastChild:
opening_li = '<li class="lastChild">'
else:
opening_li = "<li>"
if nested_children:
# write this list element and begin the next list
# writes something like
# <li>
# some text with an href
# <ul>
#
# the <ul> started here gets closed below
stream.write("{indent}{li}\n{next_indent}{li_text}\n{next_indent}<ul>\n".format(
indent=indent,
li=opening_li,
next_indent=next_indent,
li_text=li_text
))
else:
# write this list element and end it now (since no children)
# writes something like
# <li>
# some text with an href
# </li>
stream.write("{indent}{li}{li_text}</li>\n".format(
indent=indent,
li=opening_li,
li_text=li_text
))
############################################################################
# Write out all of the children (if there are any). #
############################################################################
last_child_index = len(nested_children) - 1
child_idx = 0
for child in nested_children:
child.toHierarchy(classView, level + 1, stream, child_idx == last_child_index)
child_idx += 1
############################################################################
# If there were children, close the lists we started above. #
############################################################################
if configs.createTreeView and nested_children:
if configs.treeViewIsBootstrap:
# close the `nodes: [ ... ]` and final } for element
# the final comma IS necessary, and extra commas don't matter in javascript
stream.write("{next_indent}]\n{indent}}},\n".format(
next_indent=next_indent,
indent=indent
))
else:
stream.write("{next_indent}</ul>\n{indent}</li>\n".format(
next_indent=next_indent,
indent=indent
)) |
def handle_signal(self, signum, frame):
"""
handles UNIX signals
This function currently just handles SIGUSR1. It could be extended to
handle more
:param signum: The signal number (see man 7 signal)
:param frame: The execution frame
(https://docs.python.org/2/reference/datamodel.html#frame-objects)
"""
# it is a SIGINT ?
if signum == signal.SIGINT:
logging.info('shut down cleanly')
asyncio.ensure_future(self.apply_command(globals.ExitCommand()))
elif signum == signal.SIGUSR1:
if isinstance(self.current_buffer, SearchBuffer):
self.current_buffer.rebuild()
self.update() | handles UNIX signals
This function currently just handles SIGUSR1. It could be extended to
handle more
:param signum: The signal number (see man 7 signal)
:param frame: The execution frame
(https://docs.python.org/2/reference/datamodel.html#frame-objects) | Below is the instruction that describes the task:
### Input:
handles UNIX signals
This function currently just handles SIGUSR1. It could be extended to
handle more
:param signum: The signal number (see man 7 signal)
:param frame: The execution frame
(https://docs.python.org/2/reference/datamodel.html#frame-objects)
### Response:
def handle_signal(self, signum, frame):
"""
handles UNIX signals
This function currently just handles SIGUSR1. It could be extended to
handle more
:param signum: The signal number (see man 7 signal)
:param frame: The execution frame
(https://docs.python.org/2/reference/datamodel.html#frame-objects)
"""
# it is a SIGINT ?
if signum == signal.SIGINT:
logging.info('shut down cleanly')
asyncio.ensure_future(self.apply_command(globals.ExitCommand()))
elif signum == signal.SIGUSR1:
if isinstance(self.current_buffer, SearchBuffer):
self.current_buffer.rebuild()
self.update() |
def htmlNewDocNoDtD(URI, ExternalID):
"""Creates a new HTML document without a DTD node if @URI and
@ExternalID are None """
ret = libxml2mod.htmlNewDocNoDtD(URI, ExternalID)
if ret is None:raise treeError('htmlNewDocNoDtD() failed')
return xmlDoc(_obj=ret) | Creates a new HTML document without a DTD node if @URI and
@ExternalID are None | Below is the instruction that describes the task:
### Input:
Creates a new HTML document without a DTD node if @URI and
@ExternalID are None
### Response:
def htmlNewDocNoDtD(URI, ExternalID):
"""Creates a new HTML document without a DTD node if @URI and
@ExternalID are None """
ret = libxml2mod.htmlNewDocNoDtD(URI, ExternalID)
if ret is None:raise treeError('htmlNewDocNoDtD() failed')
return xmlDoc(_obj=ret) |
def reset(self, params, repetition):
"""
Called once at the beginning of each experiment.
"""
self.startTime = time.time()
print(params)
seed = params["seed"] + repetition
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# Get our directories correct
self.dataDir = os.path.join(params["datadir"], "speech_commands")
self.resultsDir = os.path.join(params["path"], params["name"], "plots")
if not os.path.exists(self.resultsDir):
os.makedirs(self.resultsDir)
self.use_cuda = not params["no_cuda"] and torch.cuda.is_available()
if self.use_cuda:
print("*********using cuda!")
self.device = torch.device("cuda" if self.use_cuda else "cpu")
self.use_preprocessed_dataset = False
self.loadDatasets(params)
# Parse 'n' and 'k' parameters
n = params["n"]
k = params["k"]
if isinstance(n, basestring):
n = map(int, n.split("_"))
if isinstance(k, basestring):
k = map(int, k.split("_"))
if params["model_type"] == "cnn":
c1_out_channels = params["c1_out_channels"]
c1_k = params["c1_k"]
if isinstance(c1_out_channels, basestring):
c1_out_channels = map(int, c1_out_channels.split("_"))
if isinstance(c1_k, basestring):
c1_k = map(int, c1_k.split("_"))
# Parse 'c1_input_shape; parameter
if "c1_input_shape" in params:
c1_input_shape = map(int, params["c1_input_shape"].split("_"))
else:
c1_input_shape = (1, 32, 32)
sp_model = SparseNet(
inputSize=c1_input_shape,
outputSize=len(self.train_loader.dataset.classes),
outChannels=c1_out_channels,
c_k=c1_k,
kernelSize=5,
stride=1,
dropout=params["dropout"],
n=n,
k=k,
boostStrength=params["boost_strength"],
weightSparsity=params["weight_sparsity"],
weightSparsityCNN=params["weight_sparsity_cnn"],
boostStrengthFactor=params["boost_strength_factor"],
kInferenceFactor=params["k_inference_factor"],
useBatchNorm=params["use_batch_norm"],
normalizeWeights=params.get("normalize_weights", False)
)
elif params["model_type"] == "resnet9":
sp_model = resnet9(num_classes=len(self.train_loader.dataset.classes),
in_channels=1)
elif params["model_type"] == "linear":
sp_model = SparseNet(
n=n,
k=k,
inputSize=32*32,
outputSize=len(self.train_loader.dataset.classes),
boostStrength=params["boost_strength"],
weightSparsity=params["weight_sparsity"],
boostStrengthFactor=params["boost_strength_factor"],
kInferenceFactor=params["k_inference_factor"],
dropout=params["dropout"],
useBatchNorm=params["use_batch_norm"],
normalizeWeights=params.get("normalize_weights", False)
)
else:
raise RuntimeError("Unknown model type")
if torch.cuda.device_count() > 1:
print("Using", torch.cuda.device_count(), "GPUs")
sp_model = torch.nn.DataParallel(sp_model)
self.model = sp_model.to(self.device)
self.learningRate = params["learning_rate"]
self.optimizer = self.createOptimizer(params, self.model)
self.lr_scheduler = self.createLearningRateScheduler(params, self.optimizer) | Called once at the beginning of each experiment. | Below is the instruction that describes the task:
### Input:
Called once at the beginning of each experiment.
### Response:
def reset(self, params, repetition):
"""
Called once at the beginning of each experiment.
"""
self.startTime = time.time()
print(params)
seed = params["seed"] + repetition
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# Get our directories correct
self.dataDir = os.path.join(params["datadir"], "speech_commands")
self.resultsDir = os.path.join(params["path"], params["name"], "plots")
if not os.path.exists(self.resultsDir):
os.makedirs(self.resultsDir)
self.use_cuda = not params["no_cuda"] and torch.cuda.is_available()
if self.use_cuda:
print("*********using cuda!")
self.device = torch.device("cuda" if self.use_cuda else "cpu")
self.use_preprocessed_dataset = False
self.loadDatasets(params)
# Parse 'n' and 'k' parameters
n = params["n"]
k = params["k"]
if isinstance(n, basestring):
n = map(int, n.split("_"))
if isinstance(k, basestring):
k = map(int, k.split("_"))
if params["model_type"] == "cnn":
c1_out_channels = params["c1_out_channels"]
c1_k = params["c1_k"]
if isinstance(c1_out_channels, basestring):
c1_out_channels = map(int, c1_out_channels.split("_"))
if isinstance(c1_k, basestring):
c1_k = map(int, c1_k.split("_"))
# Parse 'c1_input_shape; parameter
if "c1_input_shape" in params:
c1_input_shape = map(int, params["c1_input_shape"].split("_"))
else:
c1_input_shape = (1, 32, 32)
sp_model = SparseNet(
inputSize=c1_input_shape,
outputSize=len(self.train_loader.dataset.classes),
outChannels=c1_out_channels,
c_k=c1_k,
kernelSize=5,
stride=1,
dropout=params["dropout"],
n=n,
k=k,
boostStrength=params["boost_strength"],
weightSparsity=params["weight_sparsity"],
weightSparsityCNN=params["weight_sparsity_cnn"],
boostStrengthFactor=params["boost_strength_factor"],
kInferenceFactor=params["k_inference_factor"],
useBatchNorm=params["use_batch_norm"],
normalizeWeights=params.get("normalize_weights", False)
)
elif params["model_type"] == "resnet9":
sp_model = resnet9(num_classes=len(self.train_loader.dataset.classes),
in_channels=1)
elif params["model_type"] == "linear":
sp_model = SparseNet(
n=n,
k=k,
inputSize=32*32,
outputSize=len(self.train_loader.dataset.classes),
boostStrength=params["boost_strength"],
weightSparsity=params["weight_sparsity"],
boostStrengthFactor=params["boost_strength_factor"],
kInferenceFactor=params["k_inference_factor"],
dropout=params["dropout"],
useBatchNorm=params["use_batch_norm"],
normalizeWeights=params.get("normalize_weights", False)
)
else:
raise RuntimeError("Unknown model type")
if torch.cuda.device_count() > 1:
print("Using", torch.cuda.device_count(), "GPUs")
sp_model = torch.nn.DataParallel(sp_model)
self.model = sp_model.to(self.device)
self.learningRate = params["learning_rate"]
self.optimizer = self.createOptimizer(params, self.model)
self.lr_scheduler = self.createLearningRateScheduler(params, self.optimizer) |
def clean(self, value):
"""
Clean the field values.
"""
if not self.create:
# No new value can be created, use the regular clean field
return super(AgnocompleteModelMultipleField, self).clean(value)
# We have to do this here before the call to "super".
# It'll be called again, but we can't find a way to "pre_clean" the
# field value before pushing it into the parent class "clean()" method.
value = self.clear_list_value(value)
# Split the actual values with the potential new values
# Numeric values will always be considered as PKs
pks = [v for v in value if v.isdigit()]
self._new_values = [v for v in value if not v.isdigit()]
qs = super(AgnocompleteModelMultipleField, self).clean(pks)
return qs | Clean the field values. | Below is the instruction that describes the task:
### Input:
Clean the field values.
### Response:
def clean(self, value):
"""
Clean the field values.
"""
if not self.create:
# No new value can be created, use the regular clean field
return super(AgnocompleteModelMultipleField, self).clean(value)
# We have to do this here before the call to "super".
# It'll be called again, but we can't find a way to "pre_clean" the
# field value before pushing it into the parent class "clean()" method.
value = self.clear_list_value(value)
# Split the actual values with the potential new values
# Numeric values will always be considered as PKs
pks = [v for v in value if v.isdigit()]
self._new_values = [v for v in value if not v.isdigit()]
qs = super(AgnocompleteModelMultipleField, self).clean(pks)
return qs |
def _backup_compresslevel(self, dirs):
"""Create a backup file with a compresslevel parameter."""
# Only supported in Python 3.7+
with ZipFile(self.zip_filename, 'w', compresslevel=self.compress_level) as backup_zip:
for path in tqdm(dirs, desc='Writing Zip Files', total=len(dirs)):
backup_zip.write(path, path[len(self.source):len(path)]) | Create a backup file with a compresslevel parameter. | Below is the instruction that describes the task:
### Input:
Create a backup file with a compresslevel parameter.
### Response:
def _backup_compresslevel(self, dirs):
"""Create a backup file with a compresslevel parameter."""
# Only supported in Python 3.7+
with ZipFile(self.zip_filename, 'w', compresslevel=self.compress_level) as backup_zip:
for path in tqdm(dirs, desc='Writing Zip Files', total=len(dirs)):
backup_zip.write(path, path[len(self.source):len(path)]) |
def get(self, token):
'''
Check the token, returns a 401 if the token is invalid.
Else open the websocket connection
'''
log.debug('In the websocket get method')
self.token = token
# close the connection, if not authenticated
if not self.application.auth.get_tok(token):
log.debug('Refusing websocket connection, bad token!')
self.send_error(401)
return
super(AllEventsHandler, self).get(token) | Check the token, returns a 401 if the token is invalid.
Else open the websocket connection | Below is the instruction that describes the task:
### Input:
Check the token, returns a 401 if the token is invalid.
Else open the websocket connection
### Response:
def get(self, token):
'''
Check the token, returns a 401 if the token is invalid.
Else open the websocket connection
'''
log.debug('In the websocket get method')
self.token = token
# close the connection, if not authenticated
if not self.application.auth.get_tok(token):
log.debug('Refusing websocket connection, bad token!')
self.send_error(401)
return
super(AllEventsHandler, self).get(token) |
def _compute_precision(references, translation, n):
"""Compute ngram precision.
Parameters
----------
references: list(list(str))
A list of references.
translation: list(str)
A translation.
n: int
Order of n-gram.
Returns
-------
matches: int
Number of matched nth order n-grams
candidates
Number of possible nth order n-grams
"""
matches = 0
candidates = 0
ref_ngram_counts = Counter()
for reference in references:
ref_ngram_counts |= _ngrams(reference, n)
trans_ngram_counts = _ngrams(translation, n)
overlap_ngram_counts = trans_ngram_counts & ref_ngram_counts
matches += sum(overlap_ngram_counts.values())
possible_matches = len(translation) - n + 1
if possible_matches > 0:
candidates += possible_matches
return matches, candidates | Compute ngram precision.
Parameters
----------
references: list(list(str))
A list of references.
translation: list(str)
A translation.
n: int
Order of n-gram.
Returns
-------
matches: int
Number of matched nth order n-grams
candidates
Number of possible nth order n-grams | Below is the instruction that describes the task:
### Input:
Compute ngram precision.
Parameters
----------
references: list(list(str))
A list of references.
translation: list(str)
A translation.
n: int
Order of n-gram.
Returns
-------
matches: int
Number of matched nth order n-grams
candidates
Number of possible nth order n-grams
### Response:
def _compute_precision(references, translation, n):
"""Compute ngram precision.
Parameters
----------
references: list(list(str))
A list of references.
translation: list(str)
A translation.
n: int
Order of n-gram.
Returns
-------
matches: int
Number of matched nth order n-grams
candidates
Number of possible nth order n-grams
"""
matches = 0
candidates = 0
ref_ngram_counts = Counter()
for reference in references:
ref_ngram_counts |= _ngrams(reference, n)
trans_ngram_counts = _ngrams(translation, n)
overlap_ngram_counts = trans_ngram_counts & ref_ngram_counts
matches += sum(overlap_ngram_counts.values())
possible_matches = len(translation) - n + 1
if possible_matches > 0:
candidates += possible_matches
return matches, candidates |
def apply(self, collection, ops, **kwargs):
"""Apply the filter to collection."""
validator = lambda obj: all(op(obj, val) for (op, val) in ops) # noqa
return [o for o in collection if validator(o)] | Apply the filter to collection. | Below is the instruction that describes the task:
### Input:
Apply the filter to collection.
### Response:
def apply(self, collection, ops, **kwargs):
"""Apply the filter to collection."""
validator = lambda obj: all(op(obj, val) for (op, val) in ops) # noqa
return [o for o in collection if validator(o)] |
def InitializeFD(
self,
Channel,
BitrateFD):
"""
Initializes a FD capable PCAN Channel
Parameters:
Channel : The handle of a FD capable PCAN Channel
BitrateFD : The speed for the communication (FD bit rate string)
Remarks:
See PCAN_BR_* values.
* parameter and values must be separated by '='
* Couples of Parameter/value must be separated by ','
* Following Parameter must be filled out: f_clock, data_brp, data_sjw, data_tseg1, data_tseg2,
nom_brp, nom_sjw, nom_tseg1, nom_tseg2.
* Following Parameters are optional (not used yet): data_ssp_offset, nom_samp
Example:
f_clock=80000000,nom_brp=10,nom_tseg1=5,nom_tseg2=2,nom_sjw=1,data_brp=4,data_tseg1=7,data_tseg2=2,data_sjw=1
Returns:
A TPCANStatus error code
"""
try:
res = self.__m_dllBasic.CAN_InitializeFD(Channel,BitrateFD)
return TPCANStatus(res)
except:
logger.error("Exception on PCANBasic.InitializeFD")
raise | Initializes a FD capable PCAN Channel
Parameters:
Channel : The handle of a FD capable PCAN Channel
BitrateFD : The speed for the communication (FD bit rate string)
Remarks:
See PCAN_BR_* values.
* parameter and values must be separated by '='
* Couples of Parameter/value must be separated by ','
* Following Parameter must be filled out: f_clock, data_brp, data_sjw, data_tseg1, data_tseg2,
nom_brp, nom_sjw, nom_tseg1, nom_tseg2.
* Following Parameters are optional (not used yet): data_ssp_offset, nom_samp
Example:
f_clock=80000000,nom_brp=10,nom_tseg1=5,nom_tseg2=2,nom_sjw=1,data_brp=4,data_tseg1=7,data_tseg2=2,data_sjw=1
Returns:
A TPCANStatus error code | Below is the instruction that describes the task:
### Input:
Initializes a FD capable PCAN Channel
Parameters:
Channel : The handle of a FD capable PCAN Channel
BitrateFD : The speed for the communication (FD bit rate string)
Remarks:
See PCAN_BR_* values.
* parameter and values must be separated by '='
* Couples of Parameter/value must be separated by ','
* Following Parameter must be filled out: f_clock, data_brp, data_sjw, data_tseg1, data_tseg2,
nom_brp, nom_sjw, nom_tseg1, nom_tseg2.
* Following Parameters are optional (not used yet): data_ssp_offset, nom_samp
Example:
f_clock=80000000,nom_brp=10,nom_tseg1=5,nom_tseg2=2,nom_sjw=1,data_brp=4,data_tseg1=7,data_tseg2=2,data_sjw=1
Returns:
A TPCANStatus error code
### Response:
def InitializeFD(
self,
Channel,
BitrateFD):
"""
Initializes a FD capable PCAN Channel
Parameters:
Channel : The handle of a FD capable PCAN Channel
BitrateFD : The speed for the communication (FD bit rate string)
Remarks:
See PCAN_BR_* values.
* parameter and values must be separated by '='
* Couples of Parameter/value must be separated by ','
* Following Parameter must be filled out: f_clock, data_brp, data_sjw, data_tseg1, data_tseg2,
nom_brp, nom_sjw, nom_tseg1, nom_tseg2.
* Following Parameters are optional (not used yet): data_ssp_offset, nom_samp
Example:
f_clock=80000000,nom_brp=10,nom_tseg1=5,nom_tseg2=2,nom_sjw=1,data_brp=4,data_tseg1=7,data_tseg2=2,data_sjw=1
Returns:
A TPCANStatus error code
"""
try:
res = self.__m_dllBasic.CAN_InitializeFD(Channel,BitrateFD)
return TPCANStatus(res)
except:
logger.error("Exception on PCANBasic.InitializeFD")
raise |
def flavor_access_add(self, flavor_id, project_id):
'''
Add a project to the flavor access list
'''
nt_ks = self.compute_conn
ret = {flavor_id: []}
flavor_accesses = nt_ks.flavor_access.add_tenant_access(flavor_id, project_id)
for project in flavor_accesses:
ret[flavor_id].append(project.tenant_id)
return ret | Add a project to the flavor access list | Below is the instruction that describes the task:
### Input:
Add a project to the flavor access list
### Response:
def flavor_access_add(self, flavor_id, project_id):
'''
Add a project to the flavor access list
'''
nt_ks = self.compute_conn
ret = {flavor_id: []}
flavor_accesses = nt_ks.flavor_access.add_tenant_access(flavor_id, project_id)
for project in flavor_accesses:
ret[flavor_id].append(project.tenant_id)
return ret |
def getHookStatus(self, *args, **kwargs):
"""
Get hook status
This endpoint will return the current status of the hook. This represents a
snapshot in time and may vary from one call to the next.
This method is deprecated in favor of listLastFires.
This method gives output: ``v1/hook-status.json#``
This method is ``deprecated``
"""
return self._makeApiCall(self.funcinfo["getHookStatus"], *args, **kwargs) | Get hook status
This endpoint will return the current status of the hook. This represents a
snapshot in time and may vary from one call to the next.
This method is deprecated in favor of listLastFires.
This method gives output: ``v1/hook-status.json#``
This method is ``deprecated`` | Below is the instruction that describes the task:
### Input:
Get hook status
This endpoint will return the current status of the hook. This represents a
snapshot in time and may vary from one call to the next.
This method is deprecated in favor of listLastFires.
This method gives output: ``v1/hook-status.json#``
This method is ``deprecated``
### Response:
def getHookStatus(self, *args, **kwargs):
"""
Get hook status
This endpoint will return the current status of the hook. This represents a
snapshot in time and may vary from one call to the next.
This method is deprecated in favor of listLastFires.
This method gives output: ``v1/hook-status.json#``
This method is ``deprecated``
"""
return self._makeApiCall(self.funcinfo["getHookStatus"], *args, **kwargs) |
def has_vary_header(response, header_query):
"""
Checks to see if the response has a given header name in its Vary header.
"""
if not response.has_header('Vary'):
return False
vary_headers = cc_delim_re.split(response['Vary'])
existing_headers = set([header.lower() for header in vary_headers])
return header_query.lower() in existing_headers | Checks to see if the response has a given header name in its Vary header. | Below is the instruction that describes the task:
### Input:
Checks to see if the response has a given header name in its Vary header.
### Response:
def has_vary_header(response, header_query):
"""
Checks to see if the response has a given header name in its Vary header.
"""
if not response.has_header('Vary'):
return False
vary_headers = cc_delim_re.split(response['Vary'])
existing_headers = set([header.lower() for header in vary_headers])
return header_query.lower() in existing_headers |
def write_right(self, msg, thread_lock=None):
""" msg may or may not be encoded """
if self.cursesScreen:
if thread_lock is not None:
thread_lock.acquire()
try:
a_msg = msg.strip()
self.cursesScreen.addstr(0, self.width + 5 - len(a_msg) - 1, a_msg.replace("\r", "").replace("\n", ""))
except:
a_msg = msg.encode('utf-8', 'replace').strip()
self.cursesScreen.addstr(0, self.width + 5 - len(a_msg) - 1, a_msg.replace("\r", "").replace("\n", ""))
self.cursesScreen.refresh()
if thread_lock is not None:
thread_lock.release() | msg may or may not be encoded | Below is the instruction that describes the task:
### Input:
msg may or may not be encoded
### Response:
def write_right(self, msg, thread_lock=None):
""" msg may or may not be encoded """
if self.cursesScreen:
if thread_lock is not None:
thread_lock.acquire()
try:
a_msg = msg.strip()
self.cursesScreen.addstr(0, self.width + 5 - len(a_msg) - 1, a_msg.replace("\r", "").replace("\n", ""))
except:
a_msg = msg.encode('utf-8', 'replace').strip()
self.cursesScreen.addstr(0, self.width + 5 - len(a_msg) - 1, a_msg.replace("\r", "").replace("\n", ""))
self.cursesScreen.refresh()
if thread_lock is not None:
thread_lock.release() |
def parse(self, mimetypes):
""" Invoke the RFC 2388 spec compliant parser """
self._parse_top_level_content_type()
link = 'tools.ietf.org/html/rfc2388'
parts = cgi.FieldStorage(
fp=self.req.stream,
environ=self.req.env,
)
if not parts:
self.fail('A payload in the body of your request is required '
'& must be encapsulated by the boundary with proper '
'headers according to RFC 2388', link)
elif len(parts) > 1:
self.fail('Currently, only 1 upload at a time is allowed. Please '
'break up your request into %s individual requests & '
'retry' % len(parts), link)
else:
self._parse_part(parts.list[0], mimetypes)
return parts | Invoke the RFC 2388 spec compliant parser | Below is the instruction that describes the task:
### Input:
Invoke the RFC 2388 spec compliant parser
### Response:
def parse(self, mimetypes):
""" Invoke the RFC 2388 spec compliant parser """
self._parse_top_level_content_type()
link = 'tools.ietf.org/html/rfc2388'
parts = cgi.FieldStorage(
fp=self.req.stream,
environ=self.req.env,
)
if not parts:
self.fail('A payload in the body of your request is required '
'& must be encapsulated by the boundary with proper '
'headers according to RFC 2388', link)
elif len(parts) > 1:
self.fail('Currently, only 1 upload at a time is allowed. Please '
'break up your request into %s individual requests & '
'retry' % len(parts), link)
else:
self._parse_part(parts.list[0], mimetypes)
return parts |
def make_router(*routings):
"""Return a WSGI application that dispatches requests to controllers """
routes = []
for routing in routings:
methods, regex, app = routing[:3]
if isinstance(methods, basestring):
methods = (methods,)
vars = routing[3] if len(routing) >= 4 else {}
routes.append((methods, re.compile(unicode(regex)), app, vars))
def router(environ, start_response):
"""Dispatch request to controllers."""
req = webob.Request(environ)
split_path_info = req.path_info.split('/')
if split_path_info[0]:
# When path_info doesn't start with a "/" this is an error or a attack => Reject request.
# An example of an URL with such a invalid path_info: http://127.0.0.1http%3A//127.0.0.1%3A80/result?...
ctx = contexts.Ctx(req)
headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
return wsgihelpers.respond_json(ctx,
dict(
apiVersion = 1,
error = dict(
code = 400, # Bad Request
message = ctx._(u"Invalid path: {0}").format(req.path_info),
),
),
headers = headers,
)(environ, start_response)
for methods, regex, app, vars in routes:
match = regex.match(req.path_info)
if match is not None:
if methods is not None and req.method not in methods:
ctx = contexts.Ctx(req)
headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
return wsgihelpers.respond_json(ctx,
dict(
apiVersion = 1,
error = dict(
code = 405,
message = ctx._(u"You cannot use HTTP {} to access this URL. Use one of {}.").format(
req.method, methods),
),
),
headers = headers,
)(environ, start_response)
if getattr(req, 'urlvars', None) is None:
req.urlvars = {}
req.urlvars.update(match.groupdict())
req.urlvars.update(vars)
req.script_name += req.path_info[:match.end()]
req.path_info = req.path_info[match.end():]
return app(req.environ, start_response)
ctx = contexts.Ctx(req)
headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
return wsgihelpers.respond_json(ctx,
dict(
apiVersion = 1,
error = dict(
code = 404, # Not Found
message = ctx._(u"Path not found: {0}").format(req.path_info),
),
),
headers = headers,
)(environ, start_response)
return router | Return a WSGI application that dispatches requests to controllers | Below is the instruction that describes the task:
### Input:
Return a WSGI application that dispatches requests to controllers
### Response:
def make_router(*routings):
"""Return a WSGI application that dispatches requests to controllers """
routes = []
for routing in routings:
methods, regex, app = routing[:3]
if isinstance(methods, basestring):
methods = (methods,)
vars = routing[3] if len(routing) >= 4 else {}
routes.append((methods, re.compile(unicode(regex)), app, vars))
def router(environ, start_response):
"""Dispatch request to controllers."""
req = webob.Request(environ)
split_path_info = req.path_info.split('/')
if split_path_info[0]:
# When path_info doesn't start with a "/" this is an error or a attack => Reject request.
# An example of an URL with such a invalid path_info: http://127.0.0.1http%3A//127.0.0.1%3A80/result?...
ctx = contexts.Ctx(req)
headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
return wsgihelpers.respond_json(ctx,
dict(
apiVersion = 1,
error = dict(
code = 400, # Bad Request
message = ctx._(u"Invalid path: {0}").format(req.path_info),
),
),
headers = headers,
)(environ, start_response)
for methods, regex, app, vars in routes:
match = regex.match(req.path_info)
if match is not None:
if methods is not None and req.method not in methods:
ctx = contexts.Ctx(req)
headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
return wsgihelpers.respond_json(ctx,
dict(
apiVersion = 1,
error = dict(
code = 405,
message = ctx._(u"You cannot use HTTP {} to access this URL. Use one of {}.").format(
req.method, methods),
),
),
headers = headers,
)(environ, start_response)
if getattr(req, 'urlvars', None) is None:
req.urlvars = {}
req.urlvars.update(match.groupdict())
req.urlvars.update(vars)
req.script_name += req.path_info[:match.end()]
req.path_info = req.path_info[match.end():]
return app(req.environ, start_response)
ctx = contexts.Ctx(req)
headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
return wsgihelpers.respond_json(ctx,
dict(
apiVersion = 1,
error = dict(
code = 404, # Not Found
message = ctx._(u"Path not found: {0}").format(req.path_info),
),
),
headers = headers,
)(environ, start_response)
return router |
def write_how_much(self, file):
""" Write component quantities to a table.
"""
report = CaseReport(self.case)
col1_header = "Attribute"
col1_width = 24
col2_header = "P (MW)"
col3_header = "Q (MVAr)"
col_width = 8
sep = "="*col1_width +" "+ "="*col_width +" "+ "="*col_width + "\n"
# Row headers
file.write(sep)
file.write("%s" % col1_header.center(col1_width))
file.write(" ")
file.write("%s" % col2_header.center(col_width))
file.write(" ")
file.write("%s" % col3_header.center(col_width))
file.write("\n")
file.write(sep)
# Rows
pgen = getattr(report, "total_pgen_capacity")
qmin, qmax = getattr(report, "total_qgen_capacity")
file.write("%s %8.1f %4.1f to %4.1f\n" %
("Total Gen Capacity".ljust(col1_width), pgen, qmin, qmax))
pgen = getattr(report, "online_pgen_capacity")
qmin, qmax = getattr(report, "online_qgen_capacity")
file.write("%s %8.1f %4.1f to %4.1f\n" %
("On-line Capacity".ljust(col1_width), pgen, qmin, qmax))
pgen = getattr(report, "actual_pgen")
qgen = getattr(report, "actual_qgen")
file.write("%s %8.1f %8.1f\n" %
("Generation (actual)".ljust(col1_width), pgen, qgen))
pd = getattr(report, "p_demand")
qd = getattr(report, "q_demand")
file.write("%s %8.1f %8.1f\n" %
("Load".ljust(col1_width), pd, qd))
pd = getattr(report, "fixed_p_demand")
qd = getattr(report, "fixed_q_demand")
file.write("%s %8.1f %8.1f\n" %
(" Fixed".ljust(col1_width), pd, qd))
pd, pmin = getattr(report, "vload_p_demand")
qd = getattr(report, "vload_q_demand")
file.write("%s %4.1f of %4.1f %8.1f\n" %
(" Despatchable".ljust(col1_width), pd, pmin, qd))
pinj = getattr(report, "shunt_pinj")
qinj = getattr(report, "shunt_qinj")
file.write("%s %8.1f %8.1f\n" %
("Shunt (inj)".ljust(col1_width), pinj, qinj))
pl, ql = getattr(report, "losses")
file.write("%s %8.1f %8.1f\n" %
("Losses (I^2 * Z)".ljust(col1_width), pl, ql))
qinj = getattr(report, "branch_qinj")
file.write("%s %8s %8.1f\n" %
("Branch Charging (inj)".ljust(col1_width), "-", qinj))
pval = getattr(report, "total_tie_pflow")
qval = getattr(report, "total_tie_qflow")
file.write("%s %8.1f %8.1f\n" %
("Total Inter-tie Flow".ljust(col1_width), pval, qval))
file.write(sep)
file.write("\n")
del report | Write component quantities to a table. | Below is the the instruction that describes the task:
### Input:
Write component quantities to a table.
### Response:
def write_how_much(self, file):
""" Write component quantities to a table.
"""
report = CaseReport(self.case)
col1_header = "Attribute"
col1_width = 24
col2_header = "P (MW)"
col3_header = "Q (MVAr)"
col_width = 8
sep = "="*col1_width +" "+ "="*col_width +" "+ "="*col_width + "\n"
# Row headers
file.write(sep)
file.write("%s" % col1_header.center(col1_width))
file.write(" ")
file.write("%s" % col2_header.center(col_width))
file.write(" ")
file.write("%s" % col3_header.center(col_width))
file.write("\n")
file.write(sep)
# Rows
pgen = getattr(report, "total_pgen_capacity")
qmin, qmax = getattr(report, "total_qgen_capacity")
file.write("%s %8.1f %4.1f to %4.1f\n" %
("Total Gen Capacity".ljust(col1_width), pgen, qmin, qmax))
pgen = getattr(report, "online_pgen_capacity")
qmin, qmax = getattr(report, "online_qgen_capacity")
file.write("%s %8.1f %4.1f to %4.1f\n" %
("On-line Capacity".ljust(col1_width), pgen, qmin, qmax))
pgen = getattr(report, "actual_pgen")
qgen = getattr(report, "actual_qgen")
file.write("%s %8.1f %8.1f\n" %
("Generation (actual)".ljust(col1_width), pgen, qgen))
pd = getattr(report, "p_demand")
qd = getattr(report, "q_demand")
file.write("%s %8.1f %8.1f\n" %
("Load".ljust(col1_width), pd, qd))
pd = getattr(report, "fixed_p_demand")
qd = getattr(report, "fixed_q_demand")
file.write("%s %8.1f %8.1f\n" %
(" Fixed".ljust(col1_width), pd, qd))
pd, pmin = getattr(report, "vload_p_demand")
qd = getattr(report, "vload_q_demand")
file.write("%s %4.1f of %4.1f %8.1f\n" %
(" Despatchable".ljust(col1_width), pd, pmin, qd))
pinj = getattr(report, "shunt_pinj")
qinj = getattr(report, "shunt_qinj")
file.write("%s %8.1f %8.1f\n" %
("Shunt (inj)".ljust(col1_width), pinj, qinj))
pl, ql = getattr(report, "losses")
file.write("%s %8.1f %8.1f\n" %
("Losses (I^2 * Z)".ljust(col1_width), pl, ql))
qinj = getattr(report, "branch_qinj")
file.write("%s %8s %8.1f\n" %
("Branch Charging (inj)".ljust(col1_width), "-", qinj))
pval = getattr(report, "total_tie_pflow")
qval = getattr(report, "total_tie_qflow")
file.write("%s %8.1f %8.1f\n" %
("Total Inter-tie Flow".ljust(col1_width), pval, qval))
file.write(sep)
file.write("\n")
del report |
def authorize_group_permission(
self, group_name, source_group_name, source_group_owner_id):
"""
This is a convenience function that wraps the "authorize group"
functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{authorize_security_group}.
"""
d = self.authorize_security_group(
group_name,
source_group_name=source_group_name,
source_group_owner_id=source_group_owner_id)
return d | This is a convenience function that wraps the "authorize group"
functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{authorize_security_group}. | Below is the the instruction that describes the task:
### Input:
This is a convenience function that wraps the "authorize group"
functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{authorize_security_group}.
### Response:
def authorize_group_permission(
self, group_name, source_group_name, source_group_owner_id):
"""
This is a convenience function that wraps the "authorize group"
functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{authorize_security_group}.
"""
d = self.authorize_security_group(
group_name,
source_group_name=source_group_name,
source_group_owner_id=source_group_owner_id)
return d |
def connect(self, *args, **kwargs):
""" Proxy to DynamoDBConnection.connect. """
self.connection = DynamoDBConnection.connect(*args, **kwargs)
self._session = kwargs.get("session")
if self._session is None:
self._session = botocore.session.get_session() | Proxy to DynamoDBConnection.connect. | Below is the the instruction that describes the task:
### Input:
Proxy to DynamoDBConnection.connect.
### Response:
def connect(self, *args, **kwargs):
""" Proxy to DynamoDBConnection.connect. """
self.connection = DynamoDBConnection.connect(*args, **kwargs)
self._session = kwargs.get("session")
if self._session is None:
self._session = botocore.session.get_session() |
def flag(request, comment_id, next=None):
"""
Flags a comment. Confirmation on GET, action on POST.
Templates: :template:`comments/flag.html`,
Context:
comment
the flagged `comments.comment` object
"""
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
# Flag on POST
if request.method == 'POST':
perform_flag(request, comment)
return next_redirect(request, fallback=next or 'comments-flag-done',
c=comment.pk)
# Render a form on GET
else:
return render_to_response('comments/flag.html',
{'comment': comment, "next": next},
template.RequestContext(request)
) | Flags a comment. Confirmation on GET, action on POST.
Templates: :template:`comments/flag.html`,
Context:
comment
the flagged `comments.comment` object | Below is the the instruction that describes the task:
### Input:
Flags a comment. Confirmation on GET, action on POST.
Templates: :template:`comments/flag.html`,
Context:
comment
the flagged `comments.comment` object
### Response:
def flag(request, comment_id, next=None):
"""
Flags a comment. Confirmation on GET, action on POST.
Templates: :template:`comments/flag.html`,
Context:
comment
the flagged `comments.comment` object
"""
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
# Flag on POST
if request.method == 'POST':
perform_flag(request, comment)
return next_redirect(request, fallback=next or 'comments-flag-done',
c=comment.pk)
# Render a form on GET
else:
return render_to_response('comments/flag.html',
{'comment': comment, "next": next},
template.RequestContext(request)
) |
def simple_generate_batch(klass, create, size, **kwargs):
"""Create a factory for the given class, and simple_generate instances."""
return make_factory(klass, **kwargs).simple_generate_batch(create, size) | Create a factory for the given class, and simple_generate instances. | Below is the the instruction that describes the task:
### Input:
Create a factory for the given class, and simple_generate instances.
### Response:
def simple_generate_batch(klass, create, size, **kwargs):
"""Create a factory for the given class, and simple_generate instances."""
return make_factory(klass, **kwargs).simple_generate_batch(create, size) |
def assert_strong_password(username, password, old_password=None):
"""Raises ValueError if the password isn't strong.
Returns the password otherwise."""
# test the length
try:
minlength = settings.MIN_PASSWORD_LENGTH
except AttributeError:
minlength = 12
if len(password) < minlength:
raise ValueError(
"Password must be at least %s characters long" % minlength)
if username is not None and username in password:
raise ValueError("Password contains username")
return _assert_password(password, old_password) | Raises ValueError if the password isn't strong.
Returns the password otherwise. | Below is the the instruction that describes the task:
### Input:
Raises ValueError if the password isn't strong.
Returns the password otherwise.
### Response:
def assert_strong_password(username, password, old_password=None):
"""Raises ValueError if the password isn't strong.
Returns the password otherwise."""
# test the length
try:
minlength = settings.MIN_PASSWORD_LENGTH
except AttributeError:
minlength = 12
if len(password) < minlength:
raise ValueError(
"Password must be at least %s characters long" % minlength)
if username is not None and username in password:
raise ValueError("Password contains username")
return _assert_password(password, old_password) |
def notification(
self,
topic_name,
topic_project=None,
custom_attributes=None,
event_types=None,
blob_name_prefix=None,
payload_format=NONE_PAYLOAD_FORMAT,
):
"""Factory: create a notification resource for the bucket.
See: :class:`.BucketNotification` for parameters.
:rtype: :class:`.BucketNotification`
"""
return BucketNotification(
self,
topic_name,
topic_project=topic_project,
custom_attributes=custom_attributes,
event_types=event_types,
blob_name_prefix=blob_name_prefix,
payload_format=payload_format,
) | Factory: create a notification resource for the bucket.
See: :class:`.BucketNotification` for parameters.
:rtype: :class:`.BucketNotification` | Below is the the instruction that describes the task:
### Input:
Factory: create a notification resource for the bucket.
See: :class:`.BucketNotification` for parameters.
:rtype: :class:`.BucketNotification`
### Response:
def notification(
self,
topic_name,
topic_project=None,
custom_attributes=None,
event_types=None,
blob_name_prefix=None,
payload_format=NONE_PAYLOAD_FORMAT,
):
"""Factory: create a notification resource for the bucket.
See: :class:`.BucketNotification` for parameters.
:rtype: :class:`.BucketNotification`
"""
return BucketNotification(
self,
topic_name,
topic_project=topic_project,
custom_attributes=custom_attributes,
event_types=event_types,
blob_name_prefix=blob_name_prefix,
payload_format=payload_format,
) |
def _get_plot_data(data, ndim=None):
"""Get plot data out of an input object
Parameters
----------
data : array-like, `phate.PHATE` or `scanpy.AnnData`
ndim : int, optional (default: None)
Minimum number of dimensions
"""
out = data
if isinstance(data, PHATE):
out = data.transform()
else:
try:
if isinstance(data, anndata.AnnData):
try:
out = data.obsm['X_phate']
except KeyError:
raise RuntimeError(
"data.obsm['X_phate'] not found. "
"Please run `sc.tl.phate(adata)` before plotting.")
except NameError:
# anndata not installed
pass
if ndim is not None and out[0].shape[0] < ndim:
if isinstance(data, PHATE):
data.set_params(n_components=ndim)
out = data.transform()
else:
raise ValueError(
"Expected at least {}-dimensional data, got {}".format(
ndim, out[0].shape[0]))
return out | Get plot data out of an input object
Parameters
----------
data : array-like, `phate.PHATE` or `scanpy.AnnData`
ndim : int, optional (default: None)
Minimum number of dimensions | Below is the the instruction that describes the task:
### Input:
Get plot data out of an input object
Parameters
----------
data : array-like, `phate.PHATE` or `scanpy.AnnData`
ndim : int, optional (default: None)
Minimum number of dimensions
### Response:
def _get_plot_data(data, ndim=None):
"""Get plot data out of an input object
Parameters
----------
data : array-like, `phate.PHATE` or `scanpy.AnnData`
ndim : int, optional (default: None)
Minimum number of dimensions
"""
out = data
if isinstance(data, PHATE):
out = data.transform()
else:
try:
if isinstance(data, anndata.AnnData):
try:
out = data.obsm['X_phate']
except KeyError:
raise RuntimeError(
"data.obsm['X_phate'] not found. "
"Please run `sc.tl.phate(adata)` before plotting.")
except NameError:
# anndata not installed
pass
if ndim is not None and out[0].shape[0] < ndim:
if isinstance(data, PHATE):
data.set_params(n_components=ndim)
out = data.transform()
else:
raise ValueError(
"Expected at least {}-dimensional data, got {}".format(
ndim, out[0].shape[0]))
return out |
def Update(self, data):
"""Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params
"""
m = len(data)
self.params[:m] += data | Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params | Below is the the instruction that describes the task:
### Input:
Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params
### Response:
def Update(self, data):
"""Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params
"""
m = len(data)
self.params[:m] += data |
def create_queue(self, queue_name, visibility_timeout=None):
"""
Create an SQS Queue.
:type queue_name: str or unicode
:param queue_name: The name of the new queue. Names are scoped to
an account and need to be unique within that
account. Calling this method on an existing
queue name will not return an error from SQS
unless the value for visibility_timeout is
different than the value of the existing queue
of that name. This is still an expensive operation,
though, and not the preferred way to check for
the existence of a queue. See the
:func:`boto.sqs.connection.SQSConnection.lookup` method.
:type visibility_timeout: int
:param visibility_timeout: The default visibility timeout for all
messages written in the queue. This can
be overridden on a per-message.
:rtype: :class:`boto.sqs.queue.Queue`
:return: The newly created queue.
"""
params = {'QueueName': queue_name}
if visibility_timeout:
params['Attribute.1.Name'] = 'VisibilityTimeout'
params['Attribute.1.Value'] = int(visibility_timeout)
return self.get_object('CreateQueue', params, Queue) | Create an SQS Queue.
:type queue_name: str or unicode
:param queue_name: The name of the new queue. Names are scoped to
an account and need to be unique within that
account. Calling this method on an existing
queue name will not return an error from SQS
unless the value for visibility_timeout is
different than the value of the existing queue
of that name. This is still an expensive operation,
though, and not the preferred way to check for
the existence of a queue. See the
:func:`boto.sqs.connection.SQSConnection.lookup` method.
:type visibility_timeout: int
:param visibility_timeout: The default visibility timeout for all
messages written in the queue. This can
be overridden on a per-message.
:rtype: :class:`boto.sqs.queue.Queue`
:return: The newly created queue. | Below is the the instruction that describes the task:
### Input:
Create an SQS Queue.
:type queue_name: str or unicode
:param queue_name: The name of the new queue. Names are scoped to
an account and need to be unique within that
account. Calling this method on an existing
queue name will not return an error from SQS
unless the value for visibility_timeout is
different than the value of the existing queue
of that name. This is still an expensive operation,
though, and not the preferred way to check for
the existence of a queue. See the
:func:`boto.sqs.connection.SQSConnection.lookup` method.
:type visibility_timeout: int
:param visibility_timeout: The default visibility timeout for all
messages written in the queue. This can
be overridden on a per-message.
:rtype: :class:`boto.sqs.queue.Queue`
:return: The newly created queue.
### Response:
def create_queue(self, queue_name, visibility_timeout=None):
"""
Create an SQS Queue.
:type queue_name: str or unicode
:param queue_name: The name of the new queue. Names are scoped to
an account and need to be unique within that
account. Calling this method on an existing
queue name will not return an error from SQS
unless the value for visibility_timeout is
different than the value of the existing queue
of that name. This is still an expensive operation,
though, and not the preferred way to check for
the existence of a queue. See the
:func:`boto.sqs.connection.SQSConnection.lookup` method.
:type visibility_timeout: int
:param visibility_timeout: The default visibility timeout for all
messages written in the queue. This can
be overridden on a per-message.
:rtype: :class:`boto.sqs.queue.Queue`
:return: The newly created queue.
"""
params = {'QueueName': queue_name}
if visibility_timeout:
params['Attribute.1.Name'] = 'VisibilityTimeout'
params['Attribute.1.Value'] = int(visibility_timeout)
return self.get_object('CreateQueue', params, Queue) |
def listDatasetArray(self):
"""
API to list datasets in DBS. To be called by datasetlist url with post call.
:param dataset: list of datasets [dataset1,dataset2,..,dataset n] (must have either a list of dataset or dataset_id), Max length 1000.
:type dataset: list
:param dataset_id: list of dataset ids [dataset_id1,dataset_id2,..,dataset_idn, "dsid_min-dsid_max"] ((must have either a list of dataset or dataset_id)
:type dataset_id: list
:param dataset_access_type: List only datasets with that dataset access type (Optional)
:type dataset_access_type: str
:param detail: brief list or detailed list 1/0
:type detail: bool
:returns: List of dictionaries containing the following keys (dataset). If the detail option is used. The dictionary contains the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts
"""
ret = []
try :
body = request.body.read()
if body:
data = cjson.decode(body)
data = validateJSONInputNoCopy("dataset", data, read=True)
#Because CMSWEB has a 300 seconds responding time. We have to limit the array siz to make sure that
#the API can be finished in 300 second.
# YG Nov-05-2015
max_array_size = 1000
if ( 'dataset' in data.keys() and isinstance(data['dataset'], list) and len(data['dataset'])>max_array_size)\
or ('dataset_id' in data.keys() and isinstance(data['dataset_id'], list) and len(data['dataset_id'])>max_array_size):
dbsExceptionHandler("dbsException-invalid-input",
"The Max list length supported in listDatasetArray is %s." %max_array_size, self.logger.exception)
ret = self.dbsDataset.listDatasetArray(data)
except cjson.DecodeError as De:
dbsExceptionHandler('dbsException-invalid-input2', "Invalid input", self.logger.exception, str(De))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSReaderModel/listDatasetArray. %s \n Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
for item in ret:
yield item | API to list datasets in DBS. To be called by datasetlist url with post call.
:param dataset: list of datasets [dataset1,dataset2,..,dataset n] (must have either a list of dataset or dataset_id), Max length 1000.
:type dataset: list
:param dataset_id: list of dataset ids [dataset_id1,dataset_id2,..,dataset_idn, "dsid_min-dsid_max"] ((must have either a list of dataset or dataset_id)
:type dataset_id: list
:param dataset_access_type: List only datasets with that dataset access type (Optional)
:type dataset_access_type: str
:param detail: brief list or detailed list 1/0
:type detail: bool
:returns: List of dictionaries containing the following keys (dataset). If the detail option is used. The dictionary contains the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts | Below is the the instruction that describes the task:
### Input:
API to list datasets in DBS. To be called by datasetlist url with post call.
:param dataset: list of datasets [dataset1,dataset2,..,dataset n] (must have either a list of dataset or dataset_id), Max length 1000.
:type dataset: list
:param dataset_id: list of dataset ids [dataset_id1,dataset_id2,..,dataset_idn, "dsid_min-dsid_max"] ((must have either a list of dataset or dataset_id)
:type dataset_id: list
:param dataset_access_type: List only datasets with that dataset access type (Optional)
:type dataset_access_type: str
:param detail: brief list or detailed list 1/0
:type detail: bool
:returns: List of dictionaries containing the following keys (dataset). If the detail option is used. The dictionary contains the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts
### Response:
def listDatasetArray(self):
"""
API to list datasets in DBS. To be called by datasetlist url with post call.
:param dataset: list of datasets [dataset1,dataset2,..,dataset n] (must have either a list of dataset or dataset_id), Max length 1000.
:type dataset: list
:param dataset_id: list of dataset ids [dataset_id1,dataset_id2,..,dataset_idn, "dsid_min-dsid_max"] ((must have either a list of dataset or dataset_id)
:type dataset_id: list
:param dataset_access_type: List only datasets with that dataset access type (Optional)
:type dataset_access_type: str
:param detail: brief list or detailed list 1/0
:type detail: bool
:returns: List of dictionaries containing the following keys (dataset). If the detail option is used. The dictionary contains the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts
"""
ret = []
try :
body = request.body.read()
if body:
data = cjson.decode(body)
data = validateJSONInputNoCopy("dataset", data, read=True)
#Because CMSWEB has a 300 seconds responding time. We have to limit the array siz to make sure that
#the API can be finished in 300 second.
# YG Nov-05-2015
max_array_size = 1000
if ( 'dataset' in data.keys() and isinstance(data['dataset'], list) and len(data['dataset'])>max_array_size)\
or ('dataset_id' in data.keys() and isinstance(data['dataset_id'], list) and len(data['dataset_id'])>max_array_size):
dbsExceptionHandler("dbsException-invalid-input",
"The Max list length supported in listDatasetArray is %s." %max_array_size, self.logger.exception)
ret = self.dbsDataset.listDatasetArray(data)
except cjson.DecodeError as De:
dbsExceptionHandler('dbsException-invalid-input2', "Invalid input", self.logger.exception, str(De))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSReaderModel/listDatasetArray. %s \n Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
for item in ret:
yield item |
def flatten(self, keep_prob=1):
"""
Flattens 4D Tensor (from Conv Layer) into 2D Tensor (to FC Layer)
:param keep_prob: int. set to 1 for no dropout
"""
self.count['flat'] += 1
scope = 'flat_' + str(self.count['flat'])
with tf.variable_scope(scope):
# Reshape function
input_nodes = tf.Dimension(
self.input.get_shape()[1] * self.input.get_shape()[2] * self.input.get_shape()[3])
output_shape = tf.stack([-1, input_nodes])
self.input = tf.reshape(self.input, output_shape)
# Dropout function
if keep_prob != 1:
self.input = tf.nn.dropout(self.input, keep_prob=keep_prob)
print(scope + ' output: ' + str(self.input.get_shape())) | Flattens 4D Tensor (from Conv Layer) into 2D Tensor (to FC Layer)
:param keep_prob: int. set to 1 for no dropout | Below is the the instruction that describes the task:
### Input:
Flattens 4D Tensor (from Conv Layer) into 2D Tensor (to FC Layer)
:param keep_prob: int. set to 1 for no dropout
### Response:
def flatten(self, keep_prob=1):
"""
Flattens 4D Tensor (from Conv Layer) into 2D Tensor (to FC Layer)
:param keep_prob: int. set to 1 for no dropout
"""
self.count['flat'] += 1
scope = 'flat_' + str(self.count['flat'])
with tf.variable_scope(scope):
# Reshape function
input_nodes = tf.Dimension(
self.input.get_shape()[1] * self.input.get_shape()[2] * self.input.get_shape()[3])
output_shape = tf.stack([-1, input_nodes])
self.input = tf.reshape(self.input, output_shape)
# Dropout function
if keep_prob != 1:
self.input = tf.nn.dropout(self.input, keep_prob=keep_prob)
print(scope + ' output: ' + str(self.input.get_shape())) |
def read_dir_tree(self, file_hash):
""" Recursively read the directory structure beginning at hash """
json_d = self.read_index_object(file_hash, 'tree')
node = {'files' : json_d['files'], 'dirs' : {}}
for name, hsh in json_d['dirs'].iteritems(): node['dirs'][name] = self.read_dir_tree(hsh)
return node | Recursively read the directory structure beginning at hash | Below is the the instruction that describes the task:
### Input:
Recursively read the directory structure beginning at hash
### Response:
def read_dir_tree(self, file_hash):
""" Recursively read the directory structure beginning at hash """
json_d = self.read_index_object(file_hash, 'tree')
node = {'files' : json_d['files'], 'dirs' : {}}
for name, hsh in json_d['dirs'].iteritems(): node['dirs'][name] = self.read_dir_tree(hsh)
return node |
def receive_oaiharvest_job(request, records, name, **kwargs):
"""Receive a list of harvested OAI-PMH records and schedule crawls."""
spider = kwargs.get('spider')
workflow = kwargs.get('workflow')
if not spider or not workflow:
return
files_created, _ = write_to_dir(
records,
output_dir=current_app.config['CRAWLER_OAIHARVEST_OUTPUT_DIRECTORY']
)
for source_file in files_created:
# URI is required by scrapy.
file_uri = pathlib2.Path(source_file).as_uri()
schedule_crawl(spider, workflow, source_file=file_uri) | Receive a list of harvested OAI-PMH records and schedule crawls. | Below is the the instruction that describes the task:
### Input:
Receive a list of harvested OAI-PMH records and schedule crawls.
### Response:
def receive_oaiharvest_job(request, records, name, **kwargs):
"""Receive a list of harvested OAI-PMH records and schedule crawls."""
spider = kwargs.get('spider')
workflow = kwargs.get('workflow')
if not spider or not workflow:
return
files_created, _ = write_to_dir(
records,
output_dir=current_app.config['CRAWLER_OAIHARVEST_OUTPUT_DIRECTORY']
)
for source_file in files_created:
# URI is required by scrapy.
file_uri = pathlib2.Path(source_file).as_uri()
schedule_crawl(spider, workflow, source_file=file_uri) |
def msg_curse(self, args=None, max_width=None):
"""Return the dict to display in the curse interface."""
# Init the return message
ret = []
# Only process if stats exist, not empty (issue #871) and plugin not disabled
if not self.stats or (self.stats == []) or self.is_disable():
return ret
# Check if all GPU have the same name
same_name = all(s['name'] == self.stats[0]['name'] for s in self.stats)
# gpu_stats contain the first GPU in the list
gpu_stats = self.stats[0]
# Header
header = ''
if len(self.stats) > 1:
header += '{} '.format(len(self.stats))
if same_name:
header += '{} {}'.format('GPU', gpu_stats['name'])
else:
header += '{}'.format('GPU')
msg = header[:17]
ret.append(self.curse_add_line(msg, "TITLE"))
# Build the string message
if len(self.stats) == 1 or args.meangpu:
# GPU stat summary or mono GPU
# New line
ret.append(self.curse_new_line())
# GPU PROC
try:
mean_proc = sum(s['proc'] for s in self.stats if s is not None) / len(self.stats)
except TypeError:
mean_proc_msg = '{:>4}'.format('N/A')
else:
mean_proc_msg = '{:>3.0f}%'.format(mean_proc)
if len(self.stats) > 1:
msg = '{:13}'.format('proc mean:')
else:
msg = '{:13}'.format('proc:')
ret.append(self.curse_add_line(msg))
ret.append(self.curse_add_line(
mean_proc_msg, self.get_views(item=gpu_stats[self.get_key()],
key='proc',
option='decoration')))
# New line
ret.append(self.curse_new_line())
# GPU MEM
try:
mean_mem = sum(s['mem'] for s in self.stats if s is not None) / len(self.stats)
except TypeError:
mean_mem_msg = '{:>4}'.format('N/A')
else:
mean_mem_msg = '{:>3.0f}%'.format(mean_mem)
if len(self.stats) > 1:
msg = '{:13}'.format('mem mean:')
else:
msg = '{:13}'.format('mem:')
ret.append(self.curse_add_line(msg))
ret.append(self.curse_add_line(
mean_mem_msg, self.get_views(item=gpu_stats[self.get_key()],
key='mem',
option='decoration')))
else:
# Multi GPU
for gpu_stats in self.stats:
# New line
ret.append(self.curse_new_line())
# GPU ID + PROC + MEM
id_msg = '{}'.format(gpu_stats['gpu_id'])
try:
proc_msg = '{:>3.0f}%'.format(gpu_stats['proc'])
except ValueError:
proc_msg = '{:>4}'.format('N/A')
try:
mem_msg = '{:>3.0f}%'.format(gpu_stats['mem'])
except ValueError:
mem_msg = '{:>4}'.format('N/A')
msg = '{}: {} mem: {}'.format(id_msg, proc_msg, mem_msg)
ret.append(self.curse_add_line(msg))
return ret | Return the dict to display in the curse interface. | Below is the the instruction that describes the task:
### Input:
Return the dict to display in the curse interface.
### Response:
def msg_curse(self, args=None, max_width=None):
"""Return the dict to display in the curse interface."""
# Init the return message
ret = []
# Only process if stats exist, not empty (issue #871) and plugin not disabled
if not self.stats or (self.stats == []) or self.is_disable():
return ret
# Check if all GPU have the same name
same_name = all(s['name'] == self.stats[0]['name'] for s in self.stats)
# gpu_stats contain the first GPU in the list
gpu_stats = self.stats[0]
# Header
header = ''
if len(self.stats) > 1:
header += '{} '.format(len(self.stats))
if same_name:
header += '{} {}'.format('GPU', gpu_stats['name'])
else:
header += '{}'.format('GPU')
msg = header[:17]
ret.append(self.curse_add_line(msg, "TITLE"))
# Build the string message
if len(self.stats) == 1 or args.meangpu:
# GPU stat summary or mono GPU
# New line
ret.append(self.curse_new_line())
# GPU PROC
try:
mean_proc = sum(s['proc'] for s in self.stats if s is not None) / len(self.stats)
except TypeError:
mean_proc_msg = '{:>4}'.format('N/A')
else:
mean_proc_msg = '{:>3.0f}%'.format(mean_proc)
if len(self.stats) > 1:
msg = '{:13}'.format('proc mean:')
else:
msg = '{:13}'.format('proc:')
ret.append(self.curse_add_line(msg))
ret.append(self.curse_add_line(
mean_proc_msg, self.get_views(item=gpu_stats[self.get_key()],
key='proc',
option='decoration')))
# New line
ret.append(self.curse_new_line())
# GPU MEM
try:
mean_mem = sum(s['mem'] for s in self.stats if s is not None) / len(self.stats)
except TypeError:
mean_mem_msg = '{:>4}'.format('N/A')
else:
mean_mem_msg = '{:>3.0f}%'.format(mean_mem)
if len(self.stats) > 1:
msg = '{:13}'.format('mem mean:')
else:
msg = '{:13}'.format('mem:')
ret.append(self.curse_add_line(msg))
ret.append(self.curse_add_line(
mean_mem_msg, self.get_views(item=gpu_stats[self.get_key()],
key='mem',
option='decoration')))
else:
# Multi GPU
for gpu_stats in self.stats:
# New line
ret.append(self.curse_new_line())
# GPU ID + PROC + MEM
id_msg = '{}'.format(gpu_stats['gpu_id'])
try:
proc_msg = '{:>3.0f}%'.format(gpu_stats['proc'])
except ValueError:
proc_msg = '{:>4}'.format('N/A')
try:
mem_msg = '{:>3.0f}%'.format(gpu_stats['mem'])
except ValueError:
mem_msg = '{:>4}'.format('N/A')
msg = '{}: {} mem: {}'.format(id_msg, proc_msg, mem_msg)
ret.append(self.curse_add_line(msg))
return ret |
def send_message(self, msg, stats=True):
""" Send or queue outgoing message
@param msg: Message to send
@param stats: If set to True, will update statistics after operation
completes
"""
self.send_jsonified(proto.json_encode(msg), stats) | Send or queue outgoing message
@param msg: Message to send
@param stats: If set to True, will update statistics after operation
completes | Below is the the instruction that describes the task:
### Input:
Send or queue outgoing message
@param msg: Message to send
@param stats: If set to True, will update statistics after operation
completes
### Response:
def send_message(self, msg, stats=True):
""" Send or queue outgoing message
@param msg: Message to send
@param stats: If set to True, will update statistics after operation
completes
"""
self.send_jsonified(proto.json_encode(msg), stats) |
def request_instance(vm_, conn=None, call=None):
'''
Request an instance to be built
'''
if call == 'function':
# Technically this function may be called other ways too, but it
# definitely cannot be called with --function.
raise SaltCloudSystemExit(
'The request_instance action must be called with -a or --action.'
)
kwargs = copy.deepcopy(vm_)
log.info('Creating Cloud VM %s', vm_['name'])
__utils__['cloud.check_name'](vm_['name'], 'a-zA-Z0-9._-')
conn = get_conn()
userdata = config.get_cloud_config_value(
'userdata', vm_, __opts__, search_global=False, default=None
)
if userdata is not None and os.path.isfile(userdata):
try:
with __utils__['files.fopen'](userdata, 'r') as fp_:
kwargs['userdata'] = __utils__['cloud.userdata_template'](
__opts__, vm_, fp_.read()
)
except Exception as exc:
log.exception(
'Failed to read userdata from %s: %s', userdata, exc)
if 'size' in kwargs:
kwargs['flavor'] = kwargs.pop('size')
kwargs['key_name'] = config.get_cloud_config_value(
'ssh_key_name', vm_, __opts__, search_global=False, default=None
)
kwargs['wait'] = True
try:
conn.create_server(**_clean_create_kwargs(**kwargs))
except shade.exc.OpenStackCloudException as exc:
log.error('Error creating server %s: %s', vm_['name'], exc)
destroy(vm_['name'], conn=conn, call='action')
raise SaltCloudSystemExit(six.text_type(exc))
return show_instance(vm_['name'], conn=conn, call='action') | Request an instance to be built | Below is the the instruction that describes the task:
### Input:
Request an instance to be built
### Response:
def request_instance(vm_, conn=None, call=None):
'''
Request an instance to be built
'''
if call == 'function':
# Technically this function may be called other ways too, but it
# definitely cannot be called with --function.
raise SaltCloudSystemExit(
'The request_instance action must be called with -a or --action.'
)
kwargs = copy.deepcopy(vm_)
log.info('Creating Cloud VM %s', vm_['name'])
__utils__['cloud.check_name'](vm_['name'], 'a-zA-Z0-9._-')
conn = get_conn()
userdata = config.get_cloud_config_value(
'userdata', vm_, __opts__, search_global=False, default=None
)
if userdata is not None and os.path.isfile(userdata):
try:
with __utils__['files.fopen'](userdata, 'r') as fp_:
kwargs['userdata'] = __utils__['cloud.userdata_template'](
__opts__, vm_, fp_.read()
)
except Exception as exc:
log.exception(
'Failed to read userdata from %s: %s', userdata, exc)
if 'size' in kwargs:
kwargs['flavor'] = kwargs.pop('size')
kwargs['key_name'] = config.get_cloud_config_value(
'ssh_key_name', vm_, __opts__, search_global=False, default=None
)
kwargs['wait'] = True
try:
conn.create_server(**_clean_create_kwargs(**kwargs))
except shade.exc.OpenStackCloudException as exc:
log.error('Error creating server %s: %s', vm_['name'], exc)
destroy(vm_['name'], conn=conn, call='action')
raise SaltCloudSystemExit(six.text_type(exc))
return show_instance(vm_['name'], conn=conn, call='action') |
def title(words_quantity=4):
"""Return a random sentence to be used as e.g. an e-mail subject."""
result = words(quantity=words_quantity)
result += random.choice('?.!')
return result.capitalize() | Return a random sentence to be used as e.g. an e-mail subject. | Below is the the instruction that describes the task:
### Input:
Return a random sentence to be used as e.g. an e-mail subject.
### Response:
def title(words_quantity=4):
"""Return a random sentence to be used as e.g. an e-mail subject."""
result = words(quantity=words_quantity)
result += random.choice('?.!')
return result.capitalize() |
def _unescape_value(value):
"""Unescape a value."""
def unescape(c):
return {
"\\\\": "\\",
"\\\"": "\"",
"\\n": "\n",
"\\t": "\t",
"\\b": "\b",
}[c.group(0)]
return re.sub(r"(\\.)", unescape, value) | Unescape a value. | Below is the the instruction that describes the task:
### Input:
Unescape a value.
### Response:
def _unescape_value(value):
"""Unescape a value."""
def unescape(c):
return {
"\\\\": "\\",
"\\\"": "\"",
"\\n": "\n",
"\\t": "\t",
"\\b": "\b",
}[c.group(0)]
return re.sub(r"(\\.)", unescape, value) |
def system_information():
'''
Report system versions.
'''
def system_version():
'''
Return host system version.
'''
lin_ver = linux_distribution()
mac_ver = platform.mac_ver()
win_ver = platform.win32_ver()
if lin_ver[0]:
return ' '.join(lin_ver)
elif mac_ver[0]:
if isinstance(mac_ver[1], (tuple, list)) and ''.join(mac_ver[1]):
return ' '.join([mac_ver[0], '.'.join(mac_ver[1]), mac_ver[2]])
else:
return ' '.join([mac_ver[0], mac_ver[2]])
elif win_ver[0]:
return ' '.join(win_ver)
else:
return ''
if platform.win32_ver()[0]:
# Get the version and release info based on the Windows Operating
# System Product Name. As long as Microsoft maintains a similar format
# this should be future proof
import win32api # pylint: disable=3rd-party-module-not-gated
import win32con # pylint: disable=3rd-party-module-not-gated
# Get the product name from the registry
hkey = win32con.HKEY_LOCAL_MACHINE
key = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion'
value_name = 'ProductName'
reg_handle = win32api.RegOpenKey(hkey, key)
# Returns a tuple of (product_name, value_type)
product_name, _ = win32api.RegQueryValueEx(reg_handle, value_name)
version = 'Unknown'
release = ''
if 'Server' in product_name:
for item in product_name.split(' '):
# If it's all digits, then it's version
if re.match(r'\d+', item):
version = item
# If it starts with R and then numbers, it's the release
# ie: R2
if re.match(r'^R\d+$', item):
release = item
release = '{0}Server{1}'.format(version, release)
else:
for item in product_name.split(' '):
# If it's a number, decimal number, Thin or Vista, then it's the
# version
if re.match(r'^(\d+(\.\d+)?)|Thin|Vista$', item):
version = item
release = version
_, ver, sp, extra = platform.win32_ver()
version = ' '.join([release, ver, sp, extra])
else:
version = system_version()
release = platform.release()
system = [
('system', platform.system()),
('dist', ' '.join(linux_distribution(full_distribution_name=False))),
('release', release),
('machine', platform.machine()),
('version', version),
('locale', __salt_system_encoding__),
]
for name, attr in system:
yield name, attr
continue | Report system versions. | Below is the the instruction that describes the task:
### Input:
Report system versions.
### Response:
def system_information():
'''
Report system versions.
'''
def system_version():
'''
Return host system version.
'''
lin_ver = linux_distribution()
mac_ver = platform.mac_ver()
win_ver = platform.win32_ver()
if lin_ver[0]:
return ' '.join(lin_ver)
elif mac_ver[0]:
if isinstance(mac_ver[1], (tuple, list)) and ''.join(mac_ver[1]):
return ' '.join([mac_ver[0], '.'.join(mac_ver[1]), mac_ver[2]])
else:
return ' '.join([mac_ver[0], mac_ver[2]])
elif win_ver[0]:
return ' '.join(win_ver)
else:
return ''
if platform.win32_ver()[0]:
# Get the version and release info based on the Windows Operating
# System Product Name. As long as Microsoft maintains a similar format
# this should be future proof
import win32api # pylint: disable=3rd-party-module-not-gated
import win32con # pylint: disable=3rd-party-module-not-gated
# Get the product name from the registry
hkey = win32con.HKEY_LOCAL_MACHINE
key = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion'
value_name = 'ProductName'
reg_handle = win32api.RegOpenKey(hkey, key)
# Returns a tuple of (product_name, value_type)
product_name, _ = win32api.RegQueryValueEx(reg_handle, value_name)
version = 'Unknown'
release = ''
if 'Server' in product_name:
for item in product_name.split(' '):
# If it's all digits, then it's version
if re.match(r'\d+', item):
version = item
# If it starts with R and then numbers, it's the release
# ie: R2
if re.match(r'^R\d+$', item):
release = item
release = '{0}Server{1}'.format(version, release)
else:
for item in product_name.split(' '):
# If it's a number, decimal number, Thin or Vista, then it's the
# version
if re.match(r'^(\d+(\.\d+)?)|Thin|Vista$', item):
version = item
release = version
_, ver, sp, extra = platform.win32_ver()
version = ' '.join([release, ver, sp, extra])
else:
version = system_version()
release = platform.release()
system = [
('system', platform.system()),
('dist', ' '.join(linux_distribution(full_distribution_name=False))),
('release', release),
('machine', platform.machine()),
('version', version),
('locale', __salt_system_encoding__),
]
for name, attr in system:
yield name, attr
continue |
def build_self_uri_list(self_uri_list):
"parse the self-uri tags, build Uri objects"
uri_list = []
for self_uri in self_uri_list:
uri = ea.Uri()
utils.set_attr_if_value(uri, 'xlink_href', self_uri.get('xlink_href'))
utils.set_attr_if_value(uri, 'content_type', self_uri.get('content-type'))
uri_list.append(uri)
return uri_list | parse the self-uri tags, build Uri objects | Below is the the instruction that describes the task:
### Input:
parse the self-uri tags, build Uri objects
### Response:
def build_self_uri_list(self_uri_list):
"parse the self-uri tags, build Uri objects"
uri_list = []
for self_uri in self_uri_list:
uri = ea.Uri()
utils.set_attr_if_value(uri, 'xlink_href', self_uri.get('xlink_href'))
utils.set_attr_if_value(uri, 'content_type', self_uri.get('content-type'))
uri_list.append(uri)
return uri_list |
def wait_connected(self, timeout=None):
'''Wait for connections to be made and their handshakes to finish
:param timeout:
maximum time to wait in seconds. with None, there is no timeout.
:type timeout: float or None
:returns:
``True`` if all connections were made, ``False`` if one or more
failed.
'''
result = self._peer.wait_connected(timeout)
if not result:
if timeout is not None:
log.warn("connect wait timed out after %.2f seconds" % timeout)
return result | Wait for connections to be made and their handshakes to finish
:param timeout:
maximum time to wait in seconds. with None, there is no timeout.
:type timeout: float or None
:returns:
``True`` if all connections were made, ``False`` if one or more
failed. | Below is the the instruction that describes the task:
### Input:
Wait for connections to be made and their handshakes to finish
:param timeout:
maximum time to wait in seconds. with None, there is no timeout.
:type timeout: float or None
:returns:
``True`` if all connections were made, ``False`` if one or more
failed.
### Response:
def wait_connected(self, timeout=None):
'''Wait for connections to be made and their handshakes to finish
:param timeout:
maximum time to wait in seconds. with None, there is no timeout.
:type timeout: float or None
:returns:
``True`` if all connections were made, ``False`` if one or more
failed.
'''
result = self._peer.wait_connected(timeout)
if not result:
if timeout is not None:
log.warn("connect wait timed out after %.2f seconds" % timeout)
return result |
def on_iteration(self):
""" Kombu callback for each `drain_events` loop iteration."""
self._cancel_consumers_if_requested()
if len(self._consumers) == 0:
_log.debug('requesting stop after iteration')
self.should_stop = True | Kombu callback for each `drain_events` loop iteration. | Below is the the instruction that describes the task:
### Input:
Kombu callback for each `drain_events` loop iteration.
### Response:
def on_iteration(self):
""" Kombu callback for each `drain_events` loop iteration."""
self._cancel_consumers_if_requested()
if len(self._consumers) == 0:
_log.debug('requesting stop after iteration')
self.should_stop = True |
def round_to_05(n, exp=None, mode='s'):
"""
Round to the next 0.5-value.
This function applies the round function `func` to round `n` to the
next 0.5-value with respect to its exponent with base 10 (i.e.
1.3e-4 will be rounded to 1.5e-4) if `exp` is None or with respect
to the given exponent in `exp`.
Parameters
----------
n: numpy.ndarray
number to round
exp: int or numpy.ndarray
Exponent for rounding. If None, it will be computed from `n` to be the
exponents for base 10.
mode: {'s', 'l'}
rounding mode. If 's', it will be rounded to value whose absolute
value is below `n`, if 'l' it will rounded to the value whose absolute
value is above `n`.
Returns
-------
numpy.ndarray
rounded `n`
Examples
--------
The effects of the different parameters are show in the example below::
>>> from psyplot.plotter.simple import round_to_05
>>> a = [-100.3, 40.6, 8.7, -0.00023]
>>>round_to_05(a, mode='s')
array([ -1.00000000e+02, 4.00000000e+01, 8.50000000e+00,
-2.00000000e-04])
>>> round_to_05(a, mode='l')
array([ -1.50000000e+02, 4.50000000e+01, 9.00000000e+00,
-2.50000000e-04])"""
n = np.asarray(n)
if exp is None:
exp = np.floor(np.log10(np.abs(n))) # exponent for base 10
ntmp = np.abs(n)/10.**exp # mantissa for base 10
if mode == 's':
n1 = ntmp
s = 1.
n2 = nret = np.floor(ntmp)
else:
n1 = nret = np.ceil(ntmp)
s = -1.
n2 = ntmp
return np.where(n1 - n2 > 0.5, np.sign(n)*(nret + s*0.5)*10.**exp,
np.sign(n)*nret*10.**exp) | Round to the next 0.5-value.
This function applies the round function `func` to round `n` to the
next 0.5-value with respect to its exponent with base 10 (i.e.
1.3e-4 will be rounded to 1.5e-4) if `exp` is None or with respect
to the given exponent in `exp`.
Parameters
----------
n: numpy.ndarray
number to round
exp: int or numpy.ndarray
Exponent for rounding. If None, it will be computed from `n` to be the
exponents for base 10.
mode: {'s', 'l'}
rounding mode. If 's', it will be rounded to value whose absolute
value is below `n`, if 'l' it will rounded to the value whose absolute
value is above `n`.
Returns
-------
numpy.ndarray
rounded `n`
Examples
--------
The effects of the different parameters are show in the example below::
>>> from psyplot.plotter.simple import round_to_05
>>> a = [-100.3, 40.6, 8.7, -0.00023]
>>>round_to_05(a, mode='s')
array([ -1.00000000e+02, 4.00000000e+01, 8.50000000e+00,
-2.00000000e-04])
>>> round_to_05(a, mode='l')
array([ -1.50000000e+02, 4.50000000e+01, 9.00000000e+00,
-2.50000000e-04]) | Below is the the instruction that describes the task:
### Input:
Round to the next 0.5-value.
This function applies the round function `func` to round `n` to the
next 0.5-value with respect to its exponent with base 10 (i.e.
1.3e-4 will be rounded to 1.5e-4) if `exp` is None or with respect
to the given exponent in `exp`.
Parameters
----------
n: numpy.ndarray
number to round
exp: int or numpy.ndarray
Exponent for rounding. If None, it will be computed from `n` to be the
exponents for base 10.
mode: {'s', 'l'}
rounding mode. If 's', it will be rounded to value whose absolute
value is below `n`, if 'l' it will rounded to the value whose absolute
value is above `n`.
Returns
-------
numpy.ndarray
rounded `n`
Examples
--------
The effects of the different parameters are show in the example below::
>>> from psyplot.plotter.simple import round_to_05
>>> a = [-100.3, 40.6, 8.7, -0.00023]
>>>round_to_05(a, mode='s')
array([ -1.00000000e+02, 4.00000000e+01, 8.50000000e+00,
-2.00000000e-04])
>>> round_to_05(a, mode='l')
array([ -1.50000000e+02, 4.50000000e+01, 9.00000000e+00,
-2.50000000e-04])
### Response:
def round_to_05(n, exp=None, mode='s'):
"""
Round to the next 0.5-value.
This function applies the round function `func` to round `n` to the
next 0.5-value with respect to its exponent with base 10 (i.e.
1.3e-4 will be rounded to 1.5e-4) if `exp` is None or with respect
to the given exponent in `exp`.
Parameters
----------
n: numpy.ndarray
number to round
exp: int or numpy.ndarray
Exponent for rounding. If None, it will be computed from `n` to be the
exponents for base 10.
mode: {'s', 'l'}
rounding mode. If 's', it will be rounded to value whose absolute
value is below `n`, if 'l' it will rounded to the value whose absolute
value is above `n`.
Returns
-------
numpy.ndarray
rounded `n`
Examples
--------
The effects of the different parameters are show in the example below::
>>> from psyplot.plotter.simple import round_to_05
>>> a = [-100.3, 40.6, 8.7, -0.00023]
>>>round_to_05(a, mode='s')
array([ -1.00000000e+02, 4.00000000e+01, 8.50000000e+00,
-2.00000000e-04])
>>> round_to_05(a, mode='l')
array([ -1.50000000e+02, 4.50000000e+01, 9.00000000e+00,
-2.50000000e-04])"""
n = np.asarray(n)
if exp is None:
exp = np.floor(np.log10(np.abs(n))) # exponent for base 10
ntmp = np.abs(n)/10.**exp # mantissa for base 10
if mode == 's':
n1 = ntmp
s = 1.
n2 = nret = np.floor(ntmp)
else:
n1 = nret = np.ceil(ntmp)
s = -1.
n2 = ntmp
return np.where(n1 - n2 > 0.5, np.sign(n)*(nret + s*0.5)*10.**exp,
np.sign(n)*nret*10.**exp) |
def cells_to_series(cells, args):
"""Convert a CellImpl into a Series.
`args` must be a sequence of argkeys.
`args` can be longer or shorter then the number of cell's parameters.
If shorter, then defaults are filled if any, else raise error.
If longer, then redundant args are ignored.
"""
paramlen = len(cells.formula.parameters)
is_multidx = paramlen > 1
if len(cells.data) == 0:
data = {}
indexes = None
elif paramlen == 0: # Const Cells
data = list(cells.data.values())
indexes = [np.nan]
else:
if len(args) > 0:
defaults = tuple(
param.default
for param in cells.formula.signature.parameters.values()
)
updated_args = []
for arg in args:
if len(arg) > paramlen:
arg = arg[:paramlen]
elif len(arg) < paramlen:
arg += defaults[len(arg) :]
updated_args.append(arg)
items = [
(arg, cells.data[arg])
for arg in updated_args
if arg in cells.data
]
else:
items = [(key, value) for key, value in cells.data.items()]
if not is_multidx: # Peel 1-element tuple
items = [(key[0], value) for key, value in items]
if len(items) == 0:
indexes, data = None, {}
else:
indexes, data = zip(*items)
if is_multidx:
indexes = pd.MultiIndex.from_tuples(indexes)
result = pd.Series(data=data, name=cells.name, index=indexes)
if indexes is not None and any(i is not np.nan for i in indexes):
result.index.names = list(cells.formula.parameters)
return result | Convert a CellImpl into a Series.
`args` must be a sequence of argkeys.
`args` can be longer or shorter then the number of cell's parameters.
If shorter, then defaults are filled if any, else raise error.
If longer, then redundant args are ignored. | Below is the the instruction that describes the task:
### Input:
Convert a CellImpl into a Series.
`args` must be a sequence of argkeys.
`args` can be longer or shorter then the number of cell's parameters.
If shorter, then defaults are filled if any, else raise error.
If longer, then redundant args are ignored.
### Response:
def cells_to_series(cells, args):
"""Convert a CellImpl into a Series.
`args` must be a sequence of argkeys.
`args` can be longer or shorter then the number of cell's parameters.
If shorter, then defaults are filled if any, else raise error.
If longer, then redundant args are ignored.
"""
paramlen = len(cells.formula.parameters)
is_multidx = paramlen > 1
if len(cells.data) == 0:
data = {}
indexes = None
elif paramlen == 0: # Const Cells
data = list(cells.data.values())
indexes = [np.nan]
else:
if len(args) > 0:
defaults = tuple(
param.default
for param in cells.formula.signature.parameters.values()
)
updated_args = []
for arg in args:
if len(arg) > paramlen:
arg = arg[:paramlen]
elif len(arg) < paramlen:
arg += defaults[len(arg) :]
updated_args.append(arg)
items = [
(arg, cells.data[arg])
for arg in updated_args
if arg in cells.data
]
else:
items = [(key, value) for key, value in cells.data.items()]
if not is_multidx: # Peel 1-element tuple
items = [(key[0], value) for key, value in items]
if len(items) == 0:
indexes, data = None, {}
else:
indexes, data = zip(*items)
if is_multidx:
indexes = pd.MultiIndex.from_tuples(indexes)
result = pd.Series(data=data, name=cells.name, index=indexes)
if indexes is not None and any(i is not np.nan for i in indexes):
result.index.names = list(cells.formula.parameters)
return result |
def variant(self, document_id, gene_panels=None, case_id=None):
"""Returns the specified variant.
Arguments:
document_id : A md5 key that represents the variant or "variant_id"
gene_panels(List[GenePanel])
case_id (str): case id (will search with "variant_id")
Returns:
variant_object(Variant): A odm variant object
"""
query = {}
if case_id:
# search for a variant in a case
query['case_id'] = case_id
query['variant_id'] = document_id
else:
# search with a unique id
query['_id'] = document_id
variant_obj = self.variant_collection.find_one(query)
if variant_obj:
variant_obj = self.add_gene_info(variant_obj, gene_panels)
if variant_obj['chromosome'] in ['X', 'Y']:
## TODO add the build here
variant_obj['is_par'] = is_par(variant_obj['chromosome'],
variant_obj['position'])
return variant_obj | Returns the specified variant.
Arguments:
document_id : A md5 key that represents the variant or "variant_id"
gene_panels(List[GenePanel])
case_id (str): case id (will search with "variant_id")
Returns:
variant_object(Variant): A odm variant object | Below is the the instruction that describes the task:
### Input:
Returns the specified variant.
Arguments:
document_id : A md5 key that represents the variant or "variant_id"
gene_panels(List[GenePanel])
case_id (str): case id (will search with "variant_id")
Returns:
variant_object(Variant): A odm variant object
### Response:
def variant(self, document_id, gene_panels=None, case_id=None):
"""Returns the specified variant.
Arguments:
document_id : A md5 key that represents the variant or "variant_id"
gene_panels(List[GenePanel])
case_id (str): case id (will search with "variant_id")
Returns:
variant_object(Variant): A odm variant object
"""
query = {}
if case_id:
# search for a variant in a case
query['case_id'] = case_id
query['variant_id'] = document_id
else:
# search with a unique id
query['_id'] = document_id
variant_obj = self.variant_collection.find_one(query)
if variant_obj:
variant_obj = self.add_gene_info(variant_obj, gene_panels)
if variant_obj['chromosome'] in ['X', 'Y']:
## TODO add the build here
variant_obj['is_par'] = is_par(variant_obj['chromosome'],
variant_obj['position'])
return variant_obj |
def will_print(level=1):
"""Returns True if the current global status of messaging would print a
message using any of the printing functions in this module.
"""
if level == 1:
#We only affect printability using the quiet setting.
return quiet is None or quiet == False
else:
return ((isinstance(verbosity, int) and level <= verbosity) or
(isinstance(verbosity, bool) and verbosity == True)) | Returns True if the current global status of messaging would print a
message using any of the printing functions in this module. | Below is the the instruction that describes the task:
### Input:
Returns True if the current global status of messaging would print a
message using any of the printing functions in this module.
### Response:
def will_print(level=1):
"""Returns True if the current global status of messaging would print a
message using any of the printing functions in this module.
"""
if level == 1:
#We only affect printability using the quiet setting.
return quiet is None or quiet == False
else:
return ((isinstance(verbosity, int) and level <= verbosity) or
(isinstance(verbosity, bool) and verbosity == True)) |
def upload_signing_cert(self, cert_body, user_name=None):
"""
Uploads an X.509 signing certificate and associates it with
the specified user.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type cert_body: string
:param cert_body: The body of the signing certificate.
:type user_name: string
:param user_name: The username of the user
"""
params = {'CertificateBody' : cert_body}
if user_name:
params['UserName'] = user_name
return self.get_response('UploadSigningCertificate', params,
verb='POST') | Uploads an X.509 signing certificate and associates it with
the specified user.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type cert_body: string
:param cert_body: The body of the signing certificate.
:type user_name: string
:param user_name: The username of the user | Below is the the instruction that describes the task:
### Input:
Uploads an X.509 signing certificate and associates it with
the specified user.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type cert_body: string
:param cert_body: The body of the signing certificate.
:type user_name: string
:param user_name: The username of the user
### Response:
def upload_signing_cert(self, cert_body, user_name=None):
"""
Uploads an X.509 signing certificate and associates it with
the specified user.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type cert_body: string
:param cert_body: The body of the signing certificate.
:type user_name: string
:param user_name: The username of the user
"""
params = {'CertificateBody' : cert_body}
if user_name:
params['UserName'] = user_name
return self.get_response('UploadSigningCertificate', params,
verb='POST') |
def default_database(ctx: click.Context, _param: Parameter, value: Optional[str]):
"""Try to guess a reasonable database name by looking at the repository path"""
if value:
return value
if ctx.params["repository"]:
return os.path.join(ctx.params["repository"], DB.DEFAULT_DB_FILE)
raise click.BadParameter("Could not guess a database location") | Try to guess a reasonable database name by looking at the repository path | Below is the the instruction that describes the task:
### Input:
Try to guess a reasonable database name by looking at the repository path
### Response:
def default_database(ctx: click.Context, _param: Parameter, value: Optional[str]):
"""Try to guess a reasonable database name by looking at the repository path"""
if value:
return value
if ctx.params["repository"]:
return os.path.join(ctx.params["repository"], DB.DEFAULT_DB_FILE)
raise click.BadParameter("Could not guess a database location") |
def calcMD5(path):
"""
calc MD5 based on path
"""
# check that file exists
if os.path.exists(path) is False:
yield False
else:
command = ['md5sum', path]
p = Popen(command, stdout = PIPE)
for line in p.communicate()[0].splitlines():
yield line.decode('ascii').strip().split()[0]
p.wait()
yield False | calc MD5 based on path | Below is the the instruction that describes the task:
### Input:
calc MD5 based on path
### Response:
def calcMD5(path):
"""
calc MD5 based on path
"""
# check that file exists
if os.path.exists(path) is False:
yield False
else:
command = ['md5sum', path]
p = Popen(command, stdout = PIPE)
for line in p.communicate()[0].splitlines():
yield line.decode('ascii').strip().split()[0]
p.wait()
yield False |
def __get_edges_by_vertex(self, vertex, keys=False):
""" Iterates over edges that are incident to supplied vertex argument in current :class:`BreakpointGraph`
Checks that the supplied vertex argument exists in underlying MultiGraph object as a vertex, then iterates over all edges that are incident to it. Wraps each yielded object into :class:`bg.edge.BGEdge` object.
:param vertex: a vertex object in current :class:`BreakpointGraph` object
:type vertex: any hashable object. :class:`bg.vertex.BGVertex` object is expected.
:param keys: a flag to indicate if information about unique edge's ids has to be returned alongside with edge
:type keys: ``Boolean``
:return: generator over edges (tuples ``edge, edge_id`` if keys specified) in current :class:`BreakpointGraph` wrapped in :class:`bg.vertex.BGEVertex`
:rtype: ``generator``
"""
if vertex in self.bg:
for vertex2, edges in self.bg[vertex].items():
for key, data in self.bg[vertex][vertex2].items():
bg_edge = BGEdge(vertex1=vertex, vertex2=vertex2, multicolor=data["attr_dict"]["multicolor"],
data=data["attr_dict"]["data"])
if keys:
yield bg_edge, key
else:
yield bg_edge | Iterates over edges that are incident to supplied vertex argument in current :class:`BreakpointGraph`
Checks that the supplied vertex argument exists in underlying MultiGraph object as a vertex, then iterates over all edges that are incident to it. Wraps each yielded object into :class:`bg.edge.BGEdge` object.
:param vertex: a vertex object in current :class:`BreakpointGraph` object
:type vertex: any hashable object. :class:`bg.vertex.BGVertex` object is expected.
:param keys: a flag to indicate if information about unique edge's ids has to be returned alongside with edge
:type keys: ``Boolean``
:return: generator over edges (tuples ``edge, edge_id`` if keys specified) in current :class:`BreakpointGraph` wrapped in :class:`bg.vertex.BGEVertex`
:rtype: ``generator`` | Below is the the instruction that describes the task:
### Input:
Iterates over edges that are incident to supplied vertex argument in current :class:`BreakpointGraph`
Checks that the supplied vertex argument exists in underlying MultiGraph object as a vertex, then iterates over all edges that are incident to it. Wraps each yielded object into :class:`bg.edge.BGEdge` object.
:param vertex: a vertex object in current :class:`BreakpointGraph` object
:type vertex: any hashable object. :class:`bg.vertex.BGVertex` object is expected.
:param keys: a flag to indicate if information about unique edge's ids has to be returned alongside with edge
:type keys: ``Boolean``
:return: generator over edges (tuples ``edge, edge_id`` if keys specified) in current :class:`BreakpointGraph` wrapped in :class:`bg.vertex.BGEVertex`
:rtype: ``generator``
### Response:
def __get_edges_by_vertex(self, vertex, keys=False):
""" Iterates over edges that are incident to supplied vertex argument in current :class:`BreakpointGraph`
Checks that the supplied vertex argument exists in underlying MultiGraph object as a vertex, then iterates over all edges that are incident to it. Wraps each yielded object into :class:`bg.edge.BGEdge` object.
:param vertex: a vertex object in current :class:`BreakpointGraph` object
:type vertex: any hashable object. :class:`bg.vertex.BGVertex` object is expected.
:param keys: a flag to indicate if information about unique edge's ids has to be returned alongside with edge
:type keys: ``Boolean``
:return: generator over edges (tuples ``edge, edge_id`` if keys specified) in current :class:`BreakpointGraph` wrapped in :class:`bg.vertex.BGEVertex`
:rtype: ``generator``
"""
if vertex in self.bg:
for vertex2, edges in self.bg[vertex].items():
for key, data in self.bg[vertex][vertex2].items():
bg_edge = BGEdge(vertex1=vertex, vertex2=vertex2, multicolor=data["attr_dict"]["multicolor"],
data=data["attr_dict"]["data"])
if keys:
yield bg_edge, key
else:
yield bg_edge |
def _read_weights(self):
"""Reads weights from each of the load cells.
"""
weights = []
grams_per_pound = 453.592
# Read from each of the sensors
for ser in self._serials:
ser.write('W\r')
ser.flush()
time.sleep(0.02)
for ser in self._serials:
try:
output_str = ser.readline()
weight = float(output_str) * grams_per_pound
weights.append(weight)
except:
weights.append(0.0)
# Log the output
log_output = ''
for w in weights:
log_output +='{:.2f} '.format(w)
rospy.loginfo(log_output)
        return weights | Reads weights from each of the load cells. | Below is the instruction that describes the task:
### Input:
Reads weights from each of the load cells.
### Response:
def _read_weights(self):
"""Reads weights from each of the load cells.
"""
weights = []
grams_per_pound = 453.592
# Read from each of the sensors
for ser in self._serials:
ser.write('W\r')
ser.flush()
time.sleep(0.02)
for ser in self._serials:
try:
output_str = ser.readline()
weight = float(output_str) * grams_per_pound
weights.append(weight)
except:
weights.append(0.0)
# Log the output
log_output = ''
for w in weights:
log_output +='{:.2f} '.format(w)
rospy.loginfo(log_output)
return weights |
def _validate_collections_have_default_values(class_name, property_name, property_descriptor):
"""Validate that if the property is of collection type, it has a specified default value."""
# We don't want properties of collection type having "null" values, since that may cause
# unexpected errors during GraphQL query execution and other operations.
if property_descriptor.type_id in COLLECTION_PROPERTY_TYPES:
if property_descriptor.default is None:
raise IllegalSchemaStateError(u'Class "{}" has a property "{}" of collection type with '
                                          u'no default value.'.format(class_name, property_name)) | Validate that if the property is of collection type, it has a specified default value. | Below is the instruction that describes the task:
### Input:
Validate that if the property is of collection type, it has a specified default value.
### Response:
def _validate_collections_have_default_values(class_name, property_name, property_descriptor):
"""Validate that if the property is of collection type, it has a specified default value."""
# We don't want properties of collection type having "null" values, since that may cause
# unexpected errors during GraphQL query execution and other operations.
if property_descriptor.type_id in COLLECTION_PROPERTY_TYPES:
if property_descriptor.default is None:
raise IllegalSchemaStateError(u'Class "{}" has a property "{}" of collection type with '
u'no default value.'.format(class_name, property_name)) |
def runContainer(image, **kwargs):
'''Run a docker container using a given image; passing keyword arguments
documented to be accepted by docker's client.containers.run function
No extra side effects. Handles and reraises ContainerError, ImageNotFound,
and APIError exceptions.
'''
container = None
try:
container = client.containers.run(image, **kwargs)
if "name" in kwargs.keys():
print("Container", kwargs["name"], "is now running.")
except ContainerError as exc:
eprint("Failed to run container")
raise exc
except ImageNotFound as exc:
eprint("Failed to find image to run as a docker container")
raise exc
except APIError as exc:
eprint("Unhandled error")
raise exc
return container | Run a docker container using a given image; passing keyword arguments
documented to be accepted by docker's client.containers.run function
No extra side effects. Handles and reraises ContainerError, ImageNotFound,
    and APIError exceptions. | Below is the instruction that describes the task:
### Input:
Run a docker container using a given image; passing keyword arguments
documented to be accepted by docker's client.containers.run function
No extra side effects. Handles and reraises ContainerError, ImageNotFound,
and APIError exceptions.
### Response:
def runContainer(image, **kwargs):
'''Run a docker container using a given image; passing keyword arguments
documented to be accepted by docker's client.containers.run function
No extra side effects. Handles and reraises ContainerError, ImageNotFound,
and APIError exceptions.
'''
container = None
try:
container = client.containers.run(image, **kwargs)
if "name" in kwargs.keys():
print("Container", kwargs["name"], "is now running.")
except ContainerError as exc:
eprint("Failed to run container")
raise exc
except ImageNotFound as exc:
eprint("Failed to find image to run as a docker container")
raise exc
except APIError as exc:
eprint("Unhandled error")
raise exc
return container |
def shutdown(self, skip_hooks=False):
""" Shuts down the process.
`skip_hooks`
Set to ``True`` to skip running task end event plugins.
"""
if not self._exited:
self._exited = True
if not skip_hooks:
self._run_events(shutdown=True)
_shutdown_pipe(self._pipe)
self._task.stop()
raise SystemExit | Shuts down the process.
`skip_hooks`
            Set to ``True`` to skip running task end event plugins. | Below is the instruction that describes the task:
### Input:
Shuts down the process.
`skip_hooks`
Set to ``True`` to skip running task end event plugins.
### Response:
def shutdown(self, skip_hooks=False):
""" Shuts down the process.
`skip_hooks`
Set to ``True`` to skip running task end event plugins.
"""
if not self._exited:
self._exited = True
if not skip_hooks:
self._run_events(shutdown=True)
_shutdown_pipe(self._pipe)
self._task.stop()
raise SystemExit |
def poll(self, conn):
"""Poll DB socket and process async tasks."""
while 1:
state = conn.poll()
if state == psycopg2.extensions.POLL_OK:
while conn.notifies:
notify = conn.notifies.pop()
self.logger.info(
"Got NOTIFY (pid=%d, payload=%r)",
notify.pid, notify.payload,
)
# read the header and check seq/fin.
hdr, chunk = notify.payload.split('|', 1)
# print('RECEIVE: %s' % hdr)
header = ejson.loads(hdr)
uuid = header['uuid']
size, chunks = self.chunks.setdefault(uuid, [0, {}])
if header['fin']:
size = self.chunks[uuid][0] = header['seq']
# stash the chunk
chunks[header['seq']] = chunk
if len(chunks) != size:
# haven't got all the chunks yet
continue # process next NOTIFY in loop
# got the last chunk -> process it.
data = ''.join(
chunk for _, chunk in sorted(chunks.items())
)
del self.chunks[uuid] # don't forget to cleanup!
data = ejson.loads(data)
sender = data.pop('_sender', None)
tx_id = data.pop('_tx_id', None)
for connection_id in data.pop('_connection_ids'):
try:
websocket = self.connections[connection_id]
except KeyError:
continue # connection not in this process
if connection_id == sender:
websocket.send(data, tx_id=tx_id)
else:
websocket.send(data)
break
elif state == psycopg2.extensions.POLL_WRITE:
gevent.select.select([], [conn.fileno()], [])
elif state == psycopg2.extensions.POLL_READ:
gevent.select.select([conn.fileno()], [], [])
else:
                self.logger.warn('POLL_ERR: %s', state) | Poll DB socket and process async tasks. | Below is the instruction that describes the task:
### Input:
Poll DB socket and process async tasks.
### Response:
def poll(self, conn):
"""Poll DB socket and process async tasks."""
while 1:
state = conn.poll()
if state == psycopg2.extensions.POLL_OK:
while conn.notifies:
notify = conn.notifies.pop()
self.logger.info(
"Got NOTIFY (pid=%d, payload=%r)",
notify.pid, notify.payload,
)
# read the header and check seq/fin.
hdr, chunk = notify.payload.split('|', 1)
# print('RECEIVE: %s' % hdr)
header = ejson.loads(hdr)
uuid = header['uuid']
size, chunks = self.chunks.setdefault(uuid, [0, {}])
if header['fin']:
size = self.chunks[uuid][0] = header['seq']
# stash the chunk
chunks[header['seq']] = chunk
if len(chunks) != size:
# haven't got all the chunks yet
continue # process next NOTIFY in loop
# got the last chunk -> process it.
data = ''.join(
chunk for _, chunk in sorted(chunks.items())
)
del self.chunks[uuid] # don't forget to cleanup!
data = ejson.loads(data)
sender = data.pop('_sender', None)
tx_id = data.pop('_tx_id', None)
for connection_id in data.pop('_connection_ids'):
try:
websocket = self.connections[connection_id]
except KeyError:
continue # connection not in this process
if connection_id == sender:
websocket.send(data, tx_id=tx_id)
else:
websocket.send(data)
break
elif state == psycopg2.extensions.POLL_WRITE:
gevent.select.select([], [conn.fileno()], [])
elif state == psycopg2.extensions.POLL_READ:
gevent.select.select([conn.fileno()], [], [])
else:
self.logger.warn('POLL_ERR: %s', state) |
def on_authenticated(self, authorization):
"""Device authenticated.
:param authorization: Authentication token details
:type authorization: dict
"""
# Acquire condition
self.is_authenticating.acquire()
# Store authorization for future calls
self.authorization = authorization
print('Authentication successful - authorization: %r' % self.authorization)
# Authentication complete
self.is_authenticating.notify_all()
self.is_authenticating.release() | Device authenticated.
:param authorization: Authentication token details
        :type authorization: dict | Below is the instruction that describes the task:
### Input:
Device authenticated.
:param authorization: Authentication token details
:type authorization: dict
### Response:
def on_authenticated(self, authorization):
"""Device authenticated.
:param authorization: Authentication token details
:type authorization: dict
"""
# Acquire condition
self.is_authenticating.acquire()
# Store authorization for future calls
self.authorization = authorization
print('Authentication successful - authorization: %r' % self.authorization)
# Authentication complete
self.is_authenticating.notify_all()
self.is_authenticating.release() |
def connect(self, host, port):
'''
Connect to a host and port.
'''
# Clear the connect state immediately since we're no longer connected
# at this point.
self._connected = False
# Only after the socket has connected do we clear this state; closed
# must be False so that writes can be buffered in writePacket(). The
# closed state might have been set to True due to a socket error or a
# redirect.
self._host = "%s:%d" % (host, port)
self._closed = False
self._close_info = {
'reply_code': 0,
'reply_text': 'failed to connect to %s' % (self._host),
'class_id': 0,
'method_id': 0
}
self._transport.connect((host, port))
self._transport.write(PROTOCOL_HEADER)
self._last_octet_time = time.time()
if self._synchronous_connect:
# Have to queue this callback just after connect, it can't go
# into the constructor because the channel needs to be
# "always there" for frame processing, but the synchronous
# callback can't be added until after the protocol header has
# been written. This SHOULD be registered before the protocol
# header is written, in the case where the header bytes are
# written, but this thread/greenlet/context does not return until
# after another thread/greenlet/context has read and processed the
# recv_start frame. Without more re-write to add_sync_cb though,
# it will block on reading responses that will never arrive
# because the protocol header isn't written yet. TBD if needs
# refactoring. Could encapsulate entirely here, wherein
# read_frames exits if protocol header not yet written. Like other
# synchronous behaviors, adding this callback will result in a
# blocking frame read and process loop until _recv_start and any
# subsequent synchronous callbacks have been processed. In the
# event that this is /not/ a synchronous transport, but the
# caller wants the connect to be synchronous so as to ensure that
# the connection is ready, then do a read frame loop here.
self._channels[0].add_synchronous_cb(self._channels[0]._recv_start)
while not self._connected:
                self.read_frames() | Connect to a host and port. | Below is the instruction that describes the task:
### Input:
Connect to a host and port.
### Response:
def connect(self, host, port):
'''
Connect to a host and port.
'''
# Clear the connect state immediately since we're no longer connected
# at this point.
self._connected = False
# Only after the socket has connected do we clear this state; closed
# must be False so that writes can be buffered in writePacket(). The
# closed state might have been set to True due to a socket error or a
# redirect.
self._host = "%s:%d" % (host, port)
self._closed = False
self._close_info = {
'reply_code': 0,
'reply_text': 'failed to connect to %s' % (self._host),
'class_id': 0,
'method_id': 0
}
self._transport.connect((host, port))
self._transport.write(PROTOCOL_HEADER)
self._last_octet_time = time.time()
if self._synchronous_connect:
# Have to queue this callback just after connect, it can't go
# into the constructor because the channel needs to be
# "always there" for frame processing, but the synchronous
# callback can't be added until after the protocol header has
# been written. This SHOULD be registered before the protocol
# header is written, in the case where the header bytes are
# written, but this thread/greenlet/context does not return until
# after another thread/greenlet/context has read and processed the
# recv_start frame. Without more re-write to add_sync_cb though,
# it will block on reading responses that will never arrive
# because the protocol header isn't written yet. TBD if needs
# refactoring. Could encapsulate entirely here, wherein
# read_frames exits if protocol header not yet written. Like other
# synchronous behaviors, adding this callback will result in a
# blocking frame read and process loop until _recv_start and any
# subsequent synchronous callbacks have been processed. In the
# event that this is /not/ a synchronous transport, but the
# caller wants the connect to be synchronous so as to ensure that
# the connection is ready, then do a read frame loop here.
self._channels[0].add_synchronous_cb(self._channels[0]._recv_start)
while not self._connected:
self.read_frames() |
def rmi(self, force=False, via_name=False):
"""
remove this image
:param force: bool, force removal of the image
:param via_name: bool, refer to the image via name, if false, refer via ID
:return: None
"""
identifier = self.get_full_name() if via_name else (self._id or self.get_id())
cmdline = ["podman", "rmi", identifier, "--force" if force else ""]
run_cmd(cmdline) | remove this image
:param force: bool, force removal of the image
:param via_name: bool, refer to the image via name, if false, refer via ID
        :return: None | Below is the instruction that describes the task:
### Input:
remove this image
:param force: bool, force removal of the image
:param via_name: bool, refer to the image via name, if false, refer via ID
:return: None
### Response:
def rmi(self, force=False, via_name=False):
"""
remove this image
:param force: bool, force removal of the image
:param via_name: bool, refer to the image via name, if false, refer via ID
:return: None
"""
identifier = self.get_full_name() if via_name else (self._id or self.get_id())
cmdline = ["podman", "rmi", identifier, "--force" if force else ""]
run_cmd(cmdline) |
def add_websocket_route(
self,
handler,
uri,
host=None,
strict_slashes=None,
subprotocols=None,
name=None,
):
"""
A helper method to register a function as a websocket route.
:param handler: a callable function or instance of a class
that can handle the websocket request
:param host: Host IP or FQDN details
:param uri: URL path that will be mapped to the websocket
handler
:param strict_slashes: If the API endpoint needs to terminate
with a "/" or not
:param subprotocols: Subprotocols to be used with websocket
handshake
:param name: A unique name assigned to the URL so that it can
be used with :func:`url_for`
        :return: Object decorated by :func:`websocket`
"""
if strict_slashes is None:
strict_slashes = self.strict_slashes
return self.websocket(
uri,
host=host,
strict_slashes=strict_slashes,
subprotocols=subprotocols,
name=name,
)(handler) | A helper method to register a function as a websocket route.
:param handler: a callable function or instance of a class
that can handle the websocket request
:param host: Host IP or FQDN details
:param uri: URL path that will be mapped to the websocket
handler
:param strict_slashes: If the API endpoint needs to terminate
with a "/" or not
:param subprotocols: Subprotocols to be used with websocket
handshake
:param name: A unique name assigned to the URL so that it can
be used with :func:`url_for`
        :return: Object decorated by :func:`websocket` | Below is the instruction that describes the task:
### Input:
A helper method to register a function as a websocket route.
:param handler: a callable function or instance of a class
that can handle the websocket request
:param host: Host IP or FQDN details
:param uri: URL path that will be mapped to the websocket
handler
:param strict_slashes: If the API endpoint needs to terminate
with a "/" or not
:param subprotocols: Subprotocols to be used with websocket
handshake
:param name: A unique name assigned to the URL so that it can
be used with :func:`url_for`
:return: Objected decorated by :func:`websocket`
### Response:
def add_websocket_route(
self,
handler,
uri,
host=None,
strict_slashes=None,
subprotocols=None,
name=None,
):
"""
A helper method to register a function as a websocket route.
:param handler: a callable function or instance of a class
that can handle the websocket request
:param host: Host IP or FQDN details
:param uri: URL path that will be mapped to the websocket
handler
:param strict_slashes: If the API endpoint needs to terminate
with a "/" or not
:param subprotocols: Subprotocols to be used with websocket
handshake
:param name: A unique name assigned to the URL so that it can
be used with :func:`url_for`
        :return: Object decorated by :func:`websocket`
"""
if strict_slashes is None:
strict_slashes = self.strict_slashes
return self.websocket(
uri,
host=host,
strict_slashes=strict_slashes,
subprotocols=subprotocols,
name=name,
)(handler) |
def get_xml_text(source, path=None):
"""Get the text of the XML node. If path is not None, it will get the text of the descendant of
source indicated by path.
"""
if path is None:
return source.text
else:
return get_xml_text(get_xml_child(source, path)) | Get the text of the XML node. If path is not None, it will get the text of the descendant of
    source indicated by path. | Below is the instruction that describes the task:
### Input:
Get the text of the XML node. If path is not None, it will get the text of the descendant of
source indicated by path.
### Response:
def get_xml_text(source, path=None):
"""Get the text of the XML node. If path is not None, it will get the text of the descendant of
source indicated by path.
"""
if path is None:
return source.text
else:
return get_xml_text(get_xml_child(source, path)) |
def get_multi(cls, blob_keys, **ctx_options):
"""Multi-key version of get().
Args:
blob_keys: A list of blob keys.
**ctx_options: Context options for Model().get_by_id().
Returns:
A list whose items are each either a BlobInfo entity or None.
"""
futs = cls.get_multi_async(blob_keys, **ctx_options)
return [fut.get_result() for fut in futs] | Multi-key version of get().
Args:
blob_keys: A list of blob keys.
**ctx_options: Context options for Model().get_by_id().
Returns:
      A list whose items are each either a BlobInfo entity or None. | Below is the instruction that describes the task:
### Input:
Multi-key version of get().
Args:
blob_keys: A list of blob keys.
**ctx_options: Context options for Model().get_by_id().
Returns:
A list whose items are each either a BlobInfo entity or None.
### Response:
def get_multi(cls, blob_keys, **ctx_options):
"""Multi-key version of get().
Args:
blob_keys: A list of blob keys.
**ctx_options: Context options for Model().get_by_id().
Returns:
A list whose items are each either a BlobInfo entity or None.
"""
futs = cls.get_multi_async(blob_keys, **ctx_options)
return [fut.get_result() for fut in futs] |
def default_change_form_template(self):
"""
Determine what the actual `change_form_template` should be.
"""
opts = self.model._meta
app_label = opts.app_label
return select_template_name((
"admin/{0}/{1}/change_form.html".format(app_label, opts.object_name.lower()),
"admin/{0}/change_form.html".format(app_label),
"admin/change_form.html"
    )) | Determine what the actual `change_form_template` should be. | Below is the instruction that describes the task:
### Input:
Determine what the actual `change_form_template` should be.
### Response:
def default_change_form_template(self):
"""
Determine what the actual `change_form_template` should be.
"""
opts = self.model._meta
app_label = opts.app_label
return select_template_name((
"admin/{0}/{1}/change_form.html".format(app_label, opts.object_name.lower()),
"admin/{0}/change_form.html".format(app_label),
"admin/change_form.html"
)) |
def makeReplacementField(formatSpec, altFormatSpec='', testValue=None):
""" Prepends a colon and wraps the formatSpec in curly braces to yield a replacement field.
The format specification is part of a replacement field, which can be used in new-style
string formatting. See:
https://docs.python.org/3/library/string.html#format-string-syntax
https://docs.python.org/3/library/string.html#format-specification-mini-language
    If the formatSpec does not contain a colon or exclamation mark, a colon is prepended.
    If the formatSpec starts and ends in quotes (single or double) only the quotes are removed,
    no curly braces or colon characters are added. This allows users to define a format spec.
:param formatSpec: e.g. '5.2f' will return '{:5.2f}'
:param altFormatSpec: alternative that will be used if the formatSpec evaluates to False
:param testValue: if not None, result.format(testValue) will be evaluated as a test.
:return: string
"""
check_is_a_string(formatSpec)
check_is_a_string(altFormatSpec)
fmt = altFormatSpec if not formatSpec else formatSpec
if is_quoted(fmt):
fmt = fmt[1:-1] # remove quotes
else:
if fmt and ':' not in fmt and '!' not in fmt:
fmt = ':' + fmt
fmt = '{' + fmt + '}'
# Test resulting replacement field
if testValue is not None:
try:
_dummy = fmt.format(testValue)
except Exception:
msg = ("Format specifier failed: replacement-field={!r}, test-value={!r}"
.format(fmt, testValue))
logger.error(msg)
raise ValueError(msg)
logger.debug("Resulting replacement field: {!r}".format(fmt))
return fmt | Prepends a colon and wraps the formatSpec in curly braces to yield a replacement field.
The format specification is part of a replacement field, which can be used in new-style
string formatting. See:
https://docs.python.org/3/library/string.html#format-string-syntax
https://docs.python.org/3/library/string.html#format-specification-mini-language
If the formatSpec does not contain a a color or exclamation mark, a colon is prepended.
If the formatSpec starts and end in quotes (single or double) only the quotes are removed,
no curly braces or colon charactes are added. This allows users to define a format spec.
:param formatSpec: e.g. '5.2f' will return '{:5.2f}'
:param altFormatSpec: alternative that will be used if the formatSpec evaluates to False
:param testValue: if not None, result.format(testValue) will be evaluated as a test.
    :return: string | Below is the instruction that describes the task:
### Input:
Prepends a colon and wraps the formatSpec in curly braces to yield a replacement field.
The format specification is part of a replacement field, which can be used in new-style
string formatting. See:
https://docs.python.org/3/library/string.html#format-string-syntax
https://docs.python.org/3/library/string.html#format-specification-mini-language
    If the formatSpec does not contain a colon or exclamation mark, a colon is prepended.
    If the formatSpec starts and ends in quotes (single or double) only the quotes are removed,
    no curly braces or colon characters are added. This allows users to define a format spec.
:param formatSpec: e.g. '5.2f' will return '{:5.2f}'
:param altFormatSpec: alternative that will be used if the formatSpec evaluates to False
:param testValue: if not None, result.format(testValue) will be evaluated as a test.
:return: string
### Response:
def makeReplacementField(formatSpec, altFormatSpec='', testValue=None):
""" Prepends a colon and wraps the formatSpec in curly braces to yield a replacement field.
The format specification is part of a replacement field, which can be used in new-style
string formatting. See:
https://docs.python.org/3/library/string.html#format-string-syntax
https://docs.python.org/3/library/string.html#format-specification-mini-language
    If the formatSpec does not contain a colon or exclamation mark, a colon is prepended.
    If the formatSpec starts and ends in quotes (single or double) only the quotes are removed,
    no curly braces or colon characters are added. This allows users to define a format spec.
:param formatSpec: e.g. '5.2f' will return '{:5.2f}'
:param altFormatSpec: alternative that will be used if the formatSpec evaluates to False
:param testValue: if not None, result.format(testValue) will be evaluated as a test.
:return: string
"""
check_is_a_string(formatSpec)
check_is_a_string(altFormatSpec)
fmt = altFormatSpec if not formatSpec else formatSpec
if is_quoted(fmt):
fmt = fmt[1:-1] # remove quotes
else:
if fmt and ':' not in fmt and '!' not in fmt:
fmt = ':' + fmt
fmt = '{' + fmt + '}'
# Test resulting replacement field
if testValue is not None:
try:
_dummy = fmt.format(testValue)
except Exception:
msg = ("Format specifier failed: replacement-field={!r}, test-value={!r}"
.format(fmt, testValue))
logger.error(msg)
raise ValueError(msg)
logger.debug("Resulting replacement field: {!r}".format(fmt))
return fmt |
def curve_fit(self):
"""
Fits `scipy_data_fitting.Fit.function` to the data and returns
the output from the specified curve fit function.
See `scipy_data_fitting.Fit.options` for details on how to control
        or override the curve fitting algorithm.
"""
if not hasattr(self,'_curve_fit'):
options = self.options.copy()
fit_function = options.pop('fit_function')
independent_values = self.data.array[0]
dependent_values = self.data.array[1]
if fit_function == 'lmfit':
self._curve_fit = lmfit.minimize(
self.lmfit_fcn2min, self.lmfit_parameters,
args=(independent_values, dependent_values, self.data.error), **options)
else:
p0 = [ prefix_factor(param) * param['guess'] for param in self.fitting_parameters ]
self._curve_fit = fit_function(
self.function, independent_values, dependent_values, p0, **options)
return self._curve_fit | Fits `scipy_data_fitting.Fit.function` to the data and returns
the output from the specified curve fit function.
See `scipy_data_fitting.Fit.options` for details on how to control
        or override the curve fitting algorithm. | Below is the instruction that describes the task:
### Input:
Fits `scipy_data_fitting.Fit.function` to the data and returns
the output from the specified curve fit function.
See `scipy_data_fitting.Fit.options` for details on how to control
        or override the curve fitting algorithm.
### Response:
def curve_fit(self):
"""
Fits `scipy_data_fitting.Fit.function` to the data and returns
the output from the specified curve fit function.
See `scipy_data_fitting.Fit.options` for details on how to control
        or override the curve fitting algorithm.
"""
if not hasattr(self,'_curve_fit'):
options = self.options.copy()
fit_function = options.pop('fit_function')
independent_values = self.data.array[0]
dependent_values = self.data.array[1]
if fit_function == 'lmfit':
self._curve_fit = lmfit.minimize(
self.lmfit_fcn2min, self.lmfit_parameters,
args=(independent_values, dependent_values, self.data.error), **options)
else:
p0 = [ prefix_factor(param) * param['guess'] for param in self.fitting_parameters ]
self._curve_fit = fit_function(
self.function, independent_values, dependent_values, p0, **options)
return self._curve_fit |
def remove_from_watched(self, watched):
"""
:calls: `DELETE /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
:param watched: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(watched, github.Repository.Repository), watched
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/repos/" + watched._identity + "/subscription"
) | :calls: `DELETE /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
:param watched: :class:`github.Repository.Repository`
        :rtype: None | Below is the instruction that describes the task:
### Input:
:calls: `DELETE /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
:param watched: :class:`github.Repository.Repository`
:rtype: None
### Response:
def remove_from_watched(self, watched):
"""
:calls: `DELETE /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
:param watched: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(watched, github.Repository.Repository), watched
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/repos/" + watched._identity + "/subscription"
) |
def write_and_get_hash(path, write_to_file, hash=None):
""" write_and_get_hash: write file
Args: None
Returns: Hash of file's contents
"""
hash = hash or hashlib.md5()
try:
# Access path
r = config.DOWNLOAD_SESSION.get(path, stream=True)
r.raise_for_status()
for chunk in r:
write_to_file.write(chunk)
hash.update(chunk)
except (MissingSchema, InvalidSchema):
# If path is a local file path, try to open the file (generate hash if none provided)
with open(path, 'rb') as fobj:
for chunk in iter(lambda: fobj.read(2097152), b""):
write_to_file.write(chunk)
hash.update(chunk)
assert write_to_file.tell() > 0, "File failed to write (corrupted)."
return hash | write_and_get_hash: write file
Args: None
        Returns: Hash of file's contents | Below is the instruction that describes the task:
### Input:
write_and_get_hash: write file
Args: None
Returns: Hash of file's contents
### Response:
def write_and_get_hash(path, write_to_file, hash=None):
""" write_and_get_hash: write file
Args: None
Returns: Hash of file's contents
"""
hash = hash or hashlib.md5()
try:
# Access path
r = config.DOWNLOAD_SESSION.get(path, stream=True)
r.raise_for_status()
for chunk in r:
write_to_file.write(chunk)
hash.update(chunk)
except (MissingSchema, InvalidSchema):
# If path is a local file path, try to open the file (generate hash if none provided)
with open(path, 'rb') as fobj:
for chunk in iter(lambda: fobj.read(2097152), b""):
write_to_file.write(chunk)
hash.update(chunk)
assert write_to_file.tell() > 0, "File failed to write (corrupted)."
return hash |
def load_formatter_fn(formatter):
'''
>>> load_formatter_fn('logagg.formatters.basescript') #doctest: +ELLIPSIS
<function basescript at 0x...>
'''
obj = util.load_object(formatter)
if not hasattr(obj, 'ispartial'):
obj.ispartial = util.ispartial
return obj | >>> load_formatter_fn('logagg.formatters.basescript') #doctest: +ELLIPSIS
    <function basescript at 0x...> | Below is the instruction that describes the task:
### Input:
>>> load_formatter_fn('logagg.formatters.basescript') #doctest: +ELLIPSIS
<function basescript at 0x...>
### Response:
def load_formatter_fn(formatter):
'''
>>> load_formatter_fn('logagg.formatters.basescript') #doctest: +ELLIPSIS
<function basescript at 0x...>
'''
obj = util.load_object(formatter)
if not hasattr(obj, 'ispartial'):
obj.ispartial = util.ispartial
return obj |
def set_state_process(self, context, process):
"""Method to append process for a context in the IF state.
:param context: It can be a layer purpose or a section (impact
function, post processor).
:type context: str, unicode
:param process: A text explain the process.
:type process: str, unicode
"""
LOGGER.info('%s: %s' % (context, process))
self.state[context]["process"].append(process) | Method to append process for a context in the IF state.
:param context: It can be a layer purpose or a section (impact
function, post processor).
:type context: str, unicode
:param process: A text explain the process.
:type process: str, unicode | Below is the the instruction that describes the task:
### Input:
Method to append process for a context in the IF state.
:param context: It can be a layer purpose or a section (impact
function, post processor).
:type context: str, unicode
:param process: A text explain the process.
:type process: str, unicode
### Response:
def set_state_process(self, context, process):
"""Method to append process for a context in the IF state.
:param context: It can be a layer purpose or a section (impact
function, post processor).
:type context: str, unicode
:param process: A text explain the process.
:type process: str, unicode
"""
LOGGER.info('%s: %s' % (context, process))
self.state[context]["process"].append(process) |
def finalize(self):
"""This will add the very last document to counts. We also get rid of counts[0] since that
represents document level which doesnt come under anything else. We also convert all count
values to numpy arrays so that stats can be computed easily.
"""
for i in range(1, len(self._local_counts)):
self.counts[i].append(self._local_counts[i])
self.counts.pop(0)
for i in range(len(self.counts)):
self.counts[i] = np.array(self.counts[i]) | This will add the very last document to counts. We also get rid of counts[0] since that
represents document level which doesnt come under anything else. We also convert all count
values to numpy arrays so that stats can be computed easily. | Below is the the instruction that describes the task:
### Input:
This will add the very last document to counts. We also get rid of counts[0] since that
represents document level which doesnt come under anything else. We also convert all count
values to numpy arrays so that stats can be computed easily.
### Response:
def finalize(self):
"""This will add the very last document to counts. We also get rid of counts[0] since that
represents document level which doesnt come under anything else. We also convert all count
values to numpy arrays so that stats can be computed easily.
"""
for i in range(1, len(self._local_counts)):
self.counts[i].append(self._local_counts[i])
self.counts.pop(0)
for i in range(len(self.counts)):
self.counts[i] = np.array(self.counts[i]) |
def _update_vdr_vxrheadtail(self, f, vdr_offset, VXRoffset):
'''
This sets a VXR to be the first and last VXR in the VDR
'''
# VDR's VXRhead
self._update_offset_value(f, vdr_offset+28, 8, VXRoffset)
# VDR's VXRtail
self._update_offset_value(f, vdr_offset+36, 8, VXRoffset) | This sets a VXR to be the first and last VXR in the VDR | Below is the the instruction that describes the task:
### Input:
This sets a VXR to be the first and last VXR in the VDR
### Response:
def _update_vdr_vxrheadtail(self, f, vdr_offset, VXRoffset):
'''
This sets a VXR to be the first and last VXR in the VDR
'''
# VDR's VXRhead
self._update_offset_value(f, vdr_offset+28, 8, VXRoffset)
# VDR's VXRtail
self._update_offset_value(f, vdr_offset+36, 8, VXRoffset) |
def to_dict(self) -> OrderedDict:
"""Dictionary representation of the binning schema.
This serves as template method, please implement _update_dict
"""
result = OrderedDict()
result["adaptive"] = self._adaptive
result["binning_type"] = type(self).__name__
self._update_dict(result)
return result | Dictionary representation of the binning schema.
This serves as template method, please implement _update_dict | Below is the the instruction that describes the task:
### Input:
Dictionary representation of the binning schema.
This serves as template method, please implement _update_dict
### Response:
def to_dict(self) -> OrderedDict:
"""Dictionary representation of the binning schema.
This serves as template method, please implement _update_dict
"""
result = OrderedDict()
result["adaptive"] = self._adaptive
result["binning_type"] = type(self).__name__
self._update_dict(result)
return result |
def math_func(f):
"""
Statics the methods. wut.
"""
@wraps(f)
def wrapper(*args, **kwargs):
if len(args) > 0:
return_type = type(args[0])
if kwargs.has_key('return_type'):
return_type = kwargs['return_type']
kwargs.pop('return_type')
return return_type(f(*args, **kwargs))
args = list((setify(x) for x in args))
return return_type(f(*args, **kwargs))
return wrapper | Statics the methods. wut. | Below is the the instruction that describes the task:
### Input:
Statics the methods. wut.
### Response:
def math_func(f):
"""
Statics the methods. wut.
"""
@wraps(f)
def wrapper(*args, **kwargs):
if len(args) > 0:
return_type = type(args[0])
if kwargs.has_key('return_type'):
return_type = kwargs['return_type']
kwargs.pop('return_type')
return return_type(f(*args, **kwargs))
args = list((setify(x) for x in args))
return return_type(f(*args, **kwargs))
return wrapper |
def ind2sub(ind, dimensions):
"""
Calculates subscripts for indices into regularly spaced matrixes.
"""
# check that the index is within range
if ind >= np.prod(dimensions):
raise RuntimeError("ind2sub: index exceeds array size")
cum_dims = list(dimensions)
cum_dims.reverse()
m = 1
mult = []
for d in cum_dims:
m = m*d
mult.append(m)
mult.pop()
mult.reverse()
mult.append(1)
indices = []
for d in mult:
indices.append((ind/d)+1)
ind = ind - (ind/d)*d
return indices | Calculates subscripts for indices into regularly spaced matrixes. | Below is the the instruction that describes the task:
### Input:
Calculates subscripts for indices into regularly spaced matrixes.
### Response:
def ind2sub(ind, dimensions):
"""
Calculates subscripts for indices into regularly spaced matrixes.
"""
# check that the index is within range
if ind >= np.prod(dimensions):
raise RuntimeError("ind2sub: index exceeds array size")
cum_dims = list(dimensions)
cum_dims.reverse()
m = 1
mult = []
for d in cum_dims:
m = m*d
mult.append(m)
mult.pop()
mult.reverse()
mult.append(1)
indices = []
for d in mult:
indices.append((ind/d)+1)
ind = ind - (ind/d)*d
return indices |
def relaxng(cls, includechildren=True,extraattribs = None, extraelements=None, origclass = None):
"""Returns a RelaxNG definition for this element (as an XML element (lxml.etree) rather than a string)"""
E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace",'a':"http://relaxng.org/ns/annotation/0.9" })
if not extraattribs:
extraattribs = []
extraattribs.append(E.optional(E.attribute(E.text(), name='set')) )
return AbstractElement.relaxng(includechildren, extraattribs, extraelements, cls) | Returns a RelaxNG definition for this element (as an XML element (lxml.etree) rather than a string) | Below is the the instruction that describes the task:
### Input:
Returns a RelaxNG definition for this element (as an XML element (lxml.etree) rather than a string)
### Response:
def relaxng(cls, includechildren=True,extraattribs = None, extraelements=None, origclass = None):
"""Returns a RelaxNG definition for this element (as an XML element (lxml.etree) rather than a string)"""
E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace",'a':"http://relaxng.org/ns/annotation/0.9" })
if not extraattribs:
extraattribs = []
extraattribs.append(E.optional(E.attribute(E.text(), name='set')) )
return AbstractElement.relaxng(includechildren, extraattribs, extraelements, cls) |
def update_ethernet_settings(self, configuration, force=False, timeout=-1):
"""
Updates the Ethernet interconnect settings for the logical interconnect.
Args:
configuration: Ethernet interconnect settings.
force: If set to true, the operation completes despite any problems with network connectivity or errors
on the resource itself. The default is false.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Logical Interconnect.
"""
uri = "{}/ethernetSettings".format(self.data["uri"])
return self._helper.update(configuration, uri=uri, force=force, timeout=timeout) | Updates the Ethernet interconnect settings for the logical interconnect.
Args:
configuration: Ethernet interconnect settings.
force: If set to true, the operation completes despite any problems with network connectivity or errors
on the resource itself. The default is false.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Logical Interconnect. | Below is the the instruction that describes the task:
### Input:
Updates the Ethernet interconnect settings for the logical interconnect.
Args:
configuration: Ethernet interconnect settings.
force: If set to true, the operation completes despite any problems with network connectivity or errors
on the resource itself. The default is false.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Logical Interconnect.
### Response:
def update_ethernet_settings(self, configuration, force=False, timeout=-1):
"""
Updates the Ethernet interconnect settings for the logical interconnect.
Args:
configuration: Ethernet interconnect settings.
force: If set to true, the operation completes despite any problems with network connectivity or errors
on the resource itself. The default is false.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Logical Interconnect.
"""
uri = "{}/ethernetSettings".format(self.data["uri"])
return self._helper.update(configuration, uri=uri, force=force, timeout=timeout) |
def offlineTable(self, login, tableName):
"""
Parameters:
- login
- tableName
"""
self.send_offlineTable(login, tableName)
self.recv_offlineTable() | Parameters:
- login
- tableName | Below is the the instruction that describes the task:
### Input:
Parameters:
- login
- tableName
### Response:
def offlineTable(self, login, tableName):
"""
Parameters:
- login
- tableName
"""
self.send_offlineTable(login, tableName)
self.recv_offlineTable() |
def setSubTitle(self, title):
"""
Sets the sub-title for this page to the inputed title.
:param title | <str>
"""
self._subTitleLabel.setText(title)
self._subTitleLabel.adjustSize()
self.adjustMargins() | Sets the sub-title for this page to the inputed title.
:param title | <str> | Below is the the instruction that describes the task:
### Input:
Sets the sub-title for this page to the inputed title.
:param title | <str>
### Response:
def setSubTitle(self, title):
"""
Sets the sub-title for this page to the inputed title.
:param title | <str>
"""
self._subTitleLabel.setText(title)
self._subTitleLabel.adjustSize()
self.adjustMargins() |
def _status_apf():
'''
Return True if apf is running otherwise return False
'''
status = 0
table = iptc.Table(iptc.Table.FILTER)
for chain in table.chains:
if 'sanity' in chain.name.lower():
status = 1
return True if status else False | Return True if apf is running otherwise return False | Below is the the instruction that describes the task:
### Input:
Return True if apf is running otherwise return False
### Response:
def _status_apf():
'''
Return True if apf is running otherwise return False
'''
status = 0
table = iptc.Table(iptc.Table.FILTER)
for chain in table.chains:
if 'sanity' in chain.name.lower():
status = 1
return True if status else False |
def cut(self, breaks, labels=None, include_lowest=False, right=True, dig_lab=3):
"""
Cut a numeric vector into categorical "buckets".
This method is only applicable to a single-column numeric frame.
:param List[float] breaks: The cut points in the numeric vector.
:param List[str] labels: Labels for categorical levels produced. Defaults to set notation of
intervals defined by the breaks.
:param bool include_lowest: By default, cuts are defined as intervals ``(lo, hi]``. If this parameter
is True, then the interval becomes ``[lo, hi]``.
:param bool right: Include the high value: ``(lo, hi]``. If False, get ``(lo, hi)``.
:param int dig_lab: Number of digits following the decimal point to consider.
:returns: Single-column H2OFrame of categorical data.
"""
assert_is_type(breaks, [numeric])
if self.ncols != 1: raise H2OValueError("Single-column frame is expected")
if self.types[self.names[0]] not in {"int", "real"}: raise H2OValueError("A numeric column is expected")
fr = H2OFrame._expr(expr=ExprNode("cut", self, breaks, labels, include_lowest, right, dig_lab),
cache=self._ex._cache)
fr._ex._cache.types = {k: "enum" for k in self.names}
return fr | Cut a numeric vector into categorical "buckets".
This method is only applicable to a single-column numeric frame.
:param List[float] breaks: The cut points in the numeric vector.
:param List[str] labels: Labels for categorical levels produced. Defaults to set notation of
intervals defined by the breaks.
:param bool include_lowest: By default, cuts are defined as intervals ``(lo, hi]``. If this parameter
is True, then the interval becomes ``[lo, hi]``.
:param bool right: Include the high value: ``(lo, hi]``. If False, get ``(lo, hi)``.
:param int dig_lab: Number of digits following the decimal point to consider.
:returns: Single-column H2OFrame of categorical data. | Below is the the instruction that describes the task:
### Input:
Cut a numeric vector into categorical "buckets".
This method is only applicable to a single-column numeric frame.
:param List[float] breaks: The cut points in the numeric vector.
:param List[str] labels: Labels for categorical levels produced. Defaults to set notation of
intervals defined by the breaks.
:param bool include_lowest: By default, cuts are defined as intervals ``(lo, hi]``. If this parameter
is True, then the interval becomes ``[lo, hi]``.
:param bool right: Include the high value: ``(lo, hi]``. If False, get ``(lo, hi)``.
:param int dig_lab: Number of digits following the decimal point to consider.
:returns: Single-column H2OFrame of categorical data.
### Response:
def cut(self, breaks, labels=None, include_lowest=False, right=True, dig_lab=3):
"""
Cut a numeric vector into categorical "buckets".
This method is only applicable to a single-column numeric frame.
:param List[float] breaks: The cut points in the numeric vector.
:param List[str] labels: Labels for categorical levels produced. Defaults to set notation of
intervals defined by the breaks.
:param bool include_lowest: By default, cuts are defined as intervals ``(lo, hi]``. If this parameter
is True, then the interval becomes ``[lo, hi]``.
:param bool right: Include the high value: ``(lo, hi]``. If False, get ``(lo, hi)``.
:param int dig_lab: Number of digits following the decimal point to consider.
:returns: Single-column H2OFrame of categorical data.
"""
assert_is_type(breaks, [numeric])
if self.ncols != 1: raise H2OValueError("Single-column frame is expected")
if self.types[self.names[0]] not in {"int", "real"}: raise H2OValueError("A numeric column is expected")
fr = H2OFrame._expr(expr=ExprNode("cut", self, breaks, labels, include_lowest, right, dig_lab),
cache=self._ex._cache)
fr._ex._cache.types = {k: "enum" for k in self.names}
return fr |
def select(self, *properties, **aliased_properties):
"""Specify which properties of the dataset must be returned
Property extraction is based on `JMESPath <http://jmespath.org>`_ expressions.
This method returns a new Dataset narrowed down by the given selection.
:param properties: JMESPath to use for the property extraction.
The JMESPath string will be used as a key in the output dictionary.
:param aliased_properties: Same as properties, but the output dictionary will contain
the parameter name instead of the JMESPath string.
"""
if not (properties or aliased_properties):
return self
merged_properties = dict(zip(properties, properties))
merged_properties.update(aliased_properties)
for prop_name in (merged_properties.keys()):
if prop_name in self.selection:
raise Exception('The property {} has already been selected'.format(prop_name))
new_selection = self.selection.copy()
new_selection.update(merged_properties)
return self._copy(selection=new_selection) | Specify which properties of the dataset must be returned
Property extraction is based on `JMESPath <http://jmespath.org>`_ expressions.
This method returns a new Dataset narrowed down by the given selection.
:param properties: JMESPath to use for the property extraction.
The JMESPath string will be used as a key in the output dictionary.
:param aliased_properties: Same as properties, but the output dictionary will contain
the parameter name instead of the JMESPath string. | Below is the the instruction that describes the task:
### Input:
Specify which properties of the dataset must be returned
Property extraction is based on `JMESPath <http://jmespath.org>`_ expressions.
This method returns a new Dataset narrowed down by the given selection.
:param properties: JMESPath to use for the property extraction.
The JMESPath string will be used as a key in the output dictionary.
:param aliased_properties: Same as properties, but the output dictionary will contain
the parameter name instead of the JMESPath string.
### Response:
def select(self, *properties, **aliased_properties):
"""Specify which properties of the dataset must be returned
Property extraction is based on `JMESPath <http://jmespath.org>`_ expressions.
This method returns a new Dataset narrowed down by the given selection.
:param properties: JMESPath to use for the property extraction.
The JMESPath string will be used as a key in the output dictionary.
:param aliased_properties: Same as properties, but the output dictionary will contain
the parameter name instead of the JMESPath string.
"""
if not (properties or aliased_properties):
return self
merged_properties = dict(zip(properties, properties))
merged_properties.update(aliased_properties)
for prop_name in (merged_properties.keys()):
if prop_name in self.selection:
raise Exception('The property {} has already been selected'.format(prop_name))
new_selection = self.selection.copy()
new_selection.update(merged_properties)
return self._copy(selection=new_selection) |
def from_input(cls, input, workdir=None, manager=None):
"""
Create an instance of `AbinitTask` from an ABINIT input.
Args:
ainput: `AbinitInput` object.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
return cls(input, workdir=workdir, manager=manager) | Create an instance of `AbinitTask` from an ABINIT input.
Args:
ainput: `AbinitInput` object.
workdir: Path to the working directory.
manager: :class:`TaskManager` object. | Below is the the instruction that describes the task:
### Input:
Create an instance of `AbinitTask` from an ABINIT input.
Args:
ainput: `AbinitInput` object.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
### Response:
def from_input(cls, input, workdir=None, manager=None):
"""
Create an instance of `AbinitTask` from an ABINIT input.
Args:
ainput: `AbinitInput` object.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
return cls(input, workdir=workdir, manager=manager) |
def token(self):
"""
Token given by Transbank for payment initialization url.
Will raise PaymentError when an error ocurred.
"""
if not self._token:
self._token = self.fetch_token()
logger.payment(self)
return self._token | Token given by Transbank for payment initialization url.
Will raise PaymentError when an error ocurred. | Below is the the instruction that describes the task:
### Input:
Token given by Transbank for payment initialization url.
Will raise PaymentError when an error ocurred.
### Response:
def token(self):
"""
Token given by Transbank for payment initialization url.
Will raise PaymentError when an error ocurred.
"""
if not self._token:
self._token = self.fetch_token()
logger.payment(self)
return self._token |
def match_concept(self,string):
'''Find all matches in this :class:`Bottle` for ``string`` and return the best match'''
matches = self.match_all_concepts(string)
if len(matches)>0:
return matches[0]
return None | Find all matches in this :class:`Bottle` for ``string`` and return the best match | Below is the the instruction that describes the task:
### Input:
Find all matches in this :class:`Bottle` for ``string`` and return the best match
### Response:
def match_concept(self,string):
'''Find all matches in this :class:`Bottle` for ``string`` and return the best match'''
matches = self.match_all_concepts(string)
if len(matches)>0:
return matches[0]
return None |
def _memoizeArgsOnly (max_cache_size=1000):
"""Python 2.4 compatible memoize decorator.
It creates a cache that has a maximum size. If the cache exceeds the max,
it is thrown out and a new one made. With such behavior, it is wise to set
the cache just a little larger that the maximum expected need.
Parameters:
max_cache_size - the size to which a cache can grow
Limitations:
The cache works only on args, not kwargs
"""
def wrapper (f):
def fn (*args):
try:
return fn.cache[args]
except KeyError:
if fn.count >= max_cache_size:
fn.cache = {}
fn.count = 0
fn.cache[args] = result = f(*args)
fn.count += 1
return result
fn.cache = {}
fn.count = 0
return fn
return wrapper | Python 2.4 compatible memoize decorator.
It creates a cache that has a maximum size. If the cache exceeds the max,
it is thrown out and a new one made. With such behavior, it is wise to set
the cache just a little larger that the maximum expected need.
Parameters:
max_cache_size - the size to which a cache can grow
Limitations:
The cache works only on args, not kwargs | Below is the the instruction that describes the task:
### Input:
Python 2.4 compatible memoize decorator.
It creates a cache that has a maximum size. If the cache exceeds the max,
it is thrown out and a new one made. With such behavior, it is wise to set
the cache just a little larger that the maximum expected need.
Parameters:
max_cache_size - the size to which a cache can grow
Limitations:
The cache works only on args, not kwargs
### Response:
def _memoizeArgsOnly (max_cache_size=1000):
"""Python 2.4 compatible memoize decorator.
It creates a cache that has a maximum size. If the cache exceeds the max,
it is thrown out and a new one made. With such behavior, it is wise to set
the cache just a little larger that the maximum expected need.
Parameters:
max_cache_size - the size to which a cache can grow
Limitations:
The cache works only on args, not kwargs
"""
def wrapper (f):
def fn (*args):
try:
return fn.cache[args]
except KeyError:
if fn.count >= max_cache_size:
fn.cache = {}
fn.count = 0
fn.cache[args] = result = f(*args)
fn.count += 1
return result
fn.cache = {}
fn.count = 0
return fn
return wrapper |
def write(self, filename=None):
"""Save template to xml. Before saving template will update
date, start position, well positions, and counts.
Parameters
----------
filename : str
If not set, XML will be written to self.filename.
"""
if not filename:
filename = self.filename
# update time
self.properties.CurrentDate = _current_time()
# set rubber band to true
self.properties.EnableRubberBand = 'true'
# update start position
self.update_start_position()
# update well postions
self.update_well_positions()
# update counts
self.update_counts()
# remove py:pytype attributes
objectify.deannotate(self.root)
# remove namespaces added by lxml
for child in self.root.iterchildren():
etree.cleanup_namespaces(child)
xml = etree.tostring(self.root, encoding='utf8',
xml_declaration=True, pretty_print=True)
# fix format quirks
# add carriage return character
xml = u'\r\n'.join(l.decode(encoding='utf8') for l in xml.splitlines())
# add space at "end/>" --> "end />"
xml = re.sub(r'(["a-z])/>', r'\1 />', xml)
xml = xml.replace("version='1.0' encoding='utf8'", 'version="1.0"')
with open(filename, 'wb') as f:
f.write(xml.encode('utf8')) | Save template to xml. Before saving template will update
date, start position, well positions, and counts.
Parameters
----------
filename : str
If not set, XML will be written to self.filename. | Below is the the instruction that describes the task:
### Input:
Save template to xml. Before saving template will update
date, start position, well positions, and counts.
Parameters
----------
filename : str
If not set, XML will be written to self.filename.
### Response:
def write(self, filename=None):
"""Save template to xml. Before saving template will update
date, start position, well positions, and counts.
Parameters
----------
filename : str
If not set, XML will be written to self.filename.
"""
if not filename:
filename = self.filename
# update time
self.properties.CurrentDate = _current_time()
# set rubber band to true
self.properties.EnableRubberBand = 'true'
# update start position
self.update_start_position()
# update well postions
self.update_well_positions()
# update counts
self.update_counts()
# remove py:pytype attributes
objectify.deannotate(self.root)
# remove namespaces added by lxml
for child in self.root.iterchildren():
etree.cleanup_namespaces(child)
xml = etree.tostring(self.root, encoding='utf8',
xml_declaration=True, pretty_print=True)
# fix format quirks
# add carriage return character
xml = u'\r\n'.join(l.decode(encoding='utf8') for l in xml.splitlines())
# add space at "end/>" --> "end />"
xml = re.sub(r'(["a-z])/>', r'\1 />', xml)
xml = xml.replace("version='1.0' encoding='utf8'", 'version="1.0"')
with open(filename, 'wb') as f:
f.write(xml.encode('utf8')) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.