code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def get_appointment_group(self, appointment_group):
"""
Return single Appointment Group by id
:calls: `GET /api/v1/appointment_groups/:id \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.show>`_
:param appointment_group: The ID of the appointment group.
:type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
:rtype: :class:`canvasapi.appointment_group.AppointmentGroup`
"""
from canvasapi.appointment_group import AppointmentGroup
appointment_group_id = obj_or_id(
appointment_group, "appointment_group", (AppointmentGroup,)
)
response = self.__requester.request(
'GET',
'appointment_groups/{}'.format(appointment_group_id)
)
return AppointmentGroup(self.__requester, response.json()) | Return single Appointment Group by id
:calls: `GET /api/v1/appointment_groups/:id \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.show>`_
:param appointment_group: The ID of the appointment group.
:type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
:rtype: :class:`canvasapi.appointment_group.AppointmentGroup` | Below is the the instruction that describes the task:
### Input:
Return single Appointment Group by id
:calls: `GET /api/v1/appointment_groups/:id \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.show>`_
:param appointment_group: The ID of the appointment group.
:type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
:rtype: :class:`canvasapi.appointment_group.AppointmentGroup`
### Response:
def get_appointment_group(self, appointment_group):
"""
Return single Appointment Group by id
:calls: `GET /api/v1/appointment_groups/:id \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.show>`_
:param appointment_group: The ID of the appointment group.
:type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
:rtype: :class:`canvasapi.appointment_group.AppointmentGroup`
"""
from canvasapi.appointment_group import AppointmentGroup
appointment_group_id = obj_or_id(
appointment_group, "appointment_group", (AppointmentGroup,)
)
response = self.__requester.request(
'GET',
'appointment_groups/{}'.format(appointment_group_id)
)
return AppointmentGroup(self.__requester, response.json()) |
def edge_detect(image, size):
"""
Applies a Sobel filter to the given image.
:param image: An image as a list of (R,G,B) values
:param size: The size of the image as a tuple (width, height)
:return: An array of the Sobel gradient value of each image pixel
This value roughly corresponds to how much of an "edge" a pixel is.
"""
# TODO get edge data for boundaries
width, height = size
edge_data = [0] * len(image)
gray_scale_img = list(map(luma, image))
for y in range(1, height - 1):
for x in range(1, width - 1):
idx = coords_to_index((x, y), width)
a, b, c = gray_scale_img[idx - 1 - width: idx + 2 - width]
d, e, f = gray_scale_img[idx - 1: idx + 2]
g, h, i = gray_scale_img[idx - 1 + width: idx + 2 + width]
g_x = -a - 2 * d - d + c + 2 * f + i
g_y = -a - 2 * b - c + g + 2 * h + i
g = sqrt(g_x * g_x + g_y * g_y)
edge_data[idx] = g
if idx % 200000 == 0:
logger.info("Edge detection done for %d / %d pixels... (%2.2f%%)" %
(idx, width * height, 100 * idx / float(width * height)))
return edge_data | Applies a Sobel filter to the given image.
:param image: An image as a list of (R,G,B) values
:param size: The size of the image as a tuple (width, height)
:return: An array of the Sobel gradient value of each image pixel
This value roughly corresponds to how much of an "edge" a pixel is. | Below is the the instruction that describes the task:
### Input:
Applies a Sobel filter to the given image.
:param image: An image as a list of (R,G,B) values
:param size: The size of the image as a tuple (width, height)
:return: An array of the Sobel gradient value of each image pixel
This value roughly corresponds to how much of an "edge" a pixel is.
### Response:
def edge_detect(image, size):
"""
Applies a Sobel filter to the given image.
:param image: An image as a list of (R,G,B) values
:param size: The size of the image as a tuple (width, height)
:return: An array of the Sobel gradient value of each image pixel
This value roughly corresponds to how much of an "edge" a pixel is.
"""
# TODO get edge data for boundaries
width, height = size
edge_data = [0] * len(image)
gray_scale_img = list(map(luma, image))
for y in range(1, height - 1):
for x in range(1, width - 1):
idx = coords_to_index((x, y), width)
a, b, c = gray_scale_img[idx - 1 - width: idx + 2 - width]
d, e, f = gray_scale_img[idx - 1: idx + 2]
g, h, i = gray_scale_img[idx - 1 + width: idx + 2 + width]
g_x = -a - 2 * d - d + c + 2 * f + i
g_y = -a - 2 * b - c + g + 2 * h + i
g = sqrt(g_x * g_x + g_y * g_y)
edge_data[idx] = g
if idx % 200000 == 0:
logger.info("Edge detection done for %d / %d pixels... (%2.2f%%)" %
(idx, width * height, 100 * idx / float(width * height)))
return edge_data |
def tag(self):
"""
Get the string representation of the tag used to annotate the status in VOSpace.
@return: str
"""
return "{}{}_{}{:02d}".format(self.target.prefix,
self,
self.target.version,
self.target.ccd) | Get the string representation of the tag used to annotate the status in VOSpace.
@return: str | Below is the the instruction that describes the task:
### Input:
Get the string representation of the tag used to annotate the status in VOSpace.
@return: str
### Response:
def tag(self):
"""
Get the string representation of the tag used to annotate the status in VOSpace.
@return: str
"""
return "{}{}_{}{:02d}".format(self.target.prefix,
self,
self.target.version,
self.target.ccd) |
async def _get_cdn_client(self, cdn_redirect):
"""Similar to ._borrow_exported_client, but for CDNs"""
# TODO Implement
raise NotImplementedError
session = self._exported_sessions.get(cdn_redirect.dc_id)
if not session:
dc = await self._get_dc(cdn_redirect.dc_id, cdn=True)
session = self.session.clone()
await session.set_dc(dc.id, dc.ip_address, dc.port)
self._exported_sessions[cdn_redirect.dc_id] = session
self._log[__name__].info('Creating new CDN client')
client = TelegramBareClient(
session, self.api_id, self.api_hash,
proxy=self._sender.connection.conn.proxy,
timeout=self._sender.connection.get_timeout()
)
# This will make use of the new RSA keys for this specific CDN.
#
# We won't be calling GetConfigRequest because it's only called
# when needed by ._get_dc, and also it's static so it's likely
# set already. Avoid invoking non-CDN methods by not syncing updates.
client.connect(_sync_updates=False)
return client | Similar to ._borrow_exported_client, but for CDNs | Below is the the instruction that describes the task:
### Input:
Similar to ._borrow_exported_client, but for CDNs
### Response:
async def _get_cdn_client(self, cdn_redirect):
"""Similar to ._borrow_exported_client, but for CDNs"""
# TODO Implement
raise NotImplementedError
session = self._exported_sessions.get(cdn_redirect.dc_id)
if not session:
dc = await self._get_dc(cdn_redirect.dc_id, cdn=True)
session = self.session.clone()
await session.set_dc(dc.id, dc.ip_address, dc.port)
self._exported_sessions[cdn_redirect.dc_id] = session
self._log[__name__].info('Creating new CDN client')
client = TelegramBareClient(
session, self.api_id, self.api_hash,
proxy=self._sender.connection.conn.proxy,
timeout=self._sender.connection.get_timeout()
)
# This will make use of the new RSA keys for this specific CDN.
#
# We won't be calling GetConfigRequest because it's only called
# when needed by ._get_dc, and also it's static so it's likely
# set already. Avoid invoking non-CDN methods by not syncing updates.
client.connect(_sync_updates=False)
return client |
def DbDeleteDeviceAttribute(self, argin):
""" Delete device attribute properties from database
:param argin: Str[0] = Device name
Str[1] = Attribute name
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid """
self._log.debug("In DbDeleteDeviceAttribute()")
if len(argin) < 2:
self.warn_stream("DataBase::db_delete_device_attribute(): insufficient number of arguments ")
th_exc(DB_IncorrectArguments,
"insufficient number of arguments to delete device attribute",
"DataBase::DeleteDeviceAttribute()")
dev_name, attr_name = argin[:2]
ret, dev_name, dfm = check_device_name(argin)
if not ret:
self.warn_stream("DataBase::db_delete_device_attribute(): device name " + argin + " incorrect ")
th_exc(DB_IncorrectDeviceName,
"failed to delete device attribute, device name incorrect",
"DataBase::DeleteDeviceAttribute()")
self.db.delete_device_attribute(dev_name, attr_name) | Delete device attribute properties from database
:param argin: Str[0] = Device name
Str[1] = Attribute name
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid | Below is the the instruction that describes the task:
### Input:
Delete device attribute properties from database
:param argin: Str[0] = Device name
Str[1] = Attribute name
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid
### Response:
def DbDeleteDeviceAttribute(self, argin):
""" Delete device attribute properties from database
:param argin: Str[0] = Device name
Str[1] = Attribute name
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid """
self._log.debug("In DbDeleteDeviceAttribute()")
if len(argin) < 2:
self.warn_stream("DataBase::db_delete_device_attribute(): insufficient number of arguments ")
th_exc(DB_IncorrectArguments,
"insufficient number of arguments to delete device attribute",
"DataBase::DeleteDeviceAttribute()")
dev_name, attr_name = argin[:2]
ret, dev_name, dfm = check_device_name(argin)
if not ret:
self.warn_stream("DataBase::db_delete_device_attribute(): device name " + argin + " incorrect ")
th_exc(DB_IncorrectDeviceName,
"failed to delete device attribute, device name incorrect",
"DataBase::DeleteDeviceAttribute()")
self.db.delete_device_attribute(dev_name, attr_name) |
def parse_address(self, address, line_number=-1):
"""
Return an Address object from the given address. Passes itself to the Address constructor to use all the custom
loaded suffixes, cities, etc.
"""
return Address(address, self, line_number, self.logger) | Return an Address object from the given address. Passes itself to the Address constructor to use all the custom
loaded suffixes, cities, etc. | Below is the the instruction that describes the task:
### Input:
Return an Address object from the given address. Passes itself to the Address constructor to use all the custom
loaded suffixes, cities, etc.
### Response:
def parse_address(self, address, line_number=-1):
"""
Return an Address object from the given address. Passes itself to the Address constructor to use all the custom
loaded suffixes, cities, etc.
"""
return Address(address, self, line_number, self.logger) |
def _lockfile(self):
"""Pipfile.lock divided by PyPI and external dependencies."""
pfile = pipfile.load(self.pipfile_location, inject_env=False)
lockfile = json.loads(pfile.lock())
for section in ("default", "develop"):
lock_section = lockfile.get(section, {})
for key in list(lock_section.keys()):
norm_key = pep423_name(key)
lockfile[section][norm_key] = lock_section.pop(key)
return lockfile | Pipfile.lock divided by PyPI and external dependencies. | Below is the the instruction that describes the task:
### Input:
Pipfile.lock divided by PyPI and external dependencies.
### Response:
def _lockfile(self):
"""Pipfile.lock divided by PyPI and external dependencies."""
pfile = pipfile.load(self.pipfile_location, inject_env=False)
lockfile = json.loads(pfile.lock())
for section in ("default", "develop"):
lock_section = lockfile.get(section, {})
for key in list(lock_section.keys()):
norm_key = pep423_name(key)
lockfile[section][norm_key] = lock_section.pop(key)
return lockfile |
def vecangle(v1, v2, deg=True):
"""Calculate the angle between two vectors
:param v1: coordinates of vector v1
:param v2: coordinates of vector v2
:returns : angle in degree or rad
"""
if np.array_equal(v1, v2):
return 0.0
dm = np.dot(v1, v2)
cm = np.linalg.norm(v1) * np.linalg.norm(v2)
angle = np.arccos(round(dm / cm, 10)) # Round here to prevent floating point errors
return np.degrees([angle, ])[0] if deg else angle | Calculate the angle between two vectors
:param v1: coordinates of vector v1
:param v2: coordinates of vector v2
:returns : angle in degree or rad | Below is the the instruction that describes the task:
### Input:
Calculate the angle between two vectors
:param v1: coordinates of vector v1
:param v2: coordinates of vector v2
:returns : angle in degree or rad
### Response:
def vecangle(v1, v2, deg=True):
"""Calculate the angle between two vectors
:param v1: coordinates of vector v1
:param v2: coordinates of vector v2
:returns : angle in degree or rad
"""
if np.array_equal(v1, v2):
return 0.0
dm = np.dot(v1, v2)
cm = np.linalg.norm(v1) * np.linalg.norm(v2)
angle = np.arccos(round(dm / cm, 10)) # Round here to prevent floating point errors
return np.degrees([angle, ])[0] if deg else angle |
def token_generator(tree_path, source_token_vocab, target_token_vocab,
eos=None):
"""Generator for parsing as a sequence-to-sequence task that uses tokens.
This generator assumes the files at source_path and target_path have
the same number of lines and yields dictionaries of "inputs" and "targets"
where inputs and targets are token ids from source and target lines
converted to integers using the token_map.
Args:
tree_path: path to the file with WSJ format trees, one per line.
source_token_vocab: GenericVocabulary object for source vocabulary.
target_token_vocab: GenericVocabulary object for target vocabulary.
eos: integer to append at the end of each sequence (default: None).
Yields:
A dictionary {"inputs": source-line, "targets": target-line} where
the lines are integer lists converted from tokens in the file lines.
"""
eos_list = [] if eos is None else [eos]
with tf.gfile.GFile(tree_path, mode="r") as tree_file:
tree_line = tree_file.readline()
while tree_line:
source, target = words_and_tags_from_wsj_tree(tree_line)
source_ints = source_token_vocab.encode(source.strip()) + eos_list
target_ints = target_token_vocab.encode(target.strip()) + eos_list
yield {"inputs": source_ints, "targets": target_ints}
tree_line = tree_file.readline() | Generator for parsing as a sequence-to-sequence task that uses tokens.
This generator assumes the files at source_path and target_path have
the same number of lines and yields dictionaries of "inputs" and "targets"
where inputs and targets are token ids from source and target lines
converted to integers using the token_map.
Args:
tree_path: path to the file with WSJ format trees, one per line.
source_token_vocab: GenericVocabulary object for source vocabulary.
target_token_vocab: GenericVocabulary object for target vocabulary.
eos: integer to append at the end of each sequence (default: None).
Yields:
A dictionary {"inputs": source-line, "targets": target-line} where
the lines are integer lists converted from tokens in the file lines. | Below is the the instruction that describes the task:
### Input:
Generator for parsing as a sequence-to-sequence task that uses tokens.
This generator assumes the files at source_path and target_path have
the same number of lines and yields dictionaries of "inputs" and "targets"
where inputs and targets are token ids from source and target lines
converted to integers using the token_map.
Args:
tree_path: path to the file with WSJ format trees, one per line.
source_token_vocab: GenericVocabulary object for source vocabulary.
target_token_vocab: GenericVocabulary object for target vocabulary.
eos: integer to append at the end of each sequence (default: None).
Yields:
A dictionary {"inputs": source-line, "targets": target-line} where
the lines are integer lists converted from tokens in the file lines.
### Response:
def token_generator(tree_path, source_token_vocab, target_token_vocab,
eos=None):
"""Generator for parsing as a sequence-to-sequence task that uses tokens.
This generator assumes the files at source_path and target_path have
the same number of lines and yields dictionaries of "inputs" and "targets"
where inputs and targets are token ids from source and target lines
converted to integers using the token_map.
Args:
tree_path: path to the file with WSJ format trees, one per line.
source_token_vocab: GenericVocabulary object for source vocabulary.
target_token_vocab: GenericVocabulary object for target vocabulary.
eos: integer to append at the end of each sequence (default: None).
Yields:
A dictionary {"inputs": source-line, "targets": target-line} where
the lines are integer lists converted from tokens in the file lines.
"""
eos_list = [] if eos is None else [eos]
with tf.gfile.GFile(tree_path, mode="r") as tree_file:
tree_line = tree_file.readline()
while tree_line:
source, target = words_and_tags_from_wsj_tree(tree_line)
source_ints = source_token_vocab.encode(source.strip()) + eos_list
target_ints = target_token_vocab.encode(target.strip()) + eos_list
yield {"inputs": source_ints, "targets": target_ints}
tree_line = tree_file.readline() |
def main():
"""
Print lines of input along with output.
"""
source_lines = (line.rstrip() for line in sys.stdin)
console = InteractiveInterpreter()
console.runsource('import turicreate')
source = ''
try:
while True:
source = source_lines.next()
more = console.runsource(source)
while more:
next_line = source_lines.next()
print '...', next_line
source += '\n' + next_line
more = console.runsource(source)
except StopIteration:
if more:
print '... '
more = console.runsource(source + '\n') | Print lines of input along with output. | Below is the the instruction that describes the task:
### Input:
Print lines of input along with output.
### Response:
def main():
"""
Print lines of input along with output.
"""
source_lines = (line.rstrip() for line in sys.stdin)
console = InteractiveInterpreter()
console.runsource('import turicreate')
source = ''
try:
while True:
source = source_lines.next()
more = console.runsource(source)
while more:
next_line = source_lines.next()
print '...', next_line
source += '\n' + next_line
more = console.runsource(source)
except StopIteration:
if more:
print '... '
more = console.runsource(source + '\n') |
def uuid(dev=None):
'''
Return the bcache UUID of a block device.
If no device is given, the Cache UUID is returned.
CLI example:
.. code-block:: bash
salt '*' bcache.uuid
salt '*' bcache.uuid /dev/sda
salt '*' bcache.uuid bcache0
'''
try:
if dev is None:
# take the only directory in /sys/fs/bcache and return it's basename
return list(salt.utils.path.os_walk('/sys/fs/bcache/'))[0][1][0]
else:
# basename of the /sys/block/{dev}/bcache/cache symlink target
return os.path.basename(_bcsys(dev, 'cache'))
except Exception:
return False | Return the bcache UUID of a block device.
If no device is given, the Cache UUID is returned.
CLI example:
.. code-block:: bash
salt '*' bcache.uuid
salt '*' bcache.uuid /dev/sda
salt '*' bcache.uuid bcache0 | Below is the the instruction that describes the task:
### Input:
Return the bcache UUID of a block device.
If no device is given, the Cache UUID is returned.
CLI example:
.. code-block:: bash
salt '*' bcache.uuid
salt '*' bcache.uuid /dev/sda
salt '*' bcache.uuid bcache0
### Response:
def uuid(dev=None):
'''
Return the bcache UUID of a block device.
If no device is given, the Cache UUID is returned.
CLI example:
.. code-block:: bash
salt '*' bcache.uuid
salt '*' bcache.uuid /dev/sda
salt '*' bcache.uuid bcache0
'''
try:
if dev is None:
# take the only directory in /sys/fs/bcache and return it's basename
return list(salt.utils.path.os_walk('/sys/fs/bcache/'))[0][1][0]
else:
# basename of the /sys/block/{dev}/bcache/cache symlink target
return os.path.basename(_bcsys(dev, 'cache'))
except Exception:
return False |
def verify(self, pkt, key):
"""
Check that the integrity check value (icv) of a packet is valid.
@param pkt: a packet that contains a valid encrypted ESP or AH layer
@param key: the authentication key, a byte string
@raise IPSecIntegrityError: if the integrity check fails
"""
if not self.mac or self.icv_size == 0:
return
mac = self.new_mac(key)
pkt_icv = 'not found'
computed_icv = 'not computed'
if isinstance(pkt, ESP):
pkt_icv = pkt.data[len(pkt.data) - self.icv_size:]
clone = pkt.copy()
clone.data = clone.data[:len(clone.data) - self.icv_size]
elif pkt.haslayer(AH):
if len(pkt[AH].icv) != self.icv_size:
# Fill padding since we know the actual icv_size
pkt[AH].padding = pkt[AH].icv[self.icv_size:]
pkt[AH].icv = pkt[AH].icv[:self.icv_size]
pkt_icv = pkt[AH].icv
clone = zero_mutable_fields(pkt.copy(), sending=False)
mac.update(raw(clone))
computed_icv = mac.finalize()[:self.icv_size]
# XXX: Cannot use mac.verify because the ICV can be truncated
if pkt_icv != computed_icv:
raise IPSecIntegrityError('pkt_icv=%r, computed_icv=%r' %
(pkt_icv, computed_icv)) | Check that the integrity check value (icv) of a packet is valid.
@param pkt: a packet that contains a valid encrypted ESP or AH layer
@param key: the authentication key, a byte string
@raise IPSecIntegrityError: if the integrity check fails | Below is the the instruction that describes the task:
### Input:
Check that the integrity check value (icv) of a packet is valid.
@param pkt: a packet that contains a valid encrypted ESP or AH layer
@param key: the authentication key, a byte string
@raise IPSecIntegrityError: if the integrity check fails
### Response:
def verify(self, pkt, key):
"""
Check that the integrity check value (icv) of a packet is valid.
@param pkt: a packet that contains a valid encrypted ESP or AH layer
@param key: the authentication key, a byte string
@raise IPSecIntegrityError: if the integrity check fails
"""
if not self.mac or self.icv_size == 0:
return
mac = self.new_mac(key)
pkt_icv = 'not found'
computed_icv = 'not computed'
if isinstance(pkt, ESP):
pkt_icv = pkt.data[len(pkt.data) - self.icv_size:]
clone = pkt.copy()
clone.data = clone.data[:len(clone.data) - self.icv_size]
elif pkt.haslayer(AH):
if len(pkt[AH].icv) != self.icv_size:
# Fill padding since we know the actual icv_size
pkt[AH].padding = pkt[AH].icv[self.icv_size:]
pkt[AH].icv = pkt[AH].icv[:self.icv_size]
pkt_icv = pkt[AH].icv
clone = zero_mutable_fields(pkt.copy(), sending=False)
mac.update(raw(clone))
computed_icv = mac.finalize()[:self.icv_size]
# XXX: Cannot use mac.verify because the ICV can be truncated
if pkt_icv != computed_icv:
raise IPSecIntegrityError('pkt_icv=%r, computed_icv=%r' %
(pkt_icv, computed_icv)) |
def rectangular_neighbors_from_shape(shape):
"""Compute the neighbors of every pixel as a list of the pixel index's each pixel shares a vertex with.
The uniformity of the rectangular grid's geometry is used to compute this.
"""
pixels = shape[0]*shape[1]
pixel_neighbors = -1 * np.ones(shape=(pixels, 4))
pixel_neighbors_size = np.zeros(pixels)
pixel_neighbors, pixel_neighbors_size = compute_corner_neighbors(pixel_neighbors, pixel_neighbors_size,
shape, pixels)
pixel_neighbors, pixel_neighbors_size = compute_top_edge_neighbors(pixel_neighbors, pixel_neighbors_size,
shape, pixels)
pixel_neighbors, pixel_neighbors_size = compute_left_edge_neighbors(pixel_neighbors, pixel_neighbors_size,
shape, pixels)
pixel_neighbors, pixel_neighbors_size = compute_right_edge_neighbors(pixel_neighbors, pixel_neighbors_size,
shape, pixels)
pixel_neighbors, pixel_neighbors_size = compute_bottom_edge_neighbors(pixel_neighbors, pixel_neighbors_size,
shape, pixels)
pixel_neighbors, pixel_neighbors_size = compute_central_neighbors(pixel_neighbors, pixel_neighbors_size,
shape, pixels)
return pixel_neighbors, pixel_neighbors_size | Compute the neighbors of every pixel as a list of the pixel index's each pixel shares a vertex with.
The uniformity of the rectangular grid's geometry is used to compute this. | Below is the the instruction that describes the task:
### Input:
Compute the neighbors of every pixel as a list of the pixel index's each pixel shares a vertex with.
The uniformity of the rectangular grid's geometry is used to compute this.
### Response:
def rectangular_neighbors_from_shape(shape):
"""Compute the neighbors of every pixel as a list of the pixel index's each pixel shares a vertex with.
The uniformity of the rectangular grid's geometry is used to compute this.
"""
pixels = shape[0]*shape[1]
pixel_neighbors = -1 * np.ones(shape=(pixels, 4))
pixel_neighbors_size = np.zeros(pixels)
pixel_neighbors, pixel_neighbors_size = compute_corner_neighbors(pixel_neighbors, pixel_neighbors_size,
shape, pixels)
pixel_neighbors, pixel_neighbors_size = compute_top_edge_neighbors(pixel_neighbors, pixel_neighbors_size,
shape, pixels)
pixel_neighbors, pixel_neighbors_size = compute_left_edge_neighbors(pixel_neighbors, pixel_neighbors_size,
shape, pixels)
pixel_neighbors, pixel_neighbors_size = compute_right_edge_neighbors(pixel_neighbors, pixel_neighbors_size,
shape, pixels)
pixel_neighbors, pixel_neighbors_size = compute_bottom_edge_neighbors(pixel_neighbors, pixel_neighbors_size,
shape, pixels)
pixel_neighbors, pixel_neighbors_size = compute_central_neighbors(pixel_neighbors, pixel_neighbors_size,
shape, pixels)
return pixel_neighbors, pixel_neighbors_size |
def euler_matrix(ai, aj, ak, axes='sxyz'):
"""Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> np.allclose(np.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> np.allclose(np.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4*math.pi) * (np.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes)
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i + parity]
k = _NEXT_AXIS[i - parity + 1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci * ck, ci * sk
sc, ss = si * ck, si * sk
M = np.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj * si
M[i, k] = sj * ci
M[j, i] = sj * sk
M[j, j] = -cj * ss + cc
M[j, k] = -cj * cs - sc
M[k, i] = -sj * ck
M[k, j] = cj * sc + cs
M[k, k] = cj * cc - ss
else:
M[i, i] = cj * ck
M[i, j] = sj * sc - cs
M[i, k] = sj * cc + ss
M[j, i] = cj * sk
M[j, j] = sj * ss + cc
M[j, k] = sj * cs - sc
M[k, i] = -sj
M[k, j] = cj * si
M[k, k] = cj * ci
return M | Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> np.allclose(np.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> np.allclose(np.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4*math.pi) * (np.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes) | Below is the the instruction that describes the task:
### Input:
Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> np.allclose(np.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> np.allclose(np.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4*math.pi) * (np.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes)
### Response:
def euler_matrix(ai, aj, ak, axes='sxyz'):
"""Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> np.allclose(np.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> np.allclose(np.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4*math.pi) * (np.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes)
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i + parity]
k = _NEXT_AXIS[i - parity + 1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci * ck, ci * sk
sc, ss = si * ck, si * sk
M = np.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj * si
M[i, k] = sj * ci
M[j, i] = sj * sk
M[j, j] = -cj * ss + cc
M[j, k] = -cj * cs - sc
M[k, i] = -sj * ck
M[k, j] = cj * sc + cs
M[k, k] = cj * cc - ss
else:
M[i, i] = cj * ck
M[i, j] = sj * sc - cs
M[i, k] = sj * cc + ss
M[j, i] = cj * sk
M[j, j] = sj * ss + cc
M[j, k] = sj * cs - sc
M[k, i] = -sj
M[k, j] = cj * si
M[k, k] = cj * ci
return M |
def init(port=None, do_not_exit=False, disable_tls=False, log_level='WARNING'):
"""Start the Xenon GRPC server on the specified port, or, if a service
is already running on that port, connect to that.
If no port is given, a random port is selected. This means that, by
default, every python instance will start its own instance of a xenon-grpc
process.
:param port: the port number
:param do_not_exit: by default the GRPC server is shut down after Python
exits (through the `atexit` module), setting this value to `True` will
prevent that from happening."""
logger = logging.getLogger('xenon')
logger.setLevel(logging.INFO)
logger_handler = logging.StreamHandler()
logger_handler.setFormatter(logging.Formatter(style='{'))
logger_handler.setLevel(getattr(logging, log_level))
logger.addHandler(logger_handler)
if port is None:
port = find_free_port()
if __server__.process is not None:
logger.warning(
"You tried to run init(), but the server is already running.")
return __server__
__server__.port = port
__server__.disable_tls = disable_tls
__server__.__enter__()
if not do_not_exit:
atexit.register(__server__.__exit__, None, None, None)
return __server__ | Start the Xenon GRPC server on the specified port, or, if a service
is already running on that port, connect to that.
If no port is given, a random port is selected. This means that, by
default, every python instance will start its own instance of a xenon-grpc
process.
:param port: the port number
:param do_not_exit: by default the GRPC server is shut down after Python
exits (through the `atexit` module), setting this value to `True` will
prevent that from happening. | Below is the the instruction that describes the task:
### Input:
Start the Xenon GRPC server on the specified port, or, if a service
is already running on that port, connect to that.
If no port is given, a random port is selected. This means that, by
default, every python instance will start its own instance of a xenon-grpc
process.
:param port: the port number
:param do_not_exit: by default the GRPC server is shut down after Python
exits (through the `atexit` module), setting this value to `True` will
prevent that from happening.
### Response:
def init(port=None, do_not_exit=False, disable_tls=False, log_level='WARNING'):
"""Start the Xenon GRPC server on the specified port, or, if a service
is already running on that port, connect to that.
If no port is given, a random port is selected. This means that, by
default, every python instance will start its own instance of a xenon-grpc
process.
:param port: the port number
:param do_not_exit: by default the GRPC server is shut down after Python
exits (through the `atexit` module), setting this value to `True` will
prevent that from happening."""
logger = logging.getLogger('xenon')
logger.setLevel(logging.INFO)
logger_handler = logging.StreamHandler()
logger_handler.setFormatter(logging.Formatter(style='{'))
logger_handler.setLevel(getattr(logging, log_level))
logger.addHandler(logger_handler)
if port is None:
port = find_free_port()
if __server__.process is not None:
logger.warning(
"You tried to run init(), but the server is already running.")
return __server__
__server__.port = port
__server__.disable_tls = disable_tls
__server__.__enter__()
if not do_not_exit:
atexit.register(__server__.__exit__, None, None, None)
return __server__ |
def _update_exit_code_from_stats(cls, statistics: Statistics,
app: Application):
'''Set the current exit code based on the Statistics.'''
for error_type in statistics.errors:
exit_code = app.ERROR_CODE_MAP.get(error_type)
if exit_code:
app.update_exit_code(exit_code) | Set the current exit code based on the Statistics. | Below is the the instruction that describes the task:
### Input:
Set the current exit code based on the Statistics.
### Response:
def _update_exit_code_from_stats(cls, statistics: Statistics,
app: Application):
'''Set the current exit code based on the Statistics.'''
for error_type in statistics.errors:
exit_code = app.ERROR_CODE_MAP.get(error_type)
if exit_code:
app.update_exit_code(exit_code) |
def get(self,par_names=None,obs_names=None,astype=None):
"""method to get a new LinearAnalysis class using a
subset of parameters and/or observations
Parameters
----------
par_names : list
par names for new object
obs_names : list
obs names for new object
astype : pyemu.Schur or pyemu.ErrVar
type to cast the new object. If None, return type is
same as self
Returns
-------
new : LinearAnalysis
"""
# make sure we aren't fooling with unwanted prior information
self.clean()
# if there is nothing to do but copy
if par_names is None and obs_names is None:
if astype is not None:
self.logger.warn("LinearAnalysis.get(): astype is not None, " +
"but par_names and obs_names are None so" +
"\n ->Omitted attributes will not be " +
"propagated to new instance")
else:
return copy.deepcopy(self)
# make sure the args are lists
if par_names is not None and not isinstance(par_names, list):
par_names = [par_names]
if obs_names is not None and not isinstance(obs_names, list):
obs_names = [obs_names]
if par_names is None:
par_names = self.jco.col_names
if obs_names is None:
obs_names = self.jco.row_names
# if possible, get a new parcov
if self.parcov:
new_parcov = self.parcov.get(col_names=[pname for pname in\
par_names if pname in\
self.parcov.col_names])
else:
new_parcov = None
# if possible, get a new obscov
if self.obscov_arg is not None:
new_obscov = self.obscov.get(row_names=obs_names)
else:
new_obscov = None
# if possible, get a new pst
if self.pst_arg is not None:
new_pst = self.pst.get(par_names=par_names,obs_names=obs_names)
else:
new_pst = None
new_extract = None
if self.predictions:
# new_preds = []
# for prediction in self.predictions:
# new_preds.append(prediction.get(row_names=par_names))
new_preds = self.predictions.get(row_names=par_names)
else:
new_preds = None
if self.jco_arg is not None:
new_jco = self.jco.get(row_names=obs_names, col_names=par_names)
else:
new_jco = None
if astype is not None:
new = astype(jco=new_jco, pst=new_pst, parcov=new_parcov,
obscov=new_obscov, predictions=new_preds,
verbose=False)
else:
# return a new object of the same type
new = type(self)(jco=new_jco, pst=new_pst, parcov=new_parcov,
obscov=new_obscov, predictions=new_preds,
verbose=False)
return new | method to get a new LinearAnalysis class using a
subset of parameters and/or observations
Parameters
----------
par_names : list
par names for new object
obs_names : list
obs names for new object
astype : pyemu.Schur or pyemu.ErrVar
type to cast the new object. If None, return type is
same as self
Returns
-------
new : LinearAnalysis | Below is the instruction that describes the task:
### Input:
method to get a new LinearAnalysis class using a
subset of parameters and/or observations
Parameters
----------
par_names : list
par names for new object
obs_names : list
obs names for new object
astype : pyemu.Schur or pyemu.ErrVar
type to cast the new object. If None, return type is
same as self
Returns
-------
new : LinearAnalysis
### Response:
def get(self,par_names=None,obs_names=None,astype=None):
"""method to get a new LinearAnalysis class using a
subset of parameters and/or observations
Parameters
----------
par_names : list
par names for new object
obs_names : list
obs names for new object
astype : pyemu.Schur or pyemu.ErrVar
type to cast the new object. If None, return type is
same as self
Returns
-------
new : LinearAnalysis
"""
# make sure we aren't fooling with unwanted prior information
self.clean()
# if there is nothing to do but copy
if par_names is None and obs_names is None:
if astype is not None:
self.logger.warn("LinearAnalysis.get(): astype is not None, " +
"but par_names and obs_names are None so" +
"\n ->Omitted attributes will not be " +
"propagated to new instance")
else:
return copy.deepcopy(self)
# make sure the args are lists
if par_names is not None and not isinstance(par_names, list):
par_names = [par_names]
if obs_names is not None and not isinstance(obs_names, list):
obs_names = [obs_names]
if par_names is None:
par_names = self.jco.col_names
if obs_names is None:
obs_names = self.jco.row_names
# if possible, get a new parcov
if self.parcov:
new_parcov = self.parcov.get(col_names=[pname for pname in\
par_names if pname in\
self.parcov.col_names])
else:
new_parcov = None
# if possible, get a new obscov
if self.obscov_arg is not None:
new_obscov = self.obscov.get(row_names=obs_names)
else:
new_obscov = None
# if possible, get a new pst
if self.pst_arg is not None:
new_pst = self.pst.get(par_names=par_names,obs_names=obs_names)
else:
new_pst = None
new_extract = None
if self.predictions:
# new_preds = []
# for prediction in self.predictions:
# new_preds.append(prediction.get(row_names=par_names))
new_preds = self.predictions.get(row_names=par_names)
else:
new_preds = None
if self.jco_arg is not None:
new_jco = self.jco.get(row_names=obs_names, col_names=par_names)
else:
new_jco = None
if astype is not None:
new = astype(jco=new_jco, pst=new_pst, parcov=new_parcov,
obscov=new_obscov, predictions=new_preds,
verbose=False)
else:
# return a new object of the same type
new = type(self)(jco=new_jco, pst=new_pst, parcov=new_parcov,
obscov=new_obscov, predictions=new_preds,
verbose=False)
return new |
def plot(x, y, rows=None, columns=None):
"""
x, y list of values on x- and y-axis
plot those values within canvas size (rows and columns)
"""
if not rows or not columns:
rows, columns = get_terminal_size()
# offset for caption
rows -= 4
# Scale points such that they fit on canvas
x_scaled = scale(x, columns)
y_scaled = scale(y, rows)
# Create empty canvas
canvas = [[' ' for _ in range(columns)] for _ in range(rows)]
# Add scaled points to canvas
for ix, iy in zip(x_scaled, y_scaled):
canvas[rows - iy - 1][ix] = '*'
# Print rows of canvas
for row in [''.join(row) for row in canvas]:
print(row)
# Print scale
print(''.join([
'\nMin x: ', str(min(x)),
' Max x: ', str(max(x)),
' Min y: ', str(min(y)),
' Max y: ', str(max(y))
])) | x, y list of values on x- and y-axis
plot those values within canvas size (rows and columns) | Below is the instruction that describes the task:
### Input:
x, y list of values on x- and y-axis
plot those values within canvas size (rows and columns)
### Response:
def plot(x, y, rows=None, columns=None):
"""
x, y list of values on x- and y-axis
plot those values within canvas size (rows and columns)
"""
if not rows or not columns:
rows, columns = get_terminal_size()
# offset for caption
rows -= 4
# Scale points such that they fit on canvas
x_scaled = scale(x, columns)
y_scaled = scale(y, rows)
# Create empty canvas
canvas = [[' ' for _ in range(columns)] for _ in range(rows)]
# Add scaled points to canvas
for ix, iy in zip(x_scaled, y_scaled):
canvas[rows - iy - 1][ix] = '*'
# Print rows of canvas
for row in [''.join(row) for row in canvas]:
print(row)
# Print scale
print(''.join([
'\nMin x: ', str(min(x)),
' Max x: ', str(max(x)),
' Min y: ', str(min(y)),
' Max y: ', str(max(y))
])) |
def info(name, root=None):
'''
Return information about a group
name
Name of the group
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' group.info foo
'''
if root is not None:
getgrnam = functools.partial(_getgrnam, root=root)
else:
getgrnam = functools.partial(grp.getgrnam)
try:
grinfo = getgrnam(name)
except KeyError:
return {}
else:
return _format_info(grinfo) | Return information about a group
name
Name of the group
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' group.info foo | Below is the instruction that describes the task:
### Input:
Return information about a group
name
Name of the group
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' group.info foo
### Response:
def info(name, root=None):
'''
Return information about a group
name
Name of the group
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' group.info foo
'''
if root is not None:
getgrnam = functools.partial(_getgrnam, root=root)
else:
getgrnam = functools.partial(grp.getgrnam)
try:
grinfo = getgrnam(name)
except KeyError:
return {}
else:
return _format_info(grinfo) |
def get(vm, key='uuid'):
'''
Output the JSON object describing a VM
vm : string
vm to be targeted
key : string [uuid|alias|hostname]
value type of 'vm' parameter
CLI Example:
.. code-block:: bash
salt '*' vmadm.get 186da9ab-7392-4f55-91a5-b8f1fe770543
salt '*' vmadm.get nacl key=alias
'''
ret = {}
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
vm = lookup('{0}={1}'.format(key, vm), one=True)
if 'Error' in vm:
return vm
# vmadm get <uuid>
cmd = 'vmadm get {0}'.format(vm)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
return ret
return salt.utils.json.loads(res['stdout']) | Output the JSON object describing a VM
vm : string
vm to be targeted
key : string [uuid|alias|hostname]
value type of 'vm' parameter
CLI Example:
.. code-block:: bash
salt '*' vmadm.get 186da9ab-7392-4f55-91a5-b8f1fe770543
salt '*' vmadm.get nacl key=alias | Below is the instruction that describes the task:
### Input:
Output the JSON object describing a VM
vm : string
vm to be targeted
key : string [uuid|alias|hostname]
value type of 'vm' parameter
CLI Example:
.. code-block:: bash
salt '*' vmadm.get 186da9ab-7392-4f55-91a5-b8f1fe770543
salt '*' vmadm.get nacl key=alias
### Response:
def get(vm, key='uuid'):
'''
Output the JSON object describing a VM
vm : string
vm to be targeted
key : string [uuid|alias|hostname]
value type of 'vm' parameter
CLI Example:
.. code-block:: bash
salt '*' vmadm.get 186da9ab-7392-4f55-91a5-b8f1fe770543
salt '*' vmadm.get nacl key=alias
'''
ret = {}
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
vm = lookup('{0}={1}'.format(key, vm), one=True)
if 'Error' in vm:
return vm
# vmadm get <uuid>
cmd = 'vmadm get {0}'.format(vm)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
return ret
return salt.utils.json.loads(res['stdout']) |
def create_alias(alias_name, target_key_id, region=None, key=None, keyid=None,
profile=None):
'''
Create a display name for a key.
CLI example::
salt myminion boto_kms.create_alias 'alias/mykey' key_id
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
conn.create_alias(alias_name, target_key_id)
r['result'] = True
except boto.exception.BotoServerError as e:
r['result'] = False
r['error'] = __utils__['boto.get_error'](e)
return r | Create a display name for a key.
CLI example::
salt myminion boto_kms.create_alias 'alias/mykey' key_id | Below is the instruction that describes the task:
### Input:
Create a display name for a key.
CLI example::
salt myminion boto_kms.create_alias 'alias/mykey' key_id
### Response:
def create_alias(alias_name, target_key_id, region=None, key=None, keyid=None,
profile=None):
'''
Create a display name for a key.
CLI example::
salt myminion boto_kms.create_alias 'alias/mykey' key_id
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
conn.create_alias(alias_name, target_key_id)
r['result'] = True
except boto.exception.BotoServerError as e:
r['result'] = False
r['error'] = __utils__['boto.get_error'](e)
return r |
def run_sync(self, func: Callable, timeout: float = None) -> Any:
"""Starts the `IOLoop`, runs the given function, and stops the loop.
The function must return either an awaitable object or
``None``. If the function returns an awaitable object, the
`IOLoop` will run until the awaitable is resolved (and
`run_sync()` will return the awaitable's result). If it raises
an exception, the `IOLoop` will stop and the exception will be
re-raised to the caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `tornado.util.TimeoutError` is raised.
This method is useful to allow asynchronous calls in a
``main()`` function::
async def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
.. versionchanged:: 4.3
Returning a non-``None``, non-awaitable value is now an error.
.. versionchanged:: 5.0
If a timeout occurs, the ``func`` coroutine will be cancelled.
"""
future_cell = [None] # type: List[Optional[Future]]
def run() -> None:
try:
result = func()
if result is not None:
from tornado.gen import convert_yielded
result = convert_yielded(result)
except Exception:
fut = Future() # type: Future[Any]
future_cell[0] = fut
future_set_exc_info(fut, sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
fut = Future()
future_cell[0] = fut
fut.set_result(result)
assert future_cell[0] is not None
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
def timeout_callback() -> None:
# If we can cancel the future, do so and wait on it. If not,
# Just stop the loop and return with the task still pending.
# (If we neither cancel nor wait for the task, a warning
# will be logged).
assert future_cell[0] is not None
if not future_cell[0].cancel():
self.stop()
timeout_handle = self.add_timeout(self.time() + timeout, timeout_callback)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
assert future_cell[0] is not None
if future_cell[0].cancelled() or not future_cell[0].done():
raise TimeoutError("Operation timed out after %s seconds" % timeout)
return future_cell[0].result() | Starts the `IOLoop`, runs the given function, and stops the loop.
The function must return either an awaitable object or
``None``. If the function returns an awaitable object, the
`IOLoop` will run until the awaitable is resolved (and
`run_sync()` will return the awaitable's result). If it raises
an exception, the `IOLoop` will stop and the exception will be
re-raised to the caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `tornado.util.TimeoutError` is raised.
This method is useful to allow asynchronous calls in a
``main()`` function::
async def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
.. versionchanged:: 4.3
Returning a non-``None``, non-awaitable value is now an error.
.. versionchanged:: 5.0
If a timeout occurs, the ``func`` coroutine will be cancelled. | Below is the instruction that describes the task:
### Input:
Starts the `IOLoop`, runs the given function, and stops the loop.
The function must return either an awaitable object or
``None``. If the function returns an awaitable object, the
`IOLoop` will run until the awaitable is resolved (and
`run_sync()` will return the awaitable's result). If it raises
an exception, the `IOLoop` will stop and the exception will be
re-raised to the caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `tornado.util.TimeoutError` is raised.
This method is useful to allow asynchronous calls in a
``main()`` function::
async def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
.. versionchanged:: 4.3
Returning a non-``None``, non-awaitable value is now an error.
.. versionchanged:: 5.0
If a timeout occurs, the ``func`` coroutine will be cancelled.
### Response:
def run_sync(self, func: Callable, timeout: float = None) -> Any:
"""Starts the `IOLoop`, runs the given function, and stops the loop.
The function must return either an awaitable object or
``None``. If the function returns an awaitable object, the
`IOLoop` will run until the awaitable is resolved (and
`run_sync()` will return the awaitable's result). If it raises
an exception, the `IOLoop` will stop and the exception will be
re-raised to the caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `tornado.util.TimeoutError` is raised.
This method is useful to allow asynchronous calls in a
``main()`` function::
async def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
.. versionchanged:: 4.3
Returning a non-``None``, non-awaitable value is now an error.
.. versionchanged:: 5.0
If a timeout occurs, the ``func`` coroutine will be cancelled.
"""
future_cell = [None] # type: List[Optional[Future]]
def run() -> None:
try:
result = func()
if result is not None:
from tornado.gen import convert_yielded
result = convert_yielded(result)
except Exception:
fut = Future() # type: Future[Any]
future_cell[0] = fut
future_set_exc_info(fut, sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
fut = Future()
future_cell[0] = fut
fut.set_result(result)
assert future_cell[0] is not None
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
def timeout_callback() -> None:
# If we can cancel the future, do so and wait on it. If not,
# Just stop the loop and return with the task still pending.
# (If we neither cancel nor wait for the task, a warning
# will be logged).
assert future_cell[0] is not None
if not future_cell[0].cancel():
self.stop()
timeout_handle = self.add_timeout(self.time() + timeout, timeout_callback)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
assert future_cell[0] is not None
if future_cell[0].cancelled() or not future_cell[0].done():
raise TimeoutError("Operation timed out after %s seconds" % timeout)
return future_cell[0].result() |
def main():
"""
Entry point when used via command line.
Features are given using the environment variable ``PRODUCT_EQUATION``.
If it is not set, ``PRODUCT_EQUATION_FILENAME`` is tried: if it points
to an existing equation file that selection is used.
(if ``APE_PREPEND_FEATURES`` is given, those features are prepended)
If the list of features is empty, ``ape.EnvironmentIncomplete`` is raised.
"""
# check APE_PREPEND_FEATURES
features = os.environ.get('APE_PREPEND_FEATURES', '').split()
# features can be specified inline in PRODUCT_EQUATION
inline_features = os.environ.get('PRODUCT_EQUATION', '').split()
if inline_features:
# append inline features
features += inline_features
else:
# fallback: features are specified in equation file
feature_file = os.environ.get('PRODUCT_EQUATION_FILENAME', '')
if feature_file:
# append features from equation file
features += get_features_from_equation_file(feature_file)
else:
if not features:
raise EnvironmentIncomplete(
'Error running ape:\n'
'Either the PRODUCT_EQUATION or '
'PRODUCT_EQUATION_FILENAME environment '
'variable needs to be set!'
)
# run ape with features selected
run(sys.argv, features=features) | Entry point when used via command line.
Features are given using the environment variable ``PRODUCT_EQUATION``.
If it is not set, ``PRODUCT_EQUATION_FILENAME`` is tried: if it points
to an existing equation file that selection is used.
(if ``APE_PREPEND_FEATURES`` is given, those features are prepended)
If the list of features is empty, ``ape.EnvironmentIncomplete`` is raised. | Below is the instruction that describes the task:
### Input:
Entry point when used via command line.
Features are given using the environment variable ``PRODUCT_EQUATION``.
If it is not set, ``PRODUCT_EQUATION_FILENAME`` is tried: if it points
to an existing equation file that selection is used.
(if ``APE_PREPEND_FEATURES`` is given, those features are prepended)
If the list of features is empty, ``ape.EnvironmentIncomplete`` is raised.
### Response:
def main():
"""
Entry point when used via command line.
Features are given using the environment variable ``PRODUCT_EQUATION``.
If it is not set, ``PRODUCT_EQUATION_FILENAME`` is tried: if it points
to an existing equation file that selection is used.
(if ``APE_PREPEND_FEATURES`` is given, those features are prepended)
If the list of features is empty, ``ape.EnvironmentIncomplete`` is raised.
"""
# check APE_PREPEND_FEATURES
features = os.environ.get('APE_PREPEND_FEATURES', '').split()
# features can be specified inline in PRODUCT_EQUATION
inline_features = os.environ.get('PRODUCT_EQUATION', '').split()
if inline_features:
# append inline features
features += inline_features
else:
# fallback: features are specified in equation file
feature_file = os.environ.get('PRODUCT_EQUATION_FILENAME', '')
if feature_file:
# append features from equation file
features += get_features_from_equation_file(feature_file)
else:
if not features:
raise EnvironmentIncomplete(
'Error running ape:\n'
'Either the PRODUCT_EQUATION or '
'PRODUCT_EQUATION_FILENAME environment '
'variable needs to be set!'
)
# run ape with features selected
run(sys.argv, features=features) |
def _parse_reports_by_type(self):
""" Returns a data dictionary
Goes through logs and parses them based on 'No errors found', VERBOSE or SUMMARY type.
"""
data = dict()
for file_meta in self.find_log_files('picard/sam_file_validation', filehandles=True):
sample = file_meta['s_name']
if sample in data:
log.debug("Duplicate sample name found! Overwriting: {}".format(sample))
filehandle = file_meta['f']
first_line = filehandle.readline().rstrip()
filehandle.seek(0) # Rewind reading of the file
if 'No errors found' in first_line:
sample_data = _parse_no_error_report()
elif first_line.startswith('ERROR') or first_line.startswith('WARNING'):
sample_data = _parse_verbose_report(filehandle)
else:
sample_data = _parse_summary_report(filehandle)
data[sample] = sample_data
return data | Returns a data dictionary
Goes through logs and parses them based on 'No errors found', VERBOSE or SUMMARY type. | Below is the instruction that describes the task:
### Input:
Returns a data dictionary
Goes through logs and parses them based on 'No errors found', VERBOSE or SUMMARY type.
### Response:
def _parse_reports_by_type(self):
""" Returns a data dictionary
Goes through logs and parses them based on 'No errors found', VERBOSE or SUMMARY type.
"""
data = dict()
for file_meta in self.find_log_files('picard/sam_file_validation', filehandles=True):
sample = file_meta['s_name']
if sample in data:
log.debug("Duplicate sample name found! Overwriting: {}".format(sample))
filehandle = file_meta['f']
first_line = filehandle.readline().rstrip()
filehandle.seek(0) # Rewind reading of the file
if 'No errors found' in first_line:
sample_data = _parse_no_error_report()
elif first_line.startswith('ERROR') or first_line.startswith('WARNING'):
sample_data = _parse_verbose_report(filehandle)
else:
sample_data = _parse_summary_report(filehandle)
data[sample] = sample_data
return data |
def can_handle(cls, pkt, rpc):
"""heuristical guess_payload_class"""
# type = 0 => request
if rpc.getfieldval("type") == 0 and \
str(rpc.object_uuid).startswith("dea00000-6c97-11d1-8271-"):
return True
return False | heuristical guess_payload_class | Below is the the instruction that describes the task:
### Input:
heuristical guess_payload_class
### Response:
def can_handle(cls, pkt, rpc):
"""heuristical guess_payload_class"""
# type = 0 => request
if rpc.getfieldval("type") == 0 and \
str(rpc.object_uuid).startswith("dea00000-6c97-11d1-8271-"):
return True
return False |
def url_params_previous_page(self):
"""
:rtype: dict[str, str]
"""
self.assert_has_previous_page()
params = {self.PARAM_OLDER_ID: str(self.older_id)}
self._add_count_to_params_if_needed(params)
return params | :rtype: dict[str, str] | Below is the the instruction that describes the task:
### Input:
:rtype: dict[str, str]
### Response:
def url_params_previous_page(self):
"""
:rtype: dict[str, str]
"""
self.assert_has_previous_page()
params = {self.PARAM_OLDER_ID: str(self.older_id)}
self._add_count_to_params_if_needed(params)
return params |
def list_server_certificates(path_prefix='/', region=None, key=None, keyid=None, profile=None):
'''
Lists the server certificates stored in IAM that have the specified path prefix.
.. versionadded:: ???
:param path_prefix:
The path prefix for filtering the results. For example: /company/servercerts would get
all server certificates for which the path starts with /company/servercerts .
This parameter is optional. If it is not included, it defaults to a slash (/), listing all
server certificates. This parameter allows (per its regex pattern) a string of characters
consisting of either a forward slash (/) by itself or a string that must begin and end with
forward slashes. In addition, it can contain any ASCII character from the ! (u0021)
through the DEL character (u007F), including most punctuation characters, digits, and upper
and lowercased letters.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.list_server_certificates path_prefix=/somepath/
'''
retries = 10
sleep = 6
conn = __utils__['boto3.get_connection']('iam', region=region, key=key, keyid=keyid,
profile=profile)
Items = []
while retries:
try:
log.debug('Garnering list of IAM Server Certificates')
IsTruncated = True
while IsTruncated:
kwargs = {'PathPrefix': path_prefix}
ret = conn.list_server_certificates(**kwargs)
Items += ret.get('ServerCertificateMetadataList', [])
IsTruncated = ret.get('IsTruncated')
kwargs.update({'Marker': ret.get('Marker')})
return Items
except botocore.exceptions.ParamValidationError as err:
raise SaltInvocationError(str(err))
except botocore.exceptions.ClientError as err:
if retries and jmespath.search('Error.Code', err.response) == 'Throttling':
retries -= 1
log.debug('Throttled by AWS API, retrying in %s seconds...', sleep)
time.sleep(sleep)
continue
log.error('Failed to list IAM Server Certificates: %s', err.message)
return None | Lists the server certificates stored in IAM that have the specified path prefix.
.. versionadded:: ???
:param path_prefix:
The path prefix for filtering the results. For example: /company/servercerts would get
all server certificates for which the path starts with /company/servercerts .
This parameter is optional. If it is not included, it defaults to a slash (/), listing all
server certificates. This parameter allows (per its regex pattern) a string of characters
consisting of either a forward slash (/) by itself or a string that must begin and end with
forward slashes. In addition, it can contain any ASCII character from the ! (u0021)
through the DEL character (u007F), including most punctuation characters, digits, and upper
and lowercased letters.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.list_server_certificates path_prefix=/somepath/ | Below is the instruction that describes the task:
### Input:
Lists the server certificates stored in IAM that have the specified path prefix.
.. versionadded:: ???
:param path_prefix:
The path prefix for filtering the results. For example: /company/servercerts would get
all server certificates for which the path starts with /company/servercerts .
This parameter is optional. If it is not included, it defaults to a slash (/), listing all
server certificates. This parameter allows (per its regex pattern) a string of characters
consisting of either a forward slash (/) by itself or a string that must begin and end with
forward slashes. In addition, it can contain any ASCII character from the ! (u0021)
through the DEL character (u007F), including most punctuation characters, digits, and upper
and lowercased letters.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.list_server_certificates path_prefix=/somepath/
### Response:
def list_server_certificates(path_prefix='/', region=None, key=None, keyid=None, profile=None):
'''
Lists the server certificates stored in IAM that have the specified path prefix.
.. versionadded:: ???
:param path_prefix:
The path prefix for filtering the results. For example: /company/servercerts would get
all server certificates for which the path starts with /company/servercerts .
This parameter is optional. If it is not included, it defaults to a slash (/), listing all
server certificates. This parameter allows (per its regex pattern) a string of characters
consisting of either a forward slash (/) by itself or a string that must begin and end with
forward slashes. In addition, it can contain any ASCII character from the ! (u0021)
through the DEL character (u007F), including most punctuation characters, digits, and upper
and lowercased letters.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.list_server_certificates path_prefix=/somepath/
'''
retries = 10
sleep = 6
conn = __utils__['boto3.get_connection']('iam', region=region, key=key, keyid=keyid,
profile=profile)
Items = []
while retries:
try:
log.debug('Garnering list of IAM Server Certificates')
IsTruncated = True
while IsTruncated:
kwargs = {'PathPrefix': path_prefix}
ret = conn.list_server_certificates(**kwargs)
Items += ret.get('ServerCertificateMetadataList', [])
IsTruncated = ret.get('IsTruncated')
kwargs.update({'Marker': ret.get('Marker')})
return Items
except botocore.exceptions.ParamValidationError as err:
raise SaltInvocationError(str(err))
except botocore.exceptions.ClientError as err:
if retries and jmespath.search('Error.Code', err.response) == 'Throttling':
retries -= 1
log.debug('Throttled by AWS API, retrying in %s seconds...', sleep)
time.sleep(sleep)
continue
log.error('Failed to list IAM Server Certificates: %s', err.message)
return None |
def start_process_monitor(self):
""" Monitor all processes in processes_to_monitor dict,
restarting any if they fail, up to max_runs times.
"""
# Now wait for any child to die
Log.info("Start process monitor")
while True:
if len(self.processes_to_monitor) > 0:
(pid, status) = os.wait()
with self.process_lock:
if pid in self.processes_to_monitor.keys():
old_process_info = self.processes_to_monitor[pid]
name = old_process_info.name
command = old_process_info.command
Log.info("%s (pid=%s) exited with status %d. command=%s" % (name, pid, status, command))
# Log the stdout & stderr of the failed process
self._wait_process_std_out_err(name, old_process_info.process)
# Just make it world readable
if os.path.isfile("core.%d" % pid):
os.system("chmod a+r core.%d" % pid)
if old_process_info.attempts >= self.max_runs:
Log.info("%s exited too many times" % name)
sys.exit(1)
time.sleep(self.interval_between_runs)
p = self._run_process(name, command)
del self.processes_to_monitor[pid]
self.processes_to_monitor[p.pid] =\
ProcessInfo(p, name, command, old_process_info.attempts + 1)
# Log down the pid file
log_pid_for_process(name, p.pid) | Monitor all processes in processes_to_monitor dict,
restarting any if they fail, up to max_runs times. | Below is the instruction that describes the task:
### Input:
Monitor all processes in processes_to_monitor dict,
restarting any if they fail, up to max_runs times.
### Response:
def start_process_monitor(self):
""" Monitor all processes in processes_to_monitor dict,
restarting any if they fail, up to max_runs times.
"""
# Now wait for any child to die
Log.info("Start process monitor")
while True:
if len(self.processes_to_monitor) > 0:
(pid, status) = os.wait()
with self.process_lock:
if pid in self.processes_to_monitor.keys():
old_process_info = self.processes_to_monitor[pid]
name = old_process_info.name
command = old_process_info.command
Log.info("%s (pid=%s) exited with status %d. command=%s" % (name, pid, status, command))
# Log the stdout & stderr of the failed process
self._wait_process_std_out_err(name, old_process_info.process)
# Just make it world readable
if os.path.isfile("core.%d" % pid):
os.system("chmod a+r core.%d" % pid)
if old_process_info.attempts >= self.max_runs:
Log.info("%s exited too many times" % name)
sys.exit(1)
time.sleep(self.interval_between_runs)
p = self._run_process(name, command)
del self.processes_to_monitor[pid]
self.processes_to_monitor[p.pid] =\
ProcessInfo(p, name, command, old_process_info.attempts + 1)
# Log down the pid file
log_pid_for_process(name, p.pid) |
def _parseResourceDirectory(self, rva, size, magic = consts.PE32):
"""
Parses the C{IMAGE_RESOURCE_DIRECTORY} directory.
@type rva: int
@param rva: The RVA where the C{IMAGE_RESOURCE_DIRECTORY} starts.
@type size: int
@param size: The size of the C{IMAGE_RESOURCE_DIRECTORY} directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: str
@return: The C{IMAGE_RESOURCE_DIRECTORY} data.
"""
return self.getDataAtRva(rva, size) | Parses the C{IMAGE_RESOURCE_DIRECTORY} directory.
@type rva: int
@param rva: The RVA where the C{IMAGE_RESOURCE_DIRECTORY} starts.
@type size: int
@param size: The size of the C{IMAGE_RESOURCE_DIRECTORY} directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: str
@return: The C{IMAGE_RESOURCE_DIRECTORY} data. | Below is the instruction that describes the task:
### Input:
Parses the C{IMAGE_RESOURCE_DIRECTORY} directory.
@type rva: int
@param rva: The RVA where the C{IMAGE_RESOURCE_DIRECTORY} starts.
@type size: int
@param size: The size of the C{IMAGE_RESOURCE_DIRECTORY} directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: str
@return: The C{IMAGE_RESOURCE_DIRECTORY} data.
### Response:
def _parseResourceDirectory(self, rva, size, magic = consts.PE32):
"""
Parses the C{IMAGE_RESOURCE_DIRECTORY} directory.
@type rva: int
@param rva: The RVA where the C{IMAGE_RESOURCE_DIRECTORY} starts.
@type size: int
@param size: The size of the C{IMAGE_RESOURCE_DIRECTORY} directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: str
@return: The C{IMAGE_RESOURCE_DIRECTORY} data.
"""
return self.getDataAtRva(rva, size) |
def await_item_handle(self, original, loc, tokens):
"""Check for Python 3.5 await expression."""
internal_assert(len(tokens) == 1, "invalid await statement tokens", tokens)
if not self.target:
self.make_err(
CoconutTargetError,
"await requires a specific target",
original, loc,
target="sys",
)
elif self.target_info >= (3, 5):
return "await " + tokens[0]
elif self.target_info >= (3, 3):
return "(yield from " + tokens[0] + ")"
else:
return "(yield _coconut.asyncio.From(" + tokens[0] + "))" | Check for Python 3.5 await expression. | Below is the instruction that describes the task:
### Input:
Check for Python 3.5 await expression.
### Response:
def await_item_handle(self, original, loc, tokens):
"""Check for Python 3.5 await expression."""
internal_assert(len(tokens) == 1, "invalid await statement tokens", tokens)
if not self.target:
self.make_err(
CoconutTargetError,
"await requires a specific target",
original, loc,
target="sys",
)
elif self.target_info >= (3, 5):
return "await " + tokens[0]
elif self.target_info >= (3, 3):
return "(yield from " + tokens[0] + ")"
else:
return "(yield _coconut.asyncio.From(" + tokens[0] + "))" |
def generate_address(self, passphrase):
"""
Make sure the confirm code is valid for the given password and address.
"""
inter = Bip38IntermediatePoint.create(passphrase, ownersalt=self.ownersalt)
public_key = privtopub(inter.passpoint)
# from Bip38EncryptedPrivateKey.create_from_intermediate
derived = scrypt.hash(inter.passpoint, self.addresshash + inter.ownerentropy, 1024, 1, 1, 64)
derivedhalf1, derivedhalf2 = derived[:32], derived[32:]
unencrypted_prefix = bytes_to_int(self.pointbprefix) ^ (bytes_to_int(derived[63]) & 0x01);
aes = AES.new(derivedhalf2)
block1 = aes.decrypt(self.pointbx1)
block2 = aes.decrypt(self.pointbx2)
raise Exception("Not done yet")
return
block2 = long(hexlify(pointb2), 16) ^ long(hexlify(derivedhalf1[16:]), 16)
return pubtoaddr(*fast_multiply(pointb, passfactor)) | Make sure the confirm code is valid for the given password and address. | Below is the instruction that describes the task:
### Input:
Make sure the confirm code is valid for the given password and address.
### Response:
def generate_address(self, passphrase):
"""
Make sure the confirm code is valid for the given password and address.
"""
inter = Bip38IntermediatePoint.create(passphrase, ownersalt=self.ownersalt)
public_key = privtopub(inter.passpoint)
# from Bip38EncryptedPrivateKey.create_from_intermediate
derived = scrypt.hash(inter.passpoint, self.addresshash + inter.ownerentropy, 1024, 1, 1, 64)
derivedhalf1, derivedhalf2 = derived[:32], derived[32:]
unencrypted_prefix = bytes_to_int(self.pointbprefix) ^ (bytes_to_int(derived[63]) & 0x01);
aes = AES.new(derivedhalf2)
block1 = aes.decrypt(self.pointbx1)
block2 = aes.decrypt(self.pointbx2)
raise Exception("Not done yet")
return
block2 = long(hexlify(pointb2), 16) ^ long(hexlify(derivedhalf1[16:]), 16)
return pubtoaddr(*fast_multiply(pointb, passfactor)) |
def offer_random(pool, answer, rationale, student_id, options):
"""
The random selection algorithm. The same as simple algorithm
"""
offer_simple(pool, answer, rationale, student_id, options) | The random selection algorithm. The same as simple algorithm | Below is the the instruction that describes the task:
### Input:
The random selection algorithm. The same as simple algorithm
### Response:
def offer_random(pool, answer, rationale, student_id, options):
"""
The random selection algorithm. The same as simple algorithm
"""
offer_simple(pool, answer, rationale, student_id, options) |
def get_master_key(password_hash_hash, nt_response):
"""
GetMasterKey(
IN 16-octet PasswordHashHash,
IN 24-octet NTResponse,
OUT 16-octet MasterKey )
{
20-octet Digest
ZeroMemory(Digest, sizeof(Digest));
/*
* SHSInit(), SHSUpdate() and SHSFinal()
* are an implementation of the Secure Hash Standard [7].
*/
SHSInit(Context);
SHSUpdate(Context, PasswordHashHash, 16);
SHSUpdate(Context, NTResponse, 24);
SHSUpdate(Context, Magic1, 27);
SHSFinal(Context, Digest);
MoveMemory(MasterKey, Digest, 16);
}
"""
sha_hash = hashlib.sha1()
sha_hash.update(password_hash_hash)
sha_hash.update(nt_response)
sha_hash.update(Magic1)
return sha_hash.digest()[:16] | GetMasterKey(
IN 16-octet PasswordHashHash,
IN 24-octet NTResponse,
OUT 16-octet MasterKey )
{
20-octet Digest
ZeroMemory(Digest, sizeof(Digest));
/*
* SHSInit(), SHSUpdate() and SHSFinal()
* are an implementation of the Secure Hash Standard [7].
*/
SHSInit(Context);
SHSUpdate(Context, PasswordHashHash, 16);
SHSUpdate(Context, NTResponse, 24);
SHSUpdate(Context, Magic1, 27);
SHSFinal(Context, Digest);
MoveMemory(MasterKey, Digest, 16);
} | Below is the instruction that describes the task:
### Input:
GetMasterKey(
IN 16-octet PasswordHashHash,
IN 24-octet NTResponse,
OUT 16-octet MasterKey )
{
20-octet Digest
ZeroMemory(Digest, sizeof(Digest));
/*
* SHSInit(), SHSUpdate() and SHSFinal()
* are an implementation of the Secure Hash Standard [7].
*/
SHSInit(Context);
SHSUpdate(Context, PasswordHashHash, 16);
SHSUpdate(Context, NTResponse, 24);
SHSUpdate(Context, Magic1, 27);
SHSFinal(Context, Digest);
MoveMemory(MasterKey, Digest, 16);
}
### Response:
def get_master_key(password_hash_hash, nt_response):
"""
GetMasterKey(
IN 16-octet PasswordHashHash,
IN 24-octet NTResponse,
OUT 16-octet MasterKey )
{
20-octet Digest
ZeroMemory(Digest, sizeof(Digest));
/*
* SHSInit(), SHSUpdate() and SHSFinal()
* are an implementation of the Secure Hash Standard [7].
*/
SHSInit(Context);
SHSUpdate(Context, PasswordHashHash, 16);
SHSUpdate(Context, NTResponse, 24);
SHSUpdate(Context, Magic1, 27);
SHSFinal(Context, Digest);
MoveMemory(MasterKey, Digest, 16);
}
"""
sha_hash = hashlib.sha1()
sha_hash.update(password_hash_hash)
sha_hash.update(nt_response)
sha_hash.update(Magic1)
return sha_hash.digest()[:16] |
def write_dfile(self):
"""
Write the generated d_file to a temporary file.
"""
f_in = self.tempfiles.get_tempfile(prefix="bmds-", suffix=".(d)")
with open(f_in, "w") as f:
f.write(self.as_dfile())
return f_in | Write the generated d_file to a temporary file. | Below is the the instruction that describes the task:
### Input:
Write the generated d_file to a temporary file.
### Response:
def write_dfile(self):
"""
Write the generated d_file to a temporary file.
"""
f_in = self.tempfiles.get_tempfile(prefix="bmds-", suffix=".(d)")
with open(f_in, "w") as f:
f.write(self.as_dfile())
return f_in |
def ensure_python(specs):
"""Given a list of range specifiers for python, ensure compatibility.
"""
if not isinstance(specs, (list, tuple)):
specs = [specs]
v = sys.version_info
part = '%s.%s' % (v.major, v.minor)
for spec in specs:
if part == spec:
return
try:
if eval(part + spec):
return
except SyntaxError:
pass
raise ValueError('Python version %s unsupported' % part) | Given a list of range specifiers for python, ensure compatibility. | Below is the the instruction that describes the task:
### Input:
Given a list of range specifiers for python, ensure compatibility.
### Response:
def ensure_python(specs):
"""Given a list of range specifiers for python, ensure compatibility.
"""
if not isinstance(specs, (list, tuple)):
specs = [specs]
v = sys.version_info
part = '%s.%s' % (v.major, v.minor)
for spec in specs:
if part == spec:
return
try:
if eval(part + spec):
return
except SyntaxError:
pass
raise ValueError('Python version %s unsupported' % part) |
def _set_class_parser(self, init_parser, methods_to_parse, cls):
"""Creates the complete argument parser for the decorated class.
Args:
init_parser: argument parser for the __init__ method or None
methods_to_parse: dict of method name pointing to their associated
argument parser
cls: the class we are decorating
Returns:
The decorated class with an added attribute 'parser'
"""
top_level_parents = [init_parser] if init_parser else []
description = self._description or cls.__doc__
top_level_parser = argparse.ArgumentParser(description=description,
parents=top_level_parents,
add_help=False,
conflict_handler="resolve")
top_level_parser.add_argument("-h", "--help", action=FullHelpAction,
help="Display this help message")
parser_to_method = self._add_sub_parsers(top_level_parser,
methods_to_parse,
cls.__name__)
# Update the dict with the __init__ method so we can instantiate
# the decorated class
if init_parser:
parser_to_method["__init__"] = "__init__"
top_level_parser.call = self._get_parser_call_method(parser_to_method)
cls.parser = top_level_parser | Creates the complete argument parser for the decorated class.
Args:
init_parser: argument parser for the __init__ method or None
methods_to_parse: dict of method name pointing to their associated
argument parser
cls: the class we are decorating
Returns:
The decorated class with an added attribute 'parser' | Below is the instruction that describes the task:
### Input:
Creates the complete argument parser for the decorated class.
Args:
init_parser: argument parser for the __init__ method or None
methods_to_parse: dict of method name pointing to their associated
argument parser
cls: the class we are decorating
Returns:
The decorated class with an added attribute 'parser'
### Response:
def _set_class_parser(self, init_parser, methods_to_parse, cls):
"""Creates the complete argument parser for the decorated class.
Args:
init_parser: argument parser for the __init__ method or None
methods_to_parse: dict of method name pointing to their associated
argument parser
cls: the class we are decorating
Returns:
The decorated class with an added attribute 'parser'
"""
top_level_parents = [init_parser] if init_parser else []
description = self._description or cls.__doc__
top_level_parser = argparse.ArgumentParser(description=description,
parents=top_level_parents,
add_help=False,
conflict_handler="resolve")
top_level_parser.add_argument("-h", "--help", action=FullHelpAction,
help="Display this help message")
parser_to_method = self._add_sub_parsers(top_level_parser,
methods_to_parse,
cls.__name__)
# Update the dict with the __init__ method so we can instantiate
# the decorated class
if init_parser:
parser_to_method["__init__"] = "__init__"
top_level_parser.call = self._get_parser_call_method(parser_to_method)
cls.parser = top_level_parser |
def get_names_and_paths(compiler_output: Dict[str, Any]) -> Dict[str, str]:
"""
Return a mapping of contract name to relative path as defined in compiler output.
"""
return {
contract_name: make_path_relative(path)
for path in compiler_output
for contract_name in compiler_output[path].keys()
} | Return a mapping of contract name to relative path as defined in compiler output. | Below is the instruction that describes the task:
### Input:
Return a mapping of contract name to relative path as defined in compiler output.
### Response:
def get_names_and_paths(compiler_output: Dict[str, Any]) -> Dict[str, str]:
"""
Return a mapping of contract name to relative path as defined in compiler output.
"""
return {
contract_name: make_path_relative(path)
for path in compiler_output
for contract_name in compiler_output[path].keys()
} |
def create_template(self):
"""Create template (main function called by Stacker)."""
template = self.template
variables = self.get_variables()
self.template.add_version('2010-09-09')
self.template.add_description('Terraform State Resources')
# Conditions
for i in ['BucketName', 'TableName']:
template.add_condition(
"%sOmitted" % i,
Or(Equals(variables[i].ref, ''),
Equals(variables[i].ref, 'undefined'))
)
# Resources
terraformlocktable = template.add_resource(
dynamodb.Table(
'TerraformStateTable',
AttributeDefinitions=[
dynamodb.AttributeDefinition(
AttributeName='LockID',
AttributeType='S'
)
],
KeySchema=[
dynamodb.KeySchema(
AttributeName='LockID',
KeyType='HASH'
)
],
ProvisionedThroughput=dynamodb.ProvisionedThroughput(
ReadCapacityUnits=2,
WriteCapacityUnits=2
),
TableName=If(
'TableNameOmitted',
NoValue,
variables['TableName'].ref
)
)
)
template.add_output(Output(
'%sName' % terraformlocktable.title,
Description='Name of DynamoDB table for Terraform state',
Value=terraformlocktable.ref()
))
terraformstatebucket = template.add_resource(
s3.Bucket(
'TerraformStateBucket',
AccessControl=s3.Private,
BucketName=If(
'BucketNameOmitted',
NoValue,
variables['BucketName'].ref
),
LifecycleConfiguration=s3.LifecycleConfiguration(
Rules=[
s3.LifecycleRule(
NoncurrentVersionExpirationInDays=90,
Status='Enabled'
)
]
),
VersioningConfiguration=s3.VersioningConfiguration(
Status='Enabled'
)
)
)
template.add_output(Output(
'%sName' % terraformstatebucket.title,
Description='Name of bucket storing Terraform state',
Value=terraformstatebucket.ref()
))
template.add_output(Output(
'%sArn' % terraformstatebucket.title,
Description='Arn of bucket storing Terraform state',
Value=terraformstatebucket.get_att('Arn')
))
managementpolicy = template.add_resource(
iam.ManagedPolicy(
'ManagementPolicy',
Description='Managed policy for Terraform state management.',
Path='/',
PolicyDocument=PolicyDocument(
Version='2012-10-17',
Statement=[
# https://www.terraform.io/docs/backends/types/s3.html#s3-bucket-permissions
Statement(
Action=[awacs.s3.ListBucket],
Effect=Allow,
Resource=[terraformstatebucket.get_att('Arn')]
),
Statement(
Action=[awacs.s3.GetObject,
awacs.s3.PutObject],
Effect=Allow,
Resource=[
Join('', [terraformstatebucket.get_att('Arn'),
'/*'])
]
),
Statement(
Action=[awacs.dynamodb.GetItem,
awacs.dynamodb.PutItem,
awacs.dynamodb.DeleteItem],
Effect=Allow,
Resource=[terraformlocktable.get_att('Arn')]
)
]
)
)
)
template.add_output(
Output(
'PolicyArn',
Description='Managed policy Arn',
Value=managementpolicy.ref()
)
) | Create template (main function called by Stacker). | Below is the instruction that describes the task:
### Input:
Create template (main function called by Stacker).
### Response:
def create_template(self):
"""Create template (main function called by Stacker)."""
template = self.template
variables = self.get_variables()
self.template.add_version('2010-09-09')
self.template.add_description('Terraform State Resources')
# Conditions
for i in ['BucketName', 'TableName']:
template.add_condition(
"%sOmitted" % i,
Or(Equals(variables[i].ref, ''),
Equals(variables[i].ref, 'undefined'))
)
# Resources
terraformlocktable = template.add_resource(
dynamodb.Table(
'TerraformStateTable',
AttributeDefinitions=[
dynamodb.AttributeDefinition(
AttributeName='LockID',
AttributeType='S'
)
],
KeySchema=[
dynamodb.KeySchema(
AttributeName='LockID',
KeyType='HASH'
)
],
ProvisionedThroughput=dynamodb.ProvisionedThroughput(
ReadCapacityUnits=2,
WriteCapacityUnits=2
),
TableName=If(
'TableNameOmitted',
NoValue,
variables['TableName'].ref
)
)
)
template.add_output(Output(
'%sName' % terraformlocktable.title,
Description='Name of DynamoDB table for Terraform state',
Value=terraformlocktable.ref()
))
terraformstatebucket = template.add_resource(
s3.Bucket(
'TerraformStateBucket',
AccessControl=s3.Private,
BucketName=If(
'BucketNameOmitted',
NoValue,
variables['BucketName'].ref
),
LifecycleConfiguration=s3.LifecycleConfiguration(
Rules=[
s3.LifecycleRule(
NoncurrentVersionExpirationInDays=90,
Status='Enabled'
)
]
),
VersioningConfiguration=s3.VersioningConfiguration(
Status='Enabled'
)
)
)
template.add_output(Output(
'%sName' % terraformstatebucket.title,
Description='Name of bucket storing Terraform state',
Value=terraformstatebucket.ref()
))
template.add_output(Output(
'%sArn' % terraformstatebucket.title,
Description='Arn of bucket storing Terraform state',
Value=terraformstatebucket.get_att('Arn')
))
managementpolicy = template.add_resource(
iam.ManagedPolicy(
'ManagementPolicy',
Description='Managed policy for Terraform state management.',
Path='/',
PolicyDocument=PolicyDocument(
Version='2012-10-17',
Statement=[
# https://www.terraform.io/docs/backends/types/s3.html#s3-bucket-permissions
Statement(
Action=[awacs.s3.ListBucket],
Effect=Allow,
Resource=[terraformstatebucket.get_att('Arn')]
),
Statement(
Action=[awacs.s3.GetObject,
awacs.s3.PutObject],
Effect=Allow,
Resource=[
Join('', [terraformstatebucket.get_att('Arn'),
'/*'])
]
),
Statement(
Action=[awacs.dynamodb.GetItem,
awacs.dynamodb.PutItem,
awacs.dynamodb.DeleteItem],
Effect=Allow,
Resource=[terraformlocktable.get_att('Arn')]
)
]
)
)
)
template.add_output(
Output(
'PolicyArn',
Description='Managed policy Arn',
Value=managementpolicy.ref()
)
) |
def get_assignees(self):
"""
:calls: `GET /repos/:owner/:repo/assignees <http://developer.github.com/v3/issues/assignees>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/assignees",
None
) | :calls: `GET /repos/:owner/:repo/assignees <http://developer.github.com/v3/issues/assignees>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser` | Below is the instruction that describes the task:
### Input:
:calls: `GET /repos/:owner/:repo/assignees <http://developer.github.com/v3/issues/assignees>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
### Response:
def get_assignees(self):
"""
:calls: `GET /repos/:owner/:repo/assignees <http://developer.github.com/v3/issues/assignees>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/assignees",
None
) |
def write_pagerange(self, pagerange, prefix=''):
"""
Save the subset of pages specified in `pagerange` (dict) as separate PDF.
e.g. pagerange = {'title':'First chapter', 'page_start':0, 'page_end':5}
"""
writer = PdfFileWriter()
slug = "".join([c for c in pagerange['title'].replace(" ", "-") if c.isalnum() or c == "-"])
write_to_path = os.path.sep.join([self.directory, "{}{}.pdf".format(prefix, slug)])
for page in range(pagerange['page_start'], pagerange['page_end']):
writer.addPage(self.pdf.getPage(page))
writer.removeLinks() # must be done every page
with open(write_to_path, 'wb') as outfile:
writer.write(outfile)
return write_to_path | Save the subset of pages specified in `pagerange` (dict) as separate PDF.
e.g. pagerange = {'title':'First chapter', 'page_start':0, 'page_end':5} | Below is the instruction that describes the task:
### Input:
Save the subset of pages specified in `pagerange` (dict) as separate PDF.
e.g. pagerange = {'title':'First chapter', 'page_start':0, 'page_end':5}
### Response:
def write_pagerange(self, pagerange, prefix=''):
"""
Save the subset of pages specified in `pagerange` (dict) as separate PDF.
e.g. pagerange = {'title':'First chapter', 'page_start':0, 'page_end':5}
"""
writer = PdfFileWriter()
slug = "".join([c for c in pagerange['title'].replace(" ", "-") if c.isalnum() or c == "-"])
write_to_path = os.path.sep.join([self.directory, "{}{}.pdf".format(prefix, slug)])
for page in range(pagerange['page_start'], pagerange['page_end']):
writer.addPage(self.pdf.getPage(page))
writer.removeLinks() # must be done every page
with open(write_to_path, 'wb') as outfile:
writer.write(outfile)
return write_to_path |
def register_catchall_controlreq(self, callback, callback_parsed=None):
"""
Registers a callback that is called for all control requests received by your Thing
`Example`
#!python
def controlreq_callback(data):
print(data)
...
client.register_catchall_controlreq(controlreq_callback)
`callback` (required) the function name that you want to be called on receipt of a new control request
`callback_parsed` (optional) (function reference) callback function to invoke on receipt of a control ask/tell.
This is equivalent to `callback` except the dict includes the `parsed` key which holds the set of values in a
[PointDataObject](./Point.m.html#IoticAgent.IOT.Point.PointDataObject) instance. If both `callback_parsed` and
`callback` have been specified, the former takes precedence and `callback` is only called if the point data
could not be parsed according to its current value description.
More details on the contents of the `data` dictionary for controls see:
[create_control()](./Thing.m.html#IoticAgent.IOT.Thing.Thing.create_control)
"""
if callback_parsed:
callback = self._get_parsed_control_callback(callback_parsed, callback)
return self.__client.register_callback_controlreq(callback) | Registers a callback that is called for all control requests received by your Thing
`Example`
#!python
def controlreq_callback(data):
print(data)
...
client.register_catchall_controlreq(controlreq_callback)
`callback` (required) the function name that you want to be called on receipt of a new control request
`callback_parsed` (optional) (function reference) callback function to invoke on receipt of a control ask/tell.
This is equivalent to `callback` except the dict includes the `parsed` key which holds the set of values in a
[PointDataObject](./Point.m.html#IoticAgent.IOT.Point.PointDataObject) instance. If both `callback_parsed` and
`callback` have been specified, the former takes precedence and `callback` is only called if the point data
could not be parsed according to its current value description.
More details on the contents of the `data` dictionary for controls see:
[create_control()](./Thing.m.html#IoticAgent.IOT.Thing.Thing.create_control) | Below is the instruction that describes the task:
### Input:
Registers a callback that is called for all control requests received by your Thing
`Example`
#!python
def controlreq_callback(data):
print(data)
...
client.register_catchall_controlreq(controlreq_callback)
`callback` (required) the function name that you want to be called on receipt of a new control request
`callback_parsed` (optional) (function reference) callback function to invoke on receipt of a control ask/tell.
This is equivalent to `callback` except the dict includes the `parsed` key which holds the set of values in a
[PointDataObject](./Point.m.html#IoticAgent.IOT.Point.PointDataObject) instance. If both `callback_parsed` and
`callback` have been specified, the former takes precedence and `callback` is only called if the point data
could not be parsed according to its current value description.
More details on the contents of the `data` dictionary for controls see:
[create_control()](./Thing.m.html#IoticAgent.IOT.Thing.Thing.create_control)
### Response:
def register_catchall_controlreq(self, callback, callback_parsed=None):
"""
Registers a callback that is called for all control requests received by your Thing
`Example`
#!python
def controlreq_callback(data):
print(data)
...
client.register_catchall_controlreq(controlreq_callback)
`callback` (required) the function name that you want to be called on receipt of a new control request
`callback_parsed` (optional) (function reference) callback function to invoke on receipt of a control ask/tell.
This is equivalent to `callback` except the dict includes the `parsed` key which holds the set of values in a
[PointDataObject](./Point.m.html#IoticAgent.IOT.Point.PointDataObject) instance. If both `callback_parsed` and
`callback` have been specified, the former takes precedence and `callback` is only called if the point data
could not be parsed according to its current value description.
More details on the contents of the `data` dictionary for controls see:
[create_control()](./Thing.m.html#IoticAgent.IOT.Thing.Thing.create_control)
"""
if callback_parsed:
callback = self._get_parsed_control_callback(callback_parsed, callback)
return self.__client.register_callback_controlreq(callback) |
def rotate(self, log):
"""Move the current log to a new file with timestamp and create a new empty log file."""
self.write(log, rotate=True)
self.write({}) | Move the current log to a new file with timestamp and create a new empty log file. | Below is the instruction that describes the task:
### Input:
Move the current log to a new file with timestamp and create a new empty log file.
### Response:
def rotate(self, log):
"""Move the current log to a new file with timestamp and create a new empty log file."""
self.write(log, rotate=True)
self.write({}) |
def find_duplicates(items, k=2, key=None):
"""
Find all duplicate items in a list.
Search for all items that appear more than `k` times and return a mapping
from each (k)-duplicate item to the positions it appeared in.
Args:
items (Iterable): hashable items possibly containing duplicates
k (int): only return items that appear at least `k` times (default=2)
key (Callable, optional): Returns indices where `key(items[i])`
maps to a particular value at least k times.
Returns:
dict: maps each duplicate item to the indices at which it appears
CommandLine:
python -m ubelt.util_dict find_duplicates
Example:
>>> import ubelt as ub
>>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9]
>>> duplicates = ub.find_duplicates(items)
>>> print('items = %r' % (items,))
>>> print('duplicates = %r' % (duplicates,))
>>> assert duplicates == {0: [0, 1, 6], 2: [3, 8], 3: [4, 5]}
>>> assert ub.find_duplicates(items, 3) == {0: [0, 1, 6]}
Example:
>>> import ubelt as ub
>>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9]
>>> # note: k can be 0
>>> duplicates = ub.find_duplicates(items, k=0)
>>> print(ub.repr2(duplicates, nl=0))
{0: [0, 1, 6], 1: [2], 2: [3, 8], 3: [4, 5], 9: [9], 12: [7]}
Example:
>>> import ubelt as ub
>>> items = [10, 11, 12, 13, 14, 15, 16]
>>> duplicates = ub.find_duplicates(items, key=lambda x: x // 2)
>>> print(ub.repr2(duplicates, nl=0))
{5: [0, 1], 6: [2, 3], 7: [4, 5]}
"""
# Build mapping from items to the indices at which they appear
# if key is not None:
# items = map(key, items)
duplicates = defaultdict(list)
if key is None:
for count, item in enumerate(items):
duplicates[item].append(count)
else:
for count, item in enumerate(items):
duplicates[key(item)].append(count)
# remove items seen fewer than k times.
for key in list(duplicates.keys()):
if len(duplicates[key]) < k:
del duplicates[key]
duplicates = dict(duplicates)
return duplicates | Find all duplicate items in a list.
Search for all items that appear more than `k` times and return a mapping
from each (k)-duplicate item to the positions it appeared in.
Args:
items (Iterable): hashable items possibly containing duplicates
k (int): only return items that appear at least `k` times (default=2)
key (Callable, optional): Returns indices where `key(items[i])`
maps to a particular value at least k times.
Returns:
dict: maps each duplicate item to the indices at which it appears
CommandLine:
python -m ubelt.util_dict find_duplicates
Example:
>>> import ubelt as ub
>>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9]
>>> duplicates = ub.find_duplicates(items)
>>> print('items = %r' % (items,))
>>> print('duplicates = %r' % (duplicates,))
>>> assert duplicates == {0: [0, 1, 6], 2: [3, 8], 3: [4, 5]}
>>> assert ub.find_duplicates(items, 3) == {0: [0, 1, 6]}
Example:
>>> import ubelt as ub
>>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9]
>>> # note: k can be 0
>>> duplicates = ub.find_duplicates(items, k=0)
>>> print(ub.repr2(duplicates, nl=0))
{0: [0, 1, 6], 1: [2], 2: [3, 8], 3: [4, 5], 9: [9], 12: [7]}
Example:
>>> import ubelt as ub
>>> items = [10, 11, 12, 13, 14, 15, 16]
>>> duplicates = ub.find_duplicates(items, key=lambda x: x // 2)
>>> print(ub.repr2(duplicates, nl=0))
{5: [0, 1], 6: [2, 3], 7: [4, 5]} | Below is the instruction that describes the task:
### Input:
Find all duplicate items in a list.
Search for all items that appear more than `k` times and return a mapping
from each (k)-duplicate item to the positions it appeared in.
Args:
items (Iterable): hashable items possibly containing duplicates
k (int): only return items that appear at least `k` times (default=2)
key (Callable, optional): Returns indices where `key(items[i])`
maps to a particular value at least k times.
Returns:
dict: maps each duplicate item to the indices at which it appears
CommandLine:
python -m ubelt.util_dict find_duplicates
Example:
>>> import ubelt as ub
>>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9]
>>> duplicates = ub.find_duplicates(items)
>>> print('items = %r' % (items,))
>>> print('duplicates = %r' % (duplicates,))
>>> assert duplicates == {0: [0, 1, 6], 2: [3, 8], 3: [4, 5]}
>>> assert ub.find_duplicates(items, 3) == {0: [0, 1, 6]}
Example:
>>> import ubelt as ub
>>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9]
>>> # note: k can be 0
>>> duplicates = ub.find_duplicates(items, k=0)
>>> print(ub.repr2(duplicates, nl=0))
{0: [0, 1, 6], 1: [2], 2: [3, 8], 3: [4, 5], 9: [9], 12: [7]}
Example:
>>> import ubelt as ub
>>> items = [10, 11, 12, 13, 14, 15, 16]
>>> duplicates = ub.find_duplicates(items, key=lambda x: x // 2)
>>> print(ub.repr2(duplicates, nl=0))
{5: [0, 1], 6: [2, 3], 7: [4, 5]}
### Response:
def find_duplicates(items, k=2, key=None):
    """
    Find all duplicate items in a list.
    Search for all items that appear more than `k` times and return a mapping
    from each (k)-duplicate item to the positions it appeared in.
    Args:
        items (Iterable): hashable items possibly containing duplicates
        k (int): only return items that appear at least `k` times (default=2)
        key (Callable, optional): Returns indices where `key(items[i])`
            maps to a particular value at least k times.
    Returns:
        dict: maps each duplicate item to the indices at which it appears
    CommandLine:
        python -m ubelt.util_dict find_duplicates
    Example:
        >>> import ubelt as ub
        >>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9]
        >>> duplicates = ub.find_duplicates(items)
        >>> assert duplicates == {0: [0, 1, 6], 2: [3, 8], 3: [4, 5]}
        >>> assert ub.find_duplicates(items, 3) == {0: [0, 1, 6]}
    Example:
        >>> import ubelt as ub
        >>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9]
        >>> # note: k can be 0
        >>> duplicates = ub.find_duplicates(items, k=0)
        >>> print(ub.repr2(duplicates, nl=0))
        {0: [0, 1, 6], 1: [2], 2: [3, 8], 3: [4, 5], 9: [9], 12: [7]}
    Example:
        >>> import ubelt as ub
        >>> items = [10, 11, 12, 13, 14, 15, 16]
        >>> duplicates = ub.find_duplicates(items, key=lambda x: x // 2)
        >>> print(ub.repr2(duplicates, nl=0))
        {5: [0, 1], 6: [2, 3], 7: [4, 5]}
    """
    # Map each (possibly keyed) item to the list of indices where it occurs.
    # Note: `index_map` deliberately does not reuse the name `key`, which the
    # original implementation shadowed in its cleanup loop.
    index_map = defaultdict(list)
    if key is None:
        for index, item in enumerate(items):
            index_map[item].append(index)
    else:
        for index, item in enumerate(items):
            index_map[key(item)].append(index)
    # Keep only items seen at least `k` times.  A plain dict is returned so
    # that lookups of missing items raise KeyError as callers expect.
    return {item: idxs for item, idxs in index_map.items() if len(idxs) >= k}
return duplicates |
def Copy(self):
"""Return a copy of the queue manager.
Returns:
Copy of the QueueManager object.
NOTE: pending writes/deletions are not copied. On the other hand, if the
original object has a frozen timestamp, a copy will have it as well.
"""
result = QueueManager(store=self.data_store, token=self.token)
result.prev_frozen_timestamps = self.prev_frozen_timestamps
result.frozen_timestamp = self.frozen_timestamp
return result | Return a copy of the queue manager.
Returns:
Copy of the QueueManager object.
NOTE: pending writes/deletions are not copied. On the other hand, if the
original object has a frozen timestamp, a copy will have it as well. | Below is the instruction that describes the task:
### Input:
Return a copy of the queue manager.
Returns:
Copy of the QueueManager object.
NOTE: pending writes/deletions are not copied. On the other hand, if the
original object has a frozen timestamp, a copy will have it as well.
### Response:
def Copy(self):
    """Return a copy of the queue manager.
    Returns:
      Copy of the QueueManager object.
    NOTE: pending writes/deletions are not copied. On the other hand, if the
    original object has a frozen timestamp, a copy will have it as well.
    """
    # Build a fresh manager against the same data store and token, then carry
    # over only the timestamp-freezing state (pending mutations stay behind).
    clone = QueueManager(store=self.data_store, token=self.token)
    clone.frozen_timestamp = self.frozen_timestamp
    clone.prev_frozen_timestamps = self.prev_frozen_timestamps
    return clone
def op(cls,text,*args,**kwargs):
""" This method must be overriden in derived classes """
return cls.fn(text,*args,**kwargs) | This method must be overriden in derived classes | Below is the instruction that describes the task:
### Input:
This method must be overriden in derived classes
### Response:
def op(cls,text,*args,**kwargs):
        """Apply this operation to ``text``.

        This method must be overridden in derived classes; the default
        implementation simply forwards all arguments to ``cls.fn``.
        """
        apply_fn = cls.fn
        return apply_fn(text, *args, **kwargs)
async def _create_transaction(self, msg, *args, **kwargs):
"""
Create a transaction with the distant server
:param msg: message to be sent
:param args: args to be sent to the coroutines given to `register_transaction`
:param kwargs: kwargs to be sent to the coroutines given to `register_transaction`
"""
recv_msgs, get_key, _1, _2, _3 = self._msgs_registered[msg.__msgtype__]
key = get_key(msg)
if key in self._transactions[recv_msgs[0]]:
# If we already have a request for this particular key, just add it on the list of things to call
for recv_msg in recv_msgs:
self._transactions[recv_msg][key].append((args, kwargs))
else:
# If that's not the case, add us in the queue, and send the message
for recv_msg in recv_msgs:
self._transactions[recv_msg][key] = [(args, kwargs)]
await ZMQUtils.send(self._socket, msg) | Create a transaction with the distant server
:param msg: message to be sent
:param args: args to be sent to the coroutines given to `register_transaction`
:param kwargs: kwargs to be sent to the coroutines given to `register_transaction` | Below is the the instruction that describes the task:
### Input:
Create a transaction with the distant server
:param msg: message to be sent
:param args: args to be sent to the coroutines given to `register_transaction`
:param kwargs: kwargs to be sent to the coroutines given to `register_transaction`
### Response:
async def _create_transaction(self, msg, *args, **kwargs):
        """
        Create a transaction with the distant server
        :param msg: message to be sent
        :param args: args to be sent to the coroutines given to `register_transaction`
        :param kwargs: kwargs to be sent to the coroutines given to `register_transaction`
        """
        # Look up the registration for this message type: the expected reply
        # message types and the function that derives the transaction key.
        # (The three trailing slots of the tuple are unused here.)
        recv_msgs, get_key, _1, _2, _3 = self._msgs_registered[msg.__msgtype__]
        key = get_key(msg)
        if key in self._transactions[recv_msgs[0]]:
            # If we already have a request for this particular key, just add it on the list of things to call
            for recv_msg in recv_msgs:
                self._transactions[recv_msg][key].append((args, kwargs))
        else:
            # If that's not the case, add us in the queue, and send the message
            for recv_msg in recv_msgs:
                self._transactions[recv_msg][key] = [(args, kwargs)]
            await ZMQUtils.send(self._socket, msg)
def print_accuracies(filepath, train_start=TRAIN_START, train_end=TRAIN_END,
test_start=TEST_START, test_end=TEST_END,
batch_size=BATCH_SIZE, which_set=WHICH_SET,
base_eps_iter=BASE_EPS_ITER,
nb_iter=NB_ITER):
"""
Load a saved model and print out its accuracy on different data distributions
This function works by running a single attack on each example.
This provides a reasonable estimate of the true failure rate quickly, so
long as the model does not suffer from gradient masking.
However, this estimate is mostly intended for development work and not
for publication. A more accurate estimate may be obtained by running
an attack bundler instead.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param batch_size: size of evaluation batches
:param which_set: 'train' or 'test'
:param base_eps_iter: step size if the data were in [0,1]
(Step size will be rescaled proportional to the actual data range)
:param nb_iter: Number of iterations of PGD to run per class
"""
# Set TF random seed to improve reproducibility
tf.set_random_seed(20181014)
set_log_level(logging.INFO)
sess = tf.Session()
with sess.as_default():
model = load(filepath)
assert len(model.get_params()) > 0
factory = model.dataset_factory
factory.kwargs['train_start'] = train_start
factory.kwargs['train_end'] = train_end
factory.kwargs['test_start'] = test_start
factory.kwargs['test_end'] = test_end
dataset = factory()
x_data, y_data = dataset.get_set(which_set)
impl(sess, model, dataset, factory, x_data, y_data, base_eps_iter, nb_iter) | Load a saved model and print out its accuracy on different data distributions
This function works by running a single attack on each example.
This provides a reasonable estimate of the true failure rate quickly, so
long as the model does not suffer from gradient masking.
However, this estimate is mostly intended for development work and not
for publication. A more accurate estimate may be obtained by running
an attack bundler instead.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param batch_size: size of evaluation batches
:param which_set: 'train' or 'test'
:param base_eps_iter: step size if the data were in [0,1]
(Step size will be rescaled proportional to the actual data range)
:param nb_iter: Number of iterations of PGD to run per class | Below is the the instruction that describes the task:
### Input:
Load a saved model and print out its accuracy on different data distributions
This function works by running a single attack on each example.
This provides a reasonable estimate of the true failure rate quickly, so
long as the model does not suffer from gradient masking.
However, this estimate is mostly intended for development work and not
for publication. A more accurate estimate may be obtained by running
an attack bundler instead.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param batch_size: size of evaluation batches
:param which_set: 'train' or 'test'
:param base_eps_iter: step size if the data were in [0,1]
(Step size will be rescaled proportional to the actual data range)
:param nb_iter: Number of iterations of PGD to run per class
### Response:
def print_accuracies(filepath, train_start=TRAIN_START, train_end=TRAIN_END,
                     test_start=TEST_START, test_end=TEST_END,
                     batch_size=BATCH_SIZE, which_set=WHICH_SET,
                     base_eps_iter=BASE_EPS_ITER,
                     nb_iter=NB_ITER):
  """
  Load a saved model and print out its accuracy on different data distributions
  This function works by running a single attack on each example.
  This provides a reasonable estimate of the true failure rate quickly, so
  long as the model does not suffer from gradient masking.
  However, this estimate is mostly intended for development work and not
  for publication. A more accurate estimate may be obtained by running
  an attack bundler instead.
  :param filepath: path to model to evaluate
  :param train_start: index of first training set example to use
  :param train_end: index of last training set example to use
  :param test_start: index of first test set example to use
  :param test_end: index of last test set example to use
  :param batch_size: size of evaluation batches
    NOTE(review): accepted but never used in this function body -- confirm
    whether it should be forwarded to `impl`.
  :param which_set: 'train' or 'test'
  :param base_eps_iter: step size if the data were in [0,1]
    (Step size will be rescaled proportional to the actual data range)
  :param nb_iter: Number of iterations of PGD to run per class
  """
  # Set TF random seed to improve reproducibility
  tf.set_random_seed(20181014)
  set_log_level(logging.INFO)
  sess = tf.Session()
  with sess.as_default():
    model = load(filepath)
    # A successfully-loaded model must expose at least one parameter.
    assert len(model.get_params()) > 0
    # Rebuild the dataset the model was trained on, restricted to the
    # requested example index ranges.
    factory = model.dataset_factory
    factory.kwargs['train_start'] = train_start
    factory.kwargs['train_end'] = train_end
    factory.kwargs['test_start'] = test_start
    factory.kwargs['test_end'] = test_end
    dataset = factory()
    x_data, y_data = dataset.get_set(which_set)
    # Delegate the actual attack runs and accuracy reporting.
    impl(sess, model, dataset, factory, x_data, y_data, base_eps_iter, nb_iter)
def with_blob(self, blob):
"""
Init Azure Blob Lease with existing blob.
"""
content = json.loads(blob.content)
self.partition_id = content["partition_id"]
self.owner = content["owner"]
self.token = content["token"]
self.epoch = content["epoch"]
self.offset = content["offset"]
self.sequence_number = content["sequence_number"]
self.event_processor_context = content.get("event_processor_context") | Init Azure Blob Lease with existing blob. | Below is the the instruction that describes the task:
### Input:
Init Azure Blob Lease with existing blob.
### Response:
def with_blob(self, blob):
        """
        Init Azure Blob Lease with existing blob.
        """
        # The blob content is a JSON document holding the lease state.
        state = json.loads(blob.content)
        # Required fields -- a missing key raises KeyError, as before.
        for field in ("partition_id", "owner", "token",
                      "epoch", "offset", "sequence_number"):
            setattr(self, field, state[field])
        # Optional field: absent means no saved event-processor context.
        self.event_processor_context = state.get("event_processor_context")
def check_import_stdlib(module):
"""Check if module is in Python stdlib.
Args:
module (str): The name of the module to check.
Returns:
bool: Returns True if the module is in the stdlib or template.
"""
if (
module in stdlib_list('2.7') # pylint: disable=R0916
or module in stdlib_list('3.4')
or module in stdlib_list('3.5')
or module in stdlib_list('3.6')
or module in stdlib_list('3.7')
or module in ['app', 'args', 'playbook_app']
):
return True
return False | Check if module is in Python stdlib.
Args:
module (str): The name of the module to check.
Returns:
bool: Returns True if the module is in the stdlib or template. | Below is the the instruction that describes the task:
### Input:
Check if module is in Python stdlib.
Args:
module (str): The name of the module to check.
Returns:
bool: Returns True if the module is in the stdlib or template.
### Response:
def check_import_stdlib(module):
    """Check if module is in Python stdlib.
    Args:
        module (str): The name of the module to check.
    Returns:
        bool: Returns True if the module is in the stdlib or template.
    """
    # Names provided by the App template are treated like stdlib modules.
    if module in ('app', 'args', 'playbook_app'):
        return True
    # Accept the module if it is in the stdlib of any supported Python
    # version (replaces the original six-branch `or` chain; same result,
    # and `any()` short-circuits on the first hit).
    supported_versions = ('2.7', '3.4', '3.5', '3.6', '3.7')
    return any(module in stdlib_list(version) for version in supported_versions)
def upload_document_fileobj(file_obj, file_name, session, documents_resource, log=None):
"""Uploads a single file-like object to the One Codex server directly to S3.
Parameters
----------
file_obj : `FilePassthru`, or a file-like object
If a file-like object is given, its mime-type will be sent as 'text/plain'. Otherwise,
`FilePassthru` will send a compressed type if the file is gzip'd or bzip'd.
file_name : `string`
The file_name you wish to associate this file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST.
session : `requests.Session`
Connection to One Codex API.
documents_resource : `onecodex.models.Documents`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Notes
-----
In contrast to `upload_sample_fileobj`, this method will /only/ upload to an S3 intermediate
bucket--not via our direct proxy or directly to a user's S3 bucket with a signed request.
Raises
------
UploadException
In the case of a fatal exception during an upload.
Returns
-------
`string` containing sample UUID of newly uploaded file.
"""
try:
fields = documents_resource.init_multipart_upload()
except requests.exceptions.HTTPError as e:
raise_api_error(e.response, state="init")
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name)
s3_upload = _s3_intermediate_upload(
file_obj,
file_name,
fields,
session,
documents_resource._client._root_url + fields["callback_url"], # full callback url
)
document_id = s3_upload.get("document_id", "<UUID not yet assigned>")
logging.info("{}: finished as document {}".format(file_name, document_id))
return document_id | Uploads a single file-like object to the One Codex server directly to S3.
Parameters
----------
file_obj : `FilePassthru`, or a file-like object
If a file-like object is given, its mime-type will be sent as 'text/plain'. Otherwise,
`FilePassthru` will send a compressed type if the file is gzip'd or bzip'd.
file_name : `string`
The file_name you wish to associate this file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST.
session : `requests.Session`
Connection to One Codex API.
documents_resource : `onecodex.models.Documents`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Notes
-----
In contrast to `upload_sample_fileobj`, this method will /only/ upload to an S3 intermediate
bucket--not via our direct proxy or directly to a user's S3 bucket with a signed request.
Raises
------
UploadException
In the case of a fatal exception during an upload.
Returns
-------
`string` containing sample UUID of newly uploaded file. | Below is the the instruction that describes the task:
### Input:
Uploads a single file-like object to the One Codex server directly to S3.
Parameters
----------
file_obj : `FilePassthru`, or a file-like object
If a file-like object is given, its mime-type will be sent as 'text/plain'. Otherwise,
`FilePassthru` will send a compressed type if the file is gzip'd or bzip'd.
file_name : `string`
The file_name you wish to associate this file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST.
session : `requests.Session`
Connection to One Codex API.
documents_resource : `onecodex.models.Documents`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Notes
-----
In contrast to `upload_sample_fileobj`, this method will /only/ upload to an S3 intermediate
bucket--not via our direct proxy or directly to a user's S3 bucket with a signed request.
Raises
------
UploadException
In the case of a fatal exception during an upload.
Returns
-------
`string` containing sample UUID of newly uploaded file.
### Response:
def upload_document_fileobj(file_obj, file_name, session, documents_resource, log=None):
    """Uploads a single file-like object to the One Codex server directly to S3.
    Parameters
    ----------
    file_obj : `FilePassthru`, or a file-like object
        If a file-like object is given, its mime-type will be sent as 'text/plain'. Otherwise,
        `FilePassthru` will send a compressed type if the file is gzip'd or bzip'd.
    file_name : `string`
        The file_name you wish to associate this file with at One Codex.
    session : `requests.Session`
        Connection to One Codex API.
    documents_resource : `onecodex.models.Documents`
        Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
    log : optional
        NOTE(review): accepted but never used in this function -- confirm
        whether it should be threaded through to the upload helper.
    Notes
    -----
    In contrast to `upload_sample_fileobj`, this method will /only/ upload to an S3 intermediate
    bucket--not via our direct proxy or directly to a user's S3 bucket with a signed request.
    Raises
    ------
    UploadException
        In the case of a fatal exception during an upload.
    Returns
    -------
    `string` containing the document UUID of the newly uploaded file, or a
    placeholder string when the server has not yet assigned one.
    """
    try:
        fields = documents_resource.init_multipart_upload()
    except requests.exceptions.HTTPError as e:
        raise_api_error(e.response, state="init")
    except requests.exceptions.ConnectionError:
        raise_connectivity_error(file_name)
    # Push the file contents to the intermediate S3 bucket; the callback URL
    # tells the One Codex API to confirm the upload once S3 has the data.
    s3_upload = _s3_intermediate_upload(
        file_obj,
        file_name,
        fields,
        session,
        documents_resource._client._root_url + fields["callback_url"],  # full callback url
    )
    document_id = s3_upload.get("document_id", "<UUID not yet assigned>")
    logging.info("{}: finished as document {}".format(file_name, document_id))
    return document_id
def atleast_1d(*arrs):
r"""Convert inputs to arrays with at least one dimension.
Scalars are converted to 1-dimensional arrays, whilst other
higher-dimensional inputs are preserved. This is a thin wrapper
around `numpy.atleast_1d` to preserve units.
Parameters
----------
arrs : arbitrary positional arguments
Input arrays to be converted if necessary
Returns
-------
`pint.Quantity`
A single quantity or a list of quantities, matching the number of inputs.
"""
mags = [a.magnitude if hasattr(a, 'magnitude') else a for a in arrs]
orig_units = [a.units if hasattr(a, 'units') else None for a in arrs]
ret = np.atleast_1d(*mags)
if len(mags) == 1:
if orig_units[0] is not None:
return units.Quantity(ret, orig_units[0])
else:
return ret
return [units.Quantity(m, u) if u is not None else m for m, u in zip(ret, orig_units)] | r"""Convert inputs to arrays with at least one dimension.
Scalars are converted to 1-dimensional arrays, whilst other
higher-dimensional inputs are preserved. This is a thin wrapper
around `numpy.atleast_1d` to preserve units.
Parameters
----------
arrs : arbitrary positional arguments
Input arrays to be converted if necessary
Returns
-------
`pint.Quantity`
A single quantity or a list of quantities, matching the number of inputs. | Below is the the instruction that describes the task:
### Input:
r"""Convert inputs to arrays with at least one dimension.
Scalars are converted to 1-dimensional arrays, whilst other
higher-dimensional inputs are preserved. This is a thin wrapper
around `numpy.atleast_1d` to preserve units.
Parameters
----------
arrs : arbitrary positional arguments
Input arrays to be converted if necessary
Returns
-------
`pint.Quantity`
A single quantity or a list of quantities, matching the number of inputs.
### Response:
def atleast_1d(*arrs):
    r"""Convert inputs to arrays with at least one dimension.
    Scalars are converted to 1-dimensional arrays, whilst other
    higher-dimensional inputs are preserved. This is a thin wrapper
    around `numpy.atleast_1d` to preserve units.
    Parameters
    ----------
    arrs : arbitrary positional arguments
        Input arrays to be converted if necessary
    Returns
    -------
    `pint.Quantity`
        A single quantity or a list of quantities, matching the number of inputs.
    """
    # Strip units (when present) so numpy can work on bare magnitudes.
    bare = [getattr(a, 'magnitude', a) for a in arrs]
    unit_list = [getattr(a, 'units', None) for a in arrs]
    converted = np.atleast_1d(*bare)
    if len(bare) == 1:
        # A single input comes back as a lone array rather than a list.
        only_unit = unit_list[0]
        if only_unit is None:
            return converted
        return units.Quantity(converted, only_unit)
    # Re-attach each input's original units (if it had any).
    return [units.Quantity(arr, u) if u is not None else arr
            for arr, u in zip(converted, unit_list)]
def dataframe(self, predicate=None, filtered_columns=None, columns=None, df_class=None):
"""Return the partition as a Pandas dataframe
:param predicate: If defined, a callable that is called for each row, and if it returns true, the
row is included in the output.
:param filtered_columns: If defined, the value is a dict of column names and
associated values. Only rows where all of the named columms have the given values will be returned.
Setting the argument will overwrite any value set for the predicate
:param columns: A list or tuple of column names to return
:return: Pandas dataframe
"""
from operator import itemgetter
from ambry.pands import AmbryDataFrame
df_class = df_class or AmbryDataFrame
if columns:
ig = itemgetter(*columns)
else:
ig = None
columns = self.table.header
if filtered_columns:
def maybe_quote(v):
from six import string_types
if isinstance(v, string_types):
return '"{}"'.format(v)
else:
return v
code = ' and '.join("row.{} == {}".format(k, maybe_quote(v))
for k, v in filtered_columns.items())
predicate = eval('lambda row: {}'.format(code))
if predicate:
def yielder():
for row in self.reader:
if predicate(row):
if ig:
yield ig(row)
else:
yield row.dict
df = df_class(yielder(), columns=columns, partition=self.measuredim)
return df
else:
def yielder():
for row in self.reader:
yield row.values()
# Put column names in header order
columns = [c for c in self.table.header if c in columns]
return df_class(yielder(), columns=columns, partition=self.measuredim) | Return the partition as a Pandas dataframe
:param predicate: If defined, a callable that is called for each row, and if it returns true, the
row is included in the output.
:param filtered_columns: If defined, the value is a dict of column names and
associated values. Only rows where all of the named columms have the given values will be returned.
Setting the argument will overwrite any value set for the predicate
:param columns: A list or tuple of column names to return
:return: Pandas dataframe | Below is the the instruction that describes the task:
### Input:
Return the partition as a Pandas dataframe
:param predicate: If defined, a callable that is called for each row, and if it returns true, the
row is included in the output.
:param filtered_columns: If defined, the value is a dict of column names and
associated values. Only rows where all of the named columms have the given values will be returned.
Setting the argument will overwrite any value set for the predicate
:param columns: A list or tuple of column names to return
:return: Pandas dataframe
### Response:
def dataframe(self, predicate=None, filtered_columns=None, columns=None, df_class=None):
        """Return the partition as a Pandas dataframe
        :param predicate: If defined, a callable that is called for each row, and if it returns true, the
        row is included in the output.
        :param filtered_columns: If defined, the value is a dict of column names and
        associated values. Only rows where all of the named columms have the given values will be returned.
        Setting the argument will overwrite any value set for the predicate
        :param columns: A list or tuple of column names to return
        :return: Pandas dataframe
        """
        from operator import itemgetter
        from ambry.pands import AmbryDataFrame
        df_class = df_class or AmbryDataFrame
        if columns:
            ig = itemgetter(*columns)
        else:
            ig = None
            columns = self.table.header
        if filtered_columns:
            def maybe_quote(v):
                from six import string_types
                if isinstance(v, string_types):
                    return '"{}"'.format(v)
                else:
                    return v
            # NOTE(review): building the predicate with eval() will break (or,
            # worse, execute code) if a filter value contains quotes -- confirm
            # filtered_columns is always trusted, or replace with a closure
            # over filtered_columns.
            code = ' and '.join("row.{} == {}".format(k, maybe_quote(v))
                                for k, v in filtered_columns.items())
            predicate = eval('lambda row: {}'.format(code))
        if predicate:
            def yielder():
                for row in self.reader:
                    if predicate(row):
                        if ig:
                            yield ig(row)
                        else:
                            yield row.dict
            df = df_class(yielder(), columns=columns, partition=self.measuredim)
            return df
        else:
            def yielder():
                # NOTE(review): this branch yields full rows (row.values())
                # even when a `columns` subset was requested, while the header
                # below is filtered to that subset -- looks like a column/width
                # mismatch; verify against the row API and callers.
                for row in self.reader:
                    yield row.values()
            # Put column names in header order
            columns = [c for c in self.table.header if c in columns]
            return df_class(yielder(), columns=columns, partition=self.measuredim)
def verifySignature(ecPublicSigningKey, message, signature):
"""
:type ecPublicSigningKey: ECPublicKey
:type message: bytearray
:type signature: bytearray
"""
if ecPublicSigningKey.getType() == Curve.DJB_TYPE:
result = _curve.verifySignature(ecPublicSigningKey.getPublicKey(), message, signature)
return result == 0
else:
raise InvalidKeyException("Unknown type: %s" % ecPublicSigningKey.getType()) | :type ecPublicSigningKey: ECPublicKey
:type message: bytearray
:type signature: bytearray | Below is the the instruction that describes the task:
### Input:
:type ecPublicSigningKey: ECPublicKey
:type message: bytearray
:type signature: bytearray
### Response:
def verifySignature(ecPublicSigningKey, message, signature):
        """
        :type ecPublicSigningKey: ECPublicKey
        :type message: bytearray
        :type signature: bytearray
        """
        # Guard clause: only DJB-type keys are supported by the native backend.
        if ecPublicSigningKey.getType() != Curve.DJB_TYPE:
            raise InvalidKeyException("Unknown type: %s" % ecPublicSigningKey.getType())
        # The native verifier signals success with a 0 return code.
        return _curve.verifySignature(ecPublicSigningKey.getPublicKey(), message, signature) == 0
def QA_fetch_get_stock_info(code, ip=None, port=None):
'股票基本信息'
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
market_code = _select_market_code(code)
with api.connect(ip, port):
return api.to_df(api.get_finance_info(market_code, code)) | 股票基本信息 | Below is the the instruction that describes the task:
### Input:
股票基本信息
### Response:
def QA_fetch_get_stock_info(code, ip=None, port=None):
    """Fetch basic stock (finance) information from a TDX quote server.

    :param code: stock code string; also used to infer the market
    :param ip: TDX server ip; a main-market server is auto-selected when None
    :param port: TDX server port; auto-selected when None
    :return: the finance info for ``code`` converted via ``api.to_df``
    """
    ip, port = get_mainmarket_ip(ip, port)
    api = TdxHq_API()
    # Determine which exchange (market) the code belongs to.
    market_code = _select_market_code(code)
    with api.connect(ip, port):
        return api.to_df(api.get_finance_info(market_code, code))
def kmip_version(self, value):
"""
Set the KMIP version for the client.
Args:
value (KMIPVersion): A KMIPVersion enumeration
Return:
None
Raises:
ValueError: if value is not a KMIPVersion enumeration
Example:
>>> client.kmip_version = enums.KMIPVersion.KMIP_1_1
>>>
"""
if isinstance(value, enums.KMIPVersion):
self.proxy.kmip_version = value
else:
raise ValueError("KMIP version must be a KMIPVersion enumeration") | Set the KMIP version for the client.
Args:
value (KMIPVersion): A KMIPVersion enumeration
Return:
None
Raises:
ValueError: if value is not a KMIPVersion enumeration
Example:
>>> client.kmip_version = enums.KMIPVersion.KMIP_1_1
>>> | Below is the the instruction that describes the task:
### Input:
Set the KMIP version for the client.
Args:
value (KMIPVersion): A KMIPVersion enumeration
Return:
None
Raises:
ValueError: if value is not a KMIPVersion enumeration
Example:
>>> client.kmip_version = enums.KMIPVersion.KMIP_1_1
>>>
### Response:
def kmip_version(self, value):
        """
        Set the KMIP version for the client.
        Args:
            value (KMIPVersion): A KMIPVersion enumeration
        Return:
            None
        Raises:
            ValueError: if value is not a KMIPVersion enumeration
        Example:
            >>> client.kmip_version = enums.KMIPVersion.KMIP_1_1
            >>>
        """
        # Guard clause: reject anything that is not a KMIPVersion member
        # before touching the underlying proxy.
        if not isinstance(value, enums.KMIPVersion):
            raise ValueError("KMIP version must be a KMIPVersion enumeration")
        self.proxy.kmip_version = value
def _set_cluster(self, v, load=False):
"""
Setter method for cluster, mapped from YANG variable /mgmt_cluster/cluster (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_cluster is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cluster() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=cluster.cluster, is_container='container', presence=False, yang_name="cluster", rest_name="cluster", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cluster Management virtual IP Configuration', u'cli-incomplete-no': None}}, namespace='http://brocade.com/ns/brocade-cluster', defining_module='brocade-cluster', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cluster must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=cluster.cluster, is_container='container', presence=False, yang_name="cluster", rest_name="cluster", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cluster Management virtual IP Configuration', u'cli-incomplete-no': None}}, namespace='http://brocade.com/ns/brocade-cluster', defining_module='brocade-cluster', yang_type='container', is_config=True)""",
})
self.__cluster = t
if hasattr(self, '_set'):
self._set() | Setter method for cluster, mapped from YANG variable /mgmt_cluster/cluster (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_cluster is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cluster() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for cluster, mapped from YANG variable /mgmt_cluster/cluster (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_cluster is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cluster() directly.
### Response:
def _set_cluster(self, v, load=False):
  """
  Setter method for cluster, mapped from YANG variable /mgmt_cluster/cluster (container)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_cluster is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_cluster() directly.
  """
  # NOTE: auto-generated from a YANG model -- prefer fixing the generator
  # over hand-editing the long YANGDynClass argument strings below.
  if hasattr(v, "_utype"):
    # Coerce union-typed values into their concrete YANG type first.
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=cluster.cluster, is_container='container', presence=False, yang_name="cluster", rest_name="cluster", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cluster Management virtual IP Configuration', u'cli-incomplete-no': None}}, namespace='http://brocade.com/ns/brocade-cluster', defining_module='brocade-cluster', yang_type='container', is_config=True)
  except (TypeError, ValueError):
    # Re-raise with a structured error payload describing the expected type.
    raise ValueError({
      'error-string': """cluster must be of a type compatible with container""",
      'defined-type': "container",
      'generated-type': """YANGDynClass(base=cluster.cluster, is_container='container', presence=False, yang_name="cluster", rest_name="cluster", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cluster Management virtual IP Configuration', u'cli-incomplete-no': None}}, namespace='http://brocade.com/ns/brocade-cluster', defining_module='brocade-cluster', yang_type='container', is_config=True)""",
    })
  self.__cluster = t
  # Notify the parent object (if it supports it) that a child changed.
  if hasattr(self, '_set'):
    self._set()
def window(self, time_interval=None, force_calculation=False):
"""
Gets a view on this stream for the time interval given
:param time_interval: either a TimeInterval object or (start, end) tuple of type str or datetime
:param force_calculation: Whether we should force calculation for this stream view if data does not exist
:type time_interval: None | Iterable | TimeInterval
:type force_calculation: bool
:return: a stream view object
"""
if not time_interval:
if self.calculated_intervals:
time_interval = self.calculated_intervals[-1]
else:
raise ValueError("No calculations have been performed and no time interval was provided")
elif isinstance(time_interval, TimeInterval):
time_interval = TimeInterval(time_interval.start, time_interval.end)
elif isinstance(time_interval, Iterable):
time_interval = parse_time_tuple(*time_interval)
if isinstance(time_interval, RelativeTimeInterval):
raise NotImplementedError
elif isinstance(time_interval, RelativeTimeInterval):
raise NotImplementedError
else:
raise TypeError("Expected TimeInterval or (start, end) tuple of type str or datetime, got {}"
.format(type(time_interval)))
return StreamView(stream=self, time_interval=time_interval, force_calculation=force_calculation) | Gets a view on this stream for the time interval given
:param time_interval: either a TimeInterval object or (start, end) tuple of type str or datetime
:param force_calculation: Whether we should force calculation for this stream view if data does not exist
:type time_interval: None | Iterable | TimeInterval
:type force_calculation: bool
:return: a stream view object | Below is the instruction that describes the task:
### Input:
Gets a view on this stream for the time interval given
:param time_interval: either a TimeInterval object or (start, end) tuple of type str or datetime
:param force_calculation: Whether we should force calculation for this stream view if data does not exist
:type time_interval: None | Iterable | TimeInterval
:type force_calculation: bool
:return: a stream view object
### Response:
def window(self, time_interval=None, force_calculation=False):
    """
    Gets a view on this stream for the time interval given.

    :param time_interval: either a TimeInterval object or (start, end) tuple of type str or datetime
    :param force_calculation: Whether we should force calculation for this stream view if data does not exist
    :type time_interval: None | Iterable | TimeInterval
    :type force_calculation: bool
    :return: a stream view object
    :raises ValueError: if no interval was given and nothing has been calculated yet
    :raises NotImplementedError: for relative time intervals (not yet supported)
    :raises TypeError: for any other argument type
    """
    if not time_interval:
        if self.calculated_intervals:
            # Default to the most recently calculated interval
            time_interval = self.calculated_intervals[-1]
        else:
            raise ValueError("No calculations have been performed and no time interval was provided")
    elif isinstance(time_interval, RelativeTimeInterval):
        # BUG FIX: this check must precede the TimeInterval check. In the
        # original it came after, so a RelativeTimeInterval (a TimeInterval
        # subclass) was silently re-wrapped as an absolute interval and the
        # branch below was unreachable.
        raise NotImplementedError
    elif isinstance(time_interval, TimeInterval):
        # Copy so we never alias (or mutate) the caller's interval object
        time_interval = TimeInterval(time_interval.start, time_interval.end)
    elif isinstance(time_interval, Iterable):
        time_interval = parse_time_tuple(*time_interval)
        if isinstance(time_interval, RelativeTimeInterval):
            raise NotImplementedError
    else:
        raise TypeError("Expected TimeInterval or (start, end) tuple of type str or datetime, got {}"
                        .format(type(time_interval)))
    return StreamView(stream=self, time_interval=time_interval, force_calculation=force_calculation)
def decrypt(self, encrypted_number):
"""Return the decrypted & decoded plaintext of *encrypted_number*.
Args:
encrypted_number (EncryptedNumber): encrypted against a known public
key, i.e., one for which the private key is on this keyring.
Returns:
the int or float that *encrypted_number* was holding. N.B. if
the number returned is an integer, it will not be of type
float.
Raises:
KeyError: If the keyring does not hold the private key that
decrypts *encrypted_number*.
"""
relevant_private_key = self.__keyring[encrypted_number.public_key]
return relevant_private_key.decrypt(encrypted_number) | Return the decrypted & decoded plaintext of *encrypted_number*.
Args:
encrypted_number (EncryptedNumber): encrypted against a known public
key, i.e., one for which the private key is on this keyring.
Returns:
the int or float that *encrypted_number* was holding. N.B. if
the number returned is an integer, it will not be of type
float.
Raises:
KeyError: If the keyring does not hold the private key that
decrypts *encrypted_number*. | Below is the the instruction that describes the task:
### Input:
Return the decrypted & decoded plaintext of *encrypted_number*.
Args:
encrypted_number (EncryptedNumber): encrypted against a known public
key, i.e., one for which the private key is on this keyring.
Returns:
the int or float that *encrypted_number* was holding. N.B. if
the number returned is an integer, it will not be of type
float.
Raises:
KeyError: If the keyring does not hold the private key that
decrypts *encrypted_number*.
### Response:
def decrypt(self, encrypted_number):
    """Return the decrypted & decoded plaintext of *encrypted_number*.

    Args:
        encrypted_number (EncryptedNumber): encrypted against a known public
            key, i.e., one for which the private key is on this keyring.

    Returns:
        the int or float that *encrypted_number* was holding. N.B. if
        the number returned is an integer, it will not be of type float.

    Raises:
        KeyError: If the keyring does not hold the private key that
            decrypts *encrypted_number*.
    """
    # Look up the private key paired with the number's public key; a
    # missing entry deliberately surfaces to the caller as KeyError.
    private_key = self.__keyring[encrypted_number.public_key]
    return private_key.decrypt(encrypted_number)
def findall(self, title=None):
"""Fetch and return a list of spreadsheets with the given title.
Args:
title(str): title/name of the spreadsheets to return, or ``None`` for all
Returns:
list: list of new SpreadSheet instances (possibly empty)
"""
if title is None:
return list(self)
files = backend.iterfiles(self._drive, name=title)
return [self[id] for id, _ in files] | Fetch and return a list of spreadsheets with the given title.
Args:
title(str): title/name of the spreadsheets to return, or ``None`` for all
Returns:
list: list of new SpreadSheet instances (possibly empty) | Below is the the instruction that describes the task:
### Input:
Fetch and return a list of spreadsheets with the given title.
Args:
title(str): title/name of the spreadsheets to return, or ``None`` for all
Returns:
list: list of new SpreadSheet instances (possibly empty)
### Response:
def findall(self, title=None):
    """Fetch and return a list of spreadsheets with the given title.

    Args:
        title(str): title/name of the spreadsheets to return, or ``None`` for all

    Returns:
        list: list of new SpreadSheet instances (possibly empty)
    """
    if title is None:
        # No filter requested: materialise every known spreadsheet
        return [sheet for sheet in self]
    # Ask the Drive backend for files matching the title, then resolve ids
    matches = backend.iterfiles(self._drive, name=title)
    return [self[file_id] for file_id, _ in matches]
def find(model, rid):
""" Find a model from the store by resource id """
validate_rid(model, rid)
rid_field = model.rid_field
model = goldman.sess.store.find(model.RTYPE, rid_field, rid)
if not model:
abort(exceptions.DocumentNotFound)
return model | Find a model from the store by resource id | Below is the the instruction that describes the task:
### Input:
Find a model from the store by resource id
### Response:
def find(model, rid):
    """ Find a model from the store by resource id """
    validate_rid(model, rid)
    # Look the record up by its resource-id field; a falsy result means
    # no such document exists and the request is aborted with a 404-style
    # DocumentNotFound error.
    found = goldman.sess.store.find(model.RTYPE, model.rid_field, rid)
    if not found:
        abort(exceptions.DocumentNotFound)
    return found
def progress_bar(iteration,
total,
prefix=None,
suffix=None,
decs=1,
length=100):
"""Creates a console progress bar.
This should be called in a loop to create a progress bar.
See `StackOverflow <http://stackoverflow.com/questions/3173320/>`__.
Args:
iteration (int): current iteration
total (int): total iterations
prefix (str): prefix string
suffix (str): suffix string
decs (int): positive number of decimals in percent complete
length (int): character length of the bar
Returns:
``None``
Note:
This function assumes that nothing else is printed to the console in the
interim.
"""
if prefix is None:
prefix = ''
if suffix is None:
suffix = ''
format_str = '{0:.' + str(decs) + 'f}'
percents = format_str.format(100 * (iteration / float(total)))
filled_length = int(round(length * iteration / float(total)))
bar = '█' * filled_length + '-' * (length - filled_length)
prefix, suffix = prefix.strip(), suffix.strip()
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
sys.stdout.flush()
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
return None | Creates a console progress bar.
This should be called in a loop to create a progress bar.
See `StackOverflow <http://stackoverflow.com/questions/3173320/>`__.
Args:
iteration (int): current iteration
total (int): total iterations
prefix (str): prefix string
suffix (str): suffix string
decs (int): positive number of decimals in percent complete
length (int): character length of the bar
Returns:
``None``
Note:
This function assumes that nothing else is printed to the console in the
interim. | Below is the the instruction that describes the task:
### Input:
Creates a console progress bar.
This should be called in a loop to create a progress bar.
See `StackOverflow <http://stackoverflow.com/questions/3173320/>`__.
Args:
iteration (int): current iteration
total (int): total iterations
prefix (str): prefix string
suffix (str): suffix string
decs (int): positive number of decimals in percent complete
length (int): character length of the bar
Returns:
``None``
Note:
This function assumes that nothing else is printed to the console in the
interim.
### Response:
def progress_bar(iteration, total, prefix=None, suffix=None, decs=1, length=100):
    """Creates a console progress bar.

    This should be called in a loop to create a progress bar.
    See `StackOverflow <http://stackoverflow.com/questions/3173320/>`__.

    Args:
        iteration (int): current iteration
        total (int): total iterations
        prefix (str): prefix string
        suffix (str): suffix string
        decs (int): positive number of decimals in percent complete
        length (int): character length of the bar

    Returns:
        ``None``

    Note:
        This function assumes that nothing else is printed to the console in
        the interim.
    """
    prefix = '' if prefix is None else prefix
    suffix = '' if suffix is None else suffix
    fraction = iteration / float(total)
    percents = ('{0:.' + str(decs) + 'f}').format(100 * fraction)
    filled = int(round(length * fraction))
    bar = '█' * filled + '-' * (length - filled)
    # '\r' rewinds to the start of the line so the bar redraws in place
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix.strip(), bar, percents, '%', suffix.strip()))
    sys.stdout.flush()
    if iteration == total:
        # Finished: move past the bar so subsequent output starts cleanly
        sys.stdout.write('\n')
        sys.stdout.flush()
    return None
def has_pfn(self, url, site=None):
""" Wrapper of the pegasus hasPFN function, that allows it to be called
outside of specific pegasus functions.
"""
curr_pfn = dax.PFN(url, site)
return self.hasPFN(curr_pfn) | Wrapper of the pegasus hasPFN function, that allows it to be called
outside of specific pegasus functions. | Below is the the instruction that describes the task:
### Input:
Wrapper of the pegasus hasPFN function, that allows it to be called
outside of specific pegasus functions.
### Response:
def has_pfn(self, url, site=None):
    """ Wrapper of the pegasus hasPFN function, that allows it to be called
    outside of specific pegasus functions.
    """
    # Build the Pegasus PFN for this URL/site pair and delegate the check
    return self.hasPFN(dax.PFN(url, site))
def download(self, path, file):
"""Download remote file to disk."""
resp = self._sendRequest("GET", path)
if resp.status_code == 200:
with open(file, "wb") as f:
f.write(resp.content)
else:
raise YaDiskException(resp.status_code, resp.content) | Download remote file to disk. | Below is the the instruction that describes the task:
### Input:
Download remote file to disk.
### Response:
def download(self, path, file):
    """Download remote file to disk."""
    response = self._sendRequest("GET", path)
    if response.status_code != 200:
        # Anything other than 200 OK is treated as a hard failure
        raise YaDiskException(response.status_code, response.content)
    # Write the whole body out in binary mode; the context manager
    # guarantees the handle is closed even if the write fails.
    with open(file, "wb") as handle:
        handle.write(response.content)
def service(flavour):
r"""
Mark a class as implementing a Service
Each Service class must have a ``run`` method, which does not take any arguments.
This method is :py:meth:`~.ServiceRunner.adopt`\ ed after the daemon starts, unless
* the Service has been garbage collected, or
* the ServiceUnit has been :py:meth:`~.ServiceUnit.cancel`\ ed.
For each service instance, its :py:class:`~.ServiceUnit` is available at ``service_instance.__service_unit__``.
"""
def service_unit_decorator(raw_cls):
__new__ = raw_cls.__new__
def __new_service__(cls, *args, **kwargs):
if __new__ is object.__new__:
self = __new__(cls)
else:
self = __new__(cls, *args, **kwargs)
service_unit = ServiceUnit(self, flavour)
self.__service_unit__ = service_unit
return self
raw_cls.__new__ = __new_service__
if raw_cls.run.__doc__ is None:
raw_cls.run.__doc__ = "Service entry point"
return raw_cls
return service_unit_decorator | r"""
Mark a class as implementing a Service
Each Service class must have a ``run`` method, which does not take any arguments.
This method is :py:meth:`~.ServiceRunner.adopt`\ ed after the daemon starts, unless
* the Service has been garbage collected, or
* the ServiceUnit has been :py:meth:`~.ServiceUnit.cancel`\ ed.
For each service instance, its :py:class:`~.ServiceUnit` is available at ``service_instance.__service_unit__``. | Below is the the instruction that describes the task:
### Input:
r"""
Mark a class as implementing a Service
Each Service class must have a ``run`` method, which does not take any arguments.
This method is :py:meth:`~.ServiceRunner.adopt`\ ed after the daemon starts, unless
* the Service has been garbage collected, or
* the ServiceUnit has been :py:meth:`~.ServiceUnit.cancel`\ ed.
For each service instance, its :py:class:`~.ServiceUnit` is available at ``service_instance.__service_unit__``.
### Response:
def service(flavour):
    r"""
    Mark a class as implementing a Service
    Each Service class must have a ``run`` method, which does not take any arguments.
    This method is :py:meth:`~.ServiceRunner.adopt`\ ed after the daemon starts, unless
    * the Service has been garbage collected, or
    * the ServiceUnit has been :py:meth:`~.ServiceUnit.cancel`\ ed.
    For each service instance, its :py:class:`~.ServiceUnit` is available at ``service_instance.__service_unit__``.
    """
    def service_unit_decorator(raw_cls):
        # Keep a reference to the original allocator so we can delegate to it
        __new__ = raw_cls.__new__
        def __new_service__(cls, *args, **kwargs):
            # object.__new__ rejects extra arguments, so only forward the
            # constructor arguments when the class defines its own __new__
            if __new__ is object.__new__:
                self = __new__(cls)
            else:
                self = __new__(cls, *args, **kwargs)
            # Attach the ServiceUnit that tracks/adopts this instance
            service_unit = ServiceUnit(self, flavour)
            self.__service_unit__ = service_unit
            return self
        # Replace the allocator so every instantiation registers a ServiceUnit
        raw_cls.__new__ = __new_service__
        if raw_cls.run.__doc__ is None:
            raw_cls.run.__doc__ = "Service entry point"
        return raw_cls
    return service_unit_decorator
def load_terminfo(terminal_name=None, fallback='vt100'):
"""
If the environment variable TERM is unset try with `fallback` if not empty.
vt100 is a popular terminal supporting ANSI X3.64.
"""
terminal_name = os.getenv('TERM')
if not terminal_name:
if not fallback:
raise TerminfoError('Environment variable TERM is unset and no fallback was requested')
else:
terminal_name = fallback
if os.getenv('TERMINFO'):
# from man terminfo(5):
# if the environment variable TERMINFO is set,
# only that directory is searched
terminfo_locations = [os.getenv('TERMINFO')]
else:
terminfo_locations = [] # from most to least important
if os.getenv('TERMINFO_DIRS'):
for i in os.getenv('TERMINFO_DIRS').split(':'):
# from man terminfo(5)
# An empty directory name is interpreted as /usr/share/terminfo.
terminfo_locations.append(i or '/usr/share/terminfo')
terminfo_locations += [
os.path.expanduser('~/.terminfo'),
'/etc/terminfo',
'/usr/local/ncurses/share/terminfo',
'/lib/terminfo',
'/usr/share/terminfo'
]
# remove duplicates preserving order
terminfo_locations = list(OrderedDict.fromkeys(terminfo_locations))
terminfo_path = None
for dirpath in terminfo_locations:
path = os.path.join(dirpath, terminal_name[0], terminal_name)
if os.path.exists(path):
terminfo_path = path
break
if not path:
raise TerminfoError("Couldn't find a terminfo file for terminal '%s'" % terminal_name)
from terminfo_index import BOOLEAN_CAPABILITIES, NUMBER_CAPABILITIES, STRING_CAPABILITIES
data = open(terminfo_path, 'rb').read()
# header (see man term(5), STORAGE FORMAT)
header = struct.unpack('<hhhhhh', data[:12]) # 2 bytes == 1 short integer
magic_number = header[0] # the magic number (octal 0432)
size_names = header[1] # the size, in bytes, of the names section
size_booleans = header[2] # the number of bytes in the boolean section
num_numbers = header[3] # the number of short integers in the numbers section
num_offsets = header[4] # the number of offsets (short integers) in the strings section
size_strings = header[5] # the size, in bytes, of the string table
if magic_number != 0o432:
raise TerminfoError('Bad magic number')
# sections indexes
idx_section_names = 12
idx_section_booleans = idx_section_names + size_names
idx_section_numbers = idx_section_booleans + size_booleans
if idx_section_numbers % 2 != 0:
idx_section_numbers += 1 # must start on an even byte
idx_section_strings = idx_section_numbers + 2 * num_numbers
idx_section_string_table = idx_section_strings + 2 * num_offsets
# terminal names
terminal_names = data[idx_section_names:idx_section_booleans].decode('ascii')
terminal_names = terminal_names[:-1].split('|') # remove ASCII NUL and split
terminfo = Terminfo(terminal_names[0], terminal_names[1:])
# booleans
for i, idx in enumerate(range(idx_section_booleans, idx_section_booleans + size_booleans)):
cap = BooleanCapability(*BOOLEAN_CAPABILITIES[i], value=data[i] == b'\x00')
terminfo.booleans[cap.variable] = cap
# numbers
numbers = struct.unpack('<'+'h' * num_numbers, data[idx_section_numbers:idx_section_strings])
for i,strnum in enumerate(numbers):
cap = NumberCapability(*NUMBER_CAPABILITIES[i], value=strnum)
terminfo.numbers[cap.variable] = cap
# strings
offsets = struct.unpack('<'+'h' * num_offsets, data[idx_section_strings:idx_section_string_table])
idx = 0
for offset in offsets:
k = 0
string = []
while True and offset != -1:
char = data[idx_section_string_table + offset + k:idx_section_string_table + offset + k + 1]
if char == b'\x00':
break
string.append(char.decode('iso-8859-1'))
k += 1
string = u''.join(string)
cap = StringCapability(*STRING_CAPABILITIES[idx], value=string)
terminfo.strings[cap.variable] = cap
idx += 1
terminfo._reset_index()
return terminfo | If the environment variable TERM is unset try with `fallback` if not empty.
vt100 is a popular terminal supporting ANSI X3.64. | Below is the the instruction that describes the task:
### Input:
If the environment variable TERM is unset try with `fallback` if not empty.
vt100 is a popular terminal supporting ANSI X3.64.
### Response:
def load_terminfo(terminal_name=None, fallback='vt100'):
    """
    Load and parse the compiled terminfo entry for *terminal_name*.

    If *terminal_name* is None the environment variable TERM is used; if TERM
    is unset too, try *fallback* if not empty. vt100 is a popular terminal
    supporting ANSI X3.64.

    :raises TerminfoError: if no terminal name can be determined, no terminfo
        file is found, or the file has a bad magic number.
    """
    # BUG FIX: the original unconditionally overwrote the terminal_name
    # argument with $TERM; honour an explicitly passed name first.
    if not terminal_name:
        terminal_name = os.getenv('TERM')
    if not terminal_name:
        if not fallback:
            raise TerminfoError('Environment variable TERM is unset and no fallback was requested')
        terminal_name = fallback
    if os.getenv('TERMINFO'):
        # from man terminfo(5):
        # if the environment variable TERMINFO is set,
        # only that directory is searched
        terminfo_locations = [os.getenv('TERMINFO')]
    else:
        terminfo_locations = []  # from most to least important
        if os.getenv('TERMINFO_DIRS'):
            for i in os.getenv('TERMINFO_DIRS').split(':'):
                # from man terminfo(5)
                # An empty directory name is interpreted as /usr/share/terminfo.
                terminfo_locations.append(i or '/usr/share/terminfo')
        terminfo_locations += [
            os.path.expanduser('~/.terminfo'),
            '/etc/terminfo',
            '/usr/local/ncurses/share/terminfo',
            '/lib/terminfo',
            '/usr/share/terminfo'
        ]
    # remove duplicates preserving order
    terminfo_locations = list(OrderedDict.fromkeys(terminfo_locations))
    terminfo_path = None
    for dirpath in terminfo_locations:
        # entries live under a single-letter subdirectory, e.g. v/vt100
        path = os.path.join(dirpath, terminal_name[0], terminal_name)
        if os.path.exists(path):
            terminfo_path = path
            break
    # BUG FIX: the original tested `path` (always truthy after the loop),
    # so a missing entry crashed later in open() instead of raising here.
    if not terminfo_path:
        raise TerminfoError("Couldn't find a terminfo file for terminal '%s'" % terminal_name)
    from terminfo_index import BOOLEAN_CAPABILITIES, NUMBER_CAPABILITIES, STRING_CAPABILITIES
    with open(terminfo_path, 'rb') as f:
        data = f.read()
    # header (see man term(5), STORAGE FORMAT)
    header = struct.unpack('<hhhhhh', data[:12])  # 2 bytes == 1 short integer
    magic_number = header[0]   # the magic number (octal 0432)
    size_names = header[1]     # the size, in bytes, of the names section
    size_booleans = header[2]  # the number of bytes in the boolean section
    num_numbers = header[3]    # the number of short integers in the numbers section
    num_offsets = header[4]    # the number of offsets (short integers) in the strings section
    size_strings = header[5]   # the size, in bytes, of the string table
    if magic_number != 0o432:
        raise TerminfoError('Bad magic number')
    # sections indexes
    idx_section_names = 12
    idx_section_booleans = idx_section_names + size_names
    idx_section_numbers = idx_section_booleans + size_booleans
    if idx_section_numbers % 2 != 0:
        idx_section_numbers += 1  # must start on an even byte
    idx_section_strings = idx_section_numbers + 2 * num_numbers
    idx_section_string_table = idx_section_strings + 2 * num_offsets
    # terminal names
    terminal_names = data[idx_section_names:idx_section_booleans].decode('ascii')
    terminal_names = terminal_names[:-1].split('|')  # remove ASCII NUL and split
    terminfo = Terminfo(terminal_names[0], terminal_names[1:])
    # booleans: one byte per capability; per term(5) the byte is 1 when the
    # terminal has the capability.
    # BUG FIX: the original read `data[i]` (i.e. the file header) instead of
    # the boolean section at `data[idx]`, and compared an int to b'\x00'
    # (always False on Python 3), so every boolean came out False.
    for i, idx in enumerate(range(idx_section_booleans, idx_section_booleans + size_booleans)):
        cap = BooleanCapability(*BOOLEAN_CAPABILITIES[i], value=data[idx:idx + 1] == b'\x01')
        terminfo.booleans[cap.variable] = cap
    # numbers
    numbers = struct.unpack('<' + 'h' * num_numbers, data[idx_section_numbers:idx_section_strings])
    for i, strnum in enumerate(numbers):
        cap = NumberCapability(*NUMBER_CAPABILITIES[i], value=strnum)
        terminfo.numbers[cap.variable] = cap
    # strings: each offset points into the string table; -1 marks a missing
    # capability, and each string is NUL-terminated
    offsets = struct.unpack('<' + 'h' * num_offsets, data[idx_section_strings:idx_section_string_table])
    for idx, offset in enumerate(offsets):
        chars = []
        k = 0
        while offset != -1:
            char = data[idx_section_string_table + offset + k:idx_section_string_table + offset + k + 1]
            if char == b'\x00':
                break
            chars.append(char.decode('iso-8859-1'))
            k += 1
        string = u''.join(chars)
        cap = StringCapability(*STRING_CAPABILITIES[idx], value=string)
        terminfo.strings[cap.variable] = cap
    terminfo._reset_index()
    return terminfo
def header_size(self):
"""Total size of `file`'s header (including extended) in bytes.
The size of the header is determined from `header`. If this is not
possible (i.e., before the header has been read), 0 is returned.
If the header contains an ``'nsymbt'`` entry (size of the extra
header in bytes), its value is added to the regular header size.
"""
standard_header_size = MRC_HEADER_SIZE
try:
extra_header_size = int(self.header['nsymbt']['value'])
except KeyError:
extra_header_size = 0
return standard_header_size + extra_header_size | Total size of `file`'s header (including extended) in bytes.
The size of the header is determined from `header`. If this is not
possible (i.e., before the header has been read), 0 is returned.
If the header contains an ``'nsymbt'`` entry (size of the extra
header in bytes), its value is added to the regular header size. | Below is the the instruction that describes the task:
### Input:
Total size of `file`'s header (including extended) in bytes.
The size of the header is determined from `header`. If this is not
possible (i.e., before the header has been read), 0 is returned.
If the header contains an ``'nsymbt'`` entry (size of the extra
header in bytes), its value is added to the regular header size.
### Response:
def header_size(self):
    """Total size of `file`'s header (including extended) in bytes.

    The result is the fixed MRC header size plus, when the header has been
    read and contains an ``'nsymbt'`` entry (size of the extra header in
    bytes), that entry's value.
    """
    try:
        extended_size = int(self.header['nsymbt']['value'])
    except KeyError:
        # Header not read yet, or no extended-header size recorded
        extended_size = 0
    return MRC_HEADER_SIZE + extended_size
def _which(command):
"""protected"""
if isinstance(command, str):
command = [command]
command = list(command)
name = command[0]
args = command[1:]
if _is_windows():
pathext = _decode_if_not_text(os.environ.get("PATHEXT", ""))
path = _find_executable(name, pathext.split(os.pathsep))
else:
path = _find_executable(name)
if not path:
return None
return [path] + args | protected | Below is the the instruction that describes the task:
### Input:
protected
### Response:
def _which(command):
    """protected"""
    # Normalise: a bare string becomes a one-element argv list; anything
    # else is copied so the caller's sequence is never mutated.
    argv = [command] if isinstance(command, str) else list(command)
    name, args = argv[0], argv[1:]
    if not _is_windows():
        path = _find_executable(name)
    else:
        # On Windows, executable extensions come from PATHEXT
        pathext = _decode_if_not_text(os.environ.get("PATHEXT", ""))
        path = _find_executable(name, pathext.split(os.pathsep))
    return [path] + args if path else None
def _to_string(val):
"""Convert to text."""
if isinstance(val, binary_type):
return val.decode('utf-8')
assert isinstance(val, text_type)
return val | Convert to text. | Below is the the instruction that describes the task:
### Input:
Convert to text.
### Response:
def _to_string(val):
    """Convert to text."""
    if isinstance(val, text_type):
        return val
    # Anything that is not already text must be UTF-8 encoded bytes
    assert isinstance(val, binary_type)
    return val.decode('utf-8')
def root_adb(self):
"""Change adb to root mode for this device if allowed.
If executed on a production build, adb will not be switched to root
mode per security restrictions.
"""
self.adb.root()
self.adb.wait_for_device(
timeout=DEFAULT_TIMEOUT_BOOT_COMPLETION_SECOND) | Change adb to root mode for this device if allowed.
If executed on a production build, adb will not be switched to root
mode per security restrictions. | Below is the the instruction that describes the task:
### Input:
Change adb to root mode for this device if allowed.
If executed on a production build, adb will not be switched to root
mode per security restrictions.
### Response:
def root_adb(self):
    """Change adb to root mode for this device if allowed.
    If executed on a production build, adb will not be switched to root
    mode per security restrictions.
    """
    self.adb.root()
    # Restarting adbd as root briefly drops the connection; block until the
    # device is reachable again, bounded by the boot-completion timeout.
    self.adb.wait_for_device(
        timeout=DEFAULT_TIMEOUT_BOOT_COMPLETION_SECOND)
def libvlc_media_list_new(p_instance):
'''Create an empty media list.
@param p_instance: libvlc instance.
@return: empty media list, or NULL on error.
'''
f = _Cfunctions.get('libvlc_media_list_new', None) or \
_Cfunction('libvlc_media_list_new', ((1,),), class_result(MediaList),
ctypes.c_void_p, Instance)
return f(p_instance) | Create an empty media list.
@param p_instance: libvlc instance.
@return: empty media list, or NULL on error. | Below is the the instruction that describes the task:
### Input:
Create an empty media list.
@param p_instance: libvlc instance.
@return: empty media list, or NULL on error.
### Response:
def libvlc_media_list_new(p_instance):
    '''Create an empty media list.
    @param p_instance: libvlc instance.
    @return: empty media list, or NULL on error.
    '''
    # Resolve the native entry point lazily: reuse a previously bound ctypes
    # function if cached, otherwise bind it now; the result pointer is
    # wrapped into a MediaList instance by class_result.
    f = _Cfunctions.get('libvlc_media_list_new', None) or \
        _Cfunction('libvlc_media_list_new', ((1,),), class_result(MediaList),
                   ctypes.c_void_p, Instance)
    return f(p_instance)
def tag_text(self, text, **kwargs):
"""Annotates `analysis` entries in `corpus` with a list of lemmas` synsets and queried WordNet data in a 'wordnet' entry.
Note
----
Annotates every `analysis` entry with a `wordnet`:{`synsets`:[..]}.
Parameters
----------
text: estnltk.text.Text
Representation of a corpus in a disassembled form for automatic text analysis with word-level `analysis` entry.
E.g. corpus disassembled into paragraphs, sentences, words ({'paragraphs':[{'sentences':[{'words':[{'analysis':{...}},..]},..]},..]}).
pos : boolean, optional
If True, annotates each synset with a correspnding `pos` (part-of-speech) tag.
variants : boolean, optional
If True, annotates each synset with a list of all its variants' (lemmas') literals.
var_sense : boolean, optional
If True and `variants` is True, annotates each variant/lemma with its sense number.
var_definition : boolean, optional
If True and `variants` is True, annotates each variant/lemma with its definition. Definitions often missing in WordNet.
var_examples : boolean, optional
If True and `variants` is True, annotates each variant/lemma with a list of its examples. Examples often missing in WordNet.
relations : list of str, optional
Holds interested relations. Legal relations are as follows:
`antonym`, `be_in_state`, `belongs_to_class`, `causes`, `fuzzynym`, `has_holo_location`, `has_holo_madeof`, `has_holo_member`,
`has_holo_part`, `has_holo_portion`, `has_holonym`, `has_hyperonym`, `has_hyponym`, `has_instance`, `has_mero_location`,
`has_mero_madeof`, `has_mero_member`, `has_mero_part`, `has_mero_portion`, `has_meronym`, `has_subevent`, `has_xpos_hyperonym`,
`has_xpos_hyponym`, `involved`, `involved_agent`, `involved_instrument`, `involved_location`, `involved_patient`,
`involved_target_direction`, `is_caused_by`, `is_subevent_of`, `near_antonym`, `near_synonym`, `role`, `role_agent`, `role_instrument`,
`role_location`, `role_patient`, `role_target_direction`, `state_of`, `xpos_fuzzynym`, `xpos_near_antonym`, `xpos_near_synonym`.
Annotates each synset with related synsets' indices with respect to queried relations.
Returns
-------
estnltk.text.Text
In-place annotated `text`.
"""
for analysis_match in text.analysis:
for candidate in analysis_match:
if candidate['partofspeech'] in PYVABAMORF_TO_WORDNET_POS_MAP:
# Wordnet contains data about the given lemma and pos combination - will annotate.
wordnet_obj = {}
tag_synsets(wordnet_obj, candidate, **kwargs)
return text | Annotates `analysis` entries in `corpus` with a list of lemmas` synsets and queried WordNet data in a 'wordnet' entry.
Note
----
Annotates every `analysis` entry with a `wordnet`:{`synsets`:[..]}.
Parameters
----------
text: estnltk.text.Text
Representation of a corpus in a disassembled form for automatic text analysis with word-level `analysis` entry.
E.g. corpus disassembled into paragraphs, sentences, words ({'paragraphs':[{'sentences':[{'words':[{'analysis':{...}},..]},..]},..]}).
pos : boolean, optional
If True, annotates each synset with a correspnding `pos` (part-of-speech) tag.
variants : boolean, optional
If True, annotates each synset with a list of all its variants' (lemmas') literals.
var_sense : boolean, optional
If True and `variants` is True, annotates each variant/lemma with its sense number.
var_definition : boolean, optional
If True and `variants` is True, annotates each variant/lemma with its definition. Definitions often missing in WordNet.
var_examples : boolean, optional
If True and `variants` is True, annotates each variant/lemma with a list of its examples. Examples often missing in WordNet.
relations : list of str, optional
Holds interested relations. Legal relations are as follows:
`antonym`, `be_in_state`, `belongs_to_class`, `causes`, `fuzzynym`, `has_holo_location`, `has_holo_madeof`, `has_holo_member`,
`has_holo_part`, `has_holo_portion`, `has_holonym`, `has_hyperonym`, `has_hyponym`, `has_instance`, `has_mero_location`,
`has_mero_madeof`, `has_mero_member`, `has_mero_part`, `has_mero_portion`, `has_meronym`, `has_subevent`, `has_xpos_hyperonym`,
`has_xpos_hyponym`, `involved`, `involved_agent`, `involved_instrument`, `involved_location`, `involved_patient`,
`involved_target_direction`, `is_caused_by`, `is_subevent_of`, `near_antonym`, `near_synonym`, `role`, `role_agent`, `role_instrument`,
`role_location`, `role_patient`, `role_target_direction`, `state_of`, `xpos_fuzzynym`, `xpos_near_antonym`, `xpos_near_synonym`.
Annotates each synset with related synsets' indices with respect to queried relations.
Returns
-------
estnltk.text.Text
In-place annotated `text`. | Below is the the instruction that describes the task:
### Input:
Annotates `analysis` entries in `corpus` with a list of lemmas` synsets and queried WordNet data in a 'wordnet' entry.
Note
----
Annotates every `analysis` entry with a `wordnet`:{`synsets`:[..]}.
Parameters
----------
text: estnltk.text.Text
Representation of a corpus in a disassembled form for automatic text analysis with word-level `analysis` entry.
E.g. corpus disassembled into paragraphs, sentences, words ({'paragraphs':[{'sentences':[{'words':[{'analysis':{...}},..]},..]},..]}).
pos : boolean, optional
If True, annotates each synset with a correspnding `pos` (part-of-speech) tag.
variants : boolean, optional
If True, annotates each synset with a list of all its variants' (lemmas') literals.
var_sense : boolean, optional
If True and `variants` is True, annotates each variant/lemma with its sense number.
var_definition : boolean, optional
If True and `variants` is True, annotates each variant/lemma with its definition. Definitions often missing in WordNet.
var_examples : boolean, optional
If True and `variants` is True, annotates each variant/lemma with a list of its examples. Examples often missing in WordNet.
relations : list of str, optional
Holds interested relations. Legal relations are as follows:
`antonym`, `be_in_state`, `belongs_to_class`, `causes`, `fuzzynym`, `has_holo_location`, `has_holo_madeof`, `has_holo_member`,
`has_holo_part`, `has_holo_portion`, `has_holonym`, `has_hyperonym`, `has_hyponym`, `has_instance`, `has_mero_location`,
`has_mero_madeof`, `has_mero_member`, `has_mero_part`, `has_mero_portion`, `has_meronym`, `has_subevent`, `has_xpos_hyperonym`,
`has_xpos_hyponym`, `involved`, `involved_agent`, `involved_instrument`, `involved_location`, `involved_patient`,
`involved_target_direction`, `is_caused_by`, `is_subevent_of`, `near_antonym`, `near_synonym`, `role`, `role_agent`, `role_instrument`,
`role_location`, `role_patient`, `role_target_direction`, `state_of`, `xpos_fuzzynym`, `xpos_near_antonym`, `xpos_near_synonym`.
Annotates each synset with related synsets' indices with respect to queried relations.
Returns
-------
estnltk.text.Text
In-place annotated `text`.
### Response:
def tag_text(self, text, **kwargs):
    """Annotates `analysis` entries in `corpus` with a list of lemmas' synsets and queried WordNet data in a 'wordnet' entry.
    Note
    ----
    Annotates every `analysis` entry with a `wordnet`:{`synsets`:[..]}.
    Parameters
    ----------
    text: estnltk.text.Text
        Representation of a corpus in a disassembled form for automatic text analysis with word-level `analysis` entry.
        E.g. corpus disassembled into paragraphs, sentences, words ({'paragraphs':[{'sentences':[{'words':[{'analysis':{...}},..]},..]},..]}).
    pos : boolean, optional
        If True, annotates each synset with a corresponding `pos` (part-of-speech) tag.
    variants : boolean, optional
        If True, annotates each synset with a list of all its variants' (lemmas') literals.
    var_sense : boolean, optional
        If True and `variants` is True, annotates each variant/lemma with its sense number.
    var_definition : boolean, optional
        If True and `variants` is True, annotates each variant/lemma with its definition. Definitions often missing in WordNet.
    var_examples : boolean, optional
        If True and `variants` is True, annotates each variant/lemma with a list of its examples. Examples often missing in WordNet.
    relations : list of str, optional
        Holds interested relations. Legal relations are as follows:
        `antonym`, `be_in_state`, `belongs_to_class`, `causes`, `fuzzynym`, `has_holo_location`, `has_holo_madeof`, `has_holo_member`,
        `has_holo_part`, `has_holo_portion`, `has_holonym`, `has_hyperonym`, `has_hyponym`, `has_instance`, `has_mero_location`,
        `has_mero_madeof`, `has_mero_member`, `has_mero_part`, `has_mero_portion`, `has_meronym`, `has_subevent`, `has_xpos_hyperonym`,
        `has_xpos_hyponym`, `involved`, `involved_agent`, `involved_instrument`, `involved_location`, `involved_patient`,
        `involved_target_direction`, `is_caused_by`, `is_subevent_of`, `near_antonym`, `near_synonym`, `role`, `role_agent`, `role_instrument`,
        `role_location`, `role_patient`, `role_target_direction`, `state_of`, `xpos_fuzzynym`, `xpos_near_antonym`, `xpos_near_synonym`.
        Annotates each synset with related synsets' indices with respect to queried relations.
    Returns
    -------
    estnltk.text.Text
        In-place annotated `text`.
    """
    # Walk every word-level analysis candidate of the text.
    for analysis_match in text.analysis:
        for candidate in analysis_match:
            # Only lemma/POS combinations that WordNet covers get annotated.
            if candidate['partofspeech'] in PYVABAMORF_TO_WORDNET_POS_MAP:
                # Wordnet contains data about the given lemma and pos combination - will annotate.
                # NOTE(review): `wordnet_obj` is handed to tag_synsets and not
                # used again here -- presumably tag_synsets attaches the
                # annotation onto `candidate`; confirm in its definition.
                wordnet_obj = {}
                tag_synsets(wordnet_obj, candidate, **kwargs)
    return text |
def read_by(cls, removed=False, **kwargs):
"""
filter_by query helper that handles soft delete logic. If your query conditions require expressions, use read.
:param removed: whether to include soft-deleted rows
:param kwargs: where clause mappings to pass to filter_by
:return: row object generator
"""
if not removed:
kwargs['time_removed'] = 0
return cls.query.filter_by(**kwargs) | filter_by query helper that handles soft delete logic. If your query conditions require expressions, use read.
:param removed: whether to include soft-deleted rows
:param kwargs: where clause mappings to pass to filter_by
:return: row object generator | Below is the the instruction that describes the task:
### Input:
filter_by query helper that handles soft delete logic. If your query conditions require expressions, use read.
:param removed: whether to include soft-deleted rows
:param kwargs: where clause mappings to pass to filter_by
:return: row object generator
### Response:
def read_by(cls, removed=False, **kwargs):
    """Query rows by simple equality conditions, honouring soft deletes.

    Rows are soft-deleted when ``time_removed`` is non-zero; unless
    ``removed`` is true, only live rows (``time_removed == 0``) match.
    For conditions that need expressions rather than equality, use
    ``read`` instead.

    :param removed: whether to include soft-deleted rows
    :param kwargs: column/value pairs forwarded to ``filter_by``
    :return: query over the matching row objects
    """
    conditions = dict(kwargs)
    if not removed:
        # Restrict the query to rows that were never soft-deleted.
        conditions['time_removed'] = 0
    return cls.query.filter_by(**conditions)
def cli(config, in_file, out_file, verbose):
"""Main Interface to generate xml documents
from custom dictionaries using legal xsd files
complying with legal documents in all countires
around the world.
"""
config.out_file = out_file
config.verbose = verbose
config.in_file = in_file
config.out_file = out_file | Main Interface to generate xml documents
from custom dictionaries using legal xsd files
complying with legal documents in all countries
around the world. | Below is the the instruction that describes the task:
### Input:
Main Interface to generate xml documents
from custom dictionaries using legal xsd files
complying with legal documents in all countires
around the world.
### Response:
def cli(config, in_file, out_file, verbose):
    """Main Interface to generate xml documents
    from custom dictionaries using legal xsd files
    complying with legal documents in all countries
    around the world.

    :param config: shared configuration object populated for subcommands
    :param in_file: path of the input (dictionary) file
    :param out_file: path the generated XML document is written to
    :param verbose: whether to emit verbose output
    """
    # Record the CLI options on the shared config object.  The original
    # assigned config.out_file twice; the redundant assignment is removed.
    config.in_file = in_file
    config.out_file = out_file
    config.verbose = verbose
def cluster_commit():
'''
Commit Cluster Changes
.. versionchanged:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' riak.cluster_commit
'''
ret = {'comment': '', 'success': False}
cmd = __execute_cmd('riak-admin', 'cluster commit')
if cmd['retcode'] != 0:
ret['comment'] = cmd['stdout']
else:
ret['comment'] = cmd['stdout']
ret['success'] = True
return ret | Commit Cluster Changes
.. versionchanged:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' riak.cluster_commit | Below is the the instruction that describes the task:
### Input:
Commit Cluster Changes
.. versionchanged:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' riak.cluster_commit
### Response:
def cluster_commit():
    '''
    Commit Cluster Changes
    .. versionchanged:: 2015.8.0
    CLI Example:
    .. code-block:: bash
        salt '*' riak.cluster_commit
    '''
    cmd = __execute_cmd('riak-admin', 'cluster commit')
    # Both outcomes report stdout; only the return code decides success.
    return {
        'comment': cmd['stdout'],
        'success': cmd['retcode'] == 0,
    }
def get_subpackages_names(dir_):
"""Figures out the names of the subpackages of a package
Args:
dir_: (str) path to package directory
Source: http://stackoverflow.com/questions/832004/python-finding-all-packages-inside-a-package
"""
def is_package(d):
d = os.path.join(dir_, d)
return os.path.isdir(d) and glob.glob(os.path.join(d, '__init__.py*'))
ret = list(filter(is_package, os.listdir(dir_)))
ret.sort()
return ret | Figures out the names of the subpackages of a package
Args:
dir_: (str) path to package directory
Source: http://stackoverflow.com/questions/832004/python-finding-all-packages-inside-a-package | Below is the the instruction that describes the task:
### Input:
Figures out the names of the subpackages of a package
Args:
dir_: (str) path to package directory
Source: http://stackoverflow.com/questions/832004/python-finding-all-packages-inside-a-package
### Response:
def get_subpackages_names(dir_):
    """Return the sorted names of the subpackages inside a package directory.

    A directory counts as a subpackage when it contains an ``__init__.py``
    (or a compiled variant such as ``__init__.pyc``).

    Args:
        dir_: (str) path to package directory
    Source: http://stackoverflow.com/questions/832004/python-finding-all-packages-inside-a-package
    """
    names = []
    for entry in os.listdir(dir_):
        candidate = os.path.join(dir_, entry)
        # Any __init__.py* file marks the directory as a package.
        if os.path.isdir(candidate) and glob.glob(os.path.join(candidate, '__init__.py*')):
            names.append(entry)
    return sorted(names)
def urlencode_utf8(params):
"""
UTF-8 safe variant of urllib.urlencode.
http://stackoverflow.com/a/8152242
"""
if hasattr(params, 'items'):
params = params.items()
params = (
'='.join((
quote_plus(k.encode('utf8'), safe='/'),
quote_plus(v.encode('utf8'), safe='/')
)) for k, v in params
)
return '&'.join(params) | UTF-8 safe variant of urllib.urlencode.
http://stackoverflow.com/a/8152242 | Below is the the instruction that describes the task:
### Input:
UTF-8 safe variant of urllib.urlencode.
http://stackoverflow.com/a/8152242
### Response:
def urlencode_utf8(params):
    """
    UTF-8 safe variant of urllib.urlencode.
    http://stackoverflow.com/a/8152242

    Accepts either a mapping or an iterable of (key, value) pairs.  Keys
    and values without an ``encode`` method (e.g. ints) are coerced to
    text first, so non-string values no longer raise AttributeError.

    :param params: mapping or iterable of (key, value) pairs
    :return: '&'-joined, percent-encoded query string ('/' left unescaped)
    """
    if hasattr(params, 'items'):
        params = params.items()
    pairs = []
    for key, value in params:
        # Generalization: coerce non-text values before UTF-8 encoding.
        if not hasattr(key, 'encode'):
            key = str(key)
        if not hasattr(value, 'encode'):
            value = str(value)
        pairs.append('='.join((
            quote_plus(key.encode('utf8'), safe='/'),
            quote_plus(value.encode('utf8'), safe='/')
        )))
    return '&'.join(pairs)
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights | compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups | Below is the the instruction that describes the task:
### Input:
compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
### Response:
def var(self, values, axis=0, weights=None, dtype=None):
    """compute the variance over each group
    Parameters
    ----------
    values : array_like, [keys, ...]
        values to take variance of per group
    axis : int, optional
        alternative reduction axis for values
    weights : array_like, optional
        per-element weights; when given, a weighted variance is computed
    dtype : numpy dtype, optional
        accumulator dtype forwarded to the underlying reductions
    Returns
    -------
    unique: ndarray, [groups]
        unique keys
    reduced : ndarray, [groups, ...]
        value array, reduced over groups

    NOTE(review): this is the biased (population) variance -- no ddof
    correction is applied to the divisor.
    """
    values = np.asarray(values)
    # Per-group means are broadcast back to per-element via the inverse
    # index, giving each element's deviation from its group mean.
    unique, mean = self.mean(values, axis, weights, dtype)
    err = values - mean.take(self.inverse, axis)
    if weights is None:
        # Unweighted case: each group's weight is simply its member count,
        # reshaped so it broadcasts along the reduction axis.
        shape = [1] * values.ndim
        shape[axis] = self.groups
        group_weights = self.count.reshape(shape)
        var = self.reduce(err ** 2, axis=axis, dtype=dtype)
    else:
        weights = np.asarray(weights)
        group_weights = self.reduce(weights, axis=axis, dtype=dtype)
        var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
    return unique, var / group_weights |
def xtime(b, n):
"""Repeated polynomial multiplication in GF(2^8)."""
b = b.reshape(8)
for _ in range(n):
b = exprzeros(1) + b[:7] ^ uint2exprs(0x1b, 8) & b[7]*8
return b | Repeated polynomial multiplication in GF(2^8). | Below is the the instruction that describes the task:
### Input:
Repeated polynomial multiplication in GF(2^8).
### Response:
def xtime(b, n):
    """Repeated polynomial multiplication in GF(2^8).

    Applies the multiply-by-x step ``n`` times to the 8-bit expression
    vector ``b``, reducing with the constant 0x1b on carry.
    """
    b = b.reshape(8)
    for _ in range(n):
        # Shift left by one bit (prepend a zero, drop the top bit); when the
        # high bit b[7] was set, XOR in 0x1b to reduce modulo the field
        # polynomial.  NOTE(review): `b[7]*8` replicates the carry bit across
        # all 8 positions -- relies on the sequence semantics of this
        # expression-array library; confirm against exprzeros/uint2exprs.
        b = exprzeros(1) + b[:7] ^ uint2exprs(0x1b, 8) & b[7]*8
    return b |
def get_sort_field(model=None, sort_field='sort', order_name='asc'):
"""
Get sort column info according request, the data format just likes:
?sort=fieldA.asc&sort=fieldB.desc
or:
?sort=fieldA&sort=fieldB&order=asc&order=desc
default order is 'asc'. `field` can be just like `model.field`
:param model: default model, if no model existed in field
:param sort_field: sort field name in request
:param order_name: order field name in request, the order direction can be
set in field, just like `model.field.asc` or `field.asc`, etc.
:return:
"""
from uliweb import request
if request.values.getlist('sort'):
sort_fields = request.values.getlist('sort')
order_by = []
orders = request.values.getlist('order')
for i, f in enumerate(sort_fields):
_order = 'asc'
if not orders:
if f.endswith('.asc') or f.endswith('.desc'):
f, _order = f.rsplit('.', 1)
else:
continue
field = get_column(f, model)
if field is not None:
if orders:
_order = orders[i]
if _order == 'asc':
order_by.append(field)
else:
order_by.append(field.desc())
else:
order_by = None
return order_by | Get sort column info according request, the data format just likes:
?sort=fieldA.asc&sort=fieldB.desc
or:
?sort=fieldA&sort=fieldB&order=asc&order=desc
default order is 'asc'. `field` can be just like `model.field`
:param model: default model, if no model existed in field
:param sort_field: sort field name in request
:param order_name: order field name in request, the order direction can be
set in field, just like `model.field.asc` or `field.asc`, etc.
:return: | Below is the the instruction that describes the task:
### Input:
Get sort column info according request, the data format just likes:
?sort=fieldA.asc&sort=fieldB.desc
or:
?sort=fieldA&sort=fieldB&order=asc&order=desc
default order is 'asc'. `field` can be just like `model.field`
:param model: default model, if no model existed in field
:param sort_field: sort field name in request
:param order_name: order field name in request, the order direction can be
set in field, just like `model.field.asc` or `field.asc`, etc.
:return:
### Response:
def get_sort_field(model=None, sort_field='sort', order_name='asc'):
    """
    Get sort column info according request, the data format just likes:
    ?sort=fieldA.asc&sort=fieldB.desc
    or:
    ?sort=fieldA&sort=fieldB&order=asc&order=desc
    default order is 'asc'. `field` can be just like `model.field`
    :param model: default model, if no model existed in field
    :param sort_field: sort field name in request
    :param order_name: order field name in request, the order direction can be
        set in field, just like `model.field.asc` or `field.asc`, etc.
    :return: list of order-by columns, or None when the request carries no
        sort parameters

    NOTE(review): `sort_field` and `order_name` are accepted but never used --
    the request keys 'sort' and 'order' are hard-coded below; confirm whether
    the parameters should be honoured.
    NOTE(review): when fewer 'order' values than 'sort' values are supplied,
    `orders[i]` below can raise IndexError -- verify against callers.
    """
    from uliweb import request
    if request.values.getlist('sort'):
        sort_fields = request.values.getlist('sort')
        order_by = []
        orders = request.values.getlist('order')
        for i, f in enumerate(sort_fields):
            _order = 'asc'
            if not orders:
                # No explicit 'order' params: the direction may be suffixed on
                # the field itself ('field.asc' / 'field.desc'); fields with
                # no suffix are skipped entirely.
                if f.endswith('.asc') or f.endswith('.desc'):
                    f, _order = f.rsplit('.', 1)
                else:
                    continue
            field = get_column(f, model)
            if field is not None:
                if orders:
                    _order = orders[i]
                if _order == 'asc':
                    order_by.append(field)
                else:
                    order_by.append(field.desc())
    else:
        order_by = None
    return order_by |
def configure(self, cnf=None, **kw):
"""
Configure resources of the widget.
To get the list of options for this widget, call the method :meth:`~Table.keys`.
See :meth:`~Table.__init__` for a description of the widget specific option.
"""
if cnf == 'drag_cols':
return 'drag_cols', self._drag_cols
elif cnf == 'drag_rows':
return 'drag_rows', self._drag_rows
elif cnf == 'sortable':
return 'sortable', self._sortable
if isinstance(cnf, dict):
kwargs = cnf.copy()
kwargs.update(kw) # keyword arguments override cnf content
cnf = {} # everything is in kwargs so no need of cnf
cnf2 = {} # to configure the preview
else:
kwargs = kw
cnf2 = cnf
sortable = bool(kwargs.pop("sortable", self._sortable))
if sortable != self._sortable:
self._config_sortable(sortable)
drag_cols = bool(kwargs.pop("drag_cols", self._drag_cols))
if drag_cols != self._drag_cols:
self._config_drag_cols(drag_cols)
self._drag_rows = bool(kwargs.pop("drag_rows", self._drag_rows))
if 'columns' in kwargs:
# update column type dict
for col in list(self._column_types.keys()):
if col not in kwargs['columns']:
del self._column_types[col]
for col in kwargs['columns']:
if col not in self._column_types:
self._column_types[col] = str
# Remove some keywords from the preview configuration dict
kw2 = kwargs.copy()
kw2.pop('displaycolumns', None)
kw2.pop('xscrollcommand', None)
kw2.pop('yscrollcommand', None)
self._visual_drag.configure(cnf2, **kw2)
if len(kwargs) != 0:
return ttk.Treeview.configure(self, cnf, **kwargs) | Configure resources of the widget.
To get the list of options for this widget, call the method :meth:`~Table.keys`.
See :meth:`~Table.__init__` for a description of the widget specific option. | Below is the the instruction that describes the task:
### Input:
Configure resources of the widget.
To get the list of options for this widget, call the method :meth:`~Table.keys`.
See :meth:`~Table.__init__` for a description of the widget specific option.
### Response:
def configure(self, cnf=None, **kw):
    """
    Configure resources of the widget.
    To get the list of options for this widget, call the method :meth:`~Table.keys`.
    See :meth:`~Table.__init__` for a description of the widget specific option.
    """
    # Querying one of the custom options returns its (name, value) pair,
    # mirroring Tkinter's configure query behaviour.
    if cnf == 'drag_cols':
        return 'drag_cols', self._drag_cols
    elif cnf == 'drag_rows':
        return 'drag_rows', self._drag_rows
    elif cnf == 'sortable':
        return 'sortable', self._sortable
    if isinstance(cnf, dict):
        kwargs = cnf.copy()
        kwargs.update(kw)  # keyword arguments override cnf content
        cnf = {}  # everything is in kwargs so no need of cnf
        cnf2 = {}  # to configure the preview
    else:
        kwargs = kw
        cnf2 = cnf
    # The custom options are popped out of kwargs so that only genuine
    # Treeview options are forwarded to ttk.Treeview.configure below.
    sortable = bool(kwargs.pop("sortable", self._sortable))
    if sortable != self._sortable:
        self._config_sortable(sortable)
    drag_cols = bool(kwargs.pop("drag_cols", self._drag_cols))
    if drag_cols != self._drag_cols:
        self._config_drag_cols(drag_cols)
    self._drag_rows = bool(kwargs.pop("drag_rows", self._drag_rows))
    if 'columns' in kwargs:
        # update column type dict: drop types for removed columns, default
        # any new columns to str.
        for col in list(self._column_types.keys()):
            if col not in kwargs['columns']:
                del self._column_types[col]
        for col in kwargs['columns']:
            if col not in self._column_types:
                self._column_types[col] = str
    # Remove some keywords from the preview configuration dict -- the drag
    # preview keeps its own scrolling and column display settings.
    kw2 = kwargs.copy()
    kw2.pop('displaycolumns', None)
    kw2.pop('xscrollcommand', None)
    kw2.pop('yscrollcommand', None)
    self._visual_drag.configure(cnf2, **kw2)
    if len(kwargs) != 0:
        return ttk.Treeview.configure(self, cnf, **kwargs) |
def open_url(url, retries=0, sleep=0.5):
'''
Open a mysql connection to a url. Note that if your password has
punctuation characters, it might break the parsing of url.
url: A string in the form "mysql://username:password@host.domain/database"
'''
return open_conn(retries=retries, sleep=sleep, **parse_url(url)) | Open a mysql connection to a url. Note that if your password has
punctuation characters, it might break the parsing of url.
url: A string in the form "mysql://username:password@host.domain/database" | Below is the the instruction that describes the task:
### Input:
Open a mysql connection to a url. Note that if your password has
punctuation characters, it might break the parsing of url.
url: A string in the form "mysql://username:password@host.domain/database"
### Response:
def open_url(url, retries=0, sleep=0.5):
    '''
    Open a mysql connection to a url. Note that if your password has
    punctuation characters, it might break the parsing of url.
    url: A string in the form "mysql://username:password@host.domain/database"
    retries: number of times to retry the connection attempt
    sleep: seconds to wait between retry attempts
    '''
    # parse_url splits the URL into the keyword arguments open_conn expects.
    return open_conn(retries=retries, sleep=sleep, **parse_url(url)) |
def get_value(self,column=None,keyValue=None,table=None,verbose=None):
"""
Returns the value from a cell as specified by row and column ids.
:param column (string, optional): Specifies the name of a column in the tab
le
:param keyValue (string, optional): Specifies a row of a table using the pr
imary key as the indentifier
:param table (string, optional): Specifies a table by table name. If the pr
efix SUID: is used, the table corresponding the SUID will be returne
d.
:returns: value from a cell as specified by row and column ids
"""
PARAMS=set_param(['column','keyValue','table'],[column,keyValue,table])
response=api(url=self.__url+"/get value", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | Returns the value from a cell as specified by row and column ids.
:param column (string, optional): Specifies the name of a column in the tab
le
:param keyValue (string, optional): Specifies a row of a table using the pr
imary key as the indentifier
:param table (string, optional): Specifies a table by table name. If the pr
efix SUID: is used, the table corresponding the SUID will be returne
d.
:returns: value from a cell as specified by row and column ids | Below is the the instruction that describes the task:
### Input:
Returns the value from a cell as specified by row and column ids.
:param column (string, optional): Specifies the name of a column in the tab
le
:param keyValue (string, optional): Specifies a row of a table using the pr
imary key as the identifier
:param table (string, optional): Specifies a table by table name. If the pr
efix SUID: is used, the table corresponding the SUID will be returne
d.
:returns: value from a cell as specified by row and column ids
### Response:
def get_value(self,column=None,keyValue=None,table=None,verbose=None):
    """
    Returns the value from a cell as specified by row and column ids.
    :param column (string, optional): Specifies the name of a column in the
        table
    :param keyValue (string, optional): Specifies a row of a table using the
        primary key as the identifier
    :param table (string, optional): Specifies a table by table name. If the
        prefix SUID: is used, the table corresponding to the SUID will be
        returned.
    :param verbose: print more detail about the request/response
    :returns: value from a cell as specified by row and column ids
    """
    # Only the non-None arguments are forwarded as request parameters.
    PARAMS=set_param(['column','keyValue','table'],[column,keyValue,table])
    response=api(url=self.__url+"/get value", PARAMS=PARAMS, method="POST", verbose=verbose)
    return response |
def convert_numpy_type(cls, dtype):
"""Convert a numpy dtype into a Column datatype. Only handles common
types.
Implemented as a function to decouple from numpy
"""
m = {
'int64': cls.DATATYPE_INTEGER64,
'float64': cls.DATATYPE_FLOAT,
'object': cls.DATATYPE_TEXT # Hack. Pandas makes strings into object.
}
t = m.get(dtype.name, None)
if not t:
raise TypeError(
"Failed to convert numpy type: '{}' ".format(
dtype.name))
return t | Convert a numpy dtype into a Column datatype. Only handles common
types.
Implemented as a function to decouple from numpy | Below is the the instruction that describes the task:
### Input:
Convert a numpy dtype into a Column datatype. Only handles common
types.
Implemented as a function to decouple from numpy
### Response:
def convert_numpy_type(cls, dtype):
    """Convert a numpy dtype into a Column datatype. Only handles common
    types.
    Implemented as a function to decouple from numpy
    """
    dtype_map = {
        'int64': cls.DATATYPE_INTEGER64,
        'float64': cls.DATATYPE_FLOAT,
        # Pandas represents strings with the generic 'object' dtype.
        'object': cls.DATATYPE_TEXT,
    }
    converted = dtype_map.get(dtype.name)
    if not converted:
        raise TypeError(
            "Failed to convert numpy type: '{}' ".format(
                dtype.name))
    return converted
def clean_value(self, value):
'''
Additional clean action to preprocess value before :meth:`to_python`
method.
Subclasses may define own clean_value method to allow additional clean
actions like html cleanup, etc.
'''
# We have to clean before checking min/max length. It's done in
# separate method to allow additional clean action in subclasses.
if self.nontext_replacement is not None:
value = replace_nontext(value, self.nontext_replacement)
if self.strip:
value = value.strip()
return value | Additional clean action to preprocess value before :meth:`to_python`
method.
Subclasses may define own clean_value method to allow additional clean
actions like html cleanup, etc. | Below is the the instruction that describes the task:
### Input:
Additional clean action to preprocess value before :meth:`to_python`
method.
Subclasses may define own clean_value method to allow additional clean
actions like html cleanup, etc.
### Response:
def clean_value(self, value):
    '''
    Additional clean action to preprocess value before :meth:`to_python`
    method.
    Subclasses may define own clean_value method to allow additional clean
    actions like html cleanup, etc.
    '''
    # Cleaning runs before any min/max length checks performed elsewhere;
    # this hook exists so subclasses can extend the preprocessing.
    if self.nontext_replacement is not None:
        value = replace_nontext(value, self.nontext_replacement)
    return value.strip() if self.strip else value
def getComponentButtonMask(self, pchRenderModelName, pchComponentName):
"""
Get the button mask for all buttons associated with this component
If no buttons (or axes) are associated with this component, return 0
Note: multiple components may be associated with the same button. Ex: two grip buttons on a single controller.
Note: A single component may be associated with multiple buttons. Ex: A trackpad which also provides "D-pad" functionality
"""
fn = self.function_table.getComponentButtonMask
result = fn(pchRenderModelName, pchComponentName)
return result | Get the button mask for all buttons associated with this component
If no buttons (or axes) are associated with this component, return 0
Note: multiple components may be associated with the same button. Ex: two grip buttons on a single controller.
Note: A single component may be associated with multiple buttons. Ex: A trackpad which also provides "D-pad" functionality | Below is the the instruction that describes the task:
### Input:
Get the button mask for all buttons associated with this component
If no buttons (or axes) are associated with this component, return 0
Note: multiple components may be associated with the same button. Ex: two grip buttons on a single controller.
Note: A single component may be associated with multiple buttons. Ex: A trackpad which also provides "D-pad" functionality
### Response:
def getComponentButtonMask(self, pchRenderModelName, pchComponentName):
    """
    Get the button mask for all buttons associated with this component
    If no buttons (or axes) are associated with this component, return 0
    Note: multiple components may be associated with the same button. Ex: two grip buttons on a single controller.
    Note: A single component may be associated with multiple buttons. Ex: A trackpad which also provides "D-pad" functionality
    """
    # Delegate directly to the underlying C function table entry.
    return self.function_table.getComponentButtonMask(
        pchRenderModelName, pchComponentName)
def circumcenter(self):
'''
The intersection of the median perpendicular bisectors, Point.
The center of the circumscribed circle, which is the circle that
passes through all vertices of the triangle.
https://en.wikipedia.org/wiki/Circumscribed_circle#Cartesian_coordinates_2
BUG: only finds the circumcenter in the XY plane
'''
if self.isRight:
return self.hypotenuse.midpoint
if self.A.isOrigin:
t = self
else:
# translate triangle to origin
t = Triangle(self.A - self.A, self.B - self.A, self.C - self.A)
# XXX translation would be easier by defining add and sub for points
# t = self - self.A
if not t.A.isOrigin:
raise ValueError('failed to translate {} to origin'.format(t))
BmulC = t.B * t.C.yx
d = 2 * (BmulC.x - BmulC.y)
bSqSum = sum((t.B ** 2).xy)
cSqSum = sum((t.C ** 2).xy)
x = (((t.C.y * bSqSum) - (t.B.y * cSqSum)) / d) + self.A.x
y = (((t.B.x * cSqSum) - (t.C.x * bSqSum)) / d) + self.A.y
return Point(x, y) | The intersection of the median perpendicular bisectors, Point.
The center of the circumscribed circle, which is the circle that
passes through all vertices of the triangle.
https://en.wikipedia.org/wiki/Circumscribed_circle#Cartesian_coordinates_2
BUG: only finds the circumcenter in the XY plane | Below is the the instruction that describes the task:
### Input:
The intersection of the median perpendicular bisectors, Point.
The center of the circumscribed circle, which is the circle that
passes through all vertices of the triangle.
https://en.wikipedia.org/wiki/Circumscribed_circle#Cartesian_coordinates_2
BUG: only finds the circumcenter in the XY plane
### Response:
def circumcenter(self):
    '''
    The intersection of the median perpendicular bisectors, Point.
    The center of the circumscribed circle, which is the circle that
    passes through all vertices of the triangle.
    https://en.wikipedia.org/wiki/Circumscribed_circle#Cartesian_coordinates_2
    BUG: only finds the circumcenter in the XY plane
    '''
    # For a right triangle the circumcenter is the midpoint of the
    # hypotenuse -- no algebra needed.
    if self.isRight:
        return self.hypotenuse.midpoint
    if self.A.isOrigin:
        t = self
    else:
        # translate triangle to origin
        t = Triangle(self.A - self.A, self.B - self.A, self.C - self.A)
    # XXX translation would be easier by defining add and sub for points
    # t = self - self.A
    if not t.A.isOrigin:
        raise ValueError('failed to translate {} to origin'.format(t))
    # Cartesian circumcenter formula with A at the origin (see link above);
    # only x/y components are used, hence the XY-plane limitation noted in
    # the docstring.
    BmulC = t.B * t.C.yx
    d = 2 * (BmulC.x - BmulC.y)
    bSqSum = sum((t.B ** 2).xy)
    cSqSum = sum((t.C ** 2).xy)
    # Translate the result back by adding the original A offset.
    x = (((t.C.y * bSqSum) - (t.B.y * cSqSum)) / d) + self.A.x
    y = (((t.B.x * cSqSum) - (t.C.x * bSqSum)) / d) + self.A.y
    return Point(x, y) |
def get_tagged_sections(book_dir=BOOK_PATH, include_tags=None):
""" Get list of (adoc_file_path, (adoc_syntax_tag, raw_line_str))
>>> get_tagged_sections()
[('...src/nlpia/data/book/Appendix F -- Glossary.asc', <generator object filter_tagged_lines at ...>)]
"""
return [(filepath, tag_lines(lines, include_tags=include_tags)) for filepath, lines in get_lines(book_dir)] | Get list of (adoc_file_path, (adoc_syntax_tag, raw_line_str))
>>> get_tagged_sections()
[('...src/nlpia/data/book/Appendix F -- Glossary.asc', <generator object filter_tagged_lines at ...>)] | Below is the the instruction that describes the task:
### Input:
Get list of (adoc_file_path, (adoc_syntax_tag, raw_line_str))
>>> get_tagged_sections()
[('...src/nlpia/data/book/Appendix F -- Glossary.asc', <generator object filter_tagged_lines at ...>)]
### Response:
def get_tagged_sections(book_dir=BOOK_PATH, include_tags=None):
    """ Get list of (adoc_file_path, (adoc_syntax_tag, raw_line_str))
    >>> get_tagged_sections()
    [('...src/nlpia/data/book/Appendix F -- Glossary.asc', <generator object filter_tagged_lines at ...>)]
    """
    sections = []
    for filepath, lines in get_lines(book_dir):
        sections.append((filepath, tag_lines(lines, include_tags=include_tags)))
    return sections
def splitSong(songToSplit, start1, start2):
"""Split a song into two parts, one starting at start1, the other at start2"""
print "start1 " + str(start1)
print "start2 " + str(start2)
# songs = [songToSplit[:start1+2000], songToSplit[start2-2000:]]
songs = [songToSplit[:start1], songToSplit[start2:]]
return songs | Split a song into two parts, one starting at start1, the other at start2 | Below is the the instruction that describes the task:
### Input:
Split a song into two parts, one starting at start1, the other at start2
### Response:
def splitSong(songToSplit, start1, start2):
    """Split a song into two parts, one ending at start1, the other starting at start2.

    :param songToSplit: sliceable audio segment (anything supporting slicing)
    :param start1: end position of the first part
    :param start2: start position of the second part
    :return: list [first_part, second_part]
    """
    # Python 3 fix: the original used Python 2 `print` statements, which are
    # a syntax error under Python 3.
    print("start1 " + str(start1))
    print("start2 " + str(start2))
    return [songToSplit[:start1], songToSplit[start2:]]
def add_series(self, name, values=(), number_format=None):
"""
Add a series to this data set entitled *name* and having the data
points specified by *values*, an iterable of numeric values.
*number_format* specifies how the series values will be displayed,
and may be a string, e.g. '#,##0' corresponding to an Excel number
format.
"""
series_data = CategorySeriesData(self, name, number_format)
self.append(series_data)
for value in values:
series_data.add_data_point(value)
return series_data | Add a series to this data set entitled *name* and having the data
points specified by *values*, an iterable of numeric values.
*number_format* specifies how the series values will be displayed,
and may be a string, e.g. '#,##0' corresponding to an Excel number
format. | Below is the the instruction that describes the task:
### Input:
Add a series to this data set entitled *name* and having the data
points specified by *values*, an iterable of numeric values.
*number_format* specifies how the series values will be displayed,
and may be a string, e.g. '#,##0' corresponding to an Excel number
format.
### Response:
def add_series(self, name, values=(), number_format=None):
    """
    Add a series to this data set entitled *name* and having the data
    points specified by *values*, an iterable of numeric values.
    *number_format* specifies how the series values will be displayed,
    and may be a string, e.g. '#,##0' corresponding to an Excel number
    format.
    """
    new_series = CategorySeriesData(self, name, number_format)
    self.append(new_series)
    # Seed the new series with the supplied values, one data point each.
    for number in values:
        new_series.add_data_point(number)
    return new_series
def write_base (self, url_data):
"""Write url_data.base_ref."""
self.writeln(u"<tr><td>"+self.part("base")+u"</td><td>"+
cgi.escape(url_data.base_ref)+u"</td></tr>") | Write url_data.base_ref. | Below is the instruction that describes the task:
### Input:
Write url_data.base_ref.
### Response:
def write_base (self, url_data):
    """Write url_data.base_ref."""
    # Build the full HTML table row first, then emit it in one call.
    row = (u"<tr><td>" + self.part("base") + u"</td><td>" +
           cgi.escape(url_data.base_ref) + u"</td></tr>")
    self.writeln(row)
def update_ssl_termination(self, loadbalancer, securePort=None, enabled=None,
secureTrafficOnly=None):
"""
Updates existing SSL termination information for the load balancer
without affecting the existing certificates/keys.
"""
ssl_info = self.get_ssl_termination(loadbalancer)
if not ssl_info:
raise exc.NoSSLTerminationConfiguration("You must configure SSL "
"termination on this load balancer before attempting "
"to update it.")
if securePort is None:
securePort = ssl_info["securePort"]
if enabled is None:
enabled = ssl_info["enabled"]
if secureTrafficOnly is None:
secureTrafficOnly = ssl_info["secureTrafficOnly"]
uri = "/loadbalancers/%s/ssltermination" % utils.get_id(loadbalancer)
req_body = {"sslTermination": {
"enabled": enabled,
"secureTrafficOnly": secureTrafficOnly,
"securePort": securePort,
}}
resp, body = self.api.method_put(uri, body=req_body)
return body | Updates existing SSL termination information for the load balancer
without affecting the existing certificates/keys. | Below is the instruction that describes the task:
### Input:
Updates existing SSL termination information for the load balancer
without affecting the existing certificates/keys.
### Response:
def update_ssl_termination(self, loadbalancer, securePort=None, enabled=None,
        secureTrafficOnly=None):
    """Update the load balancer's existing SSL termination settings.

    Any argument left as None keeps its currently-configured value; the
    existing certificates/keys are never touched.

    :raises exc.NoSSLTerminationConfiguration: if SSL termination has not
        been configured on this load balancer yet.
    """
    current = self.get_ssl_termination(loadbalancer)
    if not current:
        raise exc.NoSSLTerminationConfiguration("You must configure SSL "
                "termination on this load balancer before attempting "
                "to update it.")
    # Fall back to the currently-configured value for any unspecified field.
    if securePort is None:
        securePort = current["securePort"]
    if enabled is None:
        enabled = current["enabled"]
    if secureTrafficOnly is None:
        secureTrafficOnly = current["secureTrafficOnly"]
    payload = {"sslTermination": {
        "enabled": enabled,
        "secureTrafficOnly": secureTrafficOnly,
        "securePort": securePort,
    }}
    uri = "/loadbalancers/%s/ssltermination" % utils.get_id(loadbalancer)
    resp, body = self.api.method_put(uri, body=payload)
    return body
def get_block(block_id, api_code=None):
"""Get a single block based on a block hash.
:param str block_id: block hash to look up
:param str api_code: Blockchain.info API code (optional)
:return: an instance of :class:`Block` class
"""
resource = 'rawblock/' + block_id
if api_code is not None:
resource += '?api_code=' + api_code
response = util.call_api(resource)
json_response = json.loads(response)
return Block(json_response) | Get a single block based on a block hash.
:param str block_id: block hash to look up
:param str api_code: Blockchain.info API code (optional)
:return: an instance of :class:`Block` class | Below is the instruction that describes the task:
### Input:
Get a single block based on a block hash.
:param str block_id: block hash to look up
:param str api_code: Blockchain.info API code (optional)
:return: an instance of :class:`Block` class
### Response:
def get_block(block_id, api_code=None):
    """Fetch a single block by its hash via the ``rawblock`` endpoint.

    :param str block_id: block hash to look up
    :param str api_code: Blockchain.info API code (optional)
    :return: an instance of :class:`Block` class
    """
    resource = 'rawblock/' + block_id
    if api_code is not None:
        resource = resource + '?api_code=' + api_code
    raw = util.call_api(resource)
    return Block(json.loads(raw))
def to_dict(self, model):
"""Create a dictionary serialization for a model.
Parameters
----------
model : ModelHandle
Returns
-------
dict
Dictionary serialization for a model
"""
# Get the basic Json object from the super class
obj = super(ModelRegistry, self).to_dict(model)
# Add model parameter
obj['parameters'] = [
para.to_dict() for para in model.parameters
]
obj['outputs'] = model.outputs.to_dict()
obj['connector'] = model.connector
return obj | Create a dictionary serialization for a model.
Parameters
----------
model : ModelHandle
Returns
-------
dict
Dictionary serialization for a model | Below is the instruction that describes the task:
### Input:
Create a dictionary serialization for a model.
Parameters
----------
model : ModelHandle
Returns
-------
dict
Dictionary serialization for a model
### Response:
def to_dict(self, model):
    """Serialize a model handle into a dictionary.

    Parameters
    ----------
    model : ModelHandle

    Returns
    -------
    dict
        Dictionary serialization for the model
    """
    # Start from the base-class serialization, then add model-specific fields.
    serialized = super(ModelRegistry, self).to_dict(model)
    serialized['parameters'] = [p.to_dict() for p in model.parameters]
    serialized['outputs'] = model.outputs.to_dict()
    serialized['connector'] = model.connector
    return serialized
def create(cls, spec_path, address_maps):
"""Creates an address family from the given set of address maps.
:param spec_path: The directory prefix shared by all address_maps.
:param address_maps: The family of maps that form this namespace.
:type address_maps: :class:`collections.Iterable` of :class:`AddressMap`
:returns: a new address family.
:rtype: :class:`AddressFamily`
:raises: :class:`MappingError` if the given address maps do not form a family.
"""
if spec_path == '.':
spec_path = ''
for address_map in address_maps:
if not address_map.path.startswith(spec_path):
raise DifferingFamiliesError('Expected AddressMaps to share the same parent directory {}, '
'but received: {}'
.format(spec_path, address_map.path))
objects_by_name = {}
for address_map in address_maps:
current_path = address_map.path
for name, obj in address_map.objects_by_name.items():
previous = objects_by_name.get(name)
if previous:
previous_path, _ = previous
raise DuplicateNameError('An object with name {name!r} is already defined in '
'{previous_path!r}, will not overwrite with {obj!r} from '
'{current_path!r}.'
.format(name=name,
previous_path=previous_path,
obj=obj,
current_path=current_path))
objects_by_name[name] = (current_path, obj)
return AddressFamily(namespace=spec_path,
objects_by_name=OrderedDict((name, (path, obj)) for name, (path, obj)
in sorted(objects_by_name.items()))) | Creates an address family from the given set of address maps.
:param spec_path: The directory prefix shared by all address_maps.
:param address_maps: The family of maps that form this namespace.
:type address_maps: :class:`collections.Iterable` of :class:`AddressMap`
:returns: a new address family.
:rtype: :class:`AddressFamily`
:raises: :class:`MappingError` if the given address maps do not form a family. | Below is the instruction that describes the task:
### Input:
Creates an address family from the given set of address maps.
:param spec_path: The directory prefix shared by all address_maps.
:param address_maps: The family of maps that form this namespace.
:type address_maps: :class:`collections.Iterable` of :class:`AddressMap`
:returns: a new address family.
:rtype: :class:`AddressFamily`
:raises: :class:`MappingError` if the given address maps do not form a family.
### Response:
def create(cls, spec_path, address_maps):
    """Build an address family from a set of address maps.

    :param spec_path: The directory prefix shared by all address_maps.
    :param address_maps: The family of maps that form this namespace.
    :type address_maps: :class:`collections.Iterable` of :class:`AddressMap`
    :returns: a new address family.
    :rtype: :class:`AddressFamily`
    :raises: :class:`MappingError` if the given address maps do not form a family.
    """
    if spec_path == '.':
        spec_path = ''
    # Every map must live under the shared parent directory.
    for address_map in address_maps:
        if not address_map.path.startswith(spec_path):
            raise DifferingFamiliesError(
                'Expected AddressMaps to share the same parent directory {}, '
                'but received: {}'.format(spec_path, address_map.path))
    # Merge the maps, refusing to let a later map shadow an earlier name.
    objects_by_name = {}
    for address_map in address_maps:
        current_path = address_map.path
        for name, obj in address_map.objects_by_name.items():
            previous = objects_by_name.get(name)
            if previous:
                previous_path, _ = previous
                raise DuplicateNameError(
                    'An object with name {name!r} is already defined in '
                    '{previous_path!r}, will not overwrite with {obj!r} from '
                    '{current_path!r}.'.format(name=name,
                                               previous_path=previous_path,
                                               obj=obj,
                                               current_path=current_path))
            objects_by_name[name] = (current_path, obj)
    return AddressFamily(
        namespace=spec_path,
        objects_by_name=OrderedDict((name, (path, obj)) for name, (path, obj)
                                    in sorted(objects_by_name.items())))
def save(self, calc, session):
'''
Saves tilde_obj into the database
NB: this is the PUBLIC method
@returns checksum, error
'''
checksum = calc.get_checksum()
try:
existing_calc = session.query(model.Calculation).filter(model.Calculation.checksum == checksum).one()
except NoResultFound:
pass
else:
del calc
return None, "This calculation already exists!"
if not calc.download_size:
for f in calc.related_files:
calc.download_size += os.stat(f).st_size
ormcalc = model.Calculation(checksum = checksum)
if calc._calcset:
ormcalc.meta_data = model.Metadata(chemical_formula = calc.info['standard'], download_size = calc.download_size)
for child in session.query(model.Calculation).filter(model.Calculation.checksum.in_(calc._calcset)).all():
ormcalc.children.append(child)
ormcalc.siblings_count = len(ormcalc.children)
ormcalc.nested_depth = calc._nested_depth
else:
# prepare phonon data for saving
# this is actually a dict to list conversion TODO re-structure this
if calc.phonons['modes']:
phonons_json = []
for bzpoint, frqset in calc.phonons['modes'].items():
# re-orientate eigenvectors
for i in range(0, len(calc.phonons['ph_eigvecs'][bzpoint])):
for j in range(0, len(calc.phonons['ph_eigvecs'][bzpoint][i])//3):
eigv = array([calc.phonons['ph_eigvecs'][bzpoint][i][j*3], calc.phonons['ph_eigvecs'][bzpoint][i][j*3+1], calc.phonons['ph_eigvecs'][bzpoint][i][j*3+2]])
R = dot( eigv, calc.structures[-1].cell ).tolist()
calc.phonons['ph_eigvecs'][bzpoint][i][j*3], calc.phonons['ph_eigvecs'][bzpoint][i][j*3+1], calc.phonons['ph_eigvecs'][bzpoint][i][j*3+2] = [round(x, 3) for x in R]
try: irreps = calc.phonons['irreps'][bzpoint]
except KeyError:
empty = []
for i in range(len(frqset)):
empty.append('')
irreps = empty
phonons_json.append({ 'bzpoint':bzpoint, 'freqs':frqset, 'irreps':irreps, 'ph_eigvecs':calc.phonons['ph_eigvecs'][bzpoint] })
if bzpoint == '0 0 0':
phonons_json[-1]['ir_active'] = calc.phonons['ir_active']
phonons_json[-1]['raman_active'] = calc.phonons['raman_active']
if calc.phonons['ph_k_degeneracy']:
phonons_json[-1]['ph_k_degeneracy'] = calc.phonons['ph_k_degeneracy'][bzpoint]
ormcalc.phonons = model.Phonons()
ormcalc.spectra.append( model.Spectra(kind = model.Spectra.PHONON, eigenvalues = json.dumps(phonons_json)) )
# prepare electron data for saving TODO re-structure this
for task in ['dos', 'bands']: # projected?
if calc.electrons[task]:
calc.electrons[task] = calc.electrons[task].todict()
if calc.electrons['dos'] or calc.electrons['bands']:
ormcalc.electrons = model.Electrons(gap = calc.info['bandgap'])
if 'bandgaptype' in calc.info:
ormcalc.electrons.is_direct = 1 if calc.info['bandgaptype'] == 'direct' else -1
ormcalc.spectra.append(model.Spectra(
kind = model.Spectra.ELECTRON,
dos = json.dumps(calc.electrons['dos']),
bands = json.dumps(calc.electrons['bands']),
projected = json.dumps(calc.electrons['projected']),
eigenvalues = json.dumps(calc.electrons['eigvals'])
))
# construct ORM for other props
calc.related_files = list(map(virtualize_path, calc.related_files))
ormcalc.meta_data = model.Metadata(location = calc.info['location'], finished = calc.info['finished'], raw_input = calc.info['input'], modeling_time = calc.info['duration'], chemical_formula = html_formula(calc.info['standard']), download_size = calc.download_size, filenames = json.dumps(calc.related_files))
codefamily = model.Codefamily.as_unique(session, content = calc.info['framework'])
codeversion = model.Codeversion.as_unique(session, content = calc.info['prog'])
codeversion.instances.append( ormcalc.meta_data )
codefamily.versions.append( codeversion )
pot = model.Pottype.as_unique(session, name = calc.info['H'])
pot.instances.append(ormcalc)
ormcalc.recipinteg = model.Recipinteg(kgrid = calc.info['k'], kshift = calc.info['kshift'], smearing = calc.info['smear'], smeartype = calc.info['smeartype'])
ormcalc.basis = model.Basis(kind = calc.info['ansatz'], content = json.dumps(calc.electrons['basis_set']) if calc.electrons['basis_set'] else None)
ormcalc.energy = model.Energy(convergence = json.dumps(calc.convergence), total = calc.info['energy'])
ormcalc.spacegroup = model.Spacegroup(n=calc.info['ng'])
ormcalc.struct_ratios = model.Struct_ratios(chemical_formula=calc.info['standard'], formula_units=calc.info['expanded'], nelem=calc.info['nelem'], dimensions=calc.info['dims'])
if len(calc.tresholds) > 1:
ormcalc.struct_optimisation = model.Struct_optimisation(tresholds=json.dumps(calc.tresholds), ncycles=json.dumps(calc.ncycles))
for n, ase_repr in enumerate(calc.structures):
is_final = True if n == len(calc.structures)-1 else False
struct = model.Structure(step = n, final = is_final)
s = cell_to_cellpar(ase_repr.cell)
struct.lattice = model.Lattice(a=s[0], b=s[1], c=s[2], alpha=s[3], beta=s[4], gamma=s[5], a11=ase_repr.cell[0][0], a12=ase_repr.cell[0][1], a13=ase_repr.cell[0][2], a21=ase_repr.cell[1][0], a22=ase_repr.cell[1][1], a23=ase_repr.cell[1][2], a31=ase_repr.cell[2][0], a32=ase_repr.cell[2][1], a33=ase_repr.cell[2][2])
#rmts = ase_repr.get_array('rmts') if 'rmts' in ase_repr.arrays else [None for j in range(len(ase_repr))]
charges = ase_repr.get_array('charges') if 'charges' in ase_repr.arrays else [None for j in range(len(ase_repr))]
magmoms = ase_repr.get_array('magmoms') if 'magmoms' in ase_repr.arrays else [None for j in range(len(ase_repr))]
for n, i in enumerate(ase_repr):
struct.atoms.append( model.Atom( number=chemical_symbols.index(i.symbol), x=i.x, y=i.y, z=i.z, charge=charges[n], magmom=magmoms[n] ) )
ormcalc.structures.append(struct)
# TODO Forces
ormcalc.uigrid = model.Grid(info=json.dumps(calc.info))
# tags ORM
uitopics = []
for entity in self.hierarchy:
if not entity['creates_topic']:
continue
if entity['multiple'] or calc._calcset:
for item in calc.info.get( entity['source'], [] ):
uitopics.append( model.topic(cid=entity['cid'], topic=item) )
else:
topic = calc.info.get(entity['source'])
if topic or not entity['optional']:
uitopics.append( model.topic(cid=entity['cid'], topic=topic) )
uitopics = [model.Topic.as_unique(session, cid=x.cid, topic="%s" % x.topic) for x in uitopics]
ormcalc.uitopics.extend(uitopics)
if calc._calcset:
session.add(ormcalc)
else:
session.add_all([codefamily, codeversion, pot, ormcalc])
session.commit()
del calc, ormcalc
return checksum, None | Saves tilde_obj into the database
NB: this is the PUBLIC method
@returns checksum, error | Below is the instruction that describes the task:
### Input:
Saves tilde_obj into the database
NB: this is the PUBLIC method
@returns checksum, error
### Response:
def save(self, calc, session):
'''
Saves tilde_obj into the database
NB: this is the PUBLIC method
@returns checksum, error
'''
# The checksum is the calculation's identity: a row with the same checksum
# already in the DB means this exact calculation was stored before.
checksum = calc.get_checksum()
try:
existing_calc = session.query(model.Calculation).filter(model.Calculation.checksum == checksum).one()
except NoResultFound:
pass
else:
del calc
return None, "This calculation already exists!"
if not calc.download_size:
for f in calc.related_files:
calc.download_size += os.stat(f).st_size
ormcalc = model.Calculation(checksum = checksum)
# A calc-set only links already-stored child calculations; a regular
# calculation stores its full payload (spectra, structures, etc.) below.
if calc._calcset:
ormcalc.meta_data = model.Metadata(chemical_formula = calc.info['standard'], download_size = calc.download_size)
for child in session.query(model.Calculation).filter(model.Calculation.checksum.in_(calc._calcset)).all():
ormcalc.children.append(child)
ormcalc.siblings_count = len(ormcalc.children)
ormcalc.nested_depth = calc._nested_depth
else:
# prepare phonon data for saving
# this is actually a dict to list conversion TODO re-structure this
if calc.phonons['modes']:
phonons_json = []
for bzpoint, frqset in calc.phonons['modes'].items():
# re-orientate eigenvectors
for i in range(0, len(calc.phonons['ph_eigvecs'][bzpoint])):
for j in range(0, len(calc.phonons['ph_eigvecs'][bzpoint][i])//3):
eigv = array([calc.phonons['ph_eigvecs'][bzpoint][i][j*3], calc.phonons['ph_eigvecs'][bzpoint][i][j*3+1], calc.phonons['ph_eigvecs'][bzpoint][i][j*3+2]])
R = dot( eigv, calc.structures[-1].cell ).tolist()
calc.phonons['ph_eigvecs'][bzpoint][i][j*3], calc.phonons['ph_eigvecs'][bzpoint][i][j*3+1], calc.phonons['ph_eigvecs'][bzpoint][i][j*3+2] = [round(x, 3) for x in R]
try: irreps = calc.phonons['irreps'][bzpoint]
except KeyError:
# no irreps known for this BZ point: pad with one empty label per frequency
empty = []
for i in range(len(frqset)):
empty.append('')
irreps = empty
phonons_json.append({ 'bzpoint':bzpoint, 'freqs':frqset, 'irreps':irreps, 'ph_eigvecs':calc.phonons['ph_eigvecs'][bzpoint] })
# IR/Raman activities only make sense at the Gamma point
if bzpoint == '0 0 0':
phonons_json[-1]['ir_active'] = calc.phonons['ir_active']
phonons_json[-1]['raman_active'] = calc.phonons['raman_active']
if calc.phonons['ph_k_degeneracy']:
phonons_json[-1]['ph_k_degeneracy'] = calc.phonons['ph_k_degeneracy'][bzpoint]
ormcalc.phonons = model.Phonons()
ormcalc.spectra.append( model.Spectra(kind = model.Spectra.PHONON, eigenvalues = json.dumps(phonons_json)) )
# prepare electron data for saving TODO re-structure this
for task in ['dos', 'bands']: # projected?
if calc.electrons[task]:
calc.electrons[task] = calc.electrons[task].todict()
if calc.electrons['dos'] or calc.electrons['bands']:
ormcalc.electrons = model.Electrons(gap = calc.info['bandgap'])
if 'bandgaptype' in calc.info:
ormcalc.electrons.is_direct = 1 if calc.info['bandgaptype'] == 'direct' else -1
ormcalc.spectra.append(model.Spectra(
kind = model.Spectra.ELECTRON,
dos = json.dumps(calc.electrons['dos']),
bands = json.dumps(calc.electrons['bands']),
projected = json.dumps(calc.electrons['projected']),
eigenvalues = json.dumps(calc.electrons['eigvals'])
))
# construct ORM for other props
calc.related_files = list(map(virtualize_path, calc.related_files))
ormcalc.meta_data = model.Metadata(location = calc.info['location'], finished = calc.info['finished'], raw_input = calc.info['input'], modeling_time = calc.info['duration'], chemical_formula = html_formula(calc.info['standard']), download_size = calc.download_size, filenames = json.dumps(calc.related_files))
codefamily = model.Codefamily.as_unique(session, content = calc.info['framework'])
codeversion = model.Codeversion.as_unique(session, content = calc.info['prog'])
codeversion.instances.append( ormcalc.meta_data )
codefamily.versions.append( codeversion )
pot = model.Pottype.as_unique(session, name = calc.info['H'])
pot.instances.append(ormcalc)
ormcalc.recipinteg = model.Recipinteg(kgrid = calc.info['k'], kshift = calc.info['kshift'], smearing = calc.info['smear'], smeartype = calc.info['smeartype'])
ormcalc.basis = model.Basis(kind = calc.info['ansatz'], content = json.dumps(calc.electrons['basis_set']) if calc.electrons['basis_set'] else None)
ormcalc.energy = model.Energy(convergence = json.dumps(calc.convergence), total = calc.info['energy'])
ormcalc.spacegroup = model.Spacegroup(n=calc.info['ng'])
ormcalc.struct_ratios = model.Struct_ratios(chemical_formula=calc.info['standard'], formula_units=calc.info['expanded'], nelem=calc.info['nelem'], dimensions=calc.info['dims'])
if len(calc.tresholds) > 1:
ormcalc.struct_optimisation = model.Struct_optimisation(tresholds=json.dumps(calc.tresholds), ncycles=json.dumps(calc.ncycles))
for n, ase_repr in enumerate(calc.structures):
is_final = True if n == len(calc.structures)-1 else False
struct = model.Structure(step = n, final = is_final)
s = cell_to_cellpar(ase_repr.cell)
struct.lattice = model.Lattice(a=s[0], b=s[1], c=s[2], alpha=s[3], beta=s[4], gamma=s[5], a11=ase_repr.cell[0][0], a12=ase_repr.cell[0][1], a13=ase_repr.cell[0][2], a21=ase_repr.cell[1][0], a22=ase_repr.cell[1][1], a23=ase_repr.cell[1][2], a31=ase_repr.cell[2][0], a32=ase_repr.cell[2][1], a33=ase_repr.cell[2][2])
#rmts = ase_repr.get_array('rmts') if 'rmts' in ase_repr.arrays else [None for j in range(len(ase_repr))]
charges = ase_repr.get_array('charges') if 'charges' in ase_repr.arrays else [None for j in range(len(ase_repr))]
magmoms = ase_repr.get_array('magmoms') if 'magmoms' in ase_repr.arrays else [None for j in range(len(ase_repr))]
# NOTE(review): this inner enumerate reuses the name `n` from the outer
# structures loop; harmless as written, but worth renaming.
for n, i in enumerate(ase_repr):
struct.atoms.append( model.Atom( number=chemical_symbols.index(i.symbol), x=i.x, y=i.y, z=i.z, charge=charges[n], magmom=magmoms[n] ) )
ormcalc.structures.append(struct)
# TODO Forces
ormcalc.uigrid = model.Grid(info=json.dumps(calc.info))
# tags ORM
uitopics = []
for entity in self.hierarchy:
if not entity['creates_topic']:
continue
if entity['multiple'] or calc._calcset:
for item in calc.info.get( entity['source'], [] ):
uitopics.append( model.topic(cid=entity['cid'], topic=item) )
else:
topic = calc.info.get(entity['source'])
if topic or not entity['optional']:
uitopics.append( model.topic(cid=entity['cid'], topic=topic) )
uitopics = [model.Topic.as_unique(session, cid=x.cid, topic="%s" % x.topic) for x in uitopics]
ormcalc.uitopics.extend(uitopics)
# calc-set children are already persisted, so only the set row is added
if calc._calcset:
session.add(ormcalc)
else:
session.add_all([codefamily, codeversion, pot, ormcalc])
session.commit()
del calc, ormcalc
return checksum, None |
def try_enqueue(conn, queue_name, msg):
"""
Try to enqueue a message. If it succeeds, return the message ID.
:param conn: SQS API connection
:type conn: :py:class:`botocore:SQS.Client`
:param queue_name: name of queue to put message in
:type queue_name: str
:param msg: JSON-serialized message body
:type msg: str
:return: message ID
:rtype: str
"""
logger.debug('Getting Queue URL for queue %s', queue_name)
qurl = conn.get_queue_url(QueueName=queue_name)['QueueUrl']
logger.debug('Sending message to queue at: %s', qurl)
resp = conn.send_message(
QueueUrl=qurl,
MessageBody=msg,
DelaySeconds=0
)
logger.debug('Enqueued message in %s with ID %s', queue_name,
resp['MessageId'])
return resp['MessageId'] | Try to enqueue a message. If it succeeds, return the message ID.
:param conn: SQS API connection
:type conn: :py:class:`botocore:SQS.Client`
:param queue_name: name of queue to put message in
:type queue_name: str
:param msg: JSON-serialized message body
:type msg: str
:return: message ID
:rtype: str | Below is the instruction that describes the task:
### Input:
Try to enqueue a message. If it succeeds, return the message ID.
:param conn: SQS API connection
:type conn: :py:class:`botocore:SQS.Client`
:param queue_name: name of queue to put message in
:type queue_name: str
:param msg: JSON-serialized message body
:type msg: str
:return: message ID
:rtype: str
### Response:
def try_enqueue(conn, queue_name, msg):
    """Send *msg* to the named SQS queue and return the new message's ID.

    :param conn: SQS API connection
    :type conn: :py:class:`botocore:SQS.Client`
    :param queue_name: name of queue to put message in
    :type queue_name: str
    :param msg: JSON-serialized message body
    :type msg: str
    :return: message ID
    :rtype: str
    """
    logger.debug('Getting Queue URL for queue %s', queue_name)
    queue_url = conn.get_queue_url(QueueName=queue_name)['QueueUrl']
    logger.debug('Sending message to queue at: %s', queue_url)
    result = conn.send_message(
        QueueUrl=queue_url, MessageBody=msg, DelaySeconds=0)
    logger.debug('Enqueued message in %s with ID %s', queue_name,
                 result['MessageId'])
    return result['MessageId']
def serve_record(self, environ, coll='$root', url=''):
"""Serve a URL's content from a WARC/ARC record in replay mode or from the live web in
live, proxy, and record mode.
:param dict environ: The WSGI environment dictionary for the request
:param str coll: The name of the collection the record is to be served from
:param str url: The URL for the corresponding record to be served if it exists
:return: WbResponse containing the contents of the record/URL
:rtype: WbResponse
"""
if coll in self.warcserver.list_fixed_routes():
return WbResponse.text_response('Error: Can Not Record Into Custom Collection "{0}"'.format(coll))
return self.serve_content(environ, coll, url, record=True) | Serve a URL's content from a WARC/ARC record in replay mode or from the live web in
live, proxy, and record mode.
:param dict environ: The WSGI environment dictionary for the request
:param str coll: The name of the collection the record is to be served from
:param str url: The URL for the corresponding record to be served if it exists
:return: WbResponse containing the contents of the record/URL
:rtype: WbResponse | Below is the instruction that describes the task:
### Input:
Serve a URL's content from a WARC/ARC record in replay mode or from the live web in
live, proxy, and record mode.
:param dict environ: The WSGI environment dictionary for the request
:param str coll: The name of the collection the record is to be served from
:param str url: The URL for the corresponding record to be served if it exists
:return: WbResponse containing the contents of the record/URL
:rtype: WbResponse
### Response:
def serve_record(self, environ, coll='$root', url=''):
    """Handle a recording request for *url* within collection *coll*.

    Requests against one of the fixed (custom) collections are rejected with
    a plain-text error; everything else is delegated to serve_content() with
    record=True.

    :param dict environ: The WSGI environment dictionary for the request
    :param str coll: The name of the collection the record is to be served from
    :param str url: The URL for the corresponding record to be served if it exists
    :return: WbResponse containing the contents of the record/URL
    :rtype: WbResponse
    """
    if coll in self.warcserver.list_fixed_routes():
        message = 'Error: Can Not Record Into Custom Collection "{0}"'.format(coll)
        return WbResponse.text_response(message)
    return self.serve_content(environ, coll, url, record=True)
def by_leb(blocks):
"""Sort blocks by Logical Erase Block number.
Arguments:
List:blocks -- List of block objects to sort.
Returns:
List -- Indexes of blocks sorted by LEB.
"""
slist_len = len(blocks)
slist = ['x'] * slist_len
for block in blocks:
if blocks[block].leb_num >= slist_len:
add_elements = blocks[block].leb_num - slist_len + 1
slist += (['x'] * add_elements)
slist_len = len(slist)
slist[blocks[block].leb_num] = block
return slist | Sort blocks by Logical Erase Block number.
Arguments:
List:blocks -- List of block objects to sort.
Returns:
List -- Indexes of blocks sorted by LEB. | Below is the instruction that describes the task:
### Input:
Sort blocks by Logical Erase Block number.
Arguments:
List:blocks -- List of block objects to sort.
Returns:
List -- Indexes of blocks sorted by LEB.
### Response:
def by_leb(blocks):
    """Order block keys by their Logical Erase Block number.

    Returns a list in which index ``leb_num`` holds the corresponding key
    from *blocks*; positions with no block are filled with the placeholder
    string 'x'.
    """
    size = len(blocks)
    ordered = ['x'] * size
    for key in blocks:
        leb = blocks[key].leb_num
        if leb >= size:
            # Grow the list with placeholders until index `leb` exists.
            ordered += ['x'] * (leb - size + 1)
            size = len(ordered)
        ordered[leb] = key
    return ordered
def drop_it(title, filters, blacklist):
"""
The found torrents should be in filters list and shouldn't be in blacklist.
"""
title = title.lower()
matched = False
for f in filters:
if re.match(f, title):
matched = True
if not matched:
return True
for b in blacklist:
if re.match(b, title):
return True
return False | The found torrents should be in filters list and shouldn't be in blacklist. | Below is the instruction that describes the task:
### Input:
The found torrents should be in filters list and shouldn't be in blacklist.
### Response:
def drop_it(title, filters, blacklist):
    """Decide whether a found torrent should be dropped.

    The lowercased *title* must match at least one pattern in *filters*
    (via re.match, i.e. anchored at the start) and no pattern in
    *blacklist*.  Returns True when the torrent should be dropped.
    """
    title = title.lower()
    if not any(re.match(pattern, title) for pattern in filters):
        return True
    return any(re.match(pattern, title) for pattern in blacklist)
def read_dalton(basis_lines, fname):
'''Reads Dalton-formatted file data and converts it to a dictionary with the
usual BSE fields
Note that the nwchem format does not store all the fields we
have, so some fields are left blank
'''
skipchars = '$'
basis_lines = [l for l in basis_lines if l and not l[0] in skipchars]
bs_data = create_skel('component')
i = 0
while i < len(basis_lines):
line = basis_lines[i]
if line.lower().startswith('a '):
element_Z = line.split()[1]
i += 1
# Shell am is strictly increasing (I hope)
shell_am = 0
while i < len(basis_lines) and not basis_lines[i].lower().startswith('a '):
line = basis_lines[i]
nprim, ngen = line.split()
if not element_Z in bs_data['elements']:
bs_data['elements'][element_Z] = {}
if not 'electron_shells' in bs_data['elements'][element_Z]:
bs_data['elements'][element_Z]['electron_shells'] = []
element_data = bs_data['elements'][element_Z]
if shell_am <= 1:
func_type = 'gto'
else:
func_type = 'gto_spherical'
shell = {
'function_type': func_type,
'region': '',
'angular_momentum': [shell_am]
}
exponents = []
coefficients = []
i += 1
for _ in range(int(nprim)):
line = basis_lines[i].replace('D', 'E')
line = line.replace('d', 'E')
lsplt = line.split()
exponents.append(lsplt[0])
coefficients.append(lsplt[1:])
i += 1
shell['exponents'] = exponents
# We need to transpose the coefficient matrix
# (we store a matrix with primitives being the column index and
# general contraction being the row index)
shell['coefficients'] = list(map(list, zip(*coefficients)))
# Make sure the number of general contractions is >0
# (This error was found in some bad files)
if int(ngen) <= 0:
raise RuntimeError("Number of general contractions is not greater than zero for element " + str(element_Z))
# Make sure the number of general contractions match the heading line
if len(shell['coefficients']) != int(ngen):
raise RuntimeError("Number of general contractions does not equal what was given for element " + str(element_Z))
element_data['electron_shells'].append(shell)
shell_am += 1
return bs_data | Reads Dalton-formatted file data and converts it to a dictionary with the
usual BSE fields
Note that the nwchem format does not store all the fields we
have, so some fields are left blank | Below is the instruction that describes the task:
### Input:
Reads Dalton-formatted file data and converts it to a dictionary with the
usual BSE fields
Note that the nwchem format does not store all the fields we
have, so some fields are left blank
### Response:
def read_dalton(basis_lines, fname):
    '''Reads Dalton-formatted file data and converts it to a dictionary with the
    usual BSE fields

    Note that the dalton format does not store all the fields we
    have, so some fields are left blank

    Parameters
    ----------
    basis_lines : list of str
        Lines of the Dalton-format basis file (newlines already stripped)
    fname : str
        Name of the file being read (unused; kept for reader-interface
        compatibility)

    Returns
    -------
    dict
        Component basis set data with 'elements' -> Z -> 'electron_shells'
        filled in
    '''

    # Lines beginning with '$' are comments in the Dalton format
    skipchars = '$'
    basis_lines = [l for l in basis_lines if l and l[0] not in skipchars]

    bs_data = create_skel('component')

    i = 0
    while i < len(basis_lines):
        line = basis_lines[i]

        # An element block begins with a line like 'a <Z>'
        if line.lower().startswith('a '):
            element_Z = line.split()[1]
            i += 1

            element_data = bs_data['elements'].setdefault(element_Z, {})
            shells = element_data.setdefault('electron_shells', [])

            # Shell am is strictly increasing (I hope) within an element block
            shell_am = 0

            # Read shells until the next element block (or end of input)
            while i < len(basis_lines) and not basis_lines[i].lower().startswith('a '):
                # Shell heading line: number of primitives, number of
                # general contractions
                nprim, ngen = basis_lines[i].split()
                i += 1

                # s and p shells are plain 'gto'; higher am are spherical
                func_type = 'gto' if shell_am <= 1 else 'gto_spherical'

                shell = {
                    'function_type': func_type,
                    'region': '',
                    'angular_momentum': [shell_am]
                }

                exponents = []
                coefficients = []
                for _ in range(int(nprim)):
                    # Normalize fortran-style exponent markers (D/d) to 'E'
                    lsplt = basis_lines[i].replace('D', 'E').replace('d', 'E').split()
                    exponents.append(lsplt[0])
                    coefficients.append(lsplt[1:])
                    i += 1

                shell['exponents'] = exponents

                # We need to transpose the coefficient matrix
                # (we store a matrix with primitives being the column index and
                # general contraction being the row index)
                shell['coefficients'] = list(map(list, zip(*coefficients)))

                # Make sure the number of general contractions is >0
                # (This error was found in some bad files)
                if int(ngen) <= 0:
                    raise RuntimeError("Number of general contractions is not greater than zero for element " + str(element_Z))

                # Make sure the number of general contractions match the heading line
                if len(shell['coefficients']) != int(ngen):
                    raise RuntimeError("Number of general contractions does not equal what was given for element " + str(element_Z))

                shells.append(shell)
                shell_am += 1
        else:
            # Unrecognized line outside an element block: skip it instead of
            # spinning forever (the previous code never advanced i here)
            i += 1

    return bs_data
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.