code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
|---|---|---|
def strip_encoding_cookie(filelike):
"""Generator to pull lines from a text-mode file, skipping the encoding
cookie if it is found in the first two lines.
"""
it = iter(filelike)
try:
first = next(it)
if not cookie_comment_re.match(first):
yield first
second = next(it)
if not cookie_comment_re.match(second):
yield second
except StopIteration:
return
for line in it:
yield line | Generator to pull lines from a text-mode file, skipping the encoding
cookie if it is found in the first two lines. | Below is the instruction that describes the task:
### Input:
Generator to pull lines from a text-mode file, skipping the encoding
cookie if it is found in the first two lines.
### Response:
def strip_encoding_cookie(filelike):
"""Generator to pull lines from a text-mode file, skipping the encoding
cookie if it is found in the first two lines.
"""
it = iter(filelike)
try:
first = next(it)
if not cookie_comment_re.match(first):
yield first
second = next(it)
if not cookie_comment_re.match(second):
yield second
except StopIteration:
return
for line in it:
yield line |
def to_dict(self):
"""Return a dict representation of KnwKBRVAL."""
# FIXME remove 'id' dependency from invenio modules
return {'id': self.m_key + "_" + str(self.id_knwKB),
'key': self.m_key,
'value': self.m_value,
'kbid': self.kb.id if self.kb else None,
'kbname': self.kb.name if self.kb else None} | Return a dict representation of KnwKBRVAL. | Below is the instruction that describes the task:
### Input:
Return a dict representation of KnwKBRVAL.
### Response:
def to_dict(self):
"""Return a dict representation of KnwKBRVAL."""
# FIXME remove 'id' dependency from invenio modules
return {'id': self.m_key + "_" + str(self.id_knwKB),
'key': self.m_key,
'value': self.m_value,
'kbid': self.kb.id if self.kb else None,
'kbname': self.kb.name if self.kb else None} |
def set_text(self, val, base64encode=False):
""" Sets the text property of this instance.
:param val: The value of the text property
:param base64encode: Whether the value should be base64encoded
:return: The instance
"""
# print("set_text: %s" % (val,))
if isinstance(val, bool):
if val:
setattr(self, "text", "true")
else:
setattr(self, "text", "false")
elif isinstance(val, int):
setattr(self, "text", "%d" % val)
elif isinstance(val, six.string_types):
setattr(self, "text", val)
elif val is None:
pass
else:
raise ValueError("Type shouldn't be '%s'" % (val,))
return self | Sets the text property of this instance.
:param val: The value of the text property
:param base64encode: Whether the value should be base64encoded
:return: The instance | Below is the instruction that describes the task:
### Input:
Sets the text property of this instance.
:param val: The value of the text property
:param base64encode: Whether the value should be base64encoded
:return: The instance
### Response:
def set_text(self, val, base64encode=False):
""" Sets the text property of this instance.
:param val: The value of the text property
:param base64encode: Whether the value should be base64encoded
:return: The instance
"""
# print("set_text: %s" % (val,))
if isinstance(val, bool):
if val:
setattr(self, "text", "true")
else:
setattr(self, "text", "false")
elif isinstance(val, int):
setattr(self, "text", "%d" % val)
elif isinstance(val, six.string_types):
setattr(self, "text", val)
elif val is None:
pass
else:
raise ValueError("Type shouldn't be '%s'" % (val,))
return self |
def get_bindings_for_keys(self, keys):
"""
Return a list of key bindings that can handle this key.
(This return also inactive bindings, so the `filter` still has to be
called, for checking it.)
:param keys: tuple of keys.
"""
def get():
result = []
for b in self.key_bindings:
if len(keys) == len(b.keys):
match = True
any_count = 0
for i, j in zip(b.keys, keys):
if i != j and i != Keys.Any:
match = False
break
if i == Keys.Any:
any_count += 1
if match:
result.append((any_count, b))
# Place bindings that have more 'Any' occurences in them at the end.
result = sorted(result, key=lambda item: -item[0])
return [item[1] for item in result]
return self._get_bindings_for_keys_cache.get(keys, get) | Return a list of key bindings that can handle this key.
(This return also inactive bindings, so the `filter` still has to be
called, for checking it.)
:param keys: tuple of keys. | Below is the instruction that describes the task:
### Input:
Return a list of key bindings that can handle this key.
(This return also inactive bindings, so the `filter` still has to be
called, for checking it.)
:param keys: tuple of keys.
### Response:
def get_bindings_for_keys(self, keys):
"""
Return a list of key bindings that can handle this key.
(This return also inactive bindings, so the `filter` still has to be
called, for checking it.)
:param keys: tuple of keys.
"""
def get():
result = []
for b in self.key_bindings:
if len(keys) == len(b.keys):
match = True
any_count = 0
for i, j in zip(b.keys, keys):
if i != j and i != Keys.Any:
match = False
break
if i == Keys.Any:
any_count += 1
if match:
result.append((any_count, b))
# Place bindings that have more 'Any' occurences in them at the end.
result = sorted(result, key=lambda item: -item[0])
return [item[1] for item in result]
return self._get_bindings_for_keys_cache.get(keys, get) |
def piece_size(model_file=None, model_proto=None, name=None):
"""Returns the piece size (vocabulary size).
Args:
model_file: The sentencepiece model file path.
model_proto: The sentencepiece model serialized proto.
Either `model_file` or `model_proto` must be set.
name: The name argument that is passed to the op function.
Returns:
A scalar representing the vocabulary size.
"""
return _gen_sentencepiece_processor_op.sentencepiece_get_piece_size(
model_file=model_file, model_proto=model_proto, name=name) | Returns the piece size (vocabulary size).
Args:
model_file: The sentencepiece model file path.
model_proto: The sentencepiece model serialized proto.
Either `model_file` or `model_proto` must be set.
name: The name argument that is passed to the op function.
Returns:
A scalar representing the vocabulary size. | Below is the instruction that describes the task:
### Input:
Returns the piece size (vocabulary size).
Args:
model_file: The sentencepiece model file path.
model_proto: The sentencepiece model serialized proto.
Either `model_file` or `model_proto` must be set.
name: The name argument that is passed to the op function.
Returns:
A scalar representing the vocabulary size.
### Response:
def piece_size(model_file=None, model_proto=None, name=None):
"""Returns the piece size (vocabulary size).
Args:
model_file: The sentencepiece model file path.
model_proto: The sentencepiece model serialized proto.
Either `model_file` or `model_proto` must be set.
name: The name argument that is passed to the op function.
Returns:
A scalar representing the vocabulary size.
"""
return _gen_sentencepiece_processor_op.sentencepiece_get_piece_size(
model_file=model_file, model_proto=model_proto, name=name) |
def delete_license_request(request):
"""Submission to remove a license acceptance request."""
uuid_ = request.matchdict['uuid']
posted_uids = [x['uid'] for x in request.json.get('licensors', [])]
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
remove_license_requests(cursor, uuid_, posted_uids)
resp = request.response
resp.status_int = 200
return resp | Submission to remove a license acceptance request. | Below is the instruction that describes the task:
### Input:
Submission to remove a license acceptance request.
### Response:
def delete_license_request(request):
"""Submission to remove a license acceptance request."""
uuid_ = request.matchdict['uuid']
posted_uids = [x['uid'] for x in request.json.get('licensors', [])]
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
remove_license_requests(cursor, uuid_, posted_uids)
resp = request.response
resp.status_int = 200
return resp |
def setDecode(self, decodeTable):
"""Store decodeTable,
and compute lengthTable, minLength, maxLength from encodings.
"""
self.decodeTable = decodeTable
#set of symbols with unknown length
todo = set(decodeTable)
#bit size under investigation
maskLength = 0
lengthTable = {}
while todo:
mask = (1<<maskLength)-1
#split the encodings that we didn't find yet using b bits
splitSymbols = defaultdict(list)
for s in todo: splitSymbols[s&mask].append(s)
#unique encodings have a length of maskLength bits
#set length, and remove from todo list
for s,subset in splitSymbols.items():
if len(subset)==1:
lengthTable[self.decodeTable[s]] = maskLength
todo.remove(s)
#now investigate with longer mask
maskLength +=1
#save result
self.lengthTable = lengthTable
self.minLength = min(lengthTable.values())
self.maxLength = max(lengthTable.values())
self.switchToPrefix() | Store decodeTable,
and compute lengthTable, minLength, maxLength from encodings. | Below is the instruction that describes the task:
### Input:
Store decodeTable,
and compute lengthTable, minLength, maxLength from encodings.
### Response:
def setDecode(self, decodeTable):
"""Store decodeTable,
and compute lengthTable, minLength, maxLength from encodings.
"""
self.decodeTable = decodeTable
#set of symbols with unknown length
todo = set(decodeTable)
#bit size under investigation
maskLength = 0
lengthTable = {}
while todo:
mask = (1<<maskLength)-1
#split the encodings that we didn't find yet using b bits
splitSymbols = defaultdict(list)
for s in todo: splitSymbols[s&mask].append(s)
#unique encodings have a length of maskLength bits
#set length, and remove from todo list
for s,subset in splitSymbols.items():
if len(subset)==1:
lengthTable[self.decodeTable[s]] = maskLength
todo.remove(s)
#now investigate with longer mask
maskLength +=1
#save result
self.lengthTable = lengthTable
self.minLength = min(lengthTable.values())
self.maxLength = max(lengthTable.values())
self.switchToPrefix() |
def backbone_scope(freeze):
"""
Args:
freeze (bool): whether to freeze all the variables under the scope
"""
def nonlin(x):
x = get_norm()(x)
return tf.nn.relu(x)
with argscope([Conv2D, MaxPooling, BatchNorm], data_format='channels_first'), \
argscope(Conv2D, use_bias=False, activation=nonlin,
kernel_initializer=tf.variance_scaling_initializer(
scale=2.0, mode='fan_out')), \
ExitStack() as stack:
if cfg.BACKBONE.NORM in ['FreezeBN', 'SyncBN']:
if freeze or cfg.BACKBONE.NORM == 'FreezeBN':
stack.enter_context(argscope(BatchNorm, training=False))
else:
stack.enter_context(argscope(
BatchNorm, sync_statistics='nccl' if cfg.TRAINER == 'replicated' else 'horovod'))
if freeze:
stack.enter_context(freeze_variables(stop_gradient=False, skip_collection=True))
else:
# the layers are not completely freezed, but we may want to only freeze the affine
if cfg.BACKBONE.FREEZE_AFFINE:
stack.enter_context(custom_getter_scope(freeze_affine_getter))
yield | Args:
freeze (bool): whether to freeze all the variables under the scope | Below is the instruction that describes the task:
### Input:
Args:
freeze (bool): whether to freeze all the variables under the scope
### Response:
def backbone_scope(freeze):
"""
Args:
freeze (bool): whether to freeze all the variables under the scope
"""
def nonlin(x):
x = get_norm()(x)
return tf.nn.relu(x)
with argscope([Conv2D, MaxPooling, BatchNorm], data_format='channels_first'), \
argscope(Conv2D, use_bias=False, activation=nonlin,
kernel_initializer=tf.variance_scaling_initializer(
scale=2.0, mode='fan_out')), \
ExitStack() as stack:
if cfg.BACKBONE.NORM in ['FreezeBN', 'SyncBN']:
if freeze or cfg.BACKBONE.NORM == 'FreezeBN':
stack.enter_context(argscope(BatchNorm, training=False))
else:
stack.enter_context(argscope(
BatchNorm, sync_statistics='nccl' if cfg.TRAINER == 'replicated' else 'horovod'))
if freeze:
stack.enter_context(freeze_variables(stop_gradient=False, skip_collection=True))
else:
# the layers are not completely freezed, but we may want to only freeze the affine
if cfg.BACKBONE.FREEZE_AFFINE:
stack.enter_context(custom_getter_scope(freeze_affine_getter))
yield |
def add_header(self, name, value):
'''Attach an email header to send with the message.
:param name: The name of the header value.
:param value: The header value.
'''
if self.headers is None:
self.headers = []
self.headers.append(dict(Name=name, Value=value)) | Attach an email header to send with the message.
:param name: The name of the header value.
:param value: The header value. | Below is the instruction that describes the task:
### Input:
Attach an email header to send with the message.
:param name: The name of the header value.
:param value: The header value.
### Response:
def add_header(self, name, value):
'''Attach an email header to send with the message.
:param name: The name of the header value.
:param value: The header value.
'''
if self.headers is None:
self.headers = []
self.headers.append(dict(Name=name, Value=value)) |
def process_data(self, block):
"""expects Block from Compressor"""
if hasattr(block, 'send_destinations') and block.send_destinations:
self.fire(events.FileProcessed(block))
self._log_in_db(block)
if self._sent_log_file:
self._log_in_sent_log(block)
self.log.info("Sent to '%s' file '%s' containing files: %s",
str(block.send_destinations),
block.processed_data_file_info.basename,
str([file_info.path for file_info in block.content_file_infos]))
else:
self.log.info("File %s wasn't sent", block.processed_data_file_info.basename)
return block | expects Block from Compressor | Below is the instruction that describes the task:
### Input:
expects Block from Compressor
### Response:
def process_data(self, block):
"""expects Block from Compressor"""
if hasattr(block, 'send_destinations') and block.send_destinations:
self.fire(events.FileProcessed(block))
self._log_in_db(block)
if self._sent_log_file:
self._log_in_sent_log(block)
self.log.info("Sent to '%s' file '%s' containing files: %s",
str(block.send_destinations),
block.processed_data_file_info.basename,
str([file_info.path for file_info in block.content_file_infos]))
else:
self.log.info("File %s wasn't sent", block.processed_data_file_info.basename)
return block |
def sync(self, max_bytes):
"""Find the next sync.
Returns True if found."""
# at least 2 bytes for the sync
max_bytes = max(max_bytes, 2)
r = self._r
r.align()
while max_bytes > 0:
try:
b = r.bytes(1)
if b == b"\xff":
if r.bits(4) == 0xf:
return True
r.align()
max_bytes -= 2
else:
max_bytes -= 1
except BitReaderError:
return False
return False | Find the next sync.
Returns True if found. | Below is the instruction that describes the task:
### Input:
Find the next sync.
Returns True if found.
### Response:
def sync(self, max_bytes):
"""Find the next sync.
Returns True if found."""
# at least 2 bytes for the sync
max_bytes = max(max_bytes, 2)
r = self._r
r.align()
while max_bytes > 0:
try:
b = r.bytes(1)
if b == b"\xff":
if r.bits(4) == 0xf:
return True
r.align()
max_bytes -= 2
else:
max_bytes -= 1
except BitReaderError:
return False
return False |
def pop(self):
"""Removes a random element from the collection and returns it
# Returns
`object`
> A random object from the collection
"""
try:
return self._collection.pop()
except KeyError:
raise KeyError("Nothing left in the {}: '{}'.".format(type(self).__name__, self)) from None | Removes a random element from the collection and returns it
# Returns
`object`
> A random object from the collection | Below is the instruction that describes the task:
### Input:
Removes a random element from the collection and returns it
# Returns
`object`
> A random object from the collection
### Response:
def pop(self):
"""Removes a random element from the collection and returns it
# Returns
`object`
> A random object from the collection
"""
try:
return self._collection.pop()
except KeyError:
raise KeyError("Nothing left in the {}: '{}'.".format(type(self).__name__, self)) from None |
def get_object(self, name, *argv, **kwargs):
"""
Get url object tuple for url
:param name: url regexp from
:type name: str
:param argv: overrided args
:param kwargs: overrided kwargs
:return: url object
:rtype: django.conf.urls.url
"""
regexp = name
options = self.opts(regexp)
options.update(kwargs)
args = options.pop('view_args', argv)
csrf_enable = self.get_backend_data(regexp).get('CSRF_ENABLE', True)
if regexp in self.settings_urls:
regexp = r'^{}'.format(self.get_django_settings(regexp)[1:])
view = self[name].as_view()
if not csrf_enable:
view = csrf_exempt(view)
return url(regexp, view, *args, **options) | Get url object tuple for url
:param name: url regexp from
:type name: str
:param argv: overrided args
:param kwargs: overrided kwargs
:return: url object
:rtype: django.conf.urls.url | Below is the instruction that describes the task:
### Input:
Get url object tuple for url
:param name: url regexp from
:type name: str
:param argv: overrided args
:param kwargs: overrided kwargs
:return: url object
:rtype: django.conf.urls.url
### Response:
def get_object(self, name, *argv, **kwargs):
"""
Get url object tuple for url
:param name: url regexp from
:type name: str
:param argv: overrided args
:param kwargs: overrided kwargs
:return: url object
:rtype: django.conf.urls.url
"""
regexp = name
options = self.opts(regexp)
options.update(kwargs)
args = options.pop('view_args', argv)
csrf_enable = self.get_backend_data(regexp).get('CSRF_ENABLE', True)
if regexp in self.settings_urls:
regexp = r'^{}'.format(self.get_django_settings(regexp)[1:])
view = self[name].as_view()
if not csrf_enable:
view = csrf_exempt(view)
return url(regexp, view, *args, **options) |
def is_pathname_valid(pathname: str) -> bool:
"""Checks if the given path name is valid.
Returns
-------
`True` if the passed pathname is a valid pathname for the current OS;
`False` otherwise.
"""
# If this pathname is either not a string or is but is empty, this pathname
# is invalid.
try:
if not isinstance(pathname, str) or not pathname:
return False
# Strip this pathname's Windows-specific drive specifier (e.g., `C:\`)
# if any. Since Windows prohibits path components from containing `:`
# characters, failing to strip this `:`-suffixed prefix would
# erroneously invalidate all valid absolute Windows pathnames.
_, pathname = os.path.splitdrive(pathname)
# Directory guaranteed to exist. If the current OS is Windows, this is
# the drive to which Windows was installed (e.g., the "%HOMEDRIVE%"
# environment variable); else, the typical root directory.
root_dirname = os.environ.get('HOMEDRIVE', 'C:') \
if sys.platform == 'win32' else os.path.sep
assert os.path.isdir(root_dirname) # ...Murphy and her ironclad Law
# Append a path separator to this directory if needed.
root_dirname = root_dirname.rstrip(os.path.sep) + os.path.sep
# Test whether each path component split from this pathname is valid or
# not, ignoring non-existent and non-readable path components.
for pathname_part in pathname.split(os.path.sep):
try:
os.lstat(root_dirname + pathname_part)
# If an OS-specific exception is raised, its error code
# indicates whether this pathname is valid or not. Unless this
# is the case, this exception implies an ignorable kernel or
# filesystem complaint (e.g., path not found or inaccessible).
#
# Only the following exceptions indicate invalid pathnames:
#
# * Instances of the Windows-specific "WindowsError" class
# defining the "winerror" attribute whose value is
# "ERROR_INVALID_NAME". Under Windows, "winerror" is more
# fine-grained and hence useful than the generic "errno"
# attribute. When a too-long pathname is passed, for example,
# "errno" is "ENOENT" (i.e., no such file or directory) rather
# than "ENAMETOOLONG" (i.e., file name too long).
# * Instances of the cross-platform "OSError" class defining the
# generic "errno" attribute whose value is either:
# * Under most POSIX-compatible OSes, "ENAMETOOLONG".
# * Under some edge-case OSes (e.g., SunOS, *BSD), "ERANGE".
except OSError as exc:
if hasattr(exc, 'winerror'):
if exc.winerror == ERROR_INVALID_NAME:
return False
elif exc.errno in {errno.ENAMETOOLONG, errno.ERANGE}:
return False
# If a "TypeError" exception was raised, it almost certainly has the
# error message "embedded NUL character" indicating an invalid pathname.
except TypeError as exc:
return False
# If no exception was raised, all path components and hence this
# pathname itself are valid. (Praise be to the curmudgeonly python.)
else:
return True | Checks if the given path name is valid.
Returns
-------
`True` if the passed pathname is a valid pathname for the current OS;
`False` otherwise. | Below is the instruction that describes the task:
### Input:
Checks if the given path name is valid.
Returns
-------
`True` if the passed pathname is a valid pathname for the current OS;
`False` otherwise.
### Response:
def is_pathname_valid(pathname: str) -> bool:
"""Checks if the given path name is valid.
Returns
-------
`True` if the passed pathname is a valid pathname for the current OS;
`False` otherwise.
"""
# If this pathname is either not a string or is but is empty, this pathname
# is invalid.
try:
if not isinstance(pathname, str) or not pathname:
return False
# Strip this pathname's Windows-specific drive specifier (e.g., `C:\`)
# if any. Since Windows prohibits path components from containing `:`
# characters, failing to strip this `:`-suffixed prefix would
# erroneously invalidate all valid absolute Windows pathnames.
_, pathname = os.path.splitdrive(pathname)
# Directory guaranteed to exist. If the current OS is Windows, this is
# the drive to which Windows was installed (e.g., the "%HOMEDRIVE%"
# environment variable); else, the typical root directory.
root_dirname = os.environ.get('HOMEDRIVE', 'C:') \
if sys.platform == 'win32' else os.path.sep
assert os.path.isdir(root_dirname) # ...Murphy and her ironclad Law
# Append a path separator to this directory if needed.
root_dirname = root_dirname.rstrip(os.path.sep) + os.path.sep
# Test whether each path component split from this pathname is valid or
# not, ignoring non-existent and non-readable path components.
for pathname_part in pathname.split(os.path.sep):
try:
os.lstat(root_dirname + pathname_part)
# If an OS-specific exception is raised, its error code
# indicates whether this pathname is valid or not. Unless this
# is the case, this exception implies an ignorable kernel or
# filesystem complaint (e.g., path not found or inaccessible).
#
# Only the following exceptions indicate invalid pathnames:
#
# * Instances of the Windows-specific "WindowsError" class
# defining the "winerror" attribute whose value is
# "ERROR_INVALID_NAME". Under Windows, "winerror" is more
# fine-grained and hence useful than the generic "errno"
# attribute. When a too-long pathname is passed, for example,
# "errno" is "ENOENT" (i.e., no such file or directory) rather
# than "ENAMETOOLONG" (i.e., file name too long).
# * Instances of the cross-platform "OSError" class defining the
# generic "errno" attribute whose value is either:
# * Under most POSIX-compatible OSes, "ENAMETOOLONG".
# * Under some edge-case OSes (e.g., SunOS, *BSD), "ERANGE".
except OSError as exc:
if hasattr(exc, 'winerror'):
if exc.winerror == ERROR_INVALID_NAME:
return False
elif exc.errno in {errno.ENAMETOOLONG, errno.ERANGE}:
return False
# If a "TypeError" exception was raised, it almost certainly has the
# error message "embedded NUL character" indicating an invalid pathname.
except TypeError as exc:
return False
# If no exception was raised, all path components and hence this
# pathname itself are valid. (Praise be to the curmudgeonly python.)
else:
return True |
def setPostScript(self, goal, script):
"""
After learning call the given script using 'goal'.
:param goal: goal name
:param script: prolog script to call
"""
self.postGoal = goal
self.postScript = script | After learning call the given script using 'goal'.
:param goal: goal name
:param script: prolog script to call | Below is the instruction that describes the task:
### Input:
After learning call the given script using 'goal'.
:param goal: goal name
:param script: prolog script to call
### Response:
def setPostScript(self, goal, script):
"""
After learning call the given script using 'goal'.
:param goal: goal name
:param script: prolog script to call
"""
self.postGoal = goal
self.postScript = script |
def delete(self, *keys):
"""Removes the specified keys. A key is ignored if it does not exist.
Returns :data:`True` if all keys are removed.
.. note::
**Time complexity**: ``O(N)`` where ``N`` is the number of keys that
will be removed. When a key to remove holds a value other than a
string, the individual complexity for this key is ``O(M)`` where
``M`` is the number of elements in the list, set, sorted set or
hash. Removing a single key that holds a string value is ``O(1)``.
:param keys: One or more keys to remove
:type keys: :class:`str`, :class:`bytes`
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'DEL'] + list(keys), len(keys)) | Removes the specified keys. A key is ignored if it does not exist.
Returns :data:`True` if all keys are removed.
.. note::
**Time complexity**: ``O(N)`` where ``N`` is the number of keys that
will be removed. When a key to remove holds a value other than a
string, the individual complexity for this key is ``O(M)`` where
``M`` is the number of elements in the list, set, sorted set or
hash. Removing a single key that holds a string value is ``O(1)``.
:param keys: One or more keys to remove
:type keys: :class:`str`, :class:`bytes`
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError` | Below is the instruction that describes the task:
### Input:
Removes the specified keys. A key is ignored if it does not exist.
Returns :data:`True` if all keys are removed.
.. note::
**Time complexity**: ``O(N)`` where ``N`` is the number of keys that
will be removed. When a key to remove holds a value other than a
string, the individual complexity for this key is ``O(M)`` where
``M`` is the number of elements in the list, set, sorted set or
hash. Removing a single key that holds a string value is ``O(1)``.
:param keys: One or more keys to remove
:type keys: :class:`str`, :class:`bytes`
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
### Response:
def delete(self, *keys):
"""Removes the specified keys. A key is ignored if it does not exist.
Returns :data:`True` if all keys are removed.
.. note::
**Time complexity**: ``O(N)`` where ``N`` is the number of keys that
will be removed. When a key to remove holds a value other than a
string, the individual complexity for this key is ``O(M)`` where
``M`` is the number of elements in the list, set, sorted set or
hash. Removing a single key that holds a string value is ``O(1)``.
:param keys: One or more keys to remove
:type keys: :class:`str`, :class:`bytes`
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'DEL'] + list(keys), len(keys)) |
def safe_mkdir(path, uid=-1, gid=-1):
""" create path if it doesn't exist """
try:
os.mkdir(path)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
else:
os.chown(path, uid, gid) | create path if it doesn't exist | Below is the instruction that describes the task:
### Input:
create path if it doesn't exist
### Response:
def safe_mkdir(path, uid=-1, gid=-1):
""" create path if it doesn't exist """
try:
os.mkdir(path)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
else:
os.chown(path, uid, gid) |
def _steps_to_slices():
"""parse timesteps and snapshots arguments and return slices"""
if not (conf.core.timesteps or conf.core.snapshots):
# default to the last snap
conf.core.timesteps = None
conf.core.snapshots = slice(-1, None, None)
return
elif conf.core.snapshots:
# snapshots take precedence over timesteps
# if both are defined
conf.core.timesteps = None
steps = conf.core.snapshots
else:
conf.core.snapshots = None
steps = conf.core.timesteps
steps = steps.split(':')
steps[0] = int(steps[0]) if steps[0] else None
if len(steps) == 1:
steps.append(steps[0] + 1)
steps[1] = int(steps[1]) if steps[1] else None
if len(steps) != 3:
steps = steps[0:2] + [1]
steps[2] = int(steps[2]) if steps[2] else None
steps = slice(*steps)
if conf.core.snapshots is not None:
conf.core.snapshots = steps
else:
conf.core.timesteps = steps | parse timesteps and snapshots arguments and return slices | Below is the instruction that describes the task:
### Input:
parse timesteps and snapshots arguments and return slices
### Response:
def _steps_to_slices():
"""parse timesteps and snapshots arguments and return slices"""
if not (conf.core.timesteps or conf.core.snapshots):
# default to the last snap
conf.core.timesteps = None
conf.core.snapshots = slice(-1, None, None)
return
elif conf.core.snapshots:
# snapshots take precedence over timesteps
# if both are defined
conf.core.timesteps = None
steps = conf.core.snapshots
else:
conf.core.snapshots = None
steps = conf.core.timesteps
steps = steps.split(':')
steps[0] = int(steps[0]) if steps[0] else None
if len(steps) == 1:
steps.append(steps[0] + 1)
steps[1] = int(steps[1]) if steps[1] else None
if len(steps) != 3:
steps = steps[0:2] + [1]
steps[2] = int(steps[2]) if steps[2] else None
steps = slice(*steps)
if conf.core.snapshots is not None:
conf.core.snapshots = steps
else:
conf.core.timesteps = steps |
def __fetch_issue_data(self, issue_id):
"""Get data associated to an issue"""
raw_issue = self.client.issue(issue_id)
issue = json.loads(raw_issue)
return issue | Get data associated to an issue | Below is the instruction that describes the task:
### Input:
Get data associated to an issue
### Response:
def __fetch_issue_data(self, issue_id):
"""Get data associated to an issue"""
raw_issue = self.client.issue(issue_id)
issue = json.loads(raw_issue)
return issue |
def list_overlay_names(self):
"""Return list of overlay names."""
overlay_names = []
for blob in self._blobservice.list_blobs(
self.uuid,
prefix=self.overlays_key_prefix
):
overlay_file = blob.name.rsplit('/', 1)[-1]
overlay_name, ext = overlay_file.split('.')
overlay_names.append(overlay_name)
return overlay_names | Return list of overlay names. | Below is the instruction that describes the task:
### Input:
Return list of overlay names.
### Response:
def list_overlay_names(self):
"""Return list of overlay names."""
overlay_names = []
for blob in self._blobservice.list_blobs(
self.uuid,
prefix=self.overlays_key_prefix
):
overlay_file = blob.name.rsplit('/', 1)[-1]
overlay_name, ext = overlay_file.split('.')
overlay_names.append(overlay_name)
return overlay_names |
def _getLayers(self):
""" gets layers for the featuer service """
params = {"f": "json"}
json_dict = self._get(self._url, params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._layers = []
if 'layers' in json_dict:
for l in json_dict["layers"]:
self._layers.append(
layer.FeatureLayer(url=self._url + "/%s" % l['id'],
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
) | gets layers for the featuer service | Below is the instruction that describes the task:
### Input:
gets layers for the featuer service
### Response:
def _getLayers(self):
""" gets layers for the featuer service """
params = {"f": "json"}
json_dict = self._get(self._url, params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._layers = []
if 'layers' in json_dict:
for l in json_dict["layers"]:
self._layers.append(
layer.FeatureLayer(url=self._url + "/%s" % l['id'],
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
) |
def fill_null_values(self):
""" Fill missing model fields in JSON with {key: null value}.
Only run for PUT requests.
"""
if not self.Model:
log.info("%s has no model defined" % self.__class__.__name__)
return
empty_values = self.Model.get_null_values()
for field, value in empty_values.items():
if field not in self._json_params:
self._json_params[field] = value | Fill missing model fields in JSON with {key: null value}.
Only run for PUT requests. | Below is the instruction that describes the task:
### Input:
Fill missing model fields in JSON with {key: null value}.
Only run for PUT requests.
### Response:
def fill_null_values(self):
""" Fill missing model fields in JSON with {key: null value}.
Only run for PUT requests.
"""
if not self.Model:
log.info("%s has no model defined" % self.__class__.__name__)
return
empty_values = self.Model.get_null_values()
for field, value in empty_values.items():
if field not in self._json_params:
self._json_params[field] = value |
def find_all(container):
"""Find all annotated function inside of a container.
Annotated functions are identified as those that:
- do not start with a _ character
- are either annotated with metadata
- or strings that point to lazily loaded modules
Args:
container (object): The container to search for annotated functions.
Returns:
dict: A dict with all of the found functions in it.
"""
if isinstance(container, dict):
names = container.keys()
else:
names = dir(container)
built_context = BasicContext()
for name in names:
# Ignore _ and __ names
if name.startswith('_'):
continue
if isinstance(container, dict):
obj = container[name]
else:
obj = getattr(container, name)
# Check if this is an annotated object that should be included. Check the type of
# annotated to avoid issues with module imports where someone did from annotate import *
# into the module causing an annotated symbol to be defined as a decorator
# If we are in a dict context then strings point to lazily loaded modules so include them too.
if isinstance(container, dict) and isinstance(obj, str):
built_context[name] = obj
elif hasattr(obj, 'metadata') and isinstance(getattr(obj, 'metadata'), AnnotatedMetadata):
built_context[name] = obj
return built_context | Find all annotated function inside of a container.
Annotated functions are identified as those that:
- do not start with a _ character
- are either annotated with metadata
- or strings that point to lazily loaded modules
Args:
container (object): The container to search for annotated functions.
Returns:
        dict: A dict with all of the found functions in it. | Below is the instruction that describes the task:
### Input:
Find all annotated function inside of a container.
Annotated functions are identified as those that:
- do not start with a _ character
- are either annotated with metadata
- or strings that point to lazily loaded modules
Args:
container (object): The container to search for annotated functions.
Returns:
dict: A dict with all of the found functions in it.
### Response:
def find_all(container):
"""Find all annotated function inside of a container.
Annotated functions are identified as those that:
- do not start with a _ character
- are either annotated with metadata
- or strings that point to lazily loaded modules
Args:
container (object): The container to search for annotated functions.
Returns:
dict: A dict with all of the found functions in it.
"""
if isinstance(container, dict):
names = container.keys()
else:
names = dir(container)
built_context = BasicContext()
for name in names:
# Ignore _ and __ names
if name.startswith('_'):
continue
if isinstance(container, dict):
obj = container[name]
else:
obj = getattr(container, name)
# Check if this is an annotated object that should be included. Check the type of
# annotated to avoid issues with module imports where someone did from annotate import *
# into the module causing an annotated symbol to be defined as a decorator
# If we are in a dict context then strings point to lazily loaded modules so include them too.
if isinstance(container, dict) and isinstance(obj, str):
built_context[name] = obj
elif hasattr(obj, 'metadata') and isinstance(getattr(obj, 'metadata'), AnnotatedMetadata):
built_context[name] = obj
return built_context |
async def mailed_confirm(self, **params):
"""Sends mail to user after offer receiveing
Accepts:
- cid
- buyer address
- price
- offer_type
- point
- coinid
"""
if not params:
return {"error":400, "reason":"Missed required fields"}
# Check if required fields exists
cid = params.get("cid")
buyer_address = params.get("buyer_address")
price = params.get("price")
offer_type = params.get("offer_type")
coinid = params.get("coinid").upper()
try:
coinid = coinid.replace("TEST", "")
except:
pass
# Check if required fileds
if not all([cid, buyer_address, price]):
return {"error":400, "reason":"Missed required fields"}
# Get content owner address
#if coinid in settings.AVAILABLE_COIN_ID:
# client_bridge.endpoint = settings.bridges[coinid]
#else:
# return {"error":400, "reason":"Invalid coin ID"}
#owneraddr = await client_bridge.request(method_name="ownerbycid", cid=cid)
# Send appropriate mail to seller if exists
#seller = await getaccountbywallet(wallet=owneraddr)
#logging.debug(seller)
#if "error" in seller.keys():
# return seller
#if seller.get("email"):
# emaildata = {
# "to": seller["email"],
# "subject": "Robin8 support",
# "optional": "You`ve got a new offer from %s" % seller["public_key"]
#
# }
# await client_email.request(method_name="sendmail", **emaildata)
# Send news for seller
buyer = await getaccountbywallet(wallet=buyer_address)
if "error" in buyer.keys():
buyer["public_key"] = None
newsdata = {
"event_type":"made offer",
"cid": cid,
"access_string":buyer["public_key"],
"buyer_pubkey":buyer["public_key"],
"buyer_address":buyer_address,
#"owneraddr":owneraddr,
"price": price,
"offer_type": offer_type,
"coinid":coinid
}
news = await self.insert_news(**newsdata)
return {"result":"ok"} | Sends mail to user after offer receiveing
Accepts:
- cid
- buyer address
- price
- offer_type
- point
        - coinid | Below is the instruction that describes the task:
### Input:
Sends mail to user after offer receiveing
Accepts:
- cid
- buyer address
- price
- offer_type
- point
- coinid
### Response:
async def mailed_confirm(self, **params):
"""Sends mail to user after offer receiveing
Accepts:
- cid
- buyer address
- price
- offer_type
- point
- coinid
"""
if not params:
return {"error":400, "reason":"Missed required fields"}
# Check if required fields exists
cid = params.get("cid")
buyer_address = params.get("buyer_address")
price = params.get("price")
offer_type = params.get("offer_type")
coinid = params.get("coinid").upper()
try:
coinid = coinid.replace("TEST", "")
except:
pass
# Check if required fileds
if not all([cid, buyer_address, price]):
return {"error":400, "reason":"Missed required fields"}
# Get content owner address
#if coinid in settings.AVAILABLE_COIN_ID:
# client_bridge.endpoint = settings.bridges[coinid]
#else:
# return {"error":400, "reason":"Invalid coin ID"}
#owneraddr = await client_bridge.request(method_name="ownerbycid", cid=cid)
# Send appropriate mail to seller if exists
#seller = await getaccountbywallet(wallet=owneraddr)
#logging.debug(seller)
#if "error" in seller.keys():
# return seller
#if seller.get("email"):
# emaildata = {
# "to": seller["email"],
# "subject": "Robin8 support",
# "optional": "You`ve got a new offer from %s" % seller["public_key"]
#
# }
# await client_email.request(method_name="sendmail", **emaildata)
# Send news for seller
buyer = await getaccountbywallet(wallet=buyer_address)
if "error" in buyer.keys():
buyer["public_key"] = None
newsdata = {
"event_type":"made offer",
"cid": cid,
"access_string":buyer["public_key"],
"buyer_pubkey":buyer["public_key"],
"buyer_address":buyer_address,
#"owneraddr":owneraddr,
"price": price,
"offer_type": offer_type,
"coinid":coinid
}
news = await self.insert_news(**newsdata)
return {"result":"ok"} |
def setdefault(self, key, defaultvalue = None):
"""
Support dict-like setdefault (create if not existed)
"""
(t, k) = self._getsubitem(key, True)
return t.__dict__.setdefault(k, defaultvalue) | Support dict-like setdefault (create if not existed) | Below is the the instruction that describes the task:
### Input:
Support dict-like setdefault (create if not existed)
### Response:
def setdefault(self, key, defaultvalue = None):
"""
Support dict-like setdefault (create if not existed)
"""
(t, k) = self._getsubitem(key, True)
return t.__dict__.setdefault(k, defaultvalue) |
def _folder_item_fieldicons(self, analysis_brain):
"""Resolves if field-specific icons must be displayed for the object
passed in.
:param analysis_brain: Brain that represents an analysis
"""
full_obj = self.get_object(analysis_brain)
uid = api.get_uid(full_obj)
for name, adapter in getAdapters((full_obj,), IFieldIcons):
alerts = adapter()
if not alerts or uid not in alerts:
continue
alerts = alerts[uid]
if uid not in self.field_icons:
self.field_icons[uid] = alerts
continue
self.field_icons[uid].extend(alerts) | Resolves if field-specific icons must be displayed for the object
passed in.
    :param analysis_brain: Brain that represents an analysis | Below is the instruction that describes the task:
### Input:
Resolves if field-specific icons must be displayed for the object
passed in.
:param analysis_brain: Brain that represents an analysis
### Response:
def _folder_item_fieldicons(self, analysis_brain):
"""Resolves if field-specific icons must be displayed for the object
passed in.
:param analysis_brain: Brain that represents an analysis
"""
full_obj = self.get_object(analysis_brain)
uid = api.get_uid(full_obj)
for name, adapter in getAdapters((full_obj,), IFieldIcons):
alerts = adapter()
if not alerts or uid not in alerts:
continue
alerts = alerts[uid]
if uid not in self.field_icons:
self.field_icons[uid] = alerts
continue
self.field_icons[uid].extend(alerts) |
def staged_rewards(self):
"""
Returns staged rewards based on current physical states.
Stages consist of reaching, grasping, lifting, and hovering.
"""
reach_mult = 0.1
grasp_mult = 0.35
lift_mult = 0.5
hover_mult = 0.7
# filter out objects that are already in the correct bins
objs_to_reach = []
geoms_to_grasp = []
target_bin_placements = []
for i in range(len(self.ob_inits)):
if self.objects_in_bins[i]:
continue
obj_str = str(self.item_names[i]) + "0"
objs_to_reach.append(self.obj_body_id[obj_str])
geoms_to_grasp.append(self.obj_geom_id[obj_str])
target_bin_placements.append(self.target_bin_placements[i])
target_bin_placements = np.array(target_bin_placements)
### reaching reward governed by distance to closest object ###
r_reach = 0.
if len(objs_to_reach):
# get reaching reward via minimum distance to a target object
target_object_pos = self.sim.data.body_xpos[objs_to_reach]
gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
dists = np.linalg.norm(
target_object_pos - gripper_site_pos.reshape(1, -1), axis=1
)
r_reach = (1 - np.tanh(10.0 * min(dists))) * reach_mult
### grasping reward for touching any objects of interest ###
touch_left_finger = False
touch_right_finger = False
for i in range(self.sim.data.ncon):
c = self.sim.data.contact[i]
if c.geom1 in geoms_to_grasp:
bin_id = geoms_to_grasp.index(c.geom1)
if c.geom2 in self.l_finger_geom_ids:
touch_left_finger = True
if c.geom2 in self.r_finger_geom_ids:
touch_right_finger = True
elif c.geom2 in geoms_to_grasp:
bin_id = geoms_to_grasp.index(c.geom2)
if c.geom1 in self.l_finger_geom_ids:
touch_left_finger = True
if c.geom1 in self.r_finger_geom_ids:
touch_right_finger = True
has_grasp = touch_left_finger and touch_right_finger
r_grasp = int(has_grasp) * grasp_mult
### lifting reward for picking up an object ###
r_lift = 0.
if len(objs_to_reach) and r_grasp > 0.:
z_target = self.bin_pos[2] + 0.25
object_z_locs = self.sim.data.body_xpos[objs_to_reach][:, 2]
z_dists = np.maximum(z_target - object_z_locs, 0.)
r_lift = grasp_mult + (1 - np.tanh(15.0 * min(z_dists))) * (
lift_mult - grasp_mult
)
### hover reward for getting object above bin ###
r_hover = 0.
if len(objs_to_reach):
# segment objects into left of the bins and above the bins
object_xy_locs = self.sim.data.body_xpos[objs_to_reach][:, :2]
y_check = (
np.abs(object_xy_locs[:, 1] - target_bin_placements[:, 1])
< self.bin_size[1] / 4.
)
x_check = (
np.abs(object_xy_locs[:, 0] - target_bin_placements[:, 0])
< self.bin_size[0] / 4.
)
objects_above_bins = np.logical_and(x_check, y_check)
objects_not_above_bins = np.logical_not(objects_above_bins)
dists = np.linalg.norm(
target_bin_placements[:, :2] - object_xy_locs, axis=1
)
# objects to the left get r_lift added to hover reward, those on the right get max(r_lift) added (to encourage dropping)
r_hover_all = np.zeros(len(objs_to_reach))
r_hover_all[objects_above_bins] = lift_mult + (
1 - np.tanh(10.0 * dists[objects_above_bins])
) * (hover_mult - lift_mult)
r_hover_all[objects_not_above_bins] = r_lift + (
1 - np.tanh(10.0 * dists[objects_not_above_bins])
) * (hover_mult - lift_mult)
r_hover = np.max(r_hover_all)
return r_reach, r_grasp, r_lift, r_hover | Returns staged rewards based on current physical states.
        Stages consist of reaching, grasping, lifting, and hovering. | Below is the instruction that describes the task:
### Input:
Returns staged rewards based on current physical states.
Stages consist of reaching, grasping, lifting, and hovering.
### Response:
def staged_rewards(self):
"""
Returns staged rewards based on current physical states.
Stages consist of reaching, grasping, lifting, and hovering.
"""
reach_mult = 0.1
grasp_mult = 0.35
lift_mult = 0.5
hover_mult = 0.7
# filter out objects that are already in the correct bins
objs_to_reach = []
geoms_to_grasp = []
target_bin_placements = []
for i in range(len(self.ob_inits)):
if self.objects_in_bins[i]:
continue
obj_str = str(self.item_names[i]) + "0"
objs_to_reach.append(self.obj_body_id[obj_str])
geoms_to_grasp.append(self.obj_geom_id[obj_str])
target_bin_placements.append(self.target_bin_placements[i])
target_bin_placements = np.array(target_bin_placements)
### reaching reward governed by distance to closest object ###
r_reach = 0.
if len(objs_to_reach):
# get reaching reward via minimum distance to a target object
target_object_pos = self.sim.data.body_xpos[objs_to_reach]
gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
dists = np.linalg.norm(
target_object_pos - gripper_site_pos.reshape(1, -1), axis=1
)
r_reach = (1 - np.tanh(10.0 * min(dists))) * reach_mult
### grasping reward for touching any objects of interest ###
touch_left_finger = False
touch_right_finger = False
for i in range(self.sim.data.ncon):
c = self.sim.data.contact[i]
if c.geom1 in geoms_to_grasp:
bin_id = geoms_to_grasp.index(c.geom1)
if c.geom2 in self.l_finger_geom_ids:
touch_left_finger = True
if c.geom2 in self.r_finger_geom_ids:
touch_right_finger = True
elif c.geom2 in geoms_to_grasp:
bin_id = geoms_to_grasp.index(c.geom2)
if c.geom1 in self.l_finger_geom_ids:
touch_left_finger = True
if c.geom1 in self.r_finger_geom_ids:
touch_right_finger = True
has_grasp = touch_left_finger and touch_right_finger
r_grasp = int(has_grasp) * grasp_mult
### lifting reward for picking up an object ###
r_lift = 0.
if len(objs_to_reach) and r_grasp > 0.:
z_target = self.bin_pos[2] + 0.25
object_z_locs = self.sim.data.body_xpos[objs_to_reach][:, 2]
z_dists = np.maximum(z_target - object_z_locs, 0.)
r_lift = grasp_mult + (1 - np.tanh(15.0 * min(z_dists))) * (
lift_mult - grasp_mult
)
### hover reward for getting object above bin ###
r_hover = 0.
if len(objs_to_reach):
# segment objects into left of the bins and above the bins
object_xy_locs = self.sim.data.body_xpos[objs_to_reach][:, :2]
y_check = (
np.abs(object_xy_locs[:, 1] - target_bin_placements[:, 1])
< self.bin_size[1] / 4.
)
x_check = (
np.abs(object_xy_locs[:, 0] - target_bin_placements[:, 0])
< self.bin_size[0] / 4.
)
objects_above_bins = np.logical_and(x_check, y_check)
objects_not_above_bins = np.logical_not(objects_above_bins)
dists = np.linalg.norm(
target_bin_placements[:, :2] - object_xy_locs, axis=1
)
# objects to the left get r_lift added to hover reward, those on the right get max(r_lift) added (to encourage dropping)
r_hover_all = np.zeros(len(objs_to_reach))
r_hover_all[objects_above_bins] = lift_mult + (
1 - np.tanh(10.0 * dists[objects_above_bins])
) * (hover_mult - lift_mult)
r_hover_all[objects_not_above_bins] = r_lift + (
1 - np.tanh(10.0 * dists[objects_not_above_bins])
) * (hover_mult - lift_mult)
r_hover = np.max(r_hover_all)
return r_reach, r_grasp, r_lift, r_hover |
def open(self):
"""Implementation of Reporter callback."""
safe_mkdir(os.path.dirname(self._html_dir))
self._report_file = open(self.report_path(), 'w') | Implementation of Reporter callback. | Below is the the instruction that describes the task:
### Input:
Implementation of Reporter callback.
### Response:
def open(self):
"""Implementation of Reporter callback."""
safe_mkdir(os.path.dirname(self._html_dir))
self._report_file = open(self.report_path(), 'w') |
def contrast(x, severity=1):
"""Change contrast of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed contrast.
"""
c = [0.4, .3, .2, .1, .05][severity - 1]
x = np.array(x) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
x_clip = np.clip((x - means) * c + means, 0, 1) * 255
return around_and_astype(x_clip) | Change contrast of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
    numpy array, image with uint8 pixels in [0,255]. Changed contrast. | Below is the instruction that describes the task:
### Input:
Change contrast of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed contrast.
### Response:
def contrast(x, severity=1):
"""Change contrast of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed contrast.
"""
c = [0.4, .3, .2, .1, .05][severity - 1]
x = np.array(x) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
x_clip = np.clip((x - means) * c + means, 0, 1) * 255
return around_and_astype(x_clip) |
def truncate(self, max_length):
"""Truncate this vector so it's length does not exceed max."""
if self.length() > max_length:
# If it's longer than the max_length, scale to the max_length.
self.scale(max_length / self.length()) | Truncate this vector so it's length does not exceed max. | Below is the the instruction that describes the task:
### Input:
Truncate this vector so it's length does not exceed max.
### Response:
def truncate(self, max_length):
"""Truncate this vector so it's length does not exceed max."""
if self.length() > max_length:
# If it's longer than the max_length, scale to the max_length.
self.scale(max_length / self.length()) |
def set_features(self):
""""Merge all psms and peptides"""
allpsms_str = readers.generate_psms_multiple_fractions_strings(
self.mergefiles, self.ns)
allpeps = preparation.merge_peptides(self.mergefiles, self.ns)
self.features = {'psm': allpsms_str, 'peptide': allpeps} | Merge all psms and peptides | Below is the the instruction that describes the task:
### Input:
Merge all psms and peptides
### Response:
def set_features(self):
""""Merge all psms and peptides"""
allpsms_str = readers.generate_psms_multiple_fractions_strings(
self.mergefiles, self.ns)
allpeps = preparation.merge_peptides(self.mergefiles, self.ns)
self.features = {'psm': allpsms_str, 'peptide': allpeps} |
def to_image_header(img):
'''
to_image_header(img) yields img.header if img is a nibabel image object.
to_image_header(hdr) yields hdr if hdr is a nibabel header object.
to_image_header(obj) raises an error for other input types.
'''
if not img.__module__.startswith('nibabel.'):
raise ValueError('to_image_header: only nibabel obejcts can be coerced to headers')
if type(img).__name__.endswith('Header'): return img
# if not a header given, must be an image given:
try: return img.header
except Exception:
raise ValueError('to_image_header: can only convert nibabel image or header objects') | to_image_header(img) yields img.header if img is a nibabel image object.
to_image_header(hdr) yields hdr if hdr is a nibabel header object.
    to_image_header(obj) raises an error for other input types. | Below is the instruction that describes the task:
### Input:
to_image_header(img) yields img.header if img is a nibabel image object.
to_image_header(hdr) yields hdr if hdr is a nibabel header object.
to_image_header(obj) raises an error for other input types.
### Response:
def to_image_header(img):
'''
to_image_header(img) yields img.header if img is a nibabel image object.
to_image_header(hdr) yields hdr if hdr is a nibabel header object.
to_image_header(obj) raises an error for other input types.
'''
if not img.__module__.startswith('nibabel.'):
raise ValueError('to_image_header: only nibabel obejcts can be coerced to headers')
if type(img).__name__.endswith('Header'): return img
# if not a header given, must be an image given:
try: return img.header
except Exception:
raise ValueError('to_image_header: can only convert nibabel image or header objects') |
def breakpoint_set(self, addr, thumb=False, arm=False):
"""Sets a breakpoint at the specified address.
If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if
``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a
normal breakpoint is set.
Args:
self (JLink): the ``JLink`` instance
addr (int): the address where the breakpoint will be set
thumb (bool): boolean indicating to set the breakpoint in THUMB mode
arm (bool): boolean indicating to set the breakpoint in ARM mode
Returns:
An integer specifying the breakpoint handle. This handle should be
retained for future breakpoint operations.
Raises:
TypeError: if the given address is not an integer.
JLinkException: if the breakpoint could not be set.
"""
flags = enums.JLinkBreakpoint.ANY
if thumb:
flags = flags | enums.JLinkBreakpoint.THUMB
elif arm:
flags = flags | enums.JLinkBreakpoint.ARM
handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)
if handle <= 0:
raise errors.JLinkException('Breakpoint could not be set.')
return handle | Sets a breakpoint at the specified address.
If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if
``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a
normal breakpoint is set.
Args:
self (JLink): the ``JLink`` instance
addr (int): the address where the breakpoint will be set
thumb (bool): boolean indicating to set the breakpoint in THUMB mode
arm (bool): boolean indicating to set the breakpoint in ARM mode
Returns:
An integer specifying the breakpoint handle. This handle should be
retained for future breakpoint operations.
Raises:
TypeError: if the given address is not an integer.
        JLinkException: if the breakpoint could not be set. | Below is the instruction that describes the task:
### Input:
Sets a breakpoint at the specified address.
If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if
``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a
normal breakpoint is set.
Args:
self (JLink): the ``JLink`` instance
addr (int): the address where the breakpoint will be set
thumb (bool): boolean indicating to set the breakpoint in THUMB mode
arm (bool): boolean indicating to set the breakpoint in ARM mode
Returns:
An integer specifying the breakpoint handle. This handle should be
retained for future breakpoint operations.
Raises:
TypeError: if the given address is not an integer.
JLinkException: if the breakpoint could not be set.
### Response:
def breakpoint_set(self, addr, thumb=False, arm=False):
"""Sets a breakpoint at the specified address.
If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if
``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a
normal breakpoint is set.
Args:
self (JLink): the ``JLink`` instance
addr (int): the address where the breakpoint will be set
thumb (bool): boolean indicating to set the breakpoint in THUMB mode
arm (bool): boolean indicating to set the breakpoint in ARM mode
Returns:
An integer specifying the breakpoint handle. This handle should be
retained for future breakpoint operations.
Raises:
TypeError: if the given address is not an integer.
JLinkException: if the breakpoint could not be set.
"""
flags = enums.JLinkBreakpoint.ANY
if thumb:
flags = flags | enums.JLinkBreakpoint.THUMB
elif arm:
flags = flags | enums.JLinkBreakpoint.ARM
handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)
if handle <= 0:
raise errors.JLinkException('Breakpoint could not be set.')
return handle |
def set_final_values(self, enabled, v_box_values, extra_config_values):
"""This method allows the appliance's user to change the configuration for the virtual
system descriptions. For each array item returned from :py:func:`get_description` ,
you must pass in one boolean value and one configuration value.
Each item in the boolean array determines whether the particular configuration item
should be enabled.
You can only disable items of the types HardDiskControllerIDE, HardDiskControllerSATA,
HardDiskControllerSCSI, HardDiskImage, CDROM, Floppy, NetworkAdapter, USBController
and SoundCard.
For the "vbox" and "extra configuration" values, if you pass in the same arrays
as returned in the aVBoxValues and aExtraConfigValues arrays from :py:func:`get_description` ,
the configuration remains unchanged. Please see the documentation for :py:func:`get_description`
for valid configuration values for the individual array item types. If the
corresponding item in the aEnabled array is @c false, the configuration value is ignored.
in enabled of type bool
in v_box_values of type str
in extra_config_values of type str
"""
if not isinstance(enabled, list):
raise TypeError("enabled can only be an instance of type list")
for a in enabled[:10]:
if not isinstance(a, bool):
raise TypeError(
"array can only contain objects of type bool")
if not isinstance(v_box_values, list):
raise TypeError("v_box_values can only be an instance of type list")
for a in v_box_values[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
if not isinstance(extra_config_values, list):
raise TypeError("extra_config_values can only be an instance of type list")
for a in extra_config_values[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
self._call("setFinalValues",
in_p=[enabled, v_box_values, extra_config_values]) | This method allows the appliance's user to change the configuration for the virtual
system descriptions. For each array item returned from :py:func:`get_description` ,
you must pass in one boolean value and one configuration value.
Each item in the boolean array determines whether the particular configuration item
should be enabled.
You can only disable items of the types HardDiskControllerIDE, HardDiskControllerSATA,
HardDiskControllerSCSI, HardDiskImage, CDROM, Floppy, NetworkAdapter, USBController
and SoundCard.
For the "vbox" and "extra configuration" values, if you pass in the same arrays
as returned in the aVBoxValues and aExtraConfigValues arrays from :py:func:`get_description` ,
the configuration remains unchanged. Please see the documentation for :py:func:`get_description`
for valid configuration values for the individual array item types. If the
corresponding item in the aEnabled array is @c false, the configuration value is ignored.
in enabled of type bool
in v_box_values of type str
        in extra_config_values of type str | Below is the instruction that describes the task:
### Input:
This method allows the appliance's user to change the configuration for the virtual
system descriptions. For each array item returned from :py:func:`get_description` ,
you must pass in one boolean value and one configuration value.
Each item in the boolean array determines whether the particular configuration item
should be enabled.
You can only disable items of the types HardDiskControllerIDE, HardDiskControllerSATA,
HardDiskControllerSCSI, HardDiskImage, CDROM, Floppy, NetworkAdapter, USBController
and SoundCard.
For the "vbox" and "extra configuration" values, if you pass in the same arrays
as returned in the aVBoxValues and aExtraConfigValues arrays from :py:func:`get_description` ,
the configuration remains unchanged. Please see the documentation for :py:func:`get_description`
for valid configuration values for the individual array item types. If the
corresponding item in the aEnabled array is @c false, the configuration value is ignored.
in enabled of type bool
in v_box_values of type str
in extra_config_values of type str
### Response:
def set_final_values(self, enabled, v_box_values, extra_config_values):
"""This method allows the appliance's user to change the configuration for the virtual
system descriptions. For each array item returned from :py:func:`get_description` ,
you must pass in one boolean value and one configuration value.
Each item in the boolean array determines whether the particular configuration item
should be enabled.
You can only disable items of the types HardDiskControllerIDE, HardDiskControllerSATA,
HardDiskControllerSCSI, HardDiskImage, CDROM, Floppy, NetworkAdapter, USBController
and SoundCard.
For the "vbox" and "extra configuration" values, if you pass in the same arrays
as returned in the aVBoxValues and aExtraConfigValues arrays from :py:func:`get_description` ,
the configuration remains unchanged. Please see the documentation for :py:func:`get_description`
for valid configuration values for the individual array item types. If the
corresponding item in the aEnabled array is @c false, the configuration value is ignored.
in enabled of type bool
in v_box_values of type str
in extra_config_values of type str
"""
if not isinstance(enabled, list):
raise TypeError("enabled can only be an instance of type list")
for a in enabled[:10]:
if not isinstance(a, bool):
raise TypeError(
"array can only contain objects of type bool")
if not isinstance(v_box_values, list):
raise TypeError("v_box_values can only be an instance of type list")
for a in v_box_values[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
if not isinstance(extra_config_values, list):
raise TypeError("extra_config_values can only be an instance of type list")
for a in extra_config_values[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
self._call("setFinalValues",
in_p=[enabled, v_box_values, extra_config_values]) |
def ids_for(self, city_name, country=None, matching='nocase'):
"""
Returns a list of tuples in the form (long, str, str) corresponding to
the int IDs and relative toponyms and 2-chars country of the cities
matching the provided city name.
The rule for identifying matchings is according to the provided
`matching` parameter value.
If `country` is provided, the search is restricted to the cities of
the specified country.
:param country: two character str representing the country where to
search for the city. Defaults to `None`, which means: search in all
countries.
:param matching: str among `exact` (literal, case-sensitive matching),
`nocase` (literal, case-insensitive matching) and `like` (matches cities
whose name contains as a substring the string fed to the function, no
matter the case). Defaults to `nocase`.
:raises ValueError if the value for `matching` is unknown
:return: list of tuples
"""
if not city_name:
return []
if matching not in self.MATCHINGS:
raise ValueError("Unknown type of matching: "
"allowed values are %s" % ", ".join(self.MATCHINGS))
if country is not None and len(country) != 2:
raise ValueError("Country must be a 2-char string")
splits = self._filter_matching_lines(city_name, country, matching)
return [(int(item[1]), item[0], item[4]) for item in splits] | Returns a list of tuples in the form (long, str, str) corresponding to
the int IDs and relative toponyms and 2-chars country of the cities
matching the provided city name.
The rule for identifying matchings is according to the provided
`matching` parameter value.
If `country` is provided, the search is restricted to the cities of
the specified country.
:param country: two character str representing the country where to
search for the city. Defaults to `None`, which means: search in all
countries.
:param matching: str among `exact` (literal, case-sensitive matching),
`nocase` (literal, case-insensitive matching) and `like` (matches cities
whose name contains as a substring the string fed to the function, no
matter the case). Defaults to `nocase`.
:raises ValueError if the value for `matching` is unknown
    :return: list of tuples | Below is the instruction that describes the task:
### Input:
Returns a list of tuples in the form (long, str, str) corresponding to
the int IDs and relative toponyms and 2-chars country of the cities
matching the provided city name.
The rule for identifying matchings is according to the provided
`matching` parameter value.
If `country` is provided, the search is restricted to the cities of
the specified country.
:param country: two character str representing the country where to
search for the city. Defaults to `None`, which means: search in all
countries.
:param matching: str among `exact` (literal, case-sensitive matching),
`nocase` (literal, case-insensitive matching) and `like` (matches cities
whose name contains as a substring the string fed to the function, no
matter the case). Defaults to `nocase`.
:raises ValueError if the value for `matching` is unknown
:return: list of tuples
### Response:
def ids_for(self, city_name, country=None, matching='nocase'):
"""
Returns a list of tuples in the form (long, str, str) corresponding to
the int IDs and relative toponyms and 2-chars country of the cities
matching the provided city name.
The rule for identifying matchings is according to the provided
`matching` parameter value.
If `country` is provided, the search is restricted to the cities of
the specified country.
:param country: two character str representing the country where to
search for the city. Defaults to `None`, which means: search in all
countries.
:param matching: str among `exact` (literal, case-sensitive matching),
`nocase` (literal, case-insensitive matching) and `like` (matches cities
whose name contains as a substring the string fed to the function, no
matter the case). Defaults to `nocase`.
:raises ValueError if the value for `matching` is unknown
:return: list of tuples
"""
if not city_name:
return []
if matching not in self.MATCHINGS:
raise ValueError("Unknown type of matching: "
"allowed values are %s" % ", ".join(self.MATCHINGS))
if country is not None and len(country) != 2:
raise ValueError("Country must be a 2-char string")
splits = self._filter_matching_lines(city_name, country, matching)
return [(int(item[1]), item[0], item[4]) for item in splits] |
def polling_loop(timeout, interval=1):
"""Returns an iterator that returns values until timeout has passed. Timeout is measured from start of iteration."""
start_time = time.time()
iteration = 0
end_time = start_time + timeout
while time.time() < end_time:
yield iteration
iteration += 1
        time.sleep(interval) | Returns an iterator that returns values until timeout has passed. Timeout is measured from start of iteration. | Below is the instruction that describes the task:
### Input:
Returns an iterator that returns values until timeout has passed. Timeout is measured from start of iteration.
### Response:
def polling_loop(timeout, interval=1):
"""Returns an iterator that returns values until timeout has passed. Timeout is measured from start of iteration."""
start_time = time.time()
iteration = 0
end_time = start_time + timeout
while time.time() < end_time:
yield iteration
iteration += 1
time.sleep(interval) |
def _format_docstring(self, text):
"""Formats the specified text for display within 90 characters. Returns a list
of *un-joined* lines.
:arg text: the text to format.
"""
#The only complication we have here is that we don't want to break words up.
words = text.split()
result = []
line = []
cumline = 0
for word in words:
if len(word) + 1 + cumline < 90:
line.append(word)
cumline += len(word) + 1
else:
result.append(' '.join(line))
cumline = len(word) + 1
line = [word]
return result | Formats the specified text for display within 90 characters. Returns a list
of *un-joined* lines.
:arg text: the text to format. | Below is the instruction that describes the task:
### Input:
Formats the specified text for display within 90 characters. Returns a list
of *un-joined* lines.
:arg text: the text to format.
### Response:
def _format_docstring(self, text):
"""Formats the specified text for display within 90 characters. Returns a list
of *un-joined* lines.
:arg text: the text to format.
"""
#The only complication we have here is that we don't want to break words up.
words = text.split()
result = []
line = []
cumline = 0
for word in words:
if len(word) + 1 + cumline < 90:
line.append(word)
cumline += len(word) + 1
else:
result.append(' '.join(line))
cumline = len(word) + 1
line = [word]
return result |
def pass_from_pipe(cls):
"""Return password from pipe if not on TTY, else False.
"""
is_pipe = not sys.stdin.isatty()
        return is_pipe and cls.strip_last_newline(sys.stdin.read()) | Return password from pipe if not on TTY, else False. | Below is the instruction that describes the task:
### Input:
Return password from pipe if not on TTY, else False.
### Response:
def pass_from_pipe(cls):
"""Return password from pipe if not on TTY, else False.
"""
is_pipe = not sys.stdin.isatty()
return is_pipe and cls.strip_last_newline(sys.stdin.read()) |
def availableBranches(self):
''' return a list of GithubComponentVersion objects for the tip of each branch
'''
return [
GithubComponentVersion(
'', b[0], b[1], self.name, cache_key=None
) for b in _getBranchHeads(self.repo).items()
        ] | return a list of GithubComponentVersion objects for the tip of each branch | Below is the instruction that describes the task:
### Input:
return a list of GithubComponentVersion objects for the tip of each branch
### Response:
def availableBranches(self):
''' return a list of GithubComponentVersion objects for the tip of each branch
'''
return [
GithubComponentVersion(
'', b[0], b[1], self.name, cache_key=None
) for b in _getBranchHeads(self.repo).items()
] |
def _canceling_task(self, backend):
"""
Used internally to decrement `backend`s current and total task counts
when `backend` could not be reached.
"""
with self.backend_mutex:
self.backends[backend] -= 1
self.task_counter[backend] -= 1 | Used internally to decrement `backend`s current and total task counts
when `backend` could not be reached. | Below is the instruction that describes the task:
### Input:
Used internally to decrement `backend`s current and total task counts
when `backend` could not be reached.
### Response:
def _canceling_task(self, backend):
"""
Used internally to decrement `backend`s current and total task counts
when `backend` could not be reached.
"""
with self.backend_mutex:
self.backends[backend] -= 1
self.task_counter[backend] -= 1 |
def consume_assignment_list(self):
self.expect('(')
self.expect('model')
"""Parses a list of expressions from the tokens"""
assignments = []
while True:
next_token = self.tokens.consume()
self.tokens.add_extra_token(next_token) # push it back
if next_token == ')':
break
assignments.append(self.expect_assignment_tuple())
self.expect(')')
        return assignments | Parses a list of expressions from the tokens | Below is the instruction that describes the task:
### Input:
Parses a list of expressions from the tokens
### Response:
def consume_assignment_list(self):
self.expect('(')
self.expect('model')
"""Parses a list of expressions from the tokens"""
assignments = []
while True:
next_token = self.tokens.consume()
self.tokens.add_extra_token(next_token) # push it back
if next_token == ')':
break
assignments.append(self.expect_assignment_tuple())
self.expect(')')
return assignments |
def snmp_server_user_username(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp")
user = ET.SubElement(snmp_server, "user")
username = ET.SubElement(user, "username")
username.text = kwargs.pop('username')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def snmp_server_user_username(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp")
user = ET.SubElement(snmp_server, "user")
username = ET.SubElement(user, "username")
username.text = kwargs.pop('username')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def clip(self, value):
"""
Clip a value to a bound.
Parameters
----------
value : scalar or ndarray
value to clip
Returns
-------
scalar or ndarray :
of the same shape as value, bit with each element clipped to fall
within the specified bounds
Example
-------
>>> bnd = Bound(1, 2)
>>> bnd.clip(1.5)
1.5
>>> bnd.clip(3)
2
>>> bnd.clip(np.array([1, 3, 1.5]))
array([ 1. , 2. , 1.5])
>>> bnd = Bound(None, None)
>>> bnd.clip(np.array([1, 3, 1.5]))
array([ 1. , 3. , 1.5])
"""
if not self.lower and not self.upper:
return value
return np.clip(value, self.lower, self.upper) | Clip a value to a bound.
Parameters
----------
value : scalar or ndarray
value to clip
Returns
-------
scalar or ndarray :
of the same shape as value, bit with each element clipped to fall
within the specified bounds
Example
-------
>>> bnd = Bound(1, 2)
>>> bnd.clip(1.5)
1.5
>>> bnd.clip(3)
2
>>> bnd.clip(np.array([1, 3, 1.5]))
array([ 1. , 2. , 1.5])
>>> bnd = Bound(None, None)
>>> bnd.clip(np.array([1, 3, 1.5]))
array([ 1. , 3. , 1.5]) | Below is the instruction that describes the task:
### Input:
Clip a value to a bound.
Parameters
----------
value : scalar or ndarray
value to clip
Returns
-------
scalar or ndarray :
of the same shape as value, bit with each element clipped to fall
within the specified bounds
Example
-------
>>> bnd = Bound(1, 2)
>>> bnd.clip(1.5)
1.5
>>> bnd.clip(3)
2
>>> bnd.clip(np.array([1, 3, 1.5]))
array([ 1. , 2. , 1.5])
>>> bnd = Bound(None, None)
>>> bnd.clip(np.array([1, 3, 1.5]))
array([ 1. , 3. , 1.5])
### Response:
def clip(self, value):
"""
Clip a value to a bound.
Parameters
----------
value : scalar or ndarray
value to clip
Returns
-------
scalar or ndarray :
of the same shape as value, bit with each element clipped to fall
within the specified bounds
Example
-------
>>> bnd = Bound(1, 2)
>>> bnd.clip(1.5)
1.5
>>> bnd.clip(3)
2
>>> bnd.clip(np.array([1, 3, 1.5]))
array([ 1. , 2. , 1.5])
>>> bnd = Bound(None, None)
>>> bnd.clip(np.array([1, 3, 1.5]))
array([ 1. , 3. , 1.5])
"""
if not self.lower and not self.upper:
return value
return np.clip(value, self.lower, self.upper) |
def info(self):
"""
Use the priors that make up the model_mapper to generate information on each parameter of the overall model.
This information is extracted from each priors *model_info* property.
"""
info = []
for prior_model_name, prior_model in self.prior_model_tuples:
info.append(prior_model.name + '\n')
info.extend([f"{prior_model_name}_{item}" for item in prior_model.info])
return '\n'.join(info) | Use the priors that make up the model_mapper to generate information on each parameter of the overall model.
This information is extracted from each priors *model_info* property. | Below is the instruction that describes the task:
### Input:
Use the priors that make up the model_mapper to generate information on each parameter of the overall model.
This information is extracted from each priors *model_info* property.
### Response:
def info(self):
"""
Use the priors that make up the model_mapper to generate information on each parameter of the overall model.
This information is extracted from each priors *model_info* property.
"""
info = []
for prior_model_name, prior_model in self.prior_model_tuples:
info.append(prior_model.name + '\n')
info.extend([f"{prior_model_name}_{item}" for item in prior_model.info])
return '\n'.join(info) |
def update_subs(new_user_id):
"""Update subs to send added/removed for collections with user_rel."""
for sub in Subscription.objects.filter(connection=this.ws.connection):
params = loads(sub.params_ejson)
pub = API.get_pub_by_name(sub.publication)
# calculate the querysets prior to update
pre = collections.OrderedDict([
(col, query) for col, query
in API.sub_unique_objects(sub, params, pub)
])
# save the subscription with the updated user_id
sub.user_id = new_user_id
sub.save()
# calculate the querysets after the update
post = collections.OrderedDict([
(col, query) for col, query
in API.sub_unique_objects(sub, params, pub)
])
# first pass, send `added` for objs unique to `post`
for col_post, query in post.items():
try:
qs_pre = pre[col_post]
query = query.exclude(
pk__in=qs_pre.order_by().values('pk'),
)
except KeyError:
# collection not included pre-auth, everything is added.
pass
for obj in query:
this.ws.send(col_post.obj_change_as_msg(obj, ADDED))
# second pass, send `removed` for objs unique to `pre`
for col_pre, query in pre.items():
try:
qs_post = post[col_pre]
query = query.exclude(
pk__in=qs_post.order_by().values('pk'),
)
except KeyError:
# collection not included post-auth, everything is removed.
pass
for obj in query:
                this.ws.send(col_pre.obj_change_as_msg(obj, REMOVED)) | Update subs to send added/removed for collections with user_rel. | Below is the instruction that describes the task:
### Input:
Update subs to send added/removed for collections with user_rel.
### Response:
def update_subs(new_user_id):
"""Update subs to send added/removed for collections with user_rel."""
for sub in Subscription.objects.filter(connection=this.ws.connection):
params = loads(sub.params_ejson)
pub = API.get_pub_by_name(sub.publication)
# calculate the querysets prior to update
pre = collections.OrderedDict([
(col, query) for col, query
in API.sub_unique_objects(sub, params, pub)
])
# save the subscription with the updated user_id
sub.user_id = new_user_id
sub.save()
# calculate the querysets after the update
post = collections.OrderedDict([
(col, query) for col, query
in API.sub_unique_objects(sub, params, pub)
])
# first pass, send `added` for objs unique to `post`
for col_post, query in post.items():
try:
qs_pre = pre[col_post]
query = query.exclude(
pk__in=qs_pre.order_by().values('pk'),
)
except KeyError:
# collection not included pre-auth, everything is added.
pass
for obj in query:
this.ws.send(col_post.obj_change_as_msg(obj, ADDED))
# second pass, send `removed` for objs unique to `pre`
for col_pre, query in pre.items():
try:
qs_post = post[col_pre]
query = query.exclude(
pk__in=qs_post.order_by().values('pk'),
)
except KeyError:
# collection not included post-auth, everything is removed.
pass
for obj in query:
this.ws.send(col_pre.obj_change_as_msg(obj, REMOVED)) |
def command2str(num):
""" Turn command number into name """
for attr in SLOT.__dict__.keys():
if not attr.startswith('_') and attr == attr.upper():
if getattr(SLOT, attr) == num:
return 'SLOT_%s' % attr
return "0x%02x" % (num) | Turn command number into name | Below is the the instruction that describes the task:
### Input:
Turn command number into name
### Response:
def command2str(num):
""" Turn command number into name """
for attr in SLOT.__dict__.keys():
if not attr.startswith('_') and attr == attr.upper():
if getattr(SLOT, attr) == num:
return 'SLOT_%s' % attr
return "0x%02x" % (num) |
def setText(self, text):
"""
Sets the text for this item.
:param text | <str>
"""
self._text = text
# update the label
btn = self.widget()
if btn:
btn.setText(text) | Sets the text for this item.
        :param text | <str> | Below is the instruction that describes the task:
### Input:
Sets the text for this item.
:param text | <str>
### Response:
def setText(self, text):
"""
Sets the text for this item.
:param text | <str>
"""
self._text = text
# update the label
btn = self.widget()
if btn:
btn.setText(text) |
def present(self, path, timeout=0):
"""returns True if there is an entity at path"""
ret, data = self.sendmess(MSG_PRESENCE, str2bytez(path),
timeout=timeout)
assert ret <= 0 and not data, (ret, data)
if ret < 0:
return False
else:
            return True | returns True if there is an entity at path | Below is the instruction that describes the task:
### Input:
returns True if there is an entity at path
### Response:
def present(self, path, timeout=0):
"""returns True if there is an entity at path"""
ret, data = self.sendmess(MSG_PRESENCE, str2bytez(path),
timeout=timeout)
assert ret <= 0 and not data, (ret, data)
if ret < 0:
return False
else:
return True |
def is_script(self, container):
"""Returns `True` if this styled text is super/subscript."""
try:
style = self._style(container)
return style.get_value('position',
container) != TextPosition.NORMAL
except StyleException:
            return False | Returns `True` if this styled text is super/subscript. | Below is the instruction that describes the task:
### Input:
Returns `True` if this styled text is super/subscript.
### Response:
def is_script(self, container):
"""Returns `True` if this styled text is super/subscript."""
try:
style = self._style(container)
return style.get_value('position',
container) != TextPosition.NORMAL
except StyleException:
return False |
def save_sentences(twg, stmts, filename, agent_limit=300):
"""Write evidence sentences for stmts with ungrounded agents to csv file.
Parameters
----------
twg: list of tuple
list of tuples of ungrounded agent_texts with counts of the
number of times they are mentioned in the list of statements.
Should be sorted in descending order by the counts.
This is of the form output by the function ungrounded texts.
stmts: list of :py:class:`indra.statements.Statement`
filename : str
Path to output file
agent_limit : Optional[int]
Number of agents to include in output file. Takes the top agents
by count.
"""
sentences = []
unmapped_texts = [t[0] for t in twg]
counter = 0
logger.info('Getting sentences for top %d unmapped agent texts.' %
agent_limit)
for text in unmapped_texts:
agent_sentences = get_sentences_for_agent(text, stmts)
sentences += map(lambda tup: (text,) + tup, agent_sentences)
counter += 1
if counter >= agent_limit:
break
# Write sentences to CSV file
write_unicode_csv(filename, sentences, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n') | Write evidence sentences for stmts with ungrounded agents to csv file.
Parameters
----------
twg: list of tuple
list of tuples of ungrounded agent_texts with counts of the
number of times they are mentioned in the list of statements.
Should be sorted in descending order by the counts.
This is of the form output by the function ungrounded texts.
stmts: list of :py:class:`indra.statements.Statement`
filename : str
Path to output file
agent_limit : Optional[int]
Number of agents to include in output file. Takes the top agents
        by count. | Below is the instruction that describes the task:
### Input:
Write evidence sentences for stmts with ungrounded agents to csv file.
Parameters
----------
twg: list of tuple
list of tuples of ungrounded agent_texts with counts of the
number of times they are mentioned in the list of statements.
Should be sorted in descending order by the counts.
This is of the form output by the function ungrounded texts.
stmts: list of :py:class:`indra.statements.Statement`
filename : str
Path to output file
agent_limit : Optional[int]
Number of agents to include in output file. Takes the top agents
by count.
### Response:
def save_sentences(twg, stmts, filename, agent_limit=300):
"""Write evidence sentences for stmts with ungrounded agents to csv file.
Parameters
----------
twg: list of tuple
list of tuples of ungrounded agent_texts with counts of the
number of times they are mentioned in the list of statements.
Should be sorted in descending order by the counts.
This is of the form output by the function ungrounded texts.
stmts: list of :py:class:`indra.statements.Statement`
filename : str
Path to output file
agent_limit : Optional[int]
Number of agents to include in output file. Takes the top agents
by count.
"""
sentences = []
unmapped_texts = [t[0] for t in twg]
counter = 0
logger.info('Getting sentences for top %d unmapped agent texts.' %
agent_limit)
for text in unmapped_texts:
agent_sentences = get_sentences_for_agent(text, stmts)
sentences += map(lambda tup: (text,) + tup, agent_sentences)
counter += 1
if counter >= agent_limit:
break
# Write sentences to CSV file
write_unicode_csv(filename, sentences, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n') |
def get_bgp_config(self, group="", neighbor=""):
"""
Parse BGP config params into a dict
:param group='':
:param neighbor='':
"""
bgp_config = {}
def build_prefix_limit(af_table, limit, prefix_percent, prefix_timeout):
prefix_limit = {}
inet = False
inet6 = False
preifx_type = "inet"
if isinstance(af_table, list):
af_table = str(af_table)
if "ipv4" in af_table.lower():
inet = True
if "ipv6" in af_table.lower():
inet6 = True
preifx_type = "inet6"
if len(af_table.split()) == 2:
safi = "unicast"
else:
safi = af_table.split()[-1]
if inet or inet6:
prefix_limit = {
preifx_type: {
safi: {
"limit": limit,
"teardown": {
"threshold": prefix_percent,
"timeout": prefix_timeout,
},
}
}
}
return prefix_limit
# Get BGP config using ciscoconfparse because some old devices dont support "| sec bgp"
cfg = self.get_config(retrieve="running")
cfg = cfg["running"].splitlines()
bgp_config_text = napalm.base.helpers.cisco_conf_parse_objects(
"router bgp", cfg
)
bgp_asn = napalm.base.helpers.regex_find_txt(
r"router bgp (\d+)", bgp_config_text, default=0
)
# Get a list of all neighbors and groups in the config
all_neighbors = set()
all_groups = set()
bgp_group_neighbors = {}
all_groups.add("_")
for line in bgp_config_text:
if " neighbor " in line:
if re.search(IP_ADDR_REGEX, line) is not None:
all_neighbors.add(re.search(IP_ADDR_REGEX, line).group())
elif re.search(IPV6_ADDR_REGEX_2, line) is not None:
all_neighbors.add(re.search(IPV6_ADDR_REGEX_2, line).group())
else:
bgp_group = re.search(r" neighbor [^\s]+", line).group()
bgp_group = bgp_group.split()[1]
all_groups.add(bgp_group)
# Get the neighrbor level config for each neighbor
for bgp_neighbor in all_neighbors:
# If neighbor_filter is passed in, only continue for that neighbor
if neighbor:
if bgp_neighbor != neighbor:
continue
afi_list = napalm.base.helpers.cisco_conf_parse_parents(
r"\s+address-family.*", bgp_neighbor, bgp_config_text
)
afi = afi_list[0]
# Skipping neighbors in VRFs for now
if "vrf" in str(afi_list):
continue
else:
neighbor_config = napalm.base.helpers.cisco_conf_parse_objects(
bgp_neighbor, bgp_config_text
)
# For group_name- use peer-group name, else VRF name, else "_" for no group
group_name = napalm.base.helpers.regex_find_txt(
" peer-group ([^']+)", neighbor_config, default="_"
)
# Start finding attributes for the neighbor config
description = napalm.base.helpers.regex_find_txt(
r" description ([^\']+)\'", neighbor_config
)
peer_as = napalm.base.helpers.regex_find_txt(
r" remote-as (\d+)", neighbor_config, default=0
)
prefix_limit = napalm.base.helpers.regex_find_txt(
r"maximum-prefix (\d+) \d+ \w+ \d+", neighbor_config, default=0
)
prefix_percent = napalm.base.helpers.regex_find_txt(
r"maximum-prefix \d+ (\d+) \w+ \d+", neighbor_config, default=0
)
prefix_timeout = napalm.base.helpers.regex_find_txt(
r"maximum-prefix \d+ \d+ \w+ (\d+)", neighbor_config, default=0
)
export_policy = napalm.base.helpers.regex_find_txt(
r"route-map ([^\s]+) out", neighbor_config
)
import_policy = napalm.base.helpers.regex_find_txt(
r"route-map ([^\s]+) in", neighbor_config
)
local_address = napalm.base.helpers.regex_find_txt(
r" update-source (\w+)", neighbor_config
)
local_as = napalm.base.helpers.regex_find_txt(
r"local-as (\d+)", neighbor_config, default=0
)
password = napalm.base.helpers.regex_find_txt(
r"password (?:[0-9] )?([^\']+\')", neighbor_config
)
nhs = bool(
napalm.base.helpers.regex_find_txt(r" next-hop-self", neighbor_config)
)
route_reflector_client = bool(
napalm.base.helpers.regex_find_txt(
r"route-reflector-client", neighbor_config
)
)
# Add the group name to bgp_group_neighbors if its not there already
if group_name not in bgp_group_neighbors.keys():
bgp_group_neighbors[group_name] = {}
# Build the neighbor dict of attributes
bgp_group_neighbors[group_name][bgp_neighbor] = {
"description": description,
"remote_as": peer_as,
"prefix_limit": build_prefix_limit(
afi, prefix_limit, prefix_percent, prefix_timeout
),
"export_policy": export_policy,
"import_policy": import_policy,
"local_address": local_address,
"local_as": local_as,
"authentication_key": password,
"nhs": nhs,
"route_reflector_client": route_reflector_client,
}
# Get the peer-group level config for each group
for group_name in bgp_group_neighbors.keys():
# If a group is passed in params, only continue on that group
if group:
if group_name != group:
continue
# Default no group
if group_name == "_":
bgp_config["_"] = {
"apply_groups": [],
"description": "",
"local_as": 0,
"type": "",
"import_policy": "",
"export_policy": "",
"local_address": "",
"multipath": False,
"multihop_ttl": 0,
"remote_as": 0,
"remove_private_as": False,
"prefix_limit": {},
"neighbors": bgp_group_neighbors.get("_", {}),
}
continue
neighbor_config = napalm.base.helpers.cisco_conf_parse_objects(
group_name, bgp_config_text
)
multipath = False
afi_list = napalm.base.helpers.cisco_conf_parse_parents(
r"\s+address-family.*", group_name, neighbor_config
)
for afi in afi_list:
afi_config = napalm.base.helpers.cisco_conf_parse_objects(
afi, bgp_config_text
)
multipath = bool(
napalm.base.helpers.regex_find_txt(r" multipath", str(afi_config))
)
if multipath:
break
description = napalm.base.helpers.regex_find_txt(
r" description ([^\']+)\'", neighbor_config
)
local_as = napalm.base.helpers.regex_find_txt(
r"local-as (\d+)", neighbor_config, default=0
)
import_policy = napalm.base.helpers.regex_find_txt(
r"route-map ([^\s]+) in", neighbor_config
)
export_policy = napalm.base.helpers.regex_find_txt(
r"route-map ([^\s]+) out", neighbor_config
)
local_address = napalm.base.helpers.regex_find_txt(
r" update-source (\w+)", neighbor_config
)
multihop_ttl = napalm.base.helpers.regex_find_txt(
r"ebgp-multihop {\d+}", neighbor_config, default=0
)
peer_as = napalm.base.helpers.regex_find_txt(
r" remote-as (\d+)", neighbor_config, default=0
)
remove_private_as = bool(
napalm.base.helpers.regex_find_txt(
r"remove-private-as", neighbor_config
)
)
prefix_limit = napalm.base.helpers.regex_find_txt(
r"maximum-prefix (\d+) \d+ \w+ \d+", neighbor_config, default=0
)
prefix_percent = napalm.base.helpers.regex_find_txt(
r"maximum-prefix \d+ (\d+) \w+ \d+", neighbor_config, default=0
)
prefix_timeout = napalm.base.helpers.regex_find_txt(
r"maximum-prefix \d+ \d+ \w+ (\d+)", neighbor_config, default=0
)
bgp_type = "external"
if local_as:
if local_as == peer_as:
bgp_type = "internal"
elif bgp_asn == peer_as:
bgp_type = "internal"
bgp_config[group_name] = {
"apply_groups": [], # on IOS will always be empty list!
"description": description,
"local_as": local_as,
"type": bgp_type,
"import_policy": import_policy,
"export_policy": export_policy,
"local_address": local_address,
"multipath": multipath,
"multihop_ttl": multihop_ttl,
"remote_as": peer_as,
"remove_private_as": remove_private_as,
"prefix_limit": build_prefix_limit(
afi, prefix_limit, prefix_percent, prefix_timeout
),
"neighbors": bgp_group_neighbors.get(group_name, {}),
}
return bgp_config | Parse BGP config params into a dict
:param group='':
    :param neighbor='': | Below is the instruction that describes the task:
### Input:
Parse BGP config params into a dict
:param group='':
:param neighbor='':
### Response:
def get_bgp_config(self, group="", neighbor=""):
"""
Parse BGP config params into a dict
:param group='':
:param neighbor='':
"""
bgp_config = {}
def build_prefix_limit(af_table, limit, prefix_percent, prefix_timeout):
prefix_limit = {}
inet = False
inet6 = False
preifx_type = "inet"
if isinstance(af_table, list):
af_table = str(af_table)
if "ipv4" in af_table.lower():
inet = True
if "ipv6" in af_table.lower():
inet6 = True
preifx_type = "inet6"
if len(af_table.split()) == 2:
safi = "unicast"
else:
safi = af_table.split()[-1]
if inet or inet6:
prefix_limit = {
preifx_type: {
safi: {
"limit": limit,
"teardown": {
"threshold": prefix_percent,
"timeout": prefix_timeout,
},
}
}
}
return prefix_limit
# Get BGP config using ciscoconfparse because some old devices dont support "| sec bgp"
cfg = self.get_config(retrieve="running")
cfg = cfg["running"].splitlines()
bgp_config_text = napalm.base.helpers.cisco_conf_parse_objects(
"router bgp", cfg
)
bgp_asn = napalm.base.helpers.regex_find_txt(
r"router bgp (\d+)", bgp_config_text, default=0
)
# Get a list of all neighbors and groups in the config
all_neighbors = set()
all_groups = set()
bgp_group_neighbors = {}
all_groups.add("_")
for line in bgp_config_text:
if " neighbor " in line:
if re.search(IP_ADDR_REGEX, line) is not None:
all_neighbors.add(re.search(IP_ADDR_REGEX, line).group())
elif re.search(IPV6_ADDR_REGEX_2, line) is not None:
all_neighbors.add(re.search(IPV6_ADDR_REGEX_2, line).group())
else:
bgp_group = re.search(r" neighbor [^\s]+", line).group()
bgp_group = bgp_group.split()[1]
all_groups.add(bgp_group)
# Get the neighrbor level config for each neighbor
for bgp_neighbor in all_neighbors:
# If neighbor_filter is passed in, only continue for that neighbor
if neighbor:
if bgp_neighbor != neighbor:
continue
afi_list = napalm.base.helpers.cisco_conf_parse_parents(
r"\s+address-family.*", bgp_neighbor, bgp_config_text
)
afi = afi_list[0]
# Skipping neighbors in VRFs for now
if "vrf" in str(afi_list):
continue
else:
neighbor_config = napalm.base.helpers.cisco_conf_parse_objects(
bgp_neighbor, bgp_config_text
)
# For group_name- use peer-group name, else VRF name, else "_" for no group
group_name = napalm.base.helpers.regex_find_txt(
" peer-group ([^']+)", neighbor_config, default="_"
)
# Start finding attributes for the neighbor config
description = napalm.base.helpers.regex_find_txt(
r" description ([^\']+)\'", neighbor_config
)
peer_as = napalm.base.helpers.regex_find_txt(
r" remote-as (\d+)", neighbor_config, default=0
)
prefix_limit = napalm.base.helpers.regex_find_txt(
r"maximum-prefix (\d+) \d+ \w+ \d+", neighbor_config, default=0
)
prefix_percent = napalm.base.helpers.regex_find_txt(
r"maximum-prefix \d+ (\d+) \w+ \d+", neighbor_config, default=0
)
prefix_timeout = napalm.base.helpers.regex_find_txt(
r"maximum-prefix \d+ \d+ \w+ (\d+)", neighbor_config, default=0
)
export_policy = napalm.base.helpers.regex_find_txt(
r"route-map ([^\s]+) out", neighbor_config
)
import_policy = napalm.base.helpers.regex_find_txt(
r"route-map ([^\s]+) in", neighbor_config
)
local_address = napalm.base.helpers.regex_find_txt(
r" update-source (\w+)", neighbor_config
)
local_as = napalm.base.helpers.regex_find_txt(
r"local-as (\d+)", neighbor_config, default=0
)
password = napalm.base.helpers.regex_find_txt(
r"password (?:[0-9] )?([^\']+\')", neighbor_config
)
nhs = bool(
napalm.base.helpers.regex_find_txt(r" next-hop-self", neighbor_config)
)
route_reflector_client = bool(
napalm.base.helpers.regex_find_txt(
r"route-reflector-client", neighbor_config
)
)
# Add the group name to bgp_group_neighbors if its not there already
if group_name not in bgp_group_neighbors.keys():
bgp_group_neighbors[group_name] = {}
# Build the neighbor dict of attributes
bgp_group_neighbors[group_name][bgp_neighbor] = {
"description": description,
"remote_as": peer_as,
"prefix_limit": build_prefix_limit(
afi, prefix_limit, prefix_percent, prefix_timeout
),
"export_policy": export_policy,
"import_policy": import_policy,
"local_address": local_address,
"local_as": local_as,
"authentication_key": password,
"nhs": nhs,
"route_reflector_client": route_reflector_client,
}
# Get the peer-group level config for each group
for group_name in bgp_group_neighbors.keys():
# If a group is passed in params, only continue on that group
if group:
if group_name != group:
continue
# Default no group
if group_name == "_":
bgp_config["_"] = {
"apply_groups": [],
"description": "",
"local_as": 0,
"type": "",
"import_policy": "",
"export_policy": "",
"local_address": "",
"multipath": False,
"multihop_ttl": 0,
"remote_as": 0,
"remove_private_as": False,
"prefix_limit": {},
"neighbors": bgp_group_neighbors.get("_", {}),
}
continue
neighbor_config = napalm.base.helpers.cisco_conf_parse_objects(
group_name, bgp_config_text
)
multipath = False
afi_list = napalm.base.helpers.cisco_conf_parse_parents(
r"\s+address-family.*", group_name, neighbor_config
)
for afi in afi_list:
afi_config = napalm.base.helpers.cisco_conf_parse_objects(
afi, bgp_config_text
)
multipath = bool(
napalm.base.helpers.regex_find_txt(r" multipath", str(afi_config))
)
if multipath:
break
description = napalm.base.helpers.regex_find_txt(
r" description ([^\']+)\'", neighbor_config
)
local_as = napalm.base.helpers.regex_find_txt(
r"local-as (\d+)", neighbor_config, default=0
)
import_policy = napalm.base.helpers.regex_find_txt(
r"route-map ([^\s]+) in", neighbor_config
)
export_policy = napalm.base.helpers.regex_find_txt(
r"route-map ([^\s]+) out", neighbor_config
)
local_address = napalm.base.helpers.regex_find_txt(
r" update-source (\w+)", neighbor_config
)
multihop_ttl = napalm.base.helpers.regex_find_txt(
r"ebgp-multihop {\d+}", neighbor_config, default=0
)
peer_as = napalm.base.helpers.regex_find_txt(
r" remote-as (\d+)", neighbor_config, default=0
)
remove_private_as = bool(
napalm.base.helpers.regex_find_txt(
r"remove-private-as", neighbor_config
)
)
prefix_limit = napalm.base.helpers.regex_find_txt(
r"maximum-prefix (\d+) \d+ \w+ \d+", neighbor_config, default=0
)
prefix_percent = napalm.base.helpers.regex_find_txt(
r"maximum-prefix \d+ (\d+) \w+ \d+", neighbor_config, default=0
)
prefix_timeout = napalm.base.helpers.regex_find_txt(
r"maximum-prefix \d+ \d+ \w+ (\d+)", neighbor_config, default=0
)
bgp_type = "external"
if local_as:
if local_as == peer_as:
bgp_type = "internal"
elif bgp_asn == peer_as:
bgp_type = "internal"
bgp_config[group_name] = {
"apply_groups": [], # on IOS will always be empty list!
"description": description,
"local_as": local_as,
"type": bgp_type,
"import_policy": import_policy,
"export_policy": export_policy,
"local_address": local_address,
"multipath": multipath,
"multihop_ttl": multihop_ttl,
"remote_as": peer_as,
"remove_private_as": remove_private_as,
"prefix_limit": build_prefix_limit(
afi, prefix_limit, prefix_percent, prefix_timeout
),
"neighbors": bgp_group_neighbors.get(group_name, {}),
}
return bgp_config |
def _opdist_low(av, bv, cv, deriv):
"""Similar to opdist, but with relative vectors"""
a = Vector3(9, deriv, av, (0, 1, 2))
b = Vector3(9, deriv, bv, (3, 4, 5))
c = Vector3(9, deriv, cv, (6, 7, 8))
n = cross(a, b)
n /= n.norm()
dist = dot(c, n)
return dist.results() | Similar to opdist, but with relative vectors | Below is the the instruction that describes the task:
### Input:
Similar to opdist, but with relative vectors
### Response:
def _opdist_low(av, bv, cv, deriv):
"""Similar to opdist, but with relative vectors"""
a = Vector3(9, deriv, av, (0, 1, 2))
b = Vector3(9, deriv, bv, (3, 4, 5))
c = Vector3(9, deriv, cv, (6, 7, 8))
n = cross(a, b)
n /= n.norm()
dist = dot(c, n)
return dist.results() |
def instruction_INC_memory(self, opcode, ea, m):
"""
Adds to the register. The carry bit is not affected, thus allowing this
instruction to be used as a loop counter in multiple-precision
computations. When operating on unsigned values, only the BEQ and BNE
branches can be expected to behave consistently. When operating on twos
complement values, all signed branches are correctly available.
source code forms: INC Q; INCA; INCB
CC bits "HNZVC": -aaa-
"""
r = self.INC(m)
return ea, r & 0xff | Adds to the register. The carry bit is not affected, thus allowing this
instruction to be used as a loop counter in multiple-precision
computations. When operating on unsigned values, only the BEQ and BNE
branches can be expected to behave consistently. When operating on twos
complement values, all signed branches are correctly available.
source code forms: INC Q; INCA; INCB
CC bits "HNZVC": -aaa- | Below is the the instruction that describes the task:
### Input:
Adds to the register. The carry bit is not affected, thus allowing this
instruction to be used as a loop counter in multiple-precision
computations. When operating on unsigned values, only the BEQ and BNE
branches can be expected to behave consistently. When operating on twos
complement values, all signed branches are correctly available.
source code forms: INC Q; INCA; INCB
CC bits "HNZVC": -aaa-
### Response:
def instruction_INC_memory(self, opcode, ea, m):
"""
Adds to the register. The carry bit is not affected, thus allowing this
instruction to be used as a loop counter in multiple-precision
computations. When operating on unsigned values, only the BEQ and BNE
branches can be expected to behave consistently. When operating on twos
complement values, all signed branches are correctly available.
source code forms: INC Q; INCA; INCB
CC bits "HNZVC": -aaa-
"""
r = self.INC(m)
return ea, r & 0xff |
def close_compute_projects(self, compute):
"""
Close projects running on a compute
"""
for project in self._projects.values():
if compute in project.computes:
yield from project.close() | Close projects running on a compute | Below is the the instruction that describes the task:
### Input:
Close projects running on a compute
### Response:
def close_compute_projects(self, compute):
"""
Close projects running on a compute
"""
for project in self._projects.values():
if compute in project.computes:
yield from project.close() |
def _apply_postprocessing(marc_xml, xml, func, uuid, url):
"""
Apply `func` to all ``<mods:mods>`` tags from `xml`. Insert UUID.
Args:
marc_xml (str): Original Aleph record.
xml (str): XML which will be postprocessed.
func (fn): Function, which will be used for postprocessing.
uuid (str): UUID, which will be inserted to `xml`.
url (str): URL of the publication (public or not).
Returns:
list: List of string with postprocessed XML.
"""
dom = dhtmlparser.parseString(xml)
return [
func(marc_xml, mods_tag, uuid, cnt, url)
for cnt, mods_tag in enumerate(dom.find("mods:mods"))
] | Apply `func` to all ``<mods:mods>`` tags from `xml`. Insert UUID.
Args:
marc_xml (str): Original Aleph record.
xml (str): XML which will be postprocessed.
func (fn): Function, which will be used for postprocessing.
uuid (str): UUID, which will be inserted to `xml`.
url (str): URL of the publication (public or not).
Returns:
list: List of string with postprocessed XML. | Below is the the instruction that describes the task:
### Input:
Apply `func` to all ``<mods:mods>`` tags from `xml`. Insert UUID.
Args:
marc_xml (str): Original Aleph record.
xml (str): XML which will be postprocessed.
func (fn): Function, which will be used for postprocessing.
uuid (str): UUID, which will be inserted to `xml`.
url (str): URL of the publication (public or not).
Returns:
list: List of string with postprocessed XML.
### Response:
def _apply_postprocessing(marc_xml, xml, func, uuid, url):
"""
Apply `func` to all ``<mods:mods>`` tags from `xml`. Insert UUID.
Args:
marc_xml (str): Original Aleph record.
xml (str): XML which will be postprocessed.
func (fn): Function, which will be used for postprocessing.
uuid (str): UUID, which will be inserted to `xml`.
url (str): URL of the publication (public or not).
Returns:
list: List of string with postprocessed XML.
"""
dom = dhtmlparser.parseString(xml)
return [
func(marc_xml, mods_tag, uuid, cnt, url)
for cnt, mods_tag in enumerate(dom.find("mods:mods"))
] |
async def service(self, limit=None, quota: Optional[Quota] = None) -> int:
"""
Service `limit` number of received messages in this stack.
:param limit: the maximum number of messages to be processed. If None,
processes all of the messages in rxMsgs.
:return: the number of messages processed.
"""
if self.listener:
await self._serviceStack(self.age, quota)
else:
logger.info("{} is stopped".format(self))
r = len(self.rxMsgs)
if r > 0:
pracLimit = limit if limit else sys.maxsize
return self.processReceived(pracLimit)
return 0 | Service `limit` number of received messages in this stack.
:param limit: the maximum number of messages to be processed. If None,
processes all of the messages in rxMsgs.
:return: the number of messages processed. | Below is the the instruction that describes the task:
### Input:
Service `limit` number of received messages in this stack.
:param limit: the maximum number of messages to be processed. If None,
processes all of the messages in rxMsgs.
:return: the number of messages processed.
### Response:
async def service(self, limit=None, quota: Optional[Quota] = None) -> int:
"""
Service `limit` number of received messages in this stack.
:param limit: the maximum number of messages to be processed. If None,
processes all of the messages in rxMsgs.
:return: the number of messages processed.
"""
if self.listener:
await self._serviceStack(self.age, quota)
else:
logger.info("{} is stopped".format(self))
r = len(self.rxMsgs)
if r > 0:
pracLimit = limit if limit else sys.maxsize
return self.processReceived(pracLimit)
return 0 |
def smallest(heap, predicate):
"""Finds the index of the smallest item in the heap that matches the given
predicate.
:param heap:
Heap on which this search is being performed.
:param predicate:
Function that accepts an item from the heap and returns true or false.
:returns:
Index of the first item for which ``predicate`` returned true.
:raises NoMatchError:
If no matching items were found.
"""
n = heap.size()
# items contains indexes of items yet to be checked.
items = deque([0])
while items:
current = items.popleft()
if current >= n:
continue
if predicate(heap.peek(current)):
return current
child1 = 2 * current + 1
child2 = child1 + 1
if child1 < n and child2 < n and heap.lt(child2, child1):
# make sure we check the smaller child first.
child1, child2 = child2, child1
if child1 < n:
items.append(child1)
if child2 < n:
items.append(child2)
raise NoMatchError() | Finds the index of the smallest item in the heap that matches the given
predicate.
:param heap:
Heap on which this search is being performed.
:param predicate:
Function that accepts an item from the heap and returns true or false.
:returns:
Index of the first item for which ``predicate`` returned true.
:raises NoMatchError:
If no matching items were found. | Below is the the instruction that describes the task:
### Input:
Finds the index of the smallest item in the heap that matches the given
predicate.
:param heap:
Heap on which this search is being performed.
:param predicate:
Function that accepts an item from the heap and returns true or false.
:returns:
Index of the first item for which ``predicate`` returned true.
:raises NoMatchError:
If no matching items were found.
### Response:
def smallest(heap, predicate):
"""Finds the index of the smallest item in the heap that matches the given
predicate.
:param heap:
Heap on which this search is being performed.
:param predicate:
Function that accepts an item from the heap and returns true or false.
:returns:
Index of the first item for which ``predicate`` returned true.
:raises NoMatchError:
If no matching items were found.
"""
n = heap.size()
# items contains indexes of items yet to be checked.
items = deque([0])
while items:
current = items.popleft()
if current >= n:
continue
if predicate(heap.peek(current)):
return current
child1 = 2 * current + 1
child2 = child1 + 1
if child1 < n and child2 < n and heap.lt(child2, child1):
# make sure we check the smaller child first.
child1, child2 = child2, child1
if child1 < n:
items.append(child1)
if child2 < n:
items.append(child2)
raise NoMatchError() |
def insert_size(self):
'''Returns insert size, defined as distance from outer edges of reads (and assumes gap length of zero)'''
try:
distances = self._distance_to_contig_ends()
except:
raise Error('Error getting insert size from Link:\n' + str(self))
return sum(distances) | Returns insert size, defined as distance from outer edges of reads (and assumes gap length of zero) | Below is the the instruction that describes the task:
### Input:
Returns insert size, defined as distance from outer edges of reads (and assumes gap length of zero)
### Response:
def insert_size(self):
'''Returns insert size, defined as distance from outer edges of reads (and assumes gap length of zero)'''
try:
distances = self._distance_to_contig_ends()
except:
raise Error('Error getting insert size from Link:\n' + str(self))
return sum(distances) |
def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):
"""Convert from onnx operator to mxnet operator.
The converter must specify conversions explicitly for incompatible name, and
apply handlers to operator attributes.
Parameters
----------
op_name : str
Operator name, such as Convolution, FullyConnected
attrs : dict
Dict of operator attributes
identity_list : list
List of operators that don't require conversion
convert_map : dict
Dict of name : callable, where name is the op's name that
require conversion to mxnet, callable are functions which
take attrs and return (new_op_name, new_attrs)
Returns
-------
(op_name, attrs)
Converted (op_name, attrs) for mxnet.
"""
identity_list = identity_list if identity_list else _identity_list
convert_map = convert_map if convert_map else _convert_map
if op_name in identity_list:
pass
elif op_name in convert_map:
op_name, attrs = convert_map[op_name](attrs)
else:
raise NotImplementedError("Operator {} not implemented.".format(op_name))
op = getattr(mx.sym, op_name, None)
if not op:
raise RuntimeError("Unable to map op_name {} to sym".format(op_name))
return op, attrs | Convert from onnx operator to mxnet operator.
The converter must specify conversions explicitly for incompatible name, and
apply handlers to operator attributes.
Parameters
----------
op_name : str
Operator name, such as Convolution, FullyConnected
attrs : dict
Dict of operator attributes
identity_list : list
List of operators that don't require conversion
convert_map : dict
Dict of name : callable, where name is the op's name that
require conversion to mxnet, callable are functions which
take attrs and return (new_op_name, new_attrs)
Returns
-------
(op_name, attrs)
Converted (op_name, attrs) for mxnet. | Below is the the instruction that describes the task:
### Input:
Convert from onnx operator to mxnet operator.
The converter must specify conversions explicitly for incompatible name, and
apply handlers to operator attributes.
Parameters
----------
op_name : str
Operator name, such as Convolution, FullyConnected
attrs : dict
Dict of operator attributes
identity_list : list
List of operators that don't require conversion
convert_map : dict
Dict of name : callable, where name is the op's name that
require conversion to mxnet, callable are functions which
take attrs and return (new_op_name, new_attrs)
Returns
-------
(op_name, attrs)
Converted (op_name, attrs) for mxnet.
### Response:
def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):
"""Convert from onnx operator to mxnet operator.
The converter must specify conversions explicitly for incompatible name, and
apply handlers to operator attributes.
Parameters
----------
op_name : str
Operator name, such as Convolution, FullyConnected
attrs : dict
Dict of operator attributes
identity_list : list
List of operators that don't require conversion
convert_map : dict
Dict of name : callable, where name is the op's name that
require conversion to mxnet, callable are functions which
take attrs and return (new_op_name, new_attrs)
Returns
-------
(op_name, attrs)
Converted (op_name, attrs) for mxnet.
"""
identity_list = identity_list if identity_list else _identity_list
convert_map = convert_map if convert_map else _convert_map
if op_name in identity_list:
pass
elif op_name in convert_map:
op_name, attrs = convert_map[op_name](attrs)
else:
raise NotImplementedError("Operator {} not implemented.".format(op_name))
op = getattr(mx.sym, op_name, None)
if not op:
raise RuntimeError("Unable to map op_name {} to sym".format(op_name))
return op, attrs |
def add_data_file(data_files, target, source):
"""Add an entry to data_files"""
for t, f in data_files:
if t == target:
break
else:
data_files.append((target, []))
f = data_files[-1][1]
if source not in f:
f.append(source) | Add an entry to data_files | Below is the the instruction that describes the task:
### Input:
Add an entry to data_files
### Response:
def add_data_file(data_files, target, source):
"""Add an entry to data_files"""
for t, f in data_files:
if t == target:
break
else:
data_files.append((target, []))
f = data_files[-1][1]
if source not in f:
f.append(source) |
def run_through_shell(command, enable_shell=False):
"""
Retrieve output of a command.
Returns a named tuple with three elements:
* ``rc`` (integer) Return code of command.
* ``out`` (string) Everything that was printed to stdout.
* ``err`` (string) Everything that was printed to stderr.
Don't use this function with programs that outputs lots of data since the
output is saved in one variable.
:param command: A string or a list of strings containing the name and
arguments of the program.
:param enable_shell: If set ot `True` users default shell will be invoked
and given ``command`` to execute. The ``command`` should obviously be a
string since shell does all the parsing.
"""
if not enable_shell and isinstance(command, str):
command = shlex.split(command)
returncode = None
stderr = None
try:
proc = subprocess.Popen(command, stderr=subprocess.PIPE,
stdout=subprocess.PIPE, shell=enable_shell)
out, stderr = proc.communicate()
out = out.decode("UTF-8")
stderr = stderr.decode("UTF-8")
returncode = proc.returncode
except OSError as e:
out = e.strerror
stderr = e.strerror
logging.getLogger("i3pystatus.core.command").exception("")
except subprocess.CalledProcessError as e:
out = e.output
logging.getLogger("i3pystatus.core.command").exception("")
return CommandResult(returncode, out, stderr) | Retrieve output of a command.
Returns a named tuple with three elements:
* ``rc`` (integer) Return code of command.
* ``out`` (string) Everything that was printed to stdout.
* ``err`` (string) Everything that was printed to stderr.
Don't use this function with programs that outputs lots of data since the
output is saved in one variable.
:param command: A string or a list of strings containing the name and
arguments of the program.
:param enable_shell: If set ot `True` users default shell will be invoked
and given ``command`` to execute. The ``command`` should obviously be a
string since shell does all the parsing. | Below is the the instruction that describes the task:
### Input:
Retrieve output of a command.
Returns a named tuple with three elements:
* ``rc`` (integer) Return code of command.
* ``out`` (string) Everything that was printed to stdout.
* ``err`` (string) Everything that was printed to stderr.
Don't use this function with programs that outputs lots of data since the
output is saved in one variable.
:param command: A string or a list of strings containing the name and
arguments of the program.
:param enable_shell: If set ot `True` users default shell will be invoked
and given ``command`` to execute. The ``command`` should obviously be a
string since shell does all the parsing.
### Response:
def run_through_shell(command, enable_shell=False):
"""
Retrieve output of a command.
Returns a named tuple with three elements:
* ``rc`` (integer) Return code of command.
* ``out`` (string) Everything that was printed to stdout.
* ``err`` (string) Everything that was printed to stderr.
Don't use this function with programs that outputs lots of data since the
output is saved in one variable.
:param command: A string or a list of strings containing the name and
arguments of the program.
:param enable_shell: If set ot `True` users default shell will be invoked
and given ``command`` to execute. The ``command`` should obviously be a
string since shell does all the parsing.
"""
if not enable_shell and isinstance(command, str):
command = shlex.split(command)
returncode = None
stderr = None
try:
proc = subprocess.Popen(command, stderr=subprocess.PIPE,
stdout=subprocess.PIPE, shell=enable_shell)
out, stderr = proc.communicate()
out = out.decode("UTF-8")
stderr = stderr.decode("UTF-8")
returncode = proc.returncode
except OSError as e:
out = e.strerror
stderr = e.strerror
logging.getLogger("i3pystatus.core.command").exception("")
except subprocess.CalledProcessError as e:
out = e.output
logging.getLogger("i3pystatus.core.command").exception("")
return CommandResult(returncode, out, stderr) |
def compose_args(self, action_name, in_argdict):
"""Compose the argument list from an argument dictionary, with
respect for default values.
Args:
action_name (str): The name of the action to be performed.
in_argdict (dict): Arguments as a dict, eg
``{'InstanceID': 0, 'Speed': 1}. The values
can be a string or something with a string representation.
Returns:
list: a list of ``(name, value)`` tuples.
Raises:
`AttributeError`: If this service does not support the action.
`ValueError`: If the argument lists do not match the action
signature.
"""
for action in self.actions:
if action.name == action_name:
# The found 'action' will be visible from outside the loop
break
else:
raise AttributeError('Unknown Action: {0}'.format(action_name))
# Check for given argument names which do not occur in the expected
# argument list
# pylint: disable=undefined-loop-variable
unexpected = set(in_argdict) - \
set(argument.name for argument in action.in_args)
if unexpected:
raise ValueError(
"Unexpected argument '{0}'. Method signature: {1}"
.format(next(iter(unexpected)), str(action))
)
# List the (name, value) tuples for each argument in the argument list
composed = []
for argument in action.in_args:
name = argument.name
if name in in_argdict:
composed.append((name, in_argdict[name]))
continue
if name in self.DEFAULT_ARGS:
composed.append((name, self.DEFAULT_ARGS[name]))
continue
if argument.vartype.default is not None:
composed.append((name, argument.vartype.default))
raise ValueError(
"Missing argument '{0}'. Method signature: {1}"
.format(argument.name, str(action))
)
return composed | Compose the argument list from an argument dictionary, with
respect for default values.
Args:
action_name (str): The name of the action to be performed.
in_argdict (dict): Arguments as a dict, eg
``{'InstanceID': 0, 'Speed': 1}. The values
can be a string or something with a string representation.
Returns:
list: a list of ``(name, value)`` tuples.
Raises:
`AttributeError`: If this service does not support the action.
`ValueError`: If the argument lists do not match the action
signature. | Below is the the instruction that describes the task:
### Input:
Compose the argument list from an argument dictionary, with
respect for default values.
Args:
action_name (str): The name of the action to be performed.
in_argdict (dict): Arguments as a dict, eg
``{'InstanceID': 0, 'Speed': 1}. The values
can be a string or something with a string representation.
Returns:
list: a list of ``(name, value)`` tuples.
Raises:
`AttributeError`: If this service does not support the action.
`ValueError`: If the argument lists do not match the action
signature.
### Response:
def compose_args(self, action_name, in_argdict):
"""Compose the argument list from an argument dictionary, with
respect for default values.
Args:
action_name (str): The name of the action to be performed.
in_argdict (dict): Arguments as a dict, eg
``{'InstanceID': 0, 'Speed': 1}. The values
can be a string or something with a string representation.
Returns:
list: a list of ``(name, value)`` tuples.
Raises:
`AttributeError`: If this service does not support the action.
`ValueError`: If the argument lists do not match the action
signature.
"""
for action in self.actions:
if action.name == action_name:
# The found 'action' will be visible from outside the loop
break
else:
raise AttributeError('Unknown Action: {0}'.format(action_name))
# Check for given argument names which do not occur in the expected
# argument list
# pylint: disable=undefined-loop-variable
unexpected = set(in_argdict) - \
set(argument.name for argument in action.in_args)
if unexpected:
raise ValueError(
"Unexpected argument '{0}'. Method signature: {1}"
.format(next(iter(unexpected)), str(action))
)
# List the (name, value) tuples for each argument in the argument list
composed = []
for argument in action.in_args:
name = argument.name
if name in in_argdict:
composed.append((name, in_argdict[name]))
continue
if name in self.DEFAULT_ARGS:
composed.append((name, self.DEFAULT_ARGS[name]))
continue
if argument.vartype.default is not None:
composed.append((name, argument.vartype.default))
raise ValueError(
"Missing argument '{0}'. Method signature: {1}"
.format(argument.name, str(action))
)
return composed |
def update_history(self) -> None:
"""
Update messaging history on disk.
:returns: None
"""
self.log.debug(f"Saving history. History is: \n{self.history}")
jsons = []
for item in self.history:
json_item = item.__dict__
# Convert sub-entries into JSON as well.
json_item["output_records"] = self._parse_output_records(item)
jsons.append(json_item)
if not path.isfile(self.history_filename):
open(self.history_filename, "a+").close()
with open(self.history_filename, "w") as f:
json.dump(jsons, f, default=lambda x: x.__dict__.copy(), sort_keys=True, indent=4)
f.write("\n") | Update messaging history on disk.
:returns: None | Below is the the instruction that describes the task:
### Input:
Update messaging history on disk.
:returns: None
### Response:
def update_history(self) -> None:
"""
Update messaging history on disk.
:returns: None
"""
self.log.debug(f"Saving history. History is: \n{self.history}")
jsons = []
for item in self.history:
json_item = item.__dict__
# Convert sub-entries into JSON as well.
json_item["output_records"] = self._parse_output_records(item)
jsons.append(json_item)
if not path.isfile(self.history_filename):
open(self.history_filename, "a+").close()
with open(self.history_filename, "w") as f:
json.dump(jsons, f, default=lambda x: x.__dict__.copy(), sort_keys=True, indent=4)
f.write("\n") |
def calc_attribute_statistic(self, attribute, statistic, time):
"""
Calculate statistics based on the values of an attribute. The following statistics are supported:
mean, max, min, std, ptp (range), median, skew (mean - median), and percentile_(percentile value).
Args:
attribute: Attribute extracted from model grid
statistic: Name of statistic being used.
time: timestep of the object being investigated
Returns:
The value of the statistic
"""
ti = np.where(self.times == time)[0][0]
ma = np.where(self.masks[ti].ravel() == 1)
if statistic in ['mean', 'max', 'min', 'std', 'ptp']:
stat_val = getattr(self.attributes[attribute][ti].ravel()[ma], statistic)()
elif statistic == 'median':
stat_val = np.median(self.attributes[attribute][ti].ravel()[ma])
elif statistic == "skew":
stat_val = np.mean(self.attributes[attribute][ti].ravel()[ma]) - \
np.median(self.attributes[attribute][ti].ravel()[ma])
elif 'percentile' in statistic:
per = int(statistic.split("_")[1])
stat_val = np.percentile(self.attributes[attribute][ti].ravel()[ma], per)
elif 'dt' in statistic:
stat_name = statistic[:-3]
if ti == 0:
stat_val = 0
else:
stat_val = self.calc_attribute_statistic(attribute, stat_name, time) \
- self.calc_attribute_statistic(attribute, stat_name, time - 1)
else:
stat_val = np.nan
return stat_val | Calculate statistics based on the values of an attribute. The following statistics are supported:
mean, max, min, std, ptp (range), median, skew (mean - median), and percentile_(percentile value).
Args:
attribute: Attribute extracted from model grid
statistic: Name of statistic being used.
time: timestep of the object being investigated
Returns:
The value of the statistic | Below is the the instruction that describes the task:
### Input:
Calculate statistics based on the values of an attribute. The following statistics are supported:
mean, max, min, std, ptp (range), median, skew (mean - median), and percentile_(percentile value).
Args:
attribute: Attribute extracted from model grid
statistic: Name of statistic being used.
time: timestep of the object being investigated
Returns:
The value of the statistic
### Response:
def calc_attribute_statistic(self, attribute, statistic, time):
"""
Calculate statistics based on the values of an attribute. The following statistics are supported:
mean, max, min, std, ptp (range), median, skew (mean - median), and percentile_(percentile value).
Args:
attribute: Attribute extracted from model grid
statistic: Name of statistic being used.
time: timestep of the object being investigated
Returns:
The value of the statistic
"""
ti = np.where(self.times == time)[0][0]
ma = np.where(self.masks[ti].ravel() == 1)
if statistic in ['mean', 'max', 'min', 'std', 'ptp']:
stat_val = getattr(self.attributes[attribute][ti].ravel()[ma], statistic)()
elif statistic == 'median':
stat_val = np.median(self.attributes[attribute][ti].ravel()[ma])
elif statistic == "skew":
stat_val = np.mean(self.attributes[attribute][ti].ravel()[ma]) - \
np.median(self.attributes[attribute][ti].ravel()[ma])
elif 'percentile' in statistic:
per = int(statistic.split("_")[1])
stat_val = np.percentile(self.attributes[attribute][ti].ravel()[ma], per)
elif 'dt' in statistic:
stat_name = statistic[:-3]
if ti == 0:
stat_val = 0
else:
stat_val = self.calc_attribute_statistic(attribute, stat_name, time) \
- self.calc_attribute_statistic(attribute, stat_name, time - 1)
else:
stat_val = np.nan
return stat_val |
def _translate_stm(self, oprnd1, oprnd2, oprnd3):
"""Return a formula representation of a STM instruction.
"""
assert oprnd1.size and oprnd3.size
assert oprnd3.size == self._address_size
op1_var = self._translate_src_oprnd(oprnd1)
op3_var = self._translate_src_oprnd(oprnd3)
for i in range(0, oprnd1.size, 8):
self._mem_curr[op3_var + i//8] = smtfunction.extract(op1_var, i, 8)
# Memory versioning.
self._mem_instance += 1
mem_old = self._mem_curr
mem_new = self.make_array(self._address_size, "MEM_{}".format(self._mem_instance))
self._mem_curr = mem_new
return [mem_new == mem_old] | Return a formula representation of a STM instruction. | Below is the the instruction that describes the task:
### Input:
Return a formula representation of a STM instruction.
### Response:
def _translate_stm(self, oprnd1, oprnd2, oprnd3):
"""Return a formula representation of a STM instruction.
"""
assert oprnd1.size and oprnd3.size
assert oprnd3.size == self._address_size
op1_var = self._translate_src_oprnd(oprnd1)
op3_var = self._translate_src_oprnd(oprnd3)
for i in range(0, oprnd1.size, 8):
self._mem_curr[op3_var + i//8] = smtfunction.extract(op1_var, i, 8)
# Memory versioning.
self._mem_instance += 1
mem_old = self._mem_curr
mem_new = self.make_array(self._address_size, "MEM_{}".format(self._mem_instance))
self._mem_curr = mem_new
return [mem_new == mem_old] |
def get_dims_from_tree_size(self):
"Calculate reasonable canvas height and width for tree given N tips"
ntips = len(self.ttree)
if self.style.orient in ("right", "left"):
# height is long tip-wise dimension
if not self.style.height:
self.style.height = max(275, min(1000, 18 * ntips))
if not self.style.width:
self.style.width = max(350, min(500, 18 * ntips))
else:
# width is long tip-wise dimension
if not self.style.height:
self.style.height = max(275, min(500, 18 * ntips))
if not self.style.width:
self.style.width = max(350, min(1000, 18 * ntips)) | Calculate reasonable canvas height and width for tree given N tips | Below is the the instruction that describes the task:
### Input:
Calculate reasonable canvas height and width for tree given N tips
### Response:
def get_dims_from_tree_size(self):
"Calculate reasonable canvas height and width for tree given N tips"
ntips = len(self.ttree)
if self.style.orient in ("right", "left"):
# height is long tip-wise dimension
if not self.style.height:
self.style.height = max(275, min(1000, 18 * ntips))
if not self.style.width:
self.style.width = max(350, min(500, 18 * ntips))
else:
# width is long tip-wise dimension
if not self.style.height:
self.style.height = max(275, min(500, 18 * ntips))
if not self.style.width:
self.style.width = max(350, min(1000, 18 * ntips)) |
def unscale_and_snap_to_nearest(x, tune_params, eps):
"""helper func that snaps a scaled variable to the nearest config"""
x_u = [i for i in x]
for i, v in enumerate(tune_params.values()):
#create an evenly spaced linear space to map [0,1]-interval
#to actual values, giving each value an equal chance
#pad = 0.5/len(v) #use when interval is [0,1]
pad = 0.5*eps #use when interval is [0, eps*len(v)]
linspace = numpy.linspace(pad, (eps*len(v))-pad, len(v))
#snap value to nearest point in space, store index
idx = numpy.abs(linspace-x[i]).argmin()
#safeguard that should not be needed
idx = min(max(idx, 0), len(v)-1)
#use index into array of actual values
x_u[i] = v[idx]
return x_u | helper func that snaps a scaled variable to the nearest config | Below is the the instruction that describes the task:
### Input:
helper func that snaps a scaled variable to the nearest config
### Response:
def unscale_and_snap_to_nearest(x, tune_params, eps):
"""helper func that snaps a scaled variable to the nearest config"""
x_u = [i for i in x]
for i, v in enumerate(tune_params.values()):
#create an evenly spaced linear space to map [0,1]-interval
#to actual values, giving each value an equal chance
#pad = 0.5/len(v) #use when interval is [0,1]
pad = 0.5*eps #use when interval is [0, eps*len(v)]
linspace = numpy.linspace(pad, (eps*len(v))-pad, len(v))
#snap value to nearest point in space, store index
idx = numpy.abs(linspace-x[i]).argmin()
#safeguard that should not be needed
idx = min(max(idx, 0), len(v)-1)
#use index into array of actual values
x_u[i] = v[idx]
return x_u |
def parse_http_response(http_response: HttpResponse) -> 'environ.Response':
"""
Returns a Cauldron response object parsed from the serialized JSON data
specified in the http_response argument. If the response doesn't contain
valid Cauldron response data, an error Cauldron response object is
returned instead.
:param http_response:
The response object from an http request that contains a JSON
serialized Cauldron response object as its body
:return:
The Cauldron response object for the given http response
"""
try:
response = environ.Response.deserialize(http_response.json())
except Exception as error:
response = environ.Response().fail(
code='INVALID_REMOTE_RESPONSE',
error=error,
message='Invalid HTTP response from remote connection'
).console(
whitespace=1
).response
response.http_response = http_response
return response | Returns a Cauldron response object parsed from the serialized JSON data
specified in the http_response argument. If the response doesn't contain
valid Cauldron response data, an error Cauldron response object is
returned instead.
:param http_response:
The response object from an http request that contains a JSON
serialized Cauldron response object as its body
:return:
The Cauldron response object for the given http response | Below is the the instruction that describes the task:
### Input:
Returns a Cauldron response object parsed from the serialized JSON data
specified in the http_response argument. If the response doesn't contain
valid Cauldron response data, an error Cauldron response object is
returned instead.
:param http_response:
The response object from an http request that contains a JSON
serialized Cauldron response object as its body
:return:
The Cauldron response object for the given http response
### Response:
def parse_http_response(http_response: HttpResponse) -> 'environ.Response':
"""
Returns a Cauldron response object parsed from the serialized JSON data
specified in the http_response argument. If the response doesn't contain
valid Cauldron response data, an error Cauldron response object is
returned instead.
:param http_response:
The response object from an http request that contains a JSON
serialized Cauldron response object as its body
:return:
The Cauldron response object for the given http response
"""
try:
response = environ.Response.deserialize(http_response.json())
except Exception as error:
response = environ.Response().fail(
code='INVALID_REMOTE_RESPONSE',
error=error,
message='Invalid HTTP response from remote connection'
).console(
whitespace=1
).response
response.http_response = http_response
return response |
def cross(self,p_i,p_j, max_depth = 2):
"""subtree-like swap crossover between programs p_i and p_j."""
# only choose crossover points for out_types available in both programs
# pdb.set_trace()
# determine possible outttypes
types_p_i = [t for t in [p.out_type for p in p_i]]
types_p_j = [t for t in [p.out_type for p in p_j]]
types = set(types_p_i).intersection(types_p_j)
# grab subtree of p_i
p_i_sub = [i for i,n in enumerate(p_i) if n.out_type in types]
x_i_end = self.random_state.choice(p_i_sub)
x_i_begin = x_i_end
arity_sum = p_i[x_i_end].arity[p_i[x_i_end].in_type]
# print("x_i_end:",x_i_end)
# i = 0
while (arity_sum > 0): #and i < 1000:
if x_i_begin == 0:
print("arity_sum:",arity_sum,"x_i_begin:",x_i_begin,"x_i_end:",x_i_end)
x_i_begin -= 1
arity_sum += p_i[x_i_begin].arity[p_i[x_i_begin].in_type]-1
# i += 1
# if i == 1000:
# print("in variation")
# pdb.set_trace()
# grab subtree of p_j with matching out_type to p_i[x_i_end]
p_j_sub = [i for i,n in enumerate(p_j) if n.out_type == p_i[x_i_end].out_type]
x_j_end = self.random_state.choice(p_j_sub)
x_j_begin = x_j_end
arity_sum = p_j[x_j_end].arity[p_j[x_j_end].in_type]
# i = 0
while (arity_sum > 0): #and i < 1000:
if x_j_begin == 0:
print("arity_sum:",arity_sum,"x_j_begin:",x_j_begin,"x_j_end:",x_j_end)
print("p_j:",p_j)
x_j_begin -= 1
arity_sum += p_j[x_j_begin].arity[p_j[x_j_begin].in_type]-1
# i += 1
# if i == 1000:
# print("in variation")
# pdb.set_trace()
#swap subtrees
tmpi = p_i[:]
tmpj = p_j[:]
tmpi[x_i_begin:x_i_end+1:],tmpj[x_j_begin:x_j_end+1:] = \
tmpj[x_j_begin:x_j_end+1:],tmpi[x_i_begin:x_i_end+1:]
if not self.is_valid_program(p_i) or not self.is_valid_program(p_j):
# pdb.set_trace()
print("parent 1:",p_i,"x_i_begin:",x_i_begin,"x_i_end:",x_i_end)
print("parent 2:",p_j,"x_j_begin:",x_j_begin,"x_j_end:",x_j_end)
print("child 1:",tmpi)
print("child 2:",tmpj)
raise ValueError('Crossover produced an invalid program.')
# size check, then assignment
if len(tmpi) <= 2**max_depth-1:
p_i[:] = tmpi
if len(tmpj) <= 2**max_depth-1:
p_j[:] = tmpj | subtree-like swap crossover between programs p_i and p_j. | Below is the the instruction that describes the task:
### Input:
subtree-like swap crossover between programs p_i and p_j.
### Response:
def cross(self,p_i,p_j, max_depth = 2):
"""subtree-like swap crossover between programs p_i and p_j."""
# only choose crossover points for out_types available in both programs
# pdb.set_trace()
# determine possible outttypes
types_p_i = [t for t in [p.out_type for p in p_i]]
types_p_j = [t for t in [p.out_type for p in p_j]]
types = set(types_p_i).intersection(types_p_j)
# grab subtree of p_i
p_i_sub = [i for i,n in enumerate(p_i) if n.out_type in types]
x_i_end = self.random_state.choice(p_i_sub)
x_i_begin = x_i_end
arity_sum = p_i[x_i_end].arity[p_i[x_i_end].in_type]
# print("x_i_end:",x_i_end)
# i = 0
while (arity_sum > 0): #and i < 1000:
if x_i_begin == 0:
print("arity_sum:",arity_sum,"x_i_begin:",x_i_begin,"x_i_end:",x_i_end)
x_i_begin -= 1
arity_sum += p_i[x_i_begin].arity[p_i[x_i_begin].in_type]-1
# i += 1
# if i == 1000:
# print("in variation")
# pdb.set_trace()
# grab subtree of p_j with matching out_type to p_i[x_i_end]
p_j_sub = [i for i,n in enumerate(p_j) if n.out_type == p_i[x_i_end].out_type]
x_j_end = self.random_state.choice(p_j_sub)
x_j_begin = x_j_end
arity_sum = p_j[x_j_end].arity[p_j[x_j_end].in_type]
# i = 0
while (arity_sum > 0): #and i < 1000:
if x_j_begin == 0:
print("arity_sum:",arity_sum,"x_j_begin:",x_j_begin,"x_j_end:",x_j_end)
print("p_j:",p_j)
x_j_begin -= 1
arity_sum += p_j[x_j_begin].arity[p_j[x_j_begin].in_type]-1
# i += 1
# if i == 1000:
# print("in variation")
# pdb.set_trace()
#swap subtrees
tmpi = p_i[:]
tmpj = p_j[:]
tmpi[x_i_begin:x_i_end+1:],tmpj[x_j_begin:x_j_end+1:] = \
tmpj[x_j_begin:x_j_end+1:],tmpi[x_i_begin:x_i_end+1:]
if not self.is_valid_program(p_i) or not self.is_valid_program(p_j):
# pdb.set_trace()
print("parent 1:",p_i,"x_i_begin:",x_i_begin,"x_i_end:",x_i_end)
print("parent 2:",p_j,"x_j_begin:",x_j_begin,"x_j_end:",x_j_end)
print("child 1:",tmpi)
print("child 2:",tmpj)
raise ValueError('Crossover produced an invalid program.')
# size check, then assignment
if len(tmpi) <= 2**max_depth-1:
p_i[:] = tmpi
if len(tmpj) <= 2**max_depth-1:
p_j[:] = tmpj |
def _calc_priority_filter(row, pops):
"""Calculate the priority filter based on external associated data.
- Pass high/medium impact variants not found in population databases
- Pass variants found in COSMIC or Clinvar provided they don't have two
additional reasons to filter (found in multiple external populations)
"""
filters = []
passes = []
passes.extend(_find_known(row))
filters.extend(_known_populations(row, pops))
if len(filters) == 0 or (len(passes) > 0 and len(filters) < 2):
passes.insert(0, "pass")
return ",".join(passes + filters) | Calculate the priority filter based on external associated data.
- Pass high/medium impact variants not found in population databases
- Pass variants found in COSMIC or Clinvar provided they don't have two
additional reasons to filter (found in multiple external populations) | Below is the the instruction that describes the task:
### Input:
Calculate the priority filter based on external associated data.
- Pass high/medium impact variants not found in population databases
- Pass variants found in COSMIC or Clinvar provided they don't have two
additional reasons to filter (found in multiple external populations)
### Response:
def _calc_priority_filter(row, pops):
"""Calculate the priority filter based on external associated data.
- Pass high/medium impact variants not found in population databases
- Pass variants found in COSMIC or Clinvar provided they don't have two
additional reasons to filter (found in multiple external populations)
"""
filters = []
passes = []
passes.extend(_find_known(row))
filters.extend(_known_populations(row, pops))
if len(filters) == 0 or (len(passes) > 0 and len(filters) < 2):
passes.insert(0, "pass")
return ",".join(passes + filters) |
def _dialate_array(self, array, iterators):
"""
'Dialates' a to_process/to_protect array to include all subject and/or
visits if the pipeline contains any joins over the corresponding
iterators.
Parameters
----------
array : np.array[M, N]
The array to potentially dialate
iterators : set[str]
The iterators that the array should be dialated for
Returns
-------
dialated : np.array[M, N]
The dialated array
"""
if not iterators:
return array
dialated = np.copy(array)
if self.study.SUBJECT_ID in iterators:
# If we join over subjects we should include all subjects for every
# visit we want to process
dialated[:, dialated.any(axis=0)] = True
if self.study.VISIT_ID in iterators:
# If we join over visits we should include all visits for every
# subject we want to process
dialated[dialated.any(axis=1), :] = True
return dialated | 'Dialates' a to_process/to_protect array to include all subject and/or
visits if the pipeline contains any joins over the corresponding
iterators.
Parameters
----------
array : np.array[M, N]
The array to potentially dialate
iterators : set[str]
The iterators that the array should be dialated for
Returns
-------
dialated : np.array[M, N]
The dialated array | Below is the the instruction that describes the task:
### Input:
'Dialates' a to_process/to_protect array to include all subject and/or
visits if the pipeline contains any joins over the corresponding
iterators.
Parameters
----------
array : np.array[M, N]
The array to potentially dialate
iterators : set[str]
The iterators that the array should be dialated for
Returns
-------
dialated : np.array[M, N]
The dialated array
### Response:
def _dialate_array(self, array, iterators):
"""
'Dialates' a to_process/to_protect array to include all subject and/or
visits if the pipeline contains any joins over the corresponding
iterators.
Parameters
----------
array : np.array[M, N]
The array to potentially dialate
iterators : set[str]
The iterators that the array should be dialated for
Returns
-------
dialated : np.array[M, N]
The dialated array
"""
if not iterators:
return array
dialated = np.copy(array)
if self.study.SUBJECT_ID in iterators:
# If we join over subjects we should include all subjects for every
# visit we want to process
dialated[:, dialated.any(axis=0)] = True
if self.study.VISIT_ID in iterators:
# If we join over visits we should include all visits for every
# subject we want to process
dialated[dialated.any(axis=1), :] = True
return dialated |
def _at(self, idx):
"""Returns a view of the array sliced at `idx` in the first dim.
This is called through ``x[idx]``.
Parameters
----------
idx : int
index for slicing the `NDArray` in the first dim.
Returns
-------
NDArray
`NDArray` sharing the memory with the current one sliced at `idx` in the first dim.
Examples
--------
>>> a = mx.nd.array([[1,2], [3, 4]])
>>> a[1].asnumpy()
array([ 3., 4.], dtype=float32)
>>> b = mx.nd.array([1, 2, 3, 4])
>>> b[0].asnumpy()
array([ 1.], dtype=float32)
"""
handle = NDArrayHandle()
if idx < 0:
length = self.shape[0]
idx += length
if idx < 0:
raise IndexError('index %d is out of bounds for axis 0 with size %d'
% (idx-length, length))
check_call(_LIB.MXNDArrayAt(
self.handle, mx_uint(idx), ctypes.byref(handle)))
return NDArray(handle=handle, writable=self.writable) | Returns a view of the array sliced at `idx` in the first dim.
This is called through ``x[idx]``.
Parameters
----------
idx : int
index for slicing the `NDArray` in the first dim.
Returns
-------
NDArray
`NDArray` sharing the memory with the current one sliced at `idx` in the first dim.
Examples
--------
>>> a = mx.nd.array([[1,2], [3, 4]])
>>> a[1].asnumpy()
array([ 3., 4.], dtype=float32)
>>> b = mx.nd.array([1, 2, 3, 4])
>>> b[0].asnumpy()
array([ 1.], dtype=float32) | Below is the the instruction that describes the task:
### Input:
Returns a view of the array sliced at `idx` in the first dim.
This is called through ``x[idx]``.
Parameters
----------
idx : int
index for slicing the `NDArray` in the first dim.
Returns
-------
NDArray
`NDArray` sharing the memory with the current one sliced at `idx` in the first dim.
Examples
--------
>>> a = mx.nd.array([[1,2], [3, 4]])
>>> a[1].asnumpy()
array([ 3., 4.], dtype=float32)
>>> b = mx.nd.array([1, 2, 3, 4])
>>> b[0].asnumpy()
array([ 1.], dtype=float32)
### Response:
def _at(self, idx):
"""Returns a view of the array sliced at `idx` in the first dim.
This is called through ``x[idx]``.
Parameters
----------
idx : int
index for slicing the `NDArray` in the first dim.
Returns
-------
NDArray
`NDArray` sharing the memory with the current one sliced at `idx` in the first dim.
Examples
--------
>>> a = mx.nd.array([[1,2], [3, 4]])
>>> a[1].asnumpy()
array([ 3., 4.], dtype=float32)
>>> b = mx.nd.array([1, 2, 3, 4])
>>> b[0].asnumpy()
array([ 1.], dtype=float32)
"""
handle = NDArrayHandle()
if idx < 0:
length = self.shape[0]
idx += length
if idx < 0:
raise IndexError('index %d is out of bounds for axis 0 with size %d'
% (idx-length, length))
check_call(_LIB.MXNDArrayAt(
self.handle, mx_uint(idx), ctypes.byref(handle)))
return NDArray(handle=handle, writable=self.writable) |
def invalidate(*tables, **kwargs):
"""Invalidate the current generation for one or more tables. The arguments
can be either strings representing database table names or models. Pass in
kwarg ``using`` to set the database."""
backend = get_backend()
db = kwargs.get('using', 'default')
if backend._patched:
for t in map(resolve_table, tables):
backend.keyhandler.invalidate_table(t, db) | Invalidate the current generation for one or more tables. The arguments
can be either strings representing database table names or models. Pass in
kwarg ``using`` to set the database. | Below is the the instruction that describes the task:
### Input:
Invalidate the current generation for one or more tables. The arguments
can be either strings representing database table names or models. Pass in
kwarg ``using`` to set the database.
### Response:
def invalidate(*tables, **kwargs):
"""Invalidate the current generation for one or more tables. The arguments
can be either strings representing database table names or models. Pass in
kwarg ``using`` to set the database."""
backend = get_backend()
db = kwargs.get('using', 'default')
if backend._patched:
for t in map(resolve_table, tables):
backend.keyhandler.invalidate_table(t, db) |
def inet_ntop(af, addr):
"""Convert an IP address from binary form into text represenation"""
if af == socket.AF_INET:
return inet_ntoa(addr)
elif af == socket.AF_INET6:
# IPv6 addresses have 128bits (16 bytes)
if len(addr) != 16:
raise Exception("Illegal syntax for IP address")
parts = []
for left in [0, 2, 4, 6, 8, 10, 12, 14]:
try:
value = struct.unpack("!H", addr[left:left+2])[0]
hexstr = hex(value)[2:]
except TypeError:
raise Exception("Illegal syntax for IP address")
parts.append(hexstr.lstrip("0").lower())
result = b":".join(parts)
while b":::" in result:
result = result.replace(b":::", b"::")
# Leaving out leading and trailing zeros is only allowed with ::
if result.endswith(b":") and not result.endswith(b"::"):
result = result + b"0"
if result.startswith(b":") and not result.startswith(b"::"):
result = b"0" + result
return result
else:
raise Exception("Address family not supported yet") | Convert an IP address from binary form into text represenation | Below is the the instruction that describes the task:
### Input:
Convert an IP address from binary form into text represenation
### Response:
def inet_ntop(af, addr):
"""Convert an IP address from binary form into text represenation"""
if af == socket.AF_INET:
return inet_ntoa(addr)
elif af == socket.AF_INET6:
# IPv6 addresses have 128bits (16 bytes)
if len(addr) != 16:
raise Exception("Illegal syntax for IP address")
parts = []
for left in [0, 2, 4, 6, 8, 10, 12, 14]:
try:
value = struct.unpack("!H", addr[left:left+2])[0]
hexstr = hex(value)[2:]
except TypeError:
raise Exception("Illegal syntax for IP address")
parts.append(hexstr.lstrip("0").lower())
result = b":".join(parts)
while b":::" in result:
result = result.replace(b":::", b"::")
# Leaving out leading and trailing zeros is only allowed with ::
if result.endswith(b":") and not result.endswith(b"::"):
result = result + b"0"
if result.startswith(b":") and not result.startswith(b"::"):
result = b"0" + result
return result
else:
raise Exception("Address family not supported yet") |
def pip_install_requirements():
"""
Install on current installed virtualenv version from a pip bundle [dist/project name-version].zip or pip ``req.txt``|``requirements.txt``
or a env.pip_requirements list.
By default it will look for a zip bundle in the dist directory first then a requirements file.
The limitations of installing requirements are that you cannot point directly to packages
in your local filesystem. In this case you would bundle instead.
"""
if not version_state('mkvirtualenv'):
print env.host,'Error: Cannot run pip_install_requirements. A virtualenv is not created for this version. Run mkvirtualenv first'
return
if env.verbosity:
print env.host, 'PIP INSTALLING REQUIREMENTS:'
#Remove any pre-existing pip-log from any previous failed installation
pip_log_dir = '/'.join(['/home',env.user,'.pip'])
if exists(pip_log_dir): run('rm -f %s/*.txt'% pip_log_dir)
#determine what req files or bundle files we need to deploy
if not env.PIP_REQUIREMENTS:
req_files = {}.fromkeys(glob('req*'))
else:
req_files = {}.fromkeys(env.PIP_REQUIREMENTS)
for key in req_files:
bundle = ''.join([key.split('.')[0],'.zip'])
if os.path.exists(os.path.join('dist',bundle)):
req_files[key] = bundle
#determine the django version
file_patterns =''
django_version = get_version()
svn_version = django_version.find('SVN')
if svn_version > -1:
django_version = django_version[svn_version+4:]
django_req = ''.join(['-e svn+http://code.djangoproject.com/svn/django/trunk@',django_version,'#egg=Django'])
else:
other_builds = ['alpha','beta','rc']
for b in other_builds:
if b in django_version:
print "ERROR: Unsupported Django version", django_version
print "Define a DJANGO_REQUIREMENT pointing to the tar.gz for",django_version
print "and re-deploy, or use the official or SVN release of Django."
sys.exit(1)
django_req = ''.join(['Django==',django_version])
#if no requirements file exists create one
if not req_files:
f = open("requirements.txt","w+")
text = render_to_string('woven/requirements.txt', {'django':django_req})
f.write(text)
f.close()
if env.verbosity:
print "Created local requirements.txt"
req_files["requirements.txt"]=''
req_files_list = req_files.keys()
req_files_list.sort()
#patterns for bundles
if req_files: file_patterns = '|'.join([file_patterns,'req*.zip'])
#create a pip cache & src directory
cache = '/'.join([deployment_root(),'.pip','cache'])
src = '/'.join([deployment_root(),'.pip','src'])
deployed = mkdirs(cache)
deployed += mkdirs(src)
#deploy bundles and any local copy of django
local_dir = os.path.join(os.getcwd(),'dist')
remote_dir = '/'.join([deployment_root(),'env',env.project_fullname,'dist'])
if os.path.exists(local_dir):
if file_patterns: deployed += deploy_files(local_dir, remote_dir, pattern=file_patterns)
#deploy any requirement files
deployed += deploy_files(os.getcwd(), remote_dir, pattern = 'req*')
#install in the env
out = State(' '.join([env.host,'pip install requirements']))
python_path = '/'.join([deployment_root(),'env',env.project_fullname,'bin','python'])
with settings(warn_only=True):
with cd(remote_dir):
for req in req_files_list:
bundle = req_files[req]
if bundle: req=bundle
if env.verbosity:
print ' * installing',req
if '.zip' in req.lower():
install = run('pip install %s -q --environment=%s --log=/home/%s/.pip/%s_pip_log.txt'%
(req, python_path, env.user, req.replace('.','_')))
else:
install = run('pip install -q --environment=%s --src=%s --download-cache=%s --requirement=%s --log=/home/%s/.pip/%s_pip_log.txt'%
(python_path,src,cache,req, env.user,req.replace('.','_')))
if install.failed:
out.failed =True
out.stderr += ' '.join([env.host, "ERROR INSTALLING",req,'\n'])
out.object = deployed
if out.failed:
print out.stderr
print "Review the pip install logs at %s/.pip and re-deploy"% deployment_root()
sys.exit(1)
return out | Install on current installed virtualenv version from a pip bundle [dist/project name-version].zip or pip ``req.txt``|``requirements.txt``
or a env.pip_requirements list.
By default it will look for a zip bundle in the dist directory first then a requirements file.
The limitations of installing requirements are that you cannot point directly to packages
in your local filesystem. In this case you would bundle instead. | Below is the the instruction that describes the task:
### Input:
Install on current installed virtualenv version from a pip bundle [dist/project name-version].zip or pip ``req.txt``|``requirements.txt``
or a env.pip_requirements list.
By default it will look for a zip bundle in the dist directory first then a requirements file.
The limitations of installing requirements are that you cannot point directly to packages
in your local filesystem. In this case you would bundle instead.
### Response:
def pip_install_requirements():
"""
Install on current installed virtualenv version from a pip bundle [dist/project name-version].zip or pip ``req.txt``|``requirements.txt``
or a env.pip_requirements list.
By default it will look for a zip bundle in the dist directory first then a requirements file.
The limitations of installing requirements are that you cannot point directly to packages
in your local filesystem. In this case you would bundle instead.
"""
if not version_state('mkvirtualenv'):
print env.host,'Error: Cannot run pip_install_requirements. A virtualenv is not created for this version. Run mkvirtualenv first'
return
if env.verbosity:
print env.host, 'PIP INSTALLING REQUIREMENTS:'
#Remove any pre-existing pip-log from any previous failed installation
pip_log_dir = '/'.join(['/home',env.user,'.pip'])
if exists(pip_log_dir): run('rm -f %s/*.txt'% pip_log_dir)
#determine what req files or bundle files we need to deploy
if not env.PIP_REQUIREMENTS:
req_files = {}.fromkeys(glob('req*'))
else:
req_files = {}.fromkeys(env.PIP_REQUIREMENTS)
for key in req_files:
bundle = ''.join([key.split('.')[0],'.zip'])
if os.path.exists(os.path.join('dist',bundle)):
req_files[key] = bundle
#determine the django version
file_patterns =''
django_version = get_version()
svn_version = django_version.find('SVN')
if svn_version > -1:
django_version = django_version[svn_version+4:]
django_req = ''.join(['-e svn+http://code.djangoproject.com/svn/django/trunk@',django_version,'#egg=Django'])
else:
other_builds = ['alpha','beta','rc']
for b in other_builds:
if b in django_version:
print "ERROR: Unsupported Django version", django_version
print "Define a DJANGO_REQUIREMENT pointing to the tar.gz for",django_version
print "and re-deploy, or use the official or SVN release of Django."
sys.exit(1)
django_req = ''.join(['Django==',django_version])
#if no requirements file exists create one
if not req_files:
f = open("requirements.txt","w+")
text = render_to_string('woven/requirements.txt', {'django':django_req})
f.write(text)
f.close()
if env.verbosity:
print "Created local requirements.txt"
req_files["requirements.txt"]=''
req_files_list = req_files.keys()
req_files_list.sort()
#patterns for bundles
if req_files: file_patterns = '|'.join([file_patterns,'req*.zip'])
#create a pip cache & src directory
cache = '/'.join([deployment_root(),'.pip','cache'])
src = '/'.join([deployment_root(),'.pip','src'])
deployed = mkdirs(cache)
deployed += mkdirs(src)
#deploy bundles and any local copy of django
local_dir = os.path.join(os.getcwd(),'dist')
remote_dir = '/'.join([deployment_root(),'env',env.project_fullname,'dist'])
if os.path.exists(local_dir):
if file_patterns: deployed += deploy_files(local_dir, remote_dir, pattern=file_patterns)
#deploy any requirement files
deployed += deploy_files(os.getcwd(), remote_dir, pattern = 'req*')
#install in the env
out = State(' '.join([env.host,'pip install requirements']))
python_path = '/'.join([deployment_root(),'env',env.project_fullname,'bin','python'])
with settings(warn_only=True):
with cd(remote_dir):
for req in req_files_list:
bundle = req_files[req]
if bundle: req=bundle
if env.verbosity:
print ' * installing',req
if '.zip' in req.lower():
install = run('pip install %s -q --environment=%s --log=/home/%s/.pip/%s_pip_log.txt'%
(req, python_path, env.user, req.replace('.','_')))
else:
install = run('pip install -q --environment=%s --src=%s --download-cache=%s --requirement=%s --log=/home/%s/.pip/%s_pip_log.txt'%
(python_path,src,cache,req, env.user,req.replace('.','_')))
if install.failed:
out.failed =True
out.stderr += ' '.join([env.host, "ERROR INSTALLING",req,'\n'])
out.object = deployed
if out.failed:
print out.stderr
print "Review the pip install logs at %s/.pip and re-deploy"% deployment_root()
sys.exit(1)
return out |
def paranoidconfig(**kwargs):
"""A function decorator to set a local setting.
Settings may be set either globally (using
settings.Settings.set()) or locally using this decorator. The
setting name should be passed as a keyword argument, and the value
to assign the setting should be passed as the value. See
settings.Settings for the different settings which can be set.
Example usage:
| @returns(Number)
| @paranoidconfig(enabled=False)
| def slow_function():
| ...
"""
def _decorator(func):
for k,v in kwargs.items():
Settings._set(k, v, function=func)
return _wrap(func)
return _decorator | A function decorator to set a local setting.
Settings may be set either globally (using
settings.Settings.set()) or locally using this decorator. The
setting name should be passed as a keyword argument, and the value
to assign the setting should be passed as the value. See
settings.Settings for the different settings which can be set.
Example usage:
| @returns(Number)
| @paranoidconfig(enabled=False)
| def slow_function():
| ... | Below is the the instruction that describes the task:
### Input:
A function decorator to set a local setting.
Settings may be set either globally (using
settings.Settings.set()) or locally using this decorator. The
setting name should be passed as a keyword argument, and the value
to assign the setting should be passed as the value. See
settings.Settings for the different settings which can be set.
Example usage:
| @returns(Number)
| @paranoidconfig(enabled=False)
| def slow_function():
| ...
### Response:
def paranoidconfig(**kwargs):
"""A function decorator to set a local setting.
Settings may be set either globally (using
settings.Settings.set()) or locally using this decorator. The
setting name should be passed as a keyword argument, and the value
to assign the setting should be passed as the value. See
settings.Settings for the different settings which can be set.
Example usage:
| @returns(Number)
| @paranoidconfig(enabled=False)
| def slow_function():
| ...
"""
def _decorator(func):
for k,v in kwargs.items():
Settings._set(k, v, function=func)
return _wrap(func)
return _decorator |
def emit(self, record):
"""Call father's emit, but salute first (just once)."""
if not self._already_saluted:
self._already_saluted = True
self._logger.info(SALUTATION)
super().emit(record) | Call father's emit, but salute first (just once). | Below is the the instruction that describes the task:
### Input:
Call father's emit, but salute first (just once).
### Response:
def emit(self, record):
"""Call father's emit, but salute first (just once)."""
if not self._already_saluted:
self._already_saluted = True
self._logger.info(SALUTATION)
super().emit(record) |
def csv_dict(d):
"""Format dict to a string with comma-separated values.
"""
if len(d) == 0:
return "{}"
return "{" + ', '.join(["'{}': {}".format(k, quotable(v))
for k, v in d.items()]) + "}" | Format dict to a string with comma-separated values. | Below is the the instruction that describes the task:
### Input:
Format dict to a string with comma-separated values.
### Response:
def csv_dict(d):
"""Format dict to a string with comma-separated values.
"""
if len(d) == 0:
return "{}"
return "{" + ', '.join(["'{}': {}".format(k, quotable(v))
for k, v in d.items()]) + "}" |
def release():
"""Release a new version"""
release_check()
build()
print("Releasing %s version %s." % (env.projname, env.version))
local("git tag %s" % env.version)
local('gpg --detach-sign --armor dist/coursera-*.tar.gz*')
local('twine upload dist/coursera-*.tar.gz*')
local("git push")
local("git push --tags") | Release a new version | Below is the the instruction that describes the task:
### Input:
Release a new version
### Response:
def release():
"""Release a new version"""
release_check()
build()
print("Releasing %s version %s." % (env.projname, env.version))
local("git tag %s" % env.version)
local('gpg --detach-sign --armor dist/coursera-*.tar.gz*')
local('twine upload dist/coursera-*.tar.gz*')
local("git push")
local("git push --tags") |
def get_preflist(self, bucket, key):
"""
Get the preflist for a bucket/key
:param bucket: Riak Bucket
:type bucket: :class:`~riak.bucket.RiakBucket`
:param key: Riak Key
:type key: string
:rtype: list of dicts
"""
if not self.preflists():
raise NotImplementedError("fetching preflists is not supported.")
bucket_type = self._get_bucket_type(bucket.bucket_type)
url = self.preflist_path(bucket.name, key, bucket_type=bucket_type)
status, headers, body = self._request('GET', url)
if status == 200:
preflist = json.loads(bytes_to_str(body))
return preflist['preflist']
else:
raise RiakError('Error getting bucket/key preflist.') | Get the preflist for a bucket/key
:param bucket: Riak Bucket
:type bucket: :class:`~riak.bucket.RiakBucket`
:param key: Riak Key
:type key: string
:rtype: list of dicts | Below is the the instruction that describes the task:
### Input:
Get the preflist for a bucket/key
:param bucket: Riak Bucket
:type bucket: :class:`~riak.bucket.RiakBucket`
:param key: Riak Key
:type key: string
:rtype: list of dicts
### Response:
def get_preflist(self, bucket, key):
"""
Get the preflist for a bucket/key
:param bucket: Riak Bucket
:type bucket: :class:`~riak.bucket.RiakBucket`
:param key: Riak Key
:type key: string
:rtype: list of dicts
"""
if not self.preflists():
raise NotImplementedError("fetching preflists is not supported.")
bucket_type = self._get_bucket_type(bucket.bucket_type)
url = self.preflist_path(bucket.name, key, bucket_type=bucket_type)
status, headers, body = self._request('GET', url)
if status == 200:
preflist = json.loads(bytes_to_str(body))
return preflist['preflist']
else:
raise RiakError('Error getting bucket/key preflist.') |
def is_changed():
""" Checks if current project has any noncommited changes. """
executed, changed_lines = execute_git('status --porcelain', output=False)
merge_not_finished = mod_path.exists('.git/MERGE_HEAD')
return changed_lines.strip() or merge_not_finished | Checks if current project has any noncommited changes. | Below is the the instruction that describes the task:
### Input:
Checks if current project has any noncommited changes.
### Response:
def is_changed():
""" Checks if current project has any noncommited changes. """
executed, changed_lines = execute_git('status --porcelain', output=False)
merge_not_finished = mod_path.exists('.git/MERGE_HEAD')
return changed_lines.strip() or merge_not_finished |
def user_disable_throw_rest_endpoint(self, username, url='rest/scriptrunner/latest/custom/disableUser',
param='userName'):
"""The disable method throw own rest enpoint"""
url = "{}?{}={}".format(url, param, username)
return self.get(path=url) | The disable method throw own rest enpoint | Below is the instruction that describes the task:
### Input:
The disable method throw own rest enpoint
### Response:
def user_disable_throw_rest_endpoint(self, username, url='rest/scriptrunner/latest/custom/disableUser',
                                     param='userName'):
    """Disable a user through a custom (ScriptRunner) REST endpoint."""
    full_path = "%s?%s=%s" % (url, param, username)
    return self.get(path=full_path)
def seebeck_spb(eta,Lambda=0.5):
"""
Seebeck analytic formula in the single parabolic model
"""
from fdint import fdk
return constants.k/constants.e * ((2. + Lambda) * fdk( 1.+ Lambda, eta)/
((1.+Lambda)*fdk(Lambda, eta))- eta) * 1e+6 | Seebeck analytic formula in the single parabolic model | Below is the instruction that describes the task:
### Input:
Seebeck analytic formula in the single parabolic model
### Response:
def seebeck_spb(eta, Lambda=0.5):
    """
    Analytic Seebeck coefficient in the single parabolic band model.

    Computed from Fermi-Dirac integrals; the 1e+6 factor scales the
    k/e prefactor from V/K to microvolts per kelvin.
    """
    from fdint import fdk
    ratio = (2. + Lambda) * fdk(1. + Lambda, eta) / ((1. + Lambda) * fdk(Lambda, eta))
    return constants.k / constants.e * (ratio - eta) * 1e+6
def htmlDocContentDumpOutput(self, cur, encoding):
"""Dump an HTML document. Formating return/spaces are added. """
if cur is None: cur__o = None
else: cur__o = cur._o
libxml2mod.htmlDocContentDumpOutput(self._o, cur__o, encoding) | Dump an HTML document. Formating return/spaces are added. | Below is the instruction that describes the task:
### Input:
Dump an HTML document. Formating return/spaces are added.
### Response:
def htmlDocContentDumpOutput(self, cur, encoding):
    """Dump an HTML document; formatting returns/spaces are added."""
    # Unwrap the low-level libxml2 object, tolerating a None document.
    cur__o = cur._o if cur is not None else None
    libxml2mod.htmlDocContentDumpOutput(self._o, cur__o, encoding)
def head(line, n: int):
"""returns the first `n` lines"""
global counter
counter += 1
if counter > n:
raise cbox.Stop() # can also raise StopIteration()
return line | returns the first `n` lines | Below is the instruction that describes the task:
### Input:
returns the first `n` lines
### Response:
def head(line, n: int):
    """Pass through only the first `n` lines, then stop the stream."""
    global counter  # module-level line counter shared across calls
    counter += 1
    if counter <= n:
        return line
    # cbox.Stop() terminates the pipeline (StopIteration would also work)
    raise cbox.Stop()
def _compose_chapters(self):
"""
Creates a chapters
and appends them to list
"""
for count in range(self.chapter_count):
chapter_num = count + 1
c = Chapter(self.markov, chapter_num)
self.chapters.append(c) | Creates a chapters
and appends them to list | Below is the the instruction that describes the task:
### Input:
Creates a chapters
and appends them to list
### Response:
def _compose_chapters(self):
    """Create one Chapter per requested chapter (numbered from 1) and collect them."""
    for number in range(1, self.chapter_count + 1):
        self.chapters.append(Chapter(self.markov, number))
def validate_email(addr):
"""Validate an email address.
This function raises ``ValueError`` if the email address is not valid.
>>> validate_email('foo@bar.com')
'foo@bar.com'
>>> validate_email('foo@bar com')
Traceback (most recent call last):
...
ValueError: Invalid domain: bar com
"""
if '@' not in addr:
raise ValueError('Invalid email address: %s' % addr)
node, domain = addr.split('@', 1)
try:
domain = idna.encode(force_text(domain))
except idna.core.IDNAError:
raise ValueError('Invalid domain: %s' % domain)
return '%s@%s' % (node, force_text(domain)) | Validate an email address.
This function raises ``ValueError`` if the email address is not valid.
>>> validate_email('foo@bar.com')
'foo@bar.com'
>>> validate_email('foo@bar com')
Traceback (most recent call last):
...
ValueError: Invalid domain: bar com | Below is the the instruction that describes the task:
### Input:
Validate an email address.
This function raises ``ValueError`` if the email address is not valid.
>>> validate_email('foo@bar.com')
'foo@bar.com'
>>> validate_email('foo@bar com')
Traceback (most recent call last):
...
ValueError: Invalid domain: bar com
### Response:
def validate_email(addr):
    """Validate an email address.
    This function raises ``ValueError`` if the email address is not valid.
    >>> validate_email('foo@bar.com')
    'foo@bar.com'
    >>> validate_email('foo@bar com')
    Traceback (most recent call last):
    ...
    ValueError: Invalid domain: bar com
    """
    if '@' not in addr:
        raise ValueError('Invalid email address: %s' % addr)
    local_part, domain_part = addr.split('@', 1)
    # IDNA-encode the domain; a failure means it is not a valid hostname.
    try:
        encoded = idna.encode(force_text(domain_part))
    except idna.core.IDNAError:
        raise ValueError('Invalid domain: %s' % domain_part)
    return '%s@%s' % (local_part, force_text(encoded))
def to_reverse(self):
"""Convert the IP address to a PTR record.
Using the .in-addr.arpa zone for IPv4 and .ip6.arpa for IPv6 addresses.
>>> ip = IP('192.0.2.42')
>>> print(ip.to_reverse())
42.2.0.192.in-addr.arpa
>>> print(ip.to_ipv6().to_reverse())
0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.a.2.2.0.0.0.0.c.2.0.0.2.ip6.arpa
"""
if self.v == 4:
return '.'.join(list(self.dq.split('.')[::-1]) + ['in-addr', 'arpa'])
else:
return '.'.join(list(self.hex())[::-1] + ['ip6', 'arpa']) | Convert the IP address to a PTR record.
Using the .in-addr.arpa zone for IPv4 and .ip6.arpa for IPv6 addresses.
>>> ip = IP('192.0.2.42')
>>> print(ip.to_reverse())
42.2.0.192.in-addr.arpa
>>> print(ip.to_ipv6().to_reverse())
0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.a.2.2.0.0.0.0.c.2.0.0.2.ip6.arpa | Below is the the instruction that describes the task:
### Input:
Convert the IP address to a PTR record.
Using the .in-addr.arpa zone for IPv4 and .ip6.arpa for IPv6 addresses.
>>> ip = IP('192.0.2.42')
>>> print(ip.to_reverse())
42.2.0.192.in-addr.arpa
>>> print(ip.to_ipv6().to_reverse())
0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.a.2.2.0.0.0.0.c.2.0.0.2.ip6.arpa
### Response:
def to_reverse(self):
    """Convert the IP address to a PTR record.
    Using the .in-addr.arpa zone for IPv4 and .ip6.arpa for IPv6 addresses.
    >>> ip = IP('192.0.2.42')
    >>> print(ip.to_reverse())
    42.2.0.192.in-addr.arpa
    >>> print(ip.to_ipv6().to_reverse())
    0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.a.2.2.0.0.0.0.c.2.0.0.2.ip6.arpa
    """
    if self.v == 4:
        # IPv4: dotted-quad octets, reversed, under in-addr.arpa.
        labels = self.dq.split('.')
        labels.reverse()
        labels += ['in-addr', 'arpa']
    else:
        # IPv6: individual hex nibbles, reversed, under ip6.arpa.
        labels = list(reversed(self.hex())) + ['ip6', 'arpa']
    return '.'.join(labels)
def security_code_date(self):
""" Date of user's security code update """
return sa.Column(
sa.TIMESTAMP(timezone=False),
default=datetime(2000, 1, 1),
server_default="2000-01-01 01:01",
) | Date of user's security code update | Below is the the instruction that describes the task:
### Input:
Date of user's security code update
### Response:
def security_code_date(self):
    """ Date of user's security code update """
    # SQLAlchemy declarative column: a timezone-naive timestamp.  Both the
    # Python-side default and the server-side default are a sentinel date far
    # in the past (year 2000) -- presumably meaning "never updated"; TODO
    # confirm that interpretation against the callers.
    return sa.Column(
        sa.TIMESTAMP(timezone=False),
        default=datetime(2000, 1, 1),
        server_default="2000-01-01 01:01",
    )
def _pstore16(ins):
""" Stores 2nd parameter at stack pointer (SP) + X, being
X 1st parameter.
1st operand must be a SIGNED integer.
"""
value = ins.quad[2]
offset = ins.quad[1]
indirect = offset[0] == '*'
size = 1
if indirect:
offset = offset[1:]
I = int(offset)
if I >= 0:
I += 4 # Return Address + "push IX"
if is_int(value):
output = []
else:
output = _16bit_oper(value)
ix_changed = not (-128 + size <= I <= 127 - size) # Offset > 127 bytes. Need to change IX
if indirect:
if is_int(value):
output.append('ld hl, %i' % int16(value))
output.append('ld bc, %i' % I)
output.append('call __PISTORE16')
REQUIRES.add('istore16.asm')
return output
# direct store
if ix_changed: # more than 1 byte
if not is_int(value):
output.append('ex de, hl')
output.append('push ix')
output.append('pop hl')
output.append('ld bc, %i' % I)
output.append('add hl, bc')
if is_int(value):
v = int16(value)
output.append('ld (hl), %i' % (v & 0xFF))
output.append('inc hl')
output.append('ld (hl), %i' % (v >> 8))
return output
else:
output.append('ld (hl), e')
output.append('inc hl')
output.append('ld (hl), d')
return output
if is_int(value):
v = int16(value)
output.append('ld (ix%+i), %i' % (I, v & 0xFF))
output.append('ld (ix%+i), %i' % (I + 1, v >> 8))
else:
output.append('ld (ix%+i), l' % I)
output.append('ld (ix%+i), h' % (I + 1))
return output | Stores 2nd parameter at stack pointer (SP) + X, being
X 1st parameter.
1st operand must be a SIGNED integer. | Below is the the instruction that describes the task:
### Input:
Stores 2nd parameter at stack pointer (SP) + X, being
X 1st parameter.
1st operand must be a SIGNED integer.
### Response:
def _pstore16(ins):
    """ Stores 2nd parameter at stack pointer (SP) + X, being
    X 1st parameter.
    1st operand must be a SIGNED integer.
    """
    # Quad layout: quad[1] is the (possibly '*'-prefixed) stack offset,
    # quad[2] is the 16-bit value to store.
    value = ins.quad[2]
    offset = ins.quad[1]
    indirect = offset[0] == '*'  # '*' prefix marks an indirect (pointer) store
    size = 1
    if indirect:
        offset = offset[1:]  # drop the '*' marker before parsing the number
    I = int(offset)
    if I >= 0:
        I += 4  # Return Address + "push IX"
    if is_int(value):
        output = []  # constant value: emitted inline below, nothing to load
    else:
        output = _16bit_oper(value)  # asm lines loading the value into HL
    ix_changed = not (-128 + size <= I <= 127 - size)  # Offset > 127 bytes. Need to change IX
    if indirect:
        # Indirect store: delegate to the __PISTORE16 runtime helper, which
        # dereferences the pointer at the IX offset passed in BC.
        if is_int(value):
            output.append('ld hl, %i' % int16(value))
        output.append('ld bc, %i' % I)
        output.append('call __PISTORE16')
        REQUIRES.add('istore16.asm')  # ensure the helper routine is assembled in
        return output
    # direct store
    if ix_changed:  # more than 1 byte
        # Offset does not fit the signed 8-bit IX displacement: compute the
        # target address in HL instead (HL = IX + I).
        if not is_int(value):
            output.append('ex de, hl')  # free HL; the value moves to DE
        output.append('push ix')
        output.append('pop hl')
        output.append('ld bc, %i' % I)
        output.append('add hl, bc')
        if is_int(value):
            v = int16(value)
            output.append('ld (hl), %i' % (v & 0xFF))  # low byte first (little endian)
            output.append('inc hl')
            output.append('ld (hl), %i' % (v >> 8))  # then high byte
            return output
        else:
            output.append('ld (hl), e')
            output.append('inc hl')
            output.append('ld (hl), d')
            return output
    # Short offset: store directly through IX with an 8-bit displacement.
    if is_int(value):
        v = int16(value)
        output.append('ld (ix%+i), %i' % (I, v & 0xFF))
        output.append('ld (ix%+i), %i' % (I + 1, v >> 8))
    else:
        output.append('ld (ix%+i), l' % I)
        output.append('ld (ix%+i), h' % (I + 1))
    return output
def inform_of_short_bin_name(cls, binary):
"""Historically, we had "devassistant" binary, but we chose to go with
shorter "da". We still allow "devassistant", but we recommend using "da".
"""
binary = os.path.splitext(os.path.basename(binary))[0]
if binary != 'da':
msg = '"da" is the preffered way of running "{binary}".'.format(binary=binary)
logger.logger.info('*' * len(msg))
logger.logger.info(msg)
logger.logger.info('*' * len(msg)) | Historically, we had "devassistant" binary, but we chose to go with
shorter "da". We still allow "devassistant", but we recommend using "da". | Below is the the instruction that describes the task:
### Input:
Historically, we had "devassistant" binary, but we chose to go with
shorter "da". We still allow "devassistant", but we recommend using "da".
### Response:
def inform_of_short_bin_name(cls, binary):
    """Historically, we had "devassistant" binary, but we chose to go with
    shorter "da". We still allow "devassistant", but we recommend using "da".
    """
    # Strip directory and extension to get the bare command name.
    binary = os.path.splitext(os.path.basename(binary))[0]
    if binary != 'da':
        # Fix: "preffered" -> "preferred" in the user-facing hint.
        msg = '"da" is the preferred way of running "{binary}".'.format(binary=binary)
        banner = '*' * len(msg)
        logger.logger.info(banner)
        logger.logger.info(msg)
        logger.logger.info(banner)
def install_pyenv(name, user=None):
'''
Install pyenv if not installed. Allows you to require pyenv be installed
prior to installing the plugins. Useful if you want to install pyenv
plugins via the git or file modules and need them installed before
installing any rubies.
Use the pyenv.root configuration option to set the path for pyenv if you
want a system wide install that is not in a user home dir.
user: None
The user to run pyenv as.
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
if __opts__['test']:
ret['comment'] = 'pyenv is set to be installed'
return ret
return _check_and_install_python(ret, user) | Install pyenv if not installed. Allows you to require pyenv be installed
prior to installing the plugins. Useful if you want to install pyenv
plugins via the git or file modules and need them installed before
installing any rubies.
Use the pyenv.root configuration option to set the path for pyenv if you
want a system wide install that is not in a user home dir.
user: None
The user to run pyenv as. | Below is the the instruction that describes the task:
### Input:
Install pyenv if not installed. Allows you to require pyenv be installed
prior to installing the plugins. Useful if you want to install pyenv
plugins via the git or file modules and need them installed before
installing any rubies.
Use the pyenv.root configuration option to set the path for pyenv if you
want a system wide install that is not in a user home dir.
user: None
The user to run pyenv as.
### Response:
def install_pyenv(name, user=None):
    '''
    Ensure pyenv itself is installed before anything depends on it.  Useful
    as a requisite for states that install pyenv plugins via the git or file
    modules and need them present before installing any pythons.

    Set the pyenv.root configuration option to place pyenv somewhere other
    than a user home directory for a system-wide install.

    user: None
        The user to run pyenv as.
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    # In test mode, only report what would happen.
    if not __opts__['test']:
        return _check_and_install_python(ret, user)
    ret['comment'] = 'pyenv is set to be installed'
    return ret
def _get_task_statuses(task_ids, cluster):
"""
Retrieve task statuses from ECS API
Returns list of {RUNNING|PENDING|STOPPED} for each id in task_ids
"""
response = client.describe_tasks(tasks=task_ids, cluster=cluster)
# Error checking
if response['failures'] != []:
raise Exception('There were some failures:\n{0}'.format(
response['failures']))
status_code = response['ResponseMetadata']['HTTPStatusCode']
if status_code != 200:
msg = 'Task status request received status code {0}:\n{1}'
raise Exception(msg.format(status_code, response))
return [t['lastStatus'] for t in response['tasks']] | Retrieve task statuses from ECS API
Returns list of {RUNNING|PENDING|STOPPED} for each id in task_ids | Below is the instruction that describes the task:
### Input:
Retrieve task statuses from ECS API
Returns list of {RUNNING|PENDING|STOPPED} for each id in task_ids
### Response:
def _get_task_statuses(task_ids, cluster):
    """Query the ECS API for the last known status of each task.

    Returns a list of status strings ({RUNNING|PENDING|STOPPED}), one per
    id in ``task_ids``.
    """
    resp = client.describe_tasks(tasks=task_ids, cluster=cluster)
    # Surface API-level failures and unexpected HTTP codes explicitly.
    failures = resp['failures']
    if failures != []:
        raise Exception('There were some failures:\n{0}'.format(failures))
    code = resp['ResponseMetadata']['HTTPStatusCode']
    if code != 200:
        raise Exception(
            'Task status request received status code {0}:\n{1}'.format(code, resp))
    return [task['lastStatus'] for task in resp['tasks']]
def main(argv=None):
"""ben-umb entry point"""
arguments = cli_common(__doc__, argv=argv)
driver = CampaignDriver(arguments['CAMPAIGN-DIR'], expandcampvars=False)
driver(no_exec=True)
if argv is not None:
return driver | ben-umb entry point | Below is the instruction that describes the task:
### Input:
ben-umb entry point
### Response:
def main(argv=None):
    """ben-umb entry point"""
    args = cli_common(__doc__, argv=argv)
    runner = CampaignDriver(args['CAMPAIGN-DIR'], expandcampvars=False)
    runner(no_exec=True)
    # Hand the driver back only when invoked programmatically (argv given).
    if argv is None:
        return None
    return runner
def write_how_many(self, file):
""" Writes component numbers to a table.
"""
report = CaseReport(self.case)
# Map component labels to attribute names
components = [("Bus", "n_buses"), ("Generator", "n_generators"),
("Committed Generator", "n_online_generators"),
("Load", "n_loads"), ("Fixed Load", "n_fixed_loads"),
("Despatchable Load", "n_online_vloads"), ("Shunt", "n_shunts"),
("Branch", "n_branches"), ("Transformer", "n_transformers"),
("Inter-tie", "n_interties"), ("Area", "n_areas")
]
# Column 1 width
longest = max([len(c[0]) for c in components])
col1_header = "Object"
col1_width = longest
col2_header = "Quantity"
col2_width = len(col2_header)
# Row separator
sep = "="*col1_width + " " + "="*col2_width + "\n"
# Row headers
file.write(sep)
file.write(col1_header.center(col1_width))
file.write(" ")
file.write("%s\n" % col2_header.center(col2_width))
file.write(sep)
# Rows
for label, attr in components:
col2_value = str(getattr(report, attr))
file.write("%s %s\n" %
(label.ljust(col1_width), col2_value.rjust(col2_width)))
else:
file.write(sep)
file.write("\n")
del report | Writes component numbers to a table. | Below is the the instruction that describes the task:
### Input:
Writes component numbers to a table.
### Response:
def write_how_many(self, file):
    """ Writes component numbers to a table.

    Renders a two-column reStructuredText-style table (Object / Quantity)
    summarising how many of each component type the case contains.
    """
    report = CaseReport(self.case)
    # Map component labels to CaseReport attribute names.
    components = [("Bus", "n_buses"), ("Generator", "n_generators"),
        ("Committed Generator", "n_online_generators"),
        ("Load", "n_loads"), ("Fixed Load", "n_fixed_loads"),
        ("Despatchable Load", "n_online_vloads"), ("Shunt", "n_shunts"),
        ("Branch", "n_branches"), ("Transformer", "n_transformers"),
        ("Inter-tie", "n_interties"), ("Area", "n_areas")
    ]
    # Column widths: label column sized to the longest label.
    col1_header = "Object"
    col1_width = max(len(label) for label, _ in components)
    col2_header = "Quantity"
    col2_width = len(col2_header)
    # Row separator.
    sep = "=" * col1_width + " " + "=" * col2_width + "\n"
    # Header row.
    file.write(sep)
    file.write(col1_header.center(col1_width))
    file.write(" ")
    file.write("%s\n" % col2_header.center(col2_width))
    file.write(sep)
    # One row per component type.
    for label, attr in components:
        count = str(getattr(report, attr))
        file.write("%s %s\n" %
            (label.ljust(col1_width), count.rjust(col2_width)))
    # Footer.  (The original used a misleading ``for``/``else`` here; the
    # ``else`` always ran since the loop never breaks -- write it plainly.)
    file.write(sep)
    file.write("\n")
def K(self, X, X2=None, presliced=False):
"""
Calculates the kernel matrix K(X, X2) (or K(X, X) if X2 is None).
Handles the slicing as well as scaling and computes k(x, x') = k(r),
where r² = ((x - x')/lengthscales)².
Internally, this calls self.K_r2(r²), which in turn computes the
square-root and calls self.K_r(r). Classes implementing stationary
kernels can either overwrite `K_r2(r2)` if they only depend on the
squared distance, or `K_r(r)` if they need the actual radial distance.
"""
if not presliced:
X, X2 = self._slice(X, X2)
return self.K_r2(self.scaled_square_dist(X, X2)) | Calculates the kernel matrix K(X, X2) (or K(X, X) if X2 is None).
Handles the slicing as well as scaling and computes k(x, x') = k(r),
where r² = ((x - x')/lengthscales)².
Internally, this calls self.K_r2(r²), which in turn computes the
square-root and calls self.K_r(r). Classes implementing stationary
kernels can either overwrite `K_r2(r2)` if they only depend on the
squared distance, or `K_r(r)` if they need the actual radial distance. | Below is the the instruction that describes the task:
### Input:
Calculates the kernel matrix K(X, X2) (or K(X, X) if X2 is None).
Handles the slicing as well as scaling and computes k(x, x') = k(r),
where r² = ((x - x')/lengthscales)².
Internally, this calls self.K_r2(r²), which in turn computes the
square-root and calls self.K_r(r). Classes implementing stationary
kernels can either overwrite `K_r2(r2)` if they only depend on the
squared distance, or `K_r(r)` if they need the actual radial distance.
### Response:
def K(self, X, X2=None, presliced=False):
    """
    Compute the kernel matrix K(X, X2), or K(X, X) when X2 is None.

    Handles slicing of the active dimensions as well as scaling, evaluating
    k(x, x') = k(r) with r² = ((x - x')/lengthscales)².  The squared distance
    is handed to ``self.K_r2``; stationary kernels may override either
    ``K_r2`` (squared distance) or ``K_r`` (radial distance).
    """
    if not presliced:
        X, X2 = self._slice(X, X2)
    r2 = self.scaled_square_dist(X, X2)
    return self.K_r2(r2)
async def download_fileobj(self, Bucket, Key, Fileobj, ExtraArgs=None, Callback=None, Config=None):
"""Download an object from S3 to a file-like object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart download in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.client('s3')
with open('filename', 'wb') as data:
s3.download_fileobj('mybucket', 'mykey', data)
:type Fileobj: a file-like object
:param Fileobj: A file-like object to download into. At a minimum, it must
implement the `write` method and must accept bytes.
:type Bucket: str
:param Bucket: The name of the bucket to download from.
:type Key: str
:param Key: The name of the key to download from.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
download.
"""
try:
resp = await self.get_object(Bucket=Bucket, Key=Key)
except ClientError as err:
if err.response['Error']['Code'] == 'NoSuchKey':
# Convert to 404 so it looks the same when boto3.download_file fails
raise ClientError({'Error': {'Code': '404', 'Message': 'Not Found'}}, 'HeadObject')
raise
body = resp['Body']
while True:
data = await body.read(4096)
if data == b'':
break
if Callback:
try:
Callback(len(data))
except: # noqa: E722
pass
Fileobj.write(data)
await asyncio.sleep(0.0) | Download an object from S3 to a file-like object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart download in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.client('s3')
with open('filename', 'wb') as data:
s3.download_fileobj('mybucket', 'mykey', data)
:type Fileobj: a file-like object
:param Fileobj: A file-like object to download into. At a minimum, it must
implement the `write` method and must accept bytes.
:type Bucket: str
:param Bucket: The name of the bucket to download from.
:type Key: str
:param Key: The name of the key to download from.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
download. | Below is the the instruction that describes the task:
### Input:
Download an object from S3 to a file-like object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart download in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.client('s3')
with open('filename', 'wb') as data:
s3.download_fileobj('mybucket', 'mykey', data)
:type Fileobj: a file-like object
:param Fileobj: A file-like object to download into. At a minimum, it must
implement the `write` method and must accept bytes.
:type Bucket: str
:param Bucket: The name of the bucket to download from.
:type Key: str
:param Key: The name of the key to download from.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
download.
### Response:
async def download_fileobj(self, Bucket, Key, Fileobj, ExtraArgs=None, Callback=None, Config=None):
    """Download an object from S3 to a file-like object.
    The file-like object must be in binary mode.
    This is a managed transfer which will perform a multipart download in
    multiple threads if necessary.
    Usage::
        import boto3
        s3 = boto3.client('s3')
        with open('filename', 'wb') as data:
            s3.download_fileobj('mybucket', 'mykey', data)
    :type Fileobj: a file-like object
    :param Fileobj: A file-like object to download into. At a minimum, it must
        implement the `write` method and must accept bytes.
    :type Bucket: str
    :param Bucket: The name of the bucket to download from.
    :type Key: str
    :param Key: The name of the key to download from.
    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments that may be passed to the
        client operation.
    :type Callback: method
    :param Callback: A method which takes a number of bytes transferred to
        be periodically called during the download.
    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing the
        download.
    """
    try:
        resp = await self.get_object(Bucket=Bucket, Key=Key)
    except ClientError as err:
        if err.response['Error']['Code'] == 'NoSuchKey':
            # Convert to 404 so it looks the same when boto3.download_file fails
            raise ClientError({'Error': {'Code': '404', 'Message': 'Not Found'}}, 'HeadObject')
        raise
    body = resp['Body']
    while True:
        data = await body.read(4096)
        if data == b'':
            break
        if Callback:
            try:
                Callback(len(data))
            # Fix: the original bare ``except:`` also swallowed SystemExit and
            # KeyboardInterrupt.  Progress callbacks are best-effort, but only
            # ordinary exceptions should be ignored.
            except Exception:
                pass
        Fileobj.write(data)
        # Yield to the event loop between chunks so other tasks can run.
        await asyncio.sleep(0.0)
def simulate(model, dr, N=1, T=40, s0=None, i0=None, m0=None,
driving_process=None, seed=42, stochastic=True):
'''
Simulate a model using the specified decision rule.
Parameters
----------
model: NumericModel
dr: decision rule
s0: ndarray
initial state where all simulations start
driving_process: ndarray
realization of exogenous driving process (drawn randomly if None)
N: int
number of simulations
T: int
horizon for the simulations
seed: int
used to initialize the random number generator. Use it to replicate
exact same results among simulations
discard: boolean (False)
if True, then all simulations containing at least one non finite value
are discarded
Returns
-------
xarray.DataArray:
returns a ``T x N x n_v`` array where ``n_v``
is the number of variables.
'''
if isinstance(dr, AlgoResult):
dr = dr.dr
calib = model.calibration
parms = numpy.array(calib['parameters'])
if s0 is None:
s0 = calib['states']
n_x = len(model.symbols["controls"])
n_s = len(model.symbols["states"])
s_simul = numpy.zeros((T, N, n_s))
x_simul = numpy.zeros((T, N, n_x))
s_simul[0, :, :] = s0[None, :]
# are we simulating a markov chain or a continuous process ?
if driving_process is not None:
m_simul = driving_process
sim_type = 'continuous'
m0 = m_simul[0,:,:]
x_simul[0,:,:] = dr.eval_ms(m0, s0)
else:
if isinstance(dr.exo_grid, UnstructuredGrid):
if i0 is None:
i0 = 0
dp = model.exogenous.discretize() ## TODO (nothing guarantee the processes match)
m_simul = dp.simulate(N, T, i0=i0, stochastic=stochastic)
i_simul = find_index(m_simul, dp.values)
sim_type = 'discrete'
m0 = dp.node(i0)
x0 = dr.eval_is(i0, s0)
else:
process = model.exogenous ## TODO (nothing guarantee the processes match)
m_simul = process.simulate(N, T, m0=m0, stochastic=stochastic)
sim_type = 'continuous'
if m0 is None:
m0 = model.calibration["exogenous"]
x0 = dr.eval_ms(m0, s0)
x_simul[0, :, :] = x0[None, :]
fun = model.functions
f = model.functions['arbitrage']
g = model.functions['transition']
numpy.random.seed(seed)
from dolo.misc.dprint import dprint
mp = m0
for i in range(T):
m = m_simul[i,:,:]
s = s_simul[i,:,:]
if sim_type=='discrete':
i_m = i_simul[i,:]
xx = [dr.eval_is(i_m[ii], s[ii,:]) for ii in range(s.shape[0])]
x = np.row_stack(xx)
else:
x = dr.eval_ms(m, s)
x_simul[i,:,:] = x
ss = g(mp, s, x, m, parms)
if i < T-1:
s_simul[i + 1, :, :] = ss
mp = m
if 'auxiliary' not in fun: # TODO: find a better test than this
l = [s_simul, x_simul]
varnames = model.symbols['states'] + model.symbols['controls']
else:
aux = fun['auxiliary']
a_simul = aux(
m_simul.reshape((N * T, -1)),
s_simul.reshape((N * T, -1)),
x_simul.reshape((N * T, -1)), parms)
a_simul = a_simul.reshape(T, N, -1)
l = [m_simul, s_simul, x_simul, a_simul]
varnames = model.symbols['exogenous'] + model.symbols['states'] + model.symbols[
'controls'] + model.symbols['auxiliaries']
simul = numpy.concatenate(l, axis=2)
import xarray as xr
data = xr.DataArray(
simul,
dims=['T','N','V'],
coords={'T': range(T), 'N': range(N), 'V': varnames}
)
return data | Simulate a model using the specified decision rule.
Parameters
----------
model: NumericModel
dr: decision rule
s0: ndarray
initial state where all simulations start
driving_process: ndarray
realization of exogenous driving process (drawn randomly if None)
N: int
number of simulations
T: int
horizon for the simulations
seed: int
used to initialize the random number generator. Use it to replicate
exact same results among simulations
discard: boolean (False)
if True, then all simulations containing at least one non finite value
are discarded
Returns
-------
xarray.DataArray:
returns a ``T x N x n_v`` array where ``n_v``
is the number of variables. | Below is the the instruction that describes the task:
### Input:
Simulate a model using the specified decision rule.
Parameters
----------
model: NumericModel
dr: decision rule
s0: ndarray
initial state where all simulations start
driving_process: ndarray
realization of exogenous driving process (drawn randomly if None)
N: int
number of simulations
T: int
horizon for the simulations
seed: int
used to initialize the random number generator. Use it to replicate
exact same results among simulations
discard: boolean (False)
if True, then all simulations containing at least one non finite value
are discarded
Returns
-------
xarray.DataArray:
returns a ``T x N x n_v`` array where ``n_v``
is the number of variables.
### Response:
def simulate(model, dr, N=1, T=40, s0=None, i0=None, m0=None,
             driving_process=None, seed=42, stochastic=True):
    '''
    Simulate a model using the specified decision rule.
    Parameters
    ----------
    model: NumericModel
    dr: decision rule
    s0: ndarray
        initial state where all simulations start
    driving_process: ndarray
        realization of exogenous driving process (drawn randomly if None)
    N: int
        number of simulations
    T: int
        horizon for the simulations
    seed: int
        used to initialize the random number generator. Use it to replicate
        exact same results among simulations
    discard: boolean (False)
        if True, then all simulations containing at least one non finite value
        are discarded
    Returns
    -------
    xarray.DataArray:
        returns a ``T x N x n_v`` array where ``n_v``
        is the number of variables.
    '''
    # Unwrap solver results to get the underlying decision rule.
    if isinstance(dr, AlgoResult):
        dr = dr.dr
    calib = model.calibration
    parms = numpy.array(calib['parameters'])
    # Default initial state: the model's calibrated steady state.
    if s0 is None:
        s0 = calib['states']
    n_x = len(model.symbols["controls"])
    n_s = len(model.symbols["states"])
    # Pre-allocate (T, N, dim) trajectories for states and controls.
    s_simul = numpy.zeros((T, N, n_s))
    x_simul = numpy.zeros((T, N, n_x))
    s_simul[0, :, :] = s0[None, :]
    # are we simulating a markov chain or a continuous process ?
    if driving_process is not None:
        # Caller supplied the exogenous path directly.
        m_simul = driving_process
        sim_type = 'continuous'
        m0 = m_simul[0,:,:]
        x_simul[0,:,:] = dr.eval_ms(m0, s0)
    else:
        if isinstance(dr.exo_grid, UnstructuredGrid):
            # Discrete (Markov-chain) exogenous process: simulate node
            # indices and evaluate the rule per discrete state i.
            if i0 is None:
                i0 = 0
            dp = model.exogenous.discretize() ## TODO (nothing guarantee the processes match)
            m_simul = dp.simulate(N, T, i0=i0, stochastic=stochastic)
            i_simul = find_index(m_simul, dp.values)
            sim_type = 'discrete'
            m0 = dp.node(i0)
            x0 = dr.eval_is(i0, s0)
        else:
            # Continuous exogenous process: draw a random realization.
            process = model.exogenous ## TODO (nothing guarantee the processes match)
            m_simul = process.simulate(N, T, m0=m0, stochastic=stochastic)
            sim_type = 'continuous'
            if m0 is None:
                m0 = model.calibration["exogenous"]
            x0 = dr.eval_ms(m0, s0)
        x_simul[0, :, :] = x0[None, :]
    fun = model.functions
    f = model.functions['arbitrage']
    g = model.functions['transition']
    numpy.random.seed(seed)
    from dolo.misc.dprint import dprint  # NOTE(review): unused debug import retained
    # mp holds the previous period's exogenous value for the transition g.
    mp = m0
    for i in range(T):
        m = m_simul[i,:,:]
        s = s_simul[i,:,:]
        if sim_type=='discrete':
            # Evaluate the rule row by row at each simulation's discrete node.
            i_m = i_simul[i,:]
            xx = [dr.eval_is(i_m[ii], s[ii,:]) for ii in range(s.shape[0])]
            x = np.row_stack(xx)
        else:
            x = dr.eval_ms(m, s)
        x_simul[i,:,:] = x
        # Next-period states: s' = g(m_{t-1}, s, x, m).
        ss = g(mp, s, x, m, parms)
        if i < T-1:
            s_simul[i + 1, :, :] = ss
        mp = m
    if 'auxiliary' not in fun: # TODO: find a better test than this
        l = [s_simul, x_simul]
        varnames = model.symbols['states'] + model.symbols['controls']
    else:
        # Recompute auxiliary variables along the simulated paths.
        aux = fun['auxiliary']
        a_simul = aux(
            m_simul.reshape((N * T, -1)),
            s_simul.reshape((N * T, -1)),
            x_simul.reshape((N * T, -1)), parms)
        a_simul = a_simul.reshape(T, N, -1)
        l = [m_simul, s_simul, x_simul, a_simul]
        varnames = model.symbols['exogenous'] + model.symbols['states'] + model.symbols[
            'controls'] + model.symbols['auxiliaries']
    # Stack all variable groups along the last axis and label the result.
    simul = numpy.concatenate(l, axis=2)
    import xarray as xr
    data = xr.DataArray(
        simul,
        dims=['T','N','V'],
        coords={'T': range(T), 'N': range(N), 'V': varnames}
    )
    return data
def emit(self,rlen=150):
        """Emit a read based on a source sequence.

        Pulls one transcript from the source emitter, cuts a fragment from it,
        optionally reverse-complements it, builds a full-length FASTQ record
        with uniform 'I' qualities, applies each configured error model to the
        two mate templates, and bundles everything as a TranscriptEmission.

        :param rlen: reported length of each mate (default 150)
        """
        source_tx = self._source.emit()
        source_read = self._cutter.cut(source_tx)
        # Flip strand with probability 0.5 when flipping is enabled.
        if self._flip and self.options.rand.random() < 0.5: source_read = source_read.rc()
        # NOTE(review): srname is never used below; the uuid4() draw may matter
        # for reproducibility of the seeded RNG stream — confirm before removing.
        srname = self.options.rand.uuid4()
        # Full-length record: fresh uuid name, "\tlong" tag, uniform 'I' quality.
        seqfull = FASTQ('@'+self.options.rand.uuid4()+"\tlong\n"+str(source_read.sequence)+"\n+\n"+'I'*source_read.sequence.length+"\n")
        seqperm1 = seqfull.copy()
        seqperm2 = seqfull.copy()
        # Each error model permutes the two mate templates independently.
        for e in self.errors:
                seqperm1 = e.permute(seqperm1)
                seqperm2 = e.permute(seqperm2)
        # Left mate: first rlen bases of the permuted forward template.
        sleft = seqperm1[0:rlen]
        sleft = FASTQ('@'+sleft.name+"\tleft\n"+sleft.sequence+"\n+\n"+sleft.qual+"\n")
        # Right mate: first rlen bases of the reverse-complemented template.
        sright = seqperm2.rc()[0:rlen]
        sright = FASTQ('@'+sright.name+"\tright\n"+sright.sequence+"\n+\n"+sright.qual+"\n")
        emission = TranscriptEmission(source_tx,
                                       Source(source_read,
                                              source_read.slice_sequence(0,rlen),
                                              source_read.rc().slice_sequence(0,rlen)),
                                       Read(seqperm1,
                                            sleft,
                                            sright
                                            ))
        return emission | Emit a read based on a source sequence | Below is the the instruction that describes the task:
### Input:
Emit a read based on a source sequence
### Response:
def emit(self,rlen=150):
    """Emit a read based on a source sequence.

    Draws a transcript from the source emitter, cuts a fragment from it,
    optionally flips strand, applies the configured error models to two
    mate templates, and packages the result as a TranscriptEmission.

    :param rlen: reported length of each mate (default 150)
    """
    transcript = self._source.emit()
    fragment = self._cutter.cut(transcript)
    if self._flip and self.options.rand.random() < 0.5:
        fragment = fragment.rc()
    # Drawn but unused downstream; kept so the RNG stream stays identical
    # under a fixed seed.
    srname = self.options.rand.uuid4()
    read_name = self.options.rand.uuid4()
    seq_text = str(fragment.sequence)
    quals = 'I' * fragment.sequence.length
    # Full-length FASTQ record tagged "long", uniform 'I' qualities.
    template = FASTQ(f"@{read_name}\tlong\n{seq_text}\n+\n{quals}\n")
    mate_one = template.copy()
    mate_two = template.copy()
    for error_model in self.errors:
        mate_one = error_model.permute(mate_one)
        mate_two = error_model.permute(mate_two)
    # Left mate from the forward template, right mate from its reverse
    # complement; both trimmed to rlen and re-tagged.
    left = mate_one[0:rlen]
    left = FASTQ(f"@{left.name}\tleft\n{left.sequence}\n+\n{left.qual}\n")
    right = mate_two.rc()[0:rlen]
    right = FASTQ(f"@{right.name}\tright\n{right.sequence}\n+\n{right.qual}\n")
    return TranscriptEmission(transcript,
                              Source(fragment,
                                     fragment.slice_sequence(0,rlen),
                                     fragment.rc().slice_sequence(0,rlen)),
                              Read(mate_one,
                                   left,
                                   right
                                   ))
def AQLQuery(self, query, batchSize = 100, rawResults = False, bindVars = {}, options = {}, count = False, fullCount = False,
                json_encoder = None, **moreArgs) :
        """Build and return a module-level ``AQLQuery`` over this database,
        forwarding every argument unchanged.

        Set rawResults = True if you want the query to return dictionaries
        instead of Document objects.
        You can use **moreArgs to pass more arguments supported by the API,
        such as ttl=60 (time to live).

        NOTE(review): ``bindVars`` and ``options`` are mutable default
        arguments ({}); if the cursor ever mutates them, state would leak
        across calls — confirm against the AQLQuery class.
        """
        # Thin wrapper: `self` is handed to the cursor as the database handle.
        return AQLQuery(self, query, rawResults = rawResults, batchSize = batchSize, bindVars = bindVars, options = options, count = count, fullCount = fullCount,
                        json_encoder = json_encoder, **moreArgs) | Set rawResults = True if you want the query to return dictionnaries instead of Document objects.
        You can use **moreArgs to pass more arguments supported by the API, such as ttl=60 (time to live) | Below is the instruction that describes the task:
### Input:
Set rawResults = True if you want the query to return dictionaries instead of Document objects.
You can use **moreArgs to pass more arguments supported by the API, such as ttl=60 (time to live)
### Response:
def AQLQuery(self, query, batchSize = 100, rawResults = False, bindVars = None, options = None, count = False, fullCount = False,
             json_encoder = None, **moreArgs) :
    """Build and return a module-level ``AQLQuery`` cursor over this database.

    Set rawResults = True if you want the query to return dictionaries
    instead of Document objects.
    You can use **moreArgs to pass more arguments supported by the API,
    such as ttl=60 (time to live).

    All parameters are forwarded unchanged to ``AQLQuery``; ``bindVars``
    and ``options`` default to fresh empty dicts on each call.
    """
    # Use None sentinels instead of mutable default arguments ({}): a shared
    # module-level default dict would leak state between calls if the cursor
    # machinery ever mutated it.
    if bindVars is None:
        bindVars = {}
    if options is None:
        options = {}
    return AQLQuery(self, query, rawResults = rawResults, batchSize = batchSize, bindVars = bindVars, options = options, count = count, fullCount = fullCount,
                    json_encoder = json_encoder, **moreArgs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.