code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def pwgen(length=None):
    """Generate a random password.

    :param length: Desired password length; when ``None`` a random length
        in [35, 44] is chosen.
    :return: Password built from ASCII letters and digits, excluding
        vowels and easily-confused characters ('l', '0', 'Q', 'D', '1', ...).
    """
    if length is None:
        # A weak PRNG is fine for picking the length: it reveals nothing
        # useful about the password contents.
        length = random.randint(35, 44)
    alphanumeric_chars = [
        char for char in (string.ascii_letters + string.digits)
        if char not in 'l0QD1vAEIOUaeiou']
    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
    # actual password.
    secure_random = random.SystemRandom()
    return ''.join(
        secure_random.choice(alphanumeric_chars) for _ in range(length))
|
Generate a random password.
|
def set_urlroute_rules(rules=None):
    """Compile and install the global URL rewrite rules.

    :param rules: a mapping of ``{pattern: replacement}`` or an iterable of
        ``(pattern, replacement)`` pairs, e.g. ``[('/admin', '/demo')]``.
        ``None`` clears the rules.
    """
    global __url_route_rules__
    __url_route_rules__ = []
    # Bug fix: the original iterated `(rules or {}).values()`, which yields
    # lone replacement strings and cannot be unpacked into (pattern,
    # replacement). Accept both a dict and a sequence of pairs.
    pairs = rules.items() if isinstance(rules, dict) else (rules or [])
    for pattern, replacement in pairs:
        __url_route_rules__.append((re.compile(pattern), replacement))
|
rules should be (pattern, replace)
e.g.: ('/admin', '/demo')
|
def fcs(bits):
    '''
    Append a running bitwise FCS CRC checksum to the end of a bit stream.

    Yields every input bit unchanged while feeding it into an FCS
    accumulator, then yields the bits of the final FCS digest.

    :param bits: iterable of bits to pass through and checksum.
    :yields: the original bits followed by the FCS digest bits.
    '''
    fcs = FCS()
    for bit in bits:
        yield bit
        fcs.update_bit(bit)
    # append fcs digest to bit stream
    # n.b. wire format is little-bit-endianness in addition to little-endian
    digest = bitarray(endian="little")
    digest.frombytes(fcs.digest())
    for bit in digest:
        yield bit
|
Append running bitwise FCS CRC checksum to end of generator
|
def getBoundsColor(self, nNumOutputColors, flCollisionBoundsFadeDistance):
    """Get the current chaperone bounds draw color and brightness."""
    bounds_color = HmdColor_t()
    camera_color = HmdColor_t()
    self.function_table.getBoundsColor(
        byref(bounds_color), nNumOutputColors,
        flCollisionBoundsFadeDistance, byref(camera_color))
    return bounds_color, camera_color
|
Get the current chaperone bounds draw color and brightness
|
def query_tag_values(self, metric_type=None, **tags):
    """
    Query for possible tag values.

    :param metric_type: A MetricType to be queried. If left to None, matches all the MetricTypes
    :param tags: A dict of tag key/value pairs. Uses Hawkular-Metrics tag query language for syntax
    """
    tag_query = self._transform_tags(**tags)
    base_url = self._get_metrics_tags_url(self._get_url(metric_type))
    return self._get('{}/{}'.format(base_url, tag_query))
|
Query for possible tag values.
:param metric_type: A MetricType to be queried. If left to None, matches all the MetricTypes
:param tags: A dict of tag key/value pairs. Uses Hawkular-Metrics tag query language for syntax
|
def from_exception(cls, exc):
    """
    Construct a new :class:`Error` payload from the attributes of the
    exception.

    :param exc: The exception to convert
    :type exc: :class:`aioxmpp.errors.XMPPError`
    :result: Newly constructed error payload
    :rtype: :class:`Error`

    .. versionchanged:: 0.10
        The :attr:`aioxmpp.XMPPError.application_defined_condition` is now
        taken over into the result.
    """
    payload = cls(condition=exc.condition, type_=exc.TYPE, text=exc.text)
    payload.application_condition = exc.application_defined_condition
    return payload
|
Construct a new :class:`Error` payload from the attributes of the
exception.
:param exc: The exception to convert
:type exc: :class:`aioxmpp.errors.XMPPError`
:result: Newly constructed error payload
:rtype: :class:`Error`
.. versionchanged:: 0.10
The :attr:`aioxmpp.XMPPError.application_defined_condition` is now
taken over into the result.
|
def _get_tensor_like_attributes():
    """Returns `Tensor` attributes related to shape and Python builtins."""
    # Enable "Tensor semantics" for distributions.
    # See tensorflow/python/framework/ops.py `class Tensor` for details.
    wrapped = {
        name: _wrap_method(tf.Tensor, name)
        for name in tf.Tensor.OVERLOADABLE_OPERATORS.union({'__iter__'})
    }
    # These members are copied straight through, unwrapped.
    passthrough = {
        name: getattr(tf.Tensor, name)
        for name in ('__nonzero__', '__bool__', '__array_priority__')
    }
    attrs = dict()
    attrs.update(wrapped)
    attrs.update(passthrough)
    return attrs
|
Returns `Tensor` attributes related to shape and Python builtins.
|
def create_conversation(self, body, recipients, attachment_ids=None, context_code=None, filter=None, filter_mode=None, group_conversation=None, media_comment_id=None, media_comment_type=None, mode=None, scope=None, subject=None, user_note=None):
    """
    Create a conversation.

    Create a new conversation with one or more recipients. If there is already
    an existing private conversation with the given recipients, it will be
    reused.

    :param body: (required) the message to be sent.
    :param recipients: (required) user ids or course/group ids prefixed with
        "course_"/"group_", e.g. recipients[]=1&recipients[]=course_3.
    All other arguments are optional form fields; enum-restricted fields are
    validated before being added to the payload.
    """
    path = {}
    data = {}
    params = {}
    # Allowed values for the enum-restricted form fields.
    enum_values = {
        "media_comment_type": ["audio", "video"],
        "mode": ["sync", "async"],
        "scope": ["unread", "starred", "archived"],
        "filter_mode": ["and", "or", "default or"],
    }
    # (field name, value, required?) in the order the payload is assembled;
    # optional fields are skipped when None, required ones always included.
    form_fields = [
        ("recipients", recipients, True),
        ("subject", subject, False),
        ("body", body, True),
        ("group_conversation", group_conversation, False),
        ("attachment_ids", attachment_ids, False),
        ("media_comment_id", media_comment_id, False),
        ("media_comment_type", media_comment_type, False),
        ("user_note", user_note, False),
        ("mode", mode, False),
        ("scope", scope, False),
        ("filter", filter, False),
        ("filter_mode", filter_mode, False),
        ("context_code", context_code, False),
    ]
    for field, value, required in form_fields:
        if not required and value is None:
            continue
        if field in enum_values and value is not None:
            self._validate_enum(value, enum_values[field])
        data[field] = value
    self.logger.debug("POST /api/v1/conversations with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/conversations".format(**path), data=data, params=params, no_data=True)
|
Create a conversation.
Create a new conversation with one or more recipients. If there is already
an existing private conversation with the given recipients, it will be
reused.
|
def open_editor(self, data=''):
    """
    Open a file for editing using the system's default editor.

    After the file has been altered, the text will be read back and the
    HTML comment tag <!--INSTRUCTIONS --> will be stripped. If an error
    occurs inside of the context manager, the file will be preserved so
    users can recover their data. Otherwise, the file will be deleted when
    the context manager closes.

    Params:
        data (str): If provided, text will be written to the file before
            opening it with the editor.
    Returns:
        text (str): The text that the user entered into the editor.
    """
    # NOTE(review): this is a generator (it yields), presumably wrapped by
    # contextlib.contextmanager at the decoration site -- confirm at caller.
    with NamedTemporaryFile(prefix='rtv_', suffix='.txt', delete=False) as fp:
        # Create a temporary file and grab the name, but close immediately so
        # we can re-open using the right encoding
        filepath = fp.name
    with codecs.open(filepath, 'w', 'utf-8') as fp:
        fp.write(data)
    _logger.info('File created: %s', filepath)
    # First matching environment variable wins; fall back to nano.
    editor = (os.getenv('RTV_EDITOR') or
              os.getenv('VISUAL') or
              os.getenv('EDITOR') or
              'nano')
    command = shlex.split(editor) + [filepath]
    try:
        # Suspend the curses UI while the external editor owns the terminal.
        with self.suspend():
            _logger.debug('Running command: %s', command)
            p = subprocess.Popen(command)
            try:
                p.communicate()
            except KeyboardInterrupt:
                p.terminate()
    except OSError as e:
        _logger.exception(e)
        self.show_notification('Could not open file with %s' % editor)
    # Read back whatever the user wrote, minus the instruction comment.
    with codecs.open(filepath, 'r', 'utf-8') as fp:
        text = fp.read()
    text = self.strip_instructions(text)
    try:
        yield text
    except exceptions.TemporaryFileError:
        # All exceptions will cause the file to *not* be removed, but these
        # ones should also be swallowed
        _logger.info('Caught TemporaryFileError')
        self.show_notification('Post saved as: %s' % filepath)
    else:
        # If no errors occurred, try to remove the file
        try:
            os.remove(filepath)
        except OSError:
            _logger.warning('Could not delete: %s', filepath)
        else:
            _logger.info('File deleted: %s', filepath)
|
Open a file for editing using the system's default editor.
After the file has been altered, the text will be read back and the
HTML comment tag <!--INSRUCTIONS --> will be stripped. If an error
occurs inside of the context manager, the file will be preserved so
users can recover their data. Otherwise, the file will be deleted when
the context manager closes.
Params:
data (str): If provided, text will be written to the file before
opening it with the editor.
Returns:
text (str): The text that the user entered into the editor.
|
def switch_onoff(self, device, status):
    """Switch a socket on or off.

    :param device: device identifier passed through to switch_on/switch_off.
    :param status: 1, True or '1' switches the device on; anything else off.
    :return: the result of ``switch_on`` or ``switch_off``.
    """
    # True == 1 in Python, so (1, True, '1') covers every "on" spelling the
    # original chain of == comparisons accepted.
    if status in (1, True, '1'):
        return self.switch_on(device)
    return self.switch_off(device)
|
Switch a Socket
|
def _replace_element_by_own_content(self, element):
    """
    Replace the element by own text content.

    :param element: The element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    """
    # pylint: disable=no-self-use
    if element.has_children_elements():
        # Hoist every child element in front of this element, then drop it.
        child_elements = element.get_children_elements()
        for child_element in child_elements:
            element.insert_before(child_element)
        element.remove_node()
    elif element.has_children():
        # Only non-element children remain (e.g. a text node): swap in the
        # first one directly.
        element.replace_node(element.get_first_node_child())
|
Replace the element by own text content.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
|
def display(self):
    """Return the display size as a ``Display(width, height)`` named tuple.

    Prefers the size parsed from ``dumpsys display`` because the values in
    ``self.info`` may be shrunk while a virtual keyboard is shown; falls
    back to ``self.info['displayWidth'/'displayHeight']`` when no line of
    the dumpsys output matches.
    """
    # Create the namedtuple class once instead of once per return branch.
    Display = collections.namedtuple('Display', ['width', 'height'])
    for line in self.adb_shell('dumpsys display').splitlines():
        m = _DISPLAY_RE.search(line, 0)
        if not m:
            continue
        return Display(int(m.group('width')), int(m.group('height')))
    return Display(self.info['displayWidth'], self.info['displayHeight'])
|
A virtual keyboard may shrink the reported d.info['displayHeight']
|
def agent_intents(self):
    """Returns a list of intent json objects"""
    response = self._get(self._intent_uri())
    # On failure the endpoint returns a dict of the form {status: {error}}
    # instead of a list of intent dicts.
    if isinstance(response, dict):
        raise Exception(response["status"])
    return [Intent(intent_json=intent) for intent in response]
|
Returns a list of intent json objects
|
def decrypt(self, ciphertext):
    """Given ``ciphertext`` returns a ``plaintext`` decrypted using the keys specified in ``__init__``.

    Ciphertext layout (after length recovery): ``W1`` -- one AES block of
    encrypted header/IV material, ``W2`` -- the CTR-encrypted payload of
    ``plaintext_length`` bytes, ``T`` -- a truncated HMAC-SHA512 tag over
    ``W1 || W2``.

    Raises ``CiphertextTypeError`` if the input ``ciphertext`` is not a string.
    Raises ``RecoverableDecryptionError`` if the input ``ciphertext`` has a non-negative message length greater than the ciphertext length.
    Raises ``UnrecoverableDecryptionError`` if invalid padding is detected, or the MAC is invalid.
    """
    if not isinstance(ciphertext, str):
        raise CiphertextTypeError("Input ciphertext is not of type string")
    # Recover the expected lengths from the ciphertext header (project
    # helpers; these may themselves raise on malformed input).
    plaintext_length = self.getPlaintextLen(ciphertext)
    ciphertext_length = self.getCiphertextLen(ciphertext)
    ciphertext_complete = (len(ciphertext) >= ciphertext_length)
    if ciphertext_complete is False:
        raise RecoverableDecryptionError('Incomplete ciphertext: ('+str(len(ciphertext))+' of '+str(ciphertext_length)+').')
    # Drop any trailing bytes beyond the declared ciphertext length.
    ciphertext = ciphertext[:ciphertext_length]
    # W1: first AES block (encrypted header / IV material).
    W1_start = 0
    W1_end = AES.block_size
    W1 = ciphertext[W1_start:W1_end]
    # W2: the CTR-mode encrypted payload.
    W2_start = AES.block_size
    W2_end = AES.block_size + plaintext_length
    W2 = ciphertext[W2_start:W2_end]
    # T: truncated MAC tag appended after the payload.
    T_start = AES.block_size + plaintext_length
    T_end = AES.block_size + plaintext_length + Encrypter._MAC_LENGTH
    T_expected = ciphertext[T_start:T_end]
    # Verify the MAC before decrypting (encrypt-then-MAC).
    # NOTE(review): != is not a constant-time comparison; consider
    # hmac.compare_digest if timing side channels matter here.
    mac = HMAC.new(self.K2, W1 + W2, SHA512)
    T_actual = mac.digest()[:Encrypter._MAC_LENGTH]
    if T_expected != T_actual:
        raise UnrecoverableDecryptionError('Failed to verify MAC.')
    # Rebuild the CTR IV: fixed 0x02 marker byte plus 7 bytes taken from
    # the ECB-decrypted header block.
    iv2_bytes = '\x02' + self._ecb_enc_K1.decrypt(W1)[1:8]
    counter_val = fte.bit_ops.bytes_to_long(iv2_bytes)
    counter_length_in_bits = AES.block_size * 8
    counter = Counter.new(
        counter_length_in_bits, initial_value=counter_val)
    ctr_enc = AES.new(key=self.K1,
                      mode=AES.MODE_CTR,
                      IV='\x00' * 8 + iv2_bytes,
                      counter=counter)
    plaintext = ctr_enc.decrypt(W2)
    return plaintext
|
Given ``ciphertext`` returns a ``plaintext`` decrypted using the keys specified in ``__init__``.
Raises ``CiphertextTypeError`` if the input ``ciphertext`` is not a string.
Raises ``RecoverableDecryptionError`` if the input ``ciphertext`` has a non-negative message length greater than the ciphertext length.
Raises ``UnrecoverableDecryptionError`` if invalid padding is detected, or the the MAC is invalid.
|
def get_campaign_name_list(self):
    """
    Returns a list of all valid campaign names

    Returns:
        List of strings containing all valid campaign names
    """
    return [campaign['name']
            for campaign in self.find('campaigns', {})
            if 'name' in campaign]
|
Returns a list of all valid campaign names
Returns:
List of strings containing all valid campaign names
|
def _insert_defaults(self):
    """ Inserts default values from :attr:`StructuredDictMixin.structure`
    to `self` by merging the two structures
    (see :func:`monk.manipulation.merge_defaults`).
    """
    self.update(merge_defaults(self.structure, self))
|
Inserts default values from :attr:`StructuredDictMixin.structure`
to `self` by merging the two structures
(see :func:`monk.manipulation.merge_defaults`).
|
def download(url, target, headers=None, trackers=()):
    """Download a file using requests.

    This is like urllib.request.urlretrieve, but:

    - requests validates SSL certificates by default
    - you can pass tracker objects to e.g. display a progress bar or calculate
      a file hash.

    :param url: URL to fetch.
    :param target: path the downloaded bytes are written to.
    :param headers: optional dict of extra request headers; a default
        user-agent is added when absent.
    :param trackers: objects receiving on_start/on_chunk/on_finish callbacks.
    """
    if headers is None:
        headers = {}
    headers.setdefault('user-agent', 'requests_download/' + __version__)
    # Close the response (and its connection) even if writing fails;
    # requests.Response supports the context-manager protocol.
    with requests.get(url, headers=headers, stream=True) as r:
        r.raise_for_status()
        for t in trackers:
            t.on_start(r)
        with open(target, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                if chunk:  # filter out keep-alive chunks
                    f.write(chunk)
                    for t in trackers:
                        t.on_chunk(chunk)
    for t in trackers:
        t.on_finish()
|
Download a file using requests.
This is like urllib.request.urlretrieve, but:
- requests validates SSL certificates by default
- you can pass tracker objects to e.g. display a progress bar or calculate
a file hash.
|
def _write_max_gradient(self)->None:
"Writes the maximum of the gradients to Tensorboard."
max_gradient = max(x.data.max() for x in self.gradients)
self._add_gradient_scalar('max_gradient', scalar_value=max_gradient)
|
Writes the maximum of the gradients to Tensorboard.
|
def to_json(self, minimal=True):
    """Encode an object as a JSON string.

    :param bool minimal: Construct a minimal representation of the object (ignore nulls and empty collections)
    :rtype: str
    """
    if minimal:
        payload, encoder = self.json_repr(minimal=True), MarathonMinimalJsonEncoder
    else:
        payload, encoder = self.json_repr(), MarathonJsonEncoder
    return json.dumps(payload, cls=encoder, sort_keys=True)
|
Encode an object as a JSON string.
:param bool minimal: Construct a minimal representation of the object (ignore nulls and empty collections)
:rtype: str
|
def present(email, profile="splunk", **kwargs):
    '''
    Ensure a user is present

    .. code-block:: yaml

        ensure example test user 1:
            splunk.user_present:
                - realname: 'Example TestUser1'
                - name: 'exampleuser'
                - email: 'example@domain.com'
                - roles: ['user']

    The following parameters are required:

    email
        This is the email of the user in splunk
    '''
    name = kwargs.get('name')
    # Standard salt state return structure.
    ret = {
        'name': name,
        'changes': {},
        'result': None,
        'comment': ''
    }
    # Look up any existing user with this email.
    target = __salt__['splunk.get_user'](email, profile=profile, user_details=True)
    if not target:
        if __opts__['test']:
            ret['comment'] = 'User {0} will be created'.format(name)
            return ret
        # create the user
        result = __salt__['splunk.create_user'](
            email, profile=profile, **kwargs
        )
        if result:
            ret['changes'].setdefault('old', None)
            ret['changes'].setdefault('new', 'User {0} exists'.format(name))
            ret['result'] = True
        else:
            ret['result'] = False
            ret['comment'] = 'Failed to create {0}'.format(name)
        return ret
    else:
        ret['comment'] = 'User {0} set to be updated.'.format(name)
        if __opts__['test']:
            ret['result'] = None
            return ret
        # found a user... updating
        result = __salt__['splunk.update_user'](
            email, profile, **kwargs
        )
        if isinstance(result, bool) and result:
            # no update
            ret['result'] = None
            ret['comment'] = "No changes"
        else:
            # Build a field-level diff between the old and the updated user.
            diff = {}
            for field in ['name', 'realname', 'roles', 'defaultApp', 'tz', 'capabilities']:
                if field == 'roles':
                    # Roles are compared as sets; only the difference is kept.
                    diff['roles'] = list(set(target.get(field, [])).symmetric_difference(set(result.get(field, []))))
                elif target.get(field) != result.get(field):
                    diff[field] = result.get(field)
            newvalues = result
            ret['result'] = True
            ret['changes']['diff'] = diff
            ret['changes']['old'] = target
            ret['changes']['new'] = newvalues
    return ret
|
Ensure a user is present
.. code-block:: yaml
ensure example test user 1:
splunk.user_present:
- realname: 'Example TestUser1'
- name: 'exampleuser'
- email: 'example@domain.com'
- roles: ['user']
The following parameters are required:
email
This is the email of the user in splunk
|
def _write_header(f, version, flags, stream_id, opcode, length):
    """
    Write a CQL protocol frame header to stream ``f``.

    Protocol v3+ uses the wider (16-bit stream id) header layout.
    """
    if version >= 3:
        f.write(v3_header_pack(version, flags, stream_id, opcode))
    else:
        f.write(header_pack(version, flags, stream_id, opcode))
    write_int(f, length)
|
Write a CQL protocol frame header.
|
def pop_data_point(self, n):
    """
    This will remove and return the n'th data point (starting at 0) from
    all columns.

    Parameters
    ----------
    n
        Index of data point to pop.
    """
    removed_values = []
    for column_key in self.ckeys:
        # Work on a mutable copy of the column data.
        values = list(self.c(column_key))
        # Remove the n'th entry and remember it.
        removed_values.append(values.pop(n))
        # Write the shortened column back in place.
        self.insert_column(_n.array(values), column_key)
    return removed_values
|
This will remove and return the n'th data point (starting at 0) from
all columns.
Parameters
----------
n
Index of data point to pop.
|
def _find_step_node(self, step_text):
    """Find the ast node which contains the text."""
    for func, decorator in self._iter_step_func_decorators():
        step = self._step_decorator_args(decorator)
        arg_node = decorator.call.value[0].value
        if step == step_text:
            # Single-string decorator matching directly.
            return arg_node, func
        if isinstance(step, list) and step_text in step:
            # Decorator carries a list of step strings; return the matching one.
            return arg_node[step.index(step_text)], func
    return None, None
|
Find the ast node which contains the text.
|
async def get_info(self):
    '''
    Retrieves a brief information about the compute session.
    '''
    params = {}
    if self.owner_access_key:
        params['owner_access_key'] = self.owner_access_key
    request = Request(self.session,
                      'GET', '/kernel/{}'.format(self.kernel_id),
                      params=params)
    async with request.fetch() as response:
        return await response.json()
|
Retrieves a brief information about the compute session.
|
def create_cmdclass(develop_wrappers=None, distribute_wrappers=None, data_dirs=None):
    """Create a command class with the given optional wrappers.

    Parameters
    ----------
    develop_wrappers: list(str), optional
        The cmdclass names to run before running other commands
    distribute_wrappers: list(str), optional
        The cmdclass names to run before running other commands
    data_dirs: list(str), optional.
        The directories containing static data.
    """
    develop_wrappers = develop_wrappers or []
    distribute_wrappers = distribute_wrappers or []
    data_dirs = data_dirs or []
    # Partially apply wrap_command so each command class below is wrapped
    # with the requested pre-commands and data directories.
    develop_wrapper = functools.partial(wrap_command, develop_wrappers, data_dirs)
    distribute_wrapper = functools.partial(wrap_command, distribute_wrappers, data_dirs)
    cmdclass = dict(
        develop=develop_wrapper(develop, strict=True),
        sdist=distribute_wrapper(sdist, strict=True),
        # Only allow bdist_egg when it was explicitly requested on the
        # command line; otherwise substitute the disabled stand-in.
        bdist_egg=bdist_egg if 'bdist_egg' in sys.argv else bdist_egg_disabled
    )
    if bdist_wheel:
        cmdclass['bdist_wheel'] = bdist_wheel
    return cmdclass
|
Create a command class with the given optional wrappers.
Parameters
----------
develop_wrapper: list(str), optional
The cmdclass names to run before running other commands
distribute_wrappers: list(str), optional
The cmdclass names to run before running other commands
data_dirs: list(str), optional.
The directories containing static data.
|
def post_customer_preferences(self, **kwargs):  # noqa: E501
    """Update selected fields of customer preferences  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.post_customer_preferences(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param CustomerPreferencesUpdating body:
    :return: CustomerPreferences
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both the sync and async branches of the generated code made the exact
    # same *_with_http_info call and returned its result; only the returned
    # object differs (data vs. request thread), which that call decides.
    kwargs['_return_http_data_only'] = True
    return self.post_customer_preferences_with_http_info(**kwargs)  # noqa: E501
|
Update selected fields of customer preferences # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_customer_preferences(async_req=True)
>>> result = thread.get()
:param async_req bool
:param CustomerPreferencesUpdating body:
:return: CustomerPreferences
If the method is called asynchronously,
returns the request thread.
|
def is_rpm_installed():
    """Return True if the ``rpm`` command is available, False otherwise."""
    try:
        completed = subprocess.run(["rpm", "--usage"],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
    except FileNotFoundError:
        # The rpm binary is not on PATH at all.
        return False
    return completed.returncode == 0
|
Tests if the rpm command is present.
|
async def create(
        cls, name: str, architecture: str, content: io.IOBase, *,
        title: str = "",
        filetype: BootResourceFileType = BootResourceFileType.TGZ,
        chunk_size=(1 << 22), progress_callback=None):
    """Create a `BootResource`.

    Creates an uploaded boot resource with `content`. The `content` is
    uploaded in chunks of `chunk_size`. `content` must be seekable as the
    first pass through the `content` will calculate the size and sha256
    value then the second pass will perform the actual upload.

    :param name: Name of the boot resource. Must be in format 'os/release'.
    :type name: `str`
    :param architecture: Architecture of the boot resource. Must be in
        format 'arch/subarch'.
    :type architecture: `str`
    :param content: Content of the boot resource.
    :type content: `io.IOBase`
    :param title: Title of the boot resource.
    :type title: `str`
    :param filetype: Type of file in content.
    :type filetype: `str`
    :param chunk_size: Size in bytes to upload to MAAS in chunks.
        (Default is 4 MiB).
    :type chunk_size: `int`
    :param progress_callback: Called to inform the current progress of the
        upload. One argument is passed with the progress as a percentage.
        If the resource was already complete and no content
        needed to be uploaded then this callback will never be called.
    :type progress_callback: Callable
    :returns: Created boot resource.
    :rtype: `BootResource`.
    """
    # Validate all arguments before touching the server.
    if '/' not in name:
        raise ValueError(
            "name must be in format os/release; missing '/'")
    if '/' not in architecture:
        raise ValueError(
            "architecture must be in format arch/subarch; missing '/'")
    if not content.readable():
        raise ValueError("content must be readable")
    elif not content.seekable():
        raise ValueError("content must be seekable")
    if chunk_size <= 0:
        raise ValueError(
            "chunk_size must be greater than 0, not %d" % chunk_size)
    # First pass over `content`: compute total size and digest. (The helper
    # name carries a 'sha265' typo, but that is its public spelling.)
    size, sha256 = calc_size_and_sha265(content, chunk_size)
    resource = cls._object(await cls._handler.create(
        name=name, architecture=architecture, title=title,
        filetype=filetype.value, size=str(size), sha256=sha256))
    # A freshly created resource is expected to hold exactly one set with
    # exactly one file. NOTE(review): these asserts vanish under `python -O`.
    newest_set = max(resource.sets, default=None)
    assert newest_set is not None
    resource_set = resource.sets[newest_set]
    assert len(resource_set.files) == 1
    rfile = list(resource_set.files.values())[0]
    if rfile.complete:
        # Already created and fully up-to-date.
        return resource
    else:
        # Second pass: upload in chunks and reload boot resource.
        # NOTE(review): `cls._object.read(...)` is returned without await
        # inside an async function -- confirm whether `read` is a coroutine
        # in this client library.
        await cls._upload_chunks(
            rfile, content, chunk_size, progress_callback)
        return cls._object.read(resource.id)
|
Create a `BootResource`.
Creates an uploaded boot resource with `content`. The `content` is
uploaded in chunks of `chunk_size`. `content` must be seekable as the
first pass through the `content` will calculate the size and sha256
value then the second pass will perform the actual upload.
:param name: Name of the boot resource. Must be in format 'os/release'.
:type name: `str`
:param architecture: Architecture of the boot resource. Must be in
format 'arch/subarch'.
:type architecture: `str`
:param content: Content of the boot resource.
:type content: `io.IOBase`
:param title: Title of the boot resource.
:type title: `str`
:param filetype: Type of file in content.
:type filetype: `str`
:param chunk_size: Size in bytes to upload to MAAS in chunks.
(Default is 4 MiB).
:type chunk_size: `int`
:param progress_callback: Called to inform the current progress of the
upload. One argument is passed with the progress as a precentage.
If the resource was already complete and no content
needed to be uploaded then this callback will never be called.
:type progress_callback: Callable
:returns: Create boot resource.
:rtype: `BootResource`.
|
def _format_lat(self, lat):
''' Returned a formated latitude format for the file '''
if self.ppd in [4, 16, 64, 128]:
return None
else:
if lat < 0:
return map(lambda x: "{0:0>2}"
.format(int(np.abs(x))) + 'S', self._map_center('lat', lat))
else:
return map(lambda x: "{0:0>2}"
.format(int(x)) + 'N', self._map_center('lat', lat))
|
Return a formatted latitude label for the file name
|
def set_conn(self, **kwargs):
    """Validate the supplied connection kwargs and register the connection.

    :kwarg name: (required) unique name to register the connection under.
    :kwarg conn_type: (required) connection type; must be one of
        ``self.conn_mapping.nested``.
    :kwarg active: when False the connection is logged and skipped
        (default True).
    :kwarg vendor: selects the implementation class for non-triplestore
        connection types.
    :kwarg log_level: overrides ``self.log_level`` for the module logger.
    :raises NameError: if 'name' is missing or 'conn_type' is invalid.
    :raises KeyError: if a connection with the same name is already set.
    """
    # log = logging.getLogger("%s.%s" % (self.log, inspect.stack()[0][3]))
    log.setLevel(kwargs.get('log_level',self.log_level))
    conn_name = kwargs.get("name")
    if not conn_name:
        raise NameError("a connection requires a 'name': %s" % kwargs)
    elif self.conns.get(conn_name):
        raise KeyError("connection '%s' has already been set" % conn_name)
    # Inactive connections are recorded in the log but never registered.
    if not kwargs.get("active", True):
        log.warning("Connection '%s' is set as inactive" % conn_name)
        return
    conn_type = kwargs.get("conn_type")
    if not conn_type or conn_type not in self.conn_mapping.nested:
        err_msg = ["a connection requires a valid 'conn_type':\n",
                   "%s"]
        raise NameError("".join(err_msg) % (list(self.conn_mapping.nested)))
    log.info("Setting '%s' connection", conn_name)
    # Triplestores use a dedicated factory; everything else is resolved by
    # (conn_type, vendor) in the RdfwConnections registry.
    if conn_type == "triplestore":
        conn = make_tstore_conn(kwargs)
    else:
        conn = RdfwConnections[conn_type][kwargs['vendor']](**kwargs)
    self.conns[conn_name] = conn
    self.__is_initialized__ = True
|
takes a connection and creates the connection
|
def message(msg, *args):
    '''Write a program message to stdout, clearing any progress display first.'''
    clear_progress()
    sys.stdout.write('%s\n' % (msg % args))
|
Program message output.
|
def check_log_files_and_publish_updates(self):
    """Get any changes to the log files and push updates to Redis.

    Returns:
        True if anything was published and false otherwise.
    """
    anything_published = False
    for file_info in self.open_file_infos:
        assert not file_info.file_handle.closed
        lines_to_publish = []
        # Cap the work done per file per call; remaining lines are picked
        # up on the next invocation.
        max_num_lines_to_read = 100
        for _ in range(max_num_lines_to_read):
            next_line = file_info.file_handle.readline()
            if next_line == "":
                break
            # Strip a single trailing newline.
            if next_line[-1] == "\n":
                next_line = next_line[:-1]
            lines_to_publish.append(next_line)
        # Publish the lines if this is a worker process.
        filename = file_info.filename.split("/")[-1]
        is_worker = (filename.startswith("worker")
                     and (filename.endswith("out")
                          or filename.endswith("err")))
        # On the first read of a worker log, the leading "Ray worker pid:"
        # banner line is consumed to learn the worker's pid rather than
        # being published.
        if is_worker and file_info.file_position == 0:
            if (len(lines_to_publish) > 0 and
                    lines_to_publish[0].startswith("Ray worker pid: ")):
                file_info.worker_pid = int(
                    lines_to_publish[0].split(" ")[-1])
                lines_to_publish = lines_to_publish[1:]
        # Record the current position in the file.
        file_info.file_position = file_info.file_handle.tell()
        if len(lines_to_publish) > 0 and is_worker:
            self.redis_client.publish(
                ray.gcs_utils.LOG_FILE_CHANNEL,
                json.dumps({
                    "ip": self.ip,
                    "pid": file_info.worker_pid,
                    "lines": lines_to_publish
                }))
            anything_published = True
    return anything_published
|
Get any changes to the log files and push updates to Redis.
Returns:
True if anything was published and false otherwise.
|
def segment_to_line(document, coords):
    "Build an SVG <line> element for a polyline with 2 vertices."
    element = document.createElement('line')
    # coords holds (x1, y1, x2, y2) in its first four slots.
    attributes = {'x1': coords[0], 'y1': coords[1],
                  'x2': coords[2], 'y2': coords[3]}
    return setattribs(element, **attributes)
|
polyline with 2 vertices using <line> tag
|
def restore_type(self, type):
    """Map a BigQuery column type to its Table Schema equivalent.

    Raises a tableschema ``StorageError`` for unsupported types.
    """
    # BigQuery type -> Table Schema type.
    mapping = {
        'BOOLEAN': 'boolean',
        'DATE': 'date',
        'DATETIME': 'datetime',
        'INTEGER': 'integer',
        'FLOAT': 'number',
        'STRING': 'string',
        'TIME': 'time',
    }
    restored = mapping.get(type)
    if restored is None:
        message = 'Type %s is not supported' % type
        raise tableschema.exceptions.StorageError(message)
    return restored
|
Restore type from BigQuery
|
def get_common_prefix(z):
    """Get common directory in a zip file if any."""
    names = z.namelist()
    if not names:
        return None
    candidate = names[0]
    # The first entry is the common prefix iff every other entry starts
    # with it.
    if all(name.startswith(candidate) for name in names[1:]):
        return candidate
    return None
|
Get common directory in a zip file if any.
|
def get_codes():
    """Return rows from the bundled geonames ``countryInfo.txt`` data file.

    Comment lines (starting with '#') are skipped; every other line is
    split on tabs, giving fields such as ISO, ISO3, ISO-Numeric, fips,
    Country, Capital, Area, Population, Continent, tld, currency, etc.

    >> get_codes()
    ISO  ISO3  ISO-Numeric  fips  Country  Capital  ...
    """
    cache_filename = os.path.join(os.path.dirname(__file__), 'data', 'countryInfo.txt')
    data = []
    # Use a context manager so the file handle is always closed (the
    # original left it open for the GC to collect).
    with open(cache_filename, 'r') as cache_file:
        for line in cache_file:
            if not line.startswith('#'):
                data.append(line.split('\t'))
    return data
|
>> get_codes()
ISO ISO3 ISO-Numeric fips Country Capital Area(in sq km) Population Continent tld CurrencyCode CurrencyName Phone Postal Code Format Postal Code Regex Languages geonameid neighbours EquivalentFipsCode
|
def _parse_request_arguments(self, request):
"""Parses comma separated request arguments
Args:
request: A request that should contain 'inference_address', 'model_name',
'model_version', 'model_signature'.
Returns:
A tuple of lists for model parameters
"""
inference_addresses = request.args.get('inference_address').split(',')
model_names = request.args.get('model_name').split(',')
model_versions = request.args.get('model_version').split(',')
model_signatures = request.args.get('model_signature').split(',')
if len(model_names) != len(inference_addresses):
raise common_utils.InvalidUserInputError('Every model should have a ' +
'name and address.')
return inference_addresses, model_names, model_versions, model_signatures
|
Parses comma separated request arguments
Args:
request: A request that should contain 'inference_address', 'model_name',
'model_version', 'model_signature'.
Returns:
A tuple of lists for model parameters
|
def general_attention(key, context, hidden_size, projected_align=False):
    """ It is an implementation of the Luong et al. attention mechanism with general score. Based on the paper:
    https://arxiv.org/abs/1508.04025 "Effective Approaches to Attention-based Neural Machine Translation"
    Args:
        key: A tensorflow tensor with dimensionality [None, None, key_size]
        context: A tensorflow tensor with dimensionality [None, None, max_num_tokens, token_size]
        hidden_size: Number of units in hidden representation
        projected_align: Whether the attended output is built from the
            bidirectional-lstm states ([hidden_size]) or from the raw context
            tokens ([token_size]). Note the bidirectional lstm is always run
            to compute the attention scores.
    Returns:
        output: Tensor at the output with dimensionality [None, None, hidden_size]
            when projected_align is True, otherwise [None, None, token_size]
    """
    # hidden_size is split evenly between the forward and backward LSTM cells.
    if hidden_size % 2 != 0:
        raise ValueError("hidden size must be dividable by two")
    batch_size = tf.shape(context)[0]
    max_num_tokens, token_size = context.get_shape().as_list()[-2:]
    # Flatten leading batch/time dims so the RNN sees [batch*time, tokens, size].
    r_context = tf.reshape(context, shape=[-1, max_num_tokens, token_size])
    # projected_key: [None, None, hidden_size]
    projected_key = \
        tf.layers.dense(key, hidden_size, kernel_initializer=xav())
    r_projected_key = tf.reshape(projected_key, shape=[-1, hidden_size, 1])
    lstm_fw_cell = tf.nn.rnn_cell.LSTMCell(hidden_size//2)
    lstm_bw_cell = tf.nn.rnn_cell.LSTMCell(hidden_size//2)
    (output_fw, output_bw), states = \
        tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fw_cell,
                                        cell_bw=lstm_bw_cell,
                                        inputs=r_context,
                                        dtype=tf.float32)
    # bilstm_output: [-1, max_num_tokens, hidden_size]
    bilstm_output = tf.concat([output_fw, output_bw], -1)
    # General score: softmax over tokens of (bilstm_output . W key).
    attn = tf.nn.softmax(tf.matmul(bilstm_output, r_projected_key), dim=1)
    if projected_align:
        log.info("Using projected attention alignment")
        t_context = tf.transpose(bilstm_output, [0, 2, 1])
        output = tf.reshape(tf.matmul(t_context, attn),
                            shape=[batch_size, -1, hidden_size])
    else:
        log.info("Using without projected attention alignment")
        t_context = tf.transpose(r_context, [0, 2, 1])
        output = tf.reshape(tf.matmul(t_context, attn),
                            shape=[batch_size, -1, token_size])
    return output
|
It is an implementation of the Luong et al. attention mechanism with general score. Based on the paper:
https://arxiv.org/abs/1508.04025 "Effective Approaches to Attention-based Neural Machine Translation"
Args:
key: A tensorflow tensor with dimensionality [None, None, key_size]
context: A tensorflow tensor with dimensionality [None, None, max_num_tokens, token_size]
hidden_size: Number of units in hidden representation
projected_align: Using bidirectional lstm for hidden representation of context.
If true, a bidirectional lstm layer with dimensionality [hidden_size] is inserted between the input and the attention mechanism.
If false, bidirectional lstm is not used.
Returns:
output: Tensor at the output with dimensionality [None, None, hidden_size]
|
def rollforward(self, date):
    """Roll *date* forward to the nearest end of year.

    A date already on a year-end offset is returned unchanged.
    """
    if self.onOffset(date):
        return date
    return date + YearEnd(month=self.month)
|
Roll date forward to nearest end of year
|
def get_path_for_core_element(self, core_element_id):
    """Get path to the row representing the core element with core_element_id.

    :param list core_element_id: Core element identifier used in the respective list store column
    :rtype: tuple
    :return: path of the first matching row, or None if no row matches
    """
    matches = []

    def collect_if_match(row_iter, found):
        stored_id = self.tree_store.get_value(row_iter, self.ID_STORAGE_ID)
        # Compare lengths first to skip rows at a different hierarchy depth.
        if len(stored_id) == len(core_element_id) and stored_id == core_element_id:
            found.append(self.tree_store.get_path(row_iter))

    self.iter_tree_with_handed_function(collect_if_match, matches)
    return matches[0] if matches else None
|
Get path to the row representing core element described by handed core_element_id
:param list core_element_id: Core element identifier used in the respective list store column
:rtype: tuple
:return: path
|
def _clone(self, **kwargs):
"""
Create a clone of this collection. The only param in the
initial collection is the filter context. Each chainable
filter is added to the clone and returned to preserve
previous iterators and their returned elements.
:return: :class:`.ElementCollection`
"""
params = copy.deepcopy(self._params)
if self._iexact:
params.update(iexact=self._iexact)
params.update(**kwargs)
clone = self.__class__(**params)
return clone
|
Create a clone of this collection. The only param in the
initial collection is the filter context. Each chainable
filter is added to the clone and returned to preserve
previous iterators and their returned elements.
:return: :class:`.ElementCollection`
|
def unscale_dict(C):
    """Undo the scaling applied in `scale_dict`.

    Multiplies every coefficient by its entry in `_scale_dict`, then
    applies `unscale_8` to the 'qqql' coefficient.
    """
    C_out = {k: _scale_dict[k] * v for k, v in C.items()}
    # NOTE(review): the loop variable is unused and only the fixed key
    # 'qqql' is transformed — this is correct only if
    # C_symm_keys[8] == ['qqql']; otherwise the intent was probably
    # C_out[k] = unscale_8(C_out[k]). TODO confirm against scale_dict.
    for k in C_symm_keys[8]:
        C_out['qqql'] = unscale_8(C_out['qqql'])
    return C_out
|
Undo the scaling applied in `scale_dict`.
|
def spectral_contrast(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
                      win_length=None, window='hann', center=True, pad_mode='reflect',
                      freq=None, fmin=200.0, n_bands=6, quantile=0.02,
                      linear=False):
    '''Compute spectral contrast [1]_
    .. [1] Jiang, Dan-Ning, Lie Lu, Hong-Jiang Zhang, Jian-Hua Tao,
           and Lian-Hong Cai.
           "Music type classification by spectral contrast feature."
           In Multimedia and Expo, 2002. ICME'02. Proceedings.
           2002 IEEE International Conference on, vol. 1, pp. 113-116.
           IEEE, 2002.
    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series
    sr : number > 0 [scalar]
        audio sampling rate of `y`
    S : np.ndarray [shape=(d, t)] or None
        (optional) spectrogram magnitude
    n_fft : int > 0 [scalar]
        FFT window size
    hop_length : int > 0 [scalar]
        hop length for STFT. See `librosa.core.stft` for details.
    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()`.
        The window will be of length `win_length` and then padded
        with zeros to match `n_fft`.
        If unspecified, defaults to ``win_length = n_fft``.
    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        - a window specification (string, tuple, or number);
          see `scipy.signal.get_window`
        - a window function, such as `scipy.signal.hanning`
        - a vector or array of length `n_fft`
        .. see also:: `filters.get_window`
    center : boolean
        - If `True`, the signal `y` is padded so that frame
          `t` is centered at `y[t * hop_length]`.
        - If `False`, then frame `t` begins at `y[t * hop_length]`
    pad_mode : string
        If `center=True`, the padding mode to use at the edges of the signal.
        By default, STFT uses reflection padding.
    freq : None or np.ndarray [shape=(d,)]
        Center frequencies for spectrogram bins.
        If `None`, then FFT bin center frequencies are used.
        Otherwise, it can be a single array of `d` center frequencies.
    fmin : float > 0
        Frequency cutoff for the first bin `[0, fmin]`
        Subsequent bins will cover `[fmin, 2*fmin]`, `[2*fmin, 4*fmin]`, etc.
    n_bands : int > 1
        number of frequency bands
    quantile : float in (0, 1)
        quantile for determining peaks and valleys
    linear : bool
        If `True`, return the linear difference of magnitudes:
        `peaks - valleys`.
        If `False`, return the logarithmic difference:
        `log(peaks) - log(valleys)`.
    Returns
    -------
    contrast : np.ndarray [shape=(n_bands + 1, t)]
        each row of spectral contrast values corresponds to a given
        octave-based frequency
    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> S = np.abs(librosa.stft(y))
    >>> contrast = librosa.feature.spectral_contrast(S=S, sr=sr)
    >>> import matplotlib.pyplot as plt
    >>> plt.figure()
    >>> plt.subplot(2, 1, 1)
    >>> librosa.display.specshow(librosa.amplitude_to_db(S,
    ...                                                  ref=np.max),
    ...                          y_axis='log')
    >>> plt.colorbar(format='%+2.0f dB')
    >>> plt.title('Power spectrogram')
    >>> plt.subplot(2, 1, 2)
    >>> librosa.display.specshow(contrast, x_axis='time')
    >>> plt.colorbar()
    >>> plt.ylabel('Frequency bands')
    >>> plt.title('Spectral contrast')
    >>> plt.tight_layout()
    '''
    # Derive a magnitude spectrogram from y if S was not supplied.
    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
                            win_length=win_length, window=window, center=center,
                            pad_mode=pad_mode)
    # Compute the center frequencies of each bin
    if freq is None:
        freq = fft_frequencies(sr=sr, n_fft=n_fft)
    freq = np.atleast_1d(freq)
    if freq.ndim != 1 or len(freq) != S.shape[0]:
        raise ParameterError('freq.shape mismatch: expected '
                             '({:d},)'.format(S.shape[0]))
    if n_bands < 1 or not isinstance(n_bands, int):
        raise ParameterError('n_bands must be a positive integer')
    if not 0.0 < quantile < 1.0:
        raise ParameterError('quantile must lie in the range (0, 1)')
    if fmin <= 0:
        raise ParameterError('fmin must be a positive number')
    # Octave band edges: [0, fmin], [fmin, 2*fmin], ..., n_bands doublings.
    octa = np.zeros(n_bands + 2)
    octa[1:] = fmin * (2.0**np.arange(0, n_bands + 1))
    if np.any(octa[:-1] >= 0.5 * sr):
        raise ParameterError('Frequency band exceeds Nyquist. '
                             'Reduce either fmin or n_bands.')
    valley = np.zeros((n_bands + 1, S.shape[1]))
    peak = np.zeros_like(valley)
    for k, (f_low, f_high) in enumerate(zip(octa[:-1], octa[1:])):
        current_band = np.logical_and(freq >= f_low, freq <= f_high)
        idx = np.flatnonzero(current_band)
        # Extend the band by one bin on each side (except at the edges of
        # the spectrum) so neighboring bands overlap slightly.
        if k > 0:
            current_band[idx[0] - 1] = True
        if k == n_bands:
            current_band[idx[-1] + 1:] = True
        sub_band = S[current_band]
        if k < n_bands:
            sub_band = sub_band[:-1]
        # Always take at least one bin from each side
        idx = np.rint(quantile * np.sum(current_band))
        idx = int(np.maximum(idx, 1))
        sortedr = np.sort(sub_band, axis=0)
        # Valley = mean of the quietest `idx` bins per frame;
        # peak = mean of the loudest `idx` bins per frame.
        valley[k] = np.mean(sortedr[:idx], axis=0)
        peak[k] = np.mean(sortedr[-idx:], axis=0)
    if linear:
        return peak - valley
    else:
        return power_to_db(peak) - power_to_db(valley)
|
Compute spectral contrast [1]_
.. [1] Jiang, Dan-Ning, Lie Lu, Hong-Jiang Zhang, Jian-Hua Tao,
and Lian-Hong Cai.
"Music type classification by spectral contrast feature."
In Multimedia and Expo, 2002. ICME'02. Proceedings.
2002 IEEE International Conference on, vol. 1, pp. 113-116.
IEEE, 2002.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
audio sampling rate of `y`
S : np.ndarray [shape=(d, t)] or None
(optional) spectrogram magnitude
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
hop length for STFT. See `librosa.core.stft` for details.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
freq : None or np.ndarray [shape=(d,)]
Center frequencies for spectrogram bins.
If `None`, then FFT bin center frequencies are used.
Otherwise, it can be a single array of `d` center frequencies.
fmin : float > 0
Frequency cutoff for the first bin `[0, fmin]`
Subsequent bins will cover `[fmin, 2*fmin]`, `[2*fmin, 4*fmin]`, etc.
n_bands : int > 1
number of frequency bands
quantile : float in (0, 1)
quantile for determining peaks and valleys
linear : bool
If `True`, return the linear difference of magnitudes:
`peaks - valleys`.
If `False`, return the logarithmic difference:
`log(peaks) - log(valleys)`.
Returns
-------
contrast : np.ndarray [shape=(n_bands + 1, t)]
each row of spectral contrast values corresponds to a given
octave-based frequency
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> contrast = librosa.feature.spectral_contrast(S=S, sr=sr)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(S,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Power spectrogram')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(contrast, x_axis='time')
>>> plt.colorbar()
>>> plt.ylabel('Frequency bands')
>>> plt.title('Spectral contrast')
>>> plt.tight_layout()
|
def identifiers(dataset_uri):
    """Print every item identifier in the dataset at *dataset_uri*."""
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    for identifier in dataset.identifiers:
        click.secho(identifier)
|
List the item identifiers in the dataset.
|
def _dist_to_trans(self, dist):
"""Convert mouse x, y movement into x, y, z translations"""
rae = np.array([self.roll, self.azimuth, self.elevation]) * np.pi / 180
sro, saz, sel = np.sin(rae)
cro, caz, cel = np.cos(rae)
dx = (+ dist[0] * (cro * caz + sro * sel * saz)
+ dist[1] * (sro * caz - cro * sel * saz))
dy = (+ dist[0] * (cro * saz - sro * sel * caz)
+ dist[1] * (sro * saz + cro * sel * caz))
dz = (- dist[0] * sro * cel + dist[1] * cro * cel)
return dx, dy, dz
|
Convert mouse x, y movement into x, y, z translations
|
def list_of_lists_to_dict(l):
    """Group [key, value] pairs into a dict of value lists.

    [['id', 1], ['id', 2], ['id', 3], ['foo', 4]]
    -> {'id': [1, 2, 3], 'foo': [4]}
    """
    grouped = {}
    for key, val in l:
        if key not in grouped:
            grouped[key] = []
        grouped[key].append(val)
    return grouped
|
Convert list of key,value lists to dict
[['id', 1], ['id', 2], ['id', 3], ['foo': 4]]
{'id': [1, 2, 3], 'foo': [4]}
|
def resolve_weak_types(storage, debug=False):
    """Resolve weak type rules W1 - W7 for each level run.

    See: http://unicode.org/reports/tr9/#Resolving_Weak_Types
    """
    for run in storage['runs']:
        prev_strong = prev_type = run['sor']
        start, length = run['start'], run['length']
        chars = storage['chars'][start:start+length]
        for _ch in chars:
            # W1. Examine each nonspacing mark (NSM) in the level run, and
            # change the type of the NSM to the type of the previous character.
            # If the NSM is at the start of the level run, it will get the type
            # of sor.
            bidi_type = _ch['type']
            if bidi_type == 'NSM':
                _ch['type'] = bidi_type = prev_type
            # W2. Search backward from each instance of a European number until
            # the first strong type (R, L, AL, or sor) is found. If an AL is
            # found, change the type of the European number to Arabic number.
            if bidi_type == 'EN' and prev_strong == 'AL':
                _ch['type'] = 'AN'
            # update prev_strong if needed
            if bidi_type in ('R', 'L', 'AL'):
                prev_strong = bidi_type
            prev_type = _ch['type']
        # W3. Change all ALs to R
        for _ch in chars:
            if _ch['type'] == 'AL':
                _ch['type'] = 'R'
        # W4. A single European separator between two European numbers changes
        # to a European number. A single common separator between two numbers of
        # the same type changes to that type.
        for idx in range(1, len(chars) - 1):
            bidi_type = chars[idx]['type']
            prev_type = chars[idx-1]['type']
            next_type = chars[idx+1]['type']
            if bidi_type == 'ES' and (prev_type == next_type == 'EN'):
                chars[idx]['type'] = 'EN'
            if bidi_type == 'CS' and prev_type == next_type and \
                    prev_type in ('AN', 'EN'):
                chars[idx]['type'] = prev_type
        # W5. A sequence of European terminators adjacent to European numbers
        # changes to all European numbers.
        for idx in range(len(chars)):
            if chars[idx]['type'] == 'EN':
                # Convert the run of ETs immediately before this EN...
                for et_idx in range(idx-1, -1, -1):
                    if chars[et_idx]['type'] == 'ET':
                        chars[et_idx]['type'] = 'EN'
                    else:
                        break
                # ...and the run of ETs immediately after it.
                for et_idx in range(idx+1, len(chars)):
                    if chars[et_idx]['type'] == 'ET':
                        chars[et_idx]['type'] = 'EN'
                    else:
                        break
        # W6. Otherwise, separators and terminators change to Other Neutral.
        for _ch in chars:
            if _ch['type'] in ('ET', 'ES', 'CS'):
                _ch['type'] = 'ON'
        # W7. Search backward from each instance of a European number until the
        # first strong type (R, L, or sor) is found. If an L is found, then
        # change the type of the European number to L.
        prev_strong = run['sor']
        for _ch in chars:
            if _ch['type'] == 'EN' and prev_strong == 'L':
                _ch['type'] = 'L'
            if _ch['type'] in ('L', 'R'):
                prev_strong = _ch['type']
    if debug:
        debug_storage(storage, runs=True)
|
Resolve weak type rules W1 - W7.
See: http://unicode.org/reports/tr9/#Resolving_Weak_Types
|
def parse_mcast_grps(family, grp_attr):
    """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L64.

    Parse the nested multicast-group attribute of a GENL CTRL message and
    register each group (numeric id + name) with `family`.

    Positional arguments:
    family -- genl_family class instance.
    grp_attr -- nlattr class instance.
    Returns:
    0 on success or a negative error code.
    """
    remaining = c_int()
    if not grp_attr:
        raise BUG
    # Each nested attribute describes one multicast group.
    for nla in nla_for_each_nested(grp_attr, remaining):
        tb = dict()
        err = nla_parse_nested(tb, CTRL_ATTR_MCAST_GRP_MAX, nla, family_grp_policy)
        if err < 0:
            return err
        # Both id and name are mandatory.
        # NOTE(review): tb is a plain dict, so a missing key raises KeyError
        # here instead of returning -NLE_MISSING_ATTR — presumably
        # nla_parse_nested pre-populates all slots; TODO confirm.
        if not tb[CTRL_ATTR_MCAST_GRP_ID] or not tb[CTRL_ATTR_MCAST_GRP_NAME]:
            return -NLE_MISSING_ATTR
        id_ = nla_get_u32(tb[CTRL_ATTR_MCAST_GRP_ID])
        name = nla_get_string(tb[CTRL_ATTR_MCAST_GRP_NAME])
        err = genl_family_add_grp(family, id_, name)
        if err < 0:
            return err
    return 0
|
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L64.
Positional arguments:
family -- genl_family class instance.
grp_attr -- nlattr class instance.
Returns:
0 on success or a negative error code.
|
def play_NoteContainer(self, nc, channel=1, velocity=100):
    """Play the Notes in the NoteContainer nc.

    Listeners are always notified first; a ``None`` container is treated
    as success. Returns False as soon as any note fails to play.
    """
    self.notify_listeners(self.MSG_PLAY_NC, {'notes': nc,
                          'channel': channel, 'velocity': velocity})
    if nc is None:
        return True
    # all() short-circuits, so playback stops at the first failing note,
    # just like the original explicit loop.
    return all(self.play_Note(note, channel, velocity) for note in nc)
|
Play the Notes in the NoteContainer nc.
|
def _publisher_callback(self, publish_ack):
"""
publisher callback that grpc and web socket can pass messages to
address the received message onto the queue
:param publish_ack: EventHub_pb2.Ack the ack received from either wss or grpc
:return: None
"""
logging.debug("ack received: " + str(publish_ack).replace('\n', ' '))
self._rx_queue.append(publish_ack)
|
publisher callback that grpc and web socket can pass messages to
address the received message onto the queue
:param publish_ack: EventHub_pb2.Ack the ack received from either wss or grpc
:return: None
|
def ANNASSIGN(self, node):
    """
    Annotated assignments don't have annotations evaluated on function
    scope, hence the custom implementation. Compared to the pyflakes
    version, we defer evaluation of the annotations (and values on
    module level).
    """
    has_value = node.value is not None and bool(node.value)
    if has_value:
        # Only bind the *target* if the assignment has value.
        # Otherwise it's not really ast.Store and shouldn't silence
        # UndefinedLocal warnings.
        self.handleNode(node.target, node)
    # Annotations are evaluated lazily everywhere except function scope.
    if not isinstance(self.scope, FunctionScope):
        self.deferHandleNode(node.annotation, node)
    if has_value:
        if isinstance(self.scope, ModuleScope):
            # Module level: defer the value too.
            self.deferHandleNode(node.value, node)
        else:
            # Otherwise handle the value right away.
            self.handleNode(node.value, node)
|
Annotated assignments don't have annotations evaluated on function
scope, hence the custom implementation. Compared to the pyflakes
version, we defer evaluation of the annotations (and values on
module level).
|
def safe_file(path, suffix=None, cleanup=True):
    """A with-context that copies a file, and copies the copy back to the original file on success.
    This is useful for doing work on a file but only changing its state on success.
    :param str suffix: Use this suffix to create the copy. Otherwise use a random string.
    :param bool cleanup: Whether or not to clean up the copy.
    """
    # NOTE(review): this is a generator — presumably decorated with
    # @contextmanager at the (unseen) definition site; TODO confirm.
    safe_path = '{0}.{1}'.format(path, suffix or uuid.uuid4())
    # Seed the working copy from the original, if it exists yet.
    if os.path.exists(path):
        shutil.copy(path, safe_path)
    try:
        # Hand the copy to the with-block body.
        yield safe_path
        # Reaching here means the body succeeded: propagate the copy back.
        if cleanup:
            shutil.move(safe_path, path)
        else:
            shutil.copy(safe_path, path)
    finally:
        # On failure (or after a successful copy-back) remove the copy.
        if cleanup:
            safe_delete(safe_path)
|
A with-context that copies a file, and copies the copy back to the original file on success.
This is useful for doing work on a file but only changing its state on success.
:param str suffix: Use this suffix to create the copy. Otherwise use a random string.
:param bool cleanup: Whether or not to clean up the copy.
|
def _suppressed(self, filename, line, code):
"""Return true if linter error code is suppressed inline.
The suppression format is suppress(CODE1,CODE2,CODE3) etc.
"""
if code in self.suppress_codes:
return True
lines = self._file_lines(filename)
# File is zero length, cannot be suppressed
if not lines:
return False
# Handle errors which appear after the end of the document.
while line > len(lines):
line = line - 1
relevant_line = lines[line - 1]
try:
suppressions_function = relevant_line.split("#")[1].strip()
if suppressions_function.startswith("suppress("):
return code in _parse_suppressions(suppressions_function)
except IndexError:
above_line = lines[max(0, line - 2)]
suppressions_function = above_line.strip()[1:].strip()
if suppressions_function.startswith("suppress("):
return code in _parse_suppressions(suppressions_function)
finally:
pass
|
Return true if linter error code is suppressed inline.
The suppression format is suppress(CODE1,CODE2,CODE3) etc.
|
def exec_resize(self, exec_id, height=None, width=None):
    """
    Resize the tty session used by the specified exec command.
    Args:
        exec_id (str): ID of the exec instance
        height (int): Height of tty session
        width (int): Width of tty session
    """
    # Accept either an id string or an inspect-style dict.
    if isinstance(exec_id, dict):
        exec_id = exec_id.get('Id')
    url = self._url("/exec/{0}/resize", exec_id)
    response = self._post(url, params={'h': height, 'w': width})
    self._raise_for_status(response)
|
Resize the tty session used by the specified exec command.
Args:
exec_id (str): ID of the exec instance
height (int): Height of tty session
width (int): Width of tty session
|
def has_parent_bins(self, bin_id):
    """Tests if the ``Bin`` has any parents.
    arg: bin_id (osid.id.Id): the ``Id`` of a bin
    return: (boolean) - ``true`` if the bin has parents, ``false``
            otherwise
    raise: NotFound - ``bin_id`` is not found
    raise: NullArgument - ``bin_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.has_parent_bins
    # Fall back to the hierarchy session when no catalog session is set.
    if self._catalog_session is None:
        return self._hierarchy_session.has_parents(id_=bin_id)
    return self._catalog_session.has_parent_catalogs(catalog_id=bin_id)
|
Tests if the ``Bin`` has any parents.
arg: bin_id (osid.id.Id): the ``Id`` of a bin
return: (boolean) - ``true`` if the bin has parents, ``false``
otherwise
raise: NotFound - ``bin_id`` is not found
raise: NullArgument - ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
def get_remote_client(self, target_name, user=None, password=None):
    """
    Return a new client for the remote target.

    The client is lightweight: it only carries different credentials and
    shares the transport with the underlying client.
    """
    if user:
        base_client = self.get_user_client(user, password, populate=False)
    else:
        # No explicit credentials: reuse this client via a weak proxy so
        # the remote client does not keep it alive.
        base_client = weakproxy(self)
    return RemoteXCLIClient(base_client, target_name, populate=True)
|
Returns a new client for the remote target. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client
|
def prefetch_relations(weak_queryset):
    """
    FROM: https://djangosnippets.org/snippets/2492/
    Consider such a model class::
        class Action(models.Model):
            actor_content_type = models.ForeignKey(ContentType,related_name='actor')
            actor_object_id = models.PositiveIntegerField()
            actor = GenericForeignKey('actor_content_type','actor_object_id')
    And dataset::
        Action(actor=user1).save()
        Action(actor=user2).save()
    This will hit the user table once for each action::
        [a.actor for a in Action.objects.all()]
    Whereas this will hit the user table once::
        [a.actor for a in prefetch_relations(Action.objects.all())]
    Actually, the example above will hit the database N+1 times, where N is
    the number of actions. But with prefetch_relations(), the database will be
    hit N+1 times where N is the number of distinct content types.
    Note that prefetch_relations() is recursive.
    Here an example, making a list with prefetch_relations(), and then without
    prefetch_relations(). See the number of database hits after each test.
    In [1]: from django import db; from prefetch_relations import prefetch_relations
    In [2]: db.reset_queries()
    In [3]: x = [(a.actor, a.action_object, a.target) for a in prefetch_relations(Action.objects.all().order_by('-pk'))]
    In [4]: print len(db.connection.queries)
    34
    In [5]: db.reset_queries()
    In [6]: print len(db.connection.queries)
    0
    In [7]: x = [(a.actor, a.action_object, a.target) for a in Action.objects.all().order_by('-pk')]
    In [8]: print len(db.connection.queries)
    396
    """
    from django.contrib.contenttypes.models import ContentType
    from django.contrib.contenttypes.fields import GenericForeignKey
    # reverse model's generic foreign keys into a dict:
    # { 'field_name': generic.GenericForeignKey instance, ... }
    gfks = {}
    for name, gfk in weak_queryset.model.__dict__.items():
        if not isinstance(gfk, GenericForeignKey):
            continue
        gfks[name] = gfk
    # First pass: collect the related object ids, grouped by content type.
    data = {}
    for weak_model in weak_queryset:
        for gfk_name, gfk_field in gfks.items():
            related_content_type_id = getattr(
                weak_model,
                gfk_field.model._meta.get_field(gfk_field.ct_field).get_attname())
            # Skip rows whose generic FK is unset.
            if not related_content_type_id:
                continue
            related_content_type = ContentType.objects.get_for_id(related_content_type_id)
            related_object_id = int(getattr(weak_model, gfk_field.fk_field))
            if related_content_type not in data.keys():
                data[related_content_type] = []
            data[related_content_type].append(related_object_id)
    # Second pass: fetch each content type's objects in a single query
    # (recursively prefetching *their* relations) and attach each fetched
    # object to every row that points at it.
    for content_type, object_ids in data.items():
        model_class = content_type.model_class()
        models = prefetch_relations(model_class.objects.filter(pk__in=object_ids).select_related())
        for model in models:
            for weak_model in weak_queryset:
                for gfk_name, gfk_field in gfks.items():
                    related_content_type_id = getattr(
                        weak_model,
                        gfk_field.model._meta.get_field(gfk_field.ct_field).get_attname())
                    if not related_content_type_id:
                        continue
                    related_content_type = ContentType.objects.get_for_id(related_content_type_id)
                    related_object_id = int(getattr(weak_model, gfk_field.fk_field))
                    # Only attach when both the pk and content type match.
                    if related_object_id != model.pk:
                        continue
                    if related_content_type != content_type:
                        continue
                    setattr(weak_model, gfk_name, model)
    return weak_queryset
|
FROM: https://djangosnippets.org/snippets/2492/
Consider such a model class::
class Action(models.Model):
actor_content_type = models.ForeignKey(ContentType,related_name='actor')
actor_object_id = models.PositiveIntegerField()
actor = GenericForeignKey('actor_content_type','actor_object_id')
And dataset::
Action(actor=user1).save()
Action(actor=user2).save()
This will hit the user table once for each action::
[a.actor for a in Action.objects.all()]
Whereas this will hit the user table once::
[a.actor for a in prefetch_relations(Action.objects.all())]
Actually, the example above will hit the database N+1 times, where N is
the number of actions. But with prefetch_relations(), the database will be
hit N+1 times where N is the number of distinct content types.
Note that prefetch_relations() is recursive.
Here an example, making a list with prefetch_relations(), and then without
prefetch_relations(). See the number of database hits after each test.
In [1]: from django import db; from prefetch_relations import prefetch_relations
In [2]: db.reset_queries()
In [3]: x = [(a.actor, a.action_object, a.target) for a in prefetch_relations(Action.objects.all().order_by('-pk'))]
In [4]: print len(db.connection.queries)
34
In [5]: db.reset_queries()
In [6]: print len(db.connection.queries)
0
In [7]: x = [(a.actor, a.action_object, a.target) for a in Action.objects.all().order_by('-pk')]
In [8]: print len(db.connection.queries)
396
|
def create_database_session(engine):
    """Open a new SQLAlchemy session bound to *engine*.

    :raises DatabaseError: when the database connection fails
    """
    try:
        session_factory = sessionmaker(bind=engine)
        return session_factory()
    except OperationalError as e:
        raise DatabaseError(error=e.orig.args[1], code=e.orig.args[0])
|
Connect to the database
|
def _update_property_from_dict(self, section, option, new_properties):
""" Update a config property value with a new property value
Property name must be equal to 'Section_option' of config property
:param section: config section
:param option: config option
:param new_properties: dict with new properties values
"""
try:
property_name = "{0}_{1}".format(section, option)
self.set(section, option, new_properties[property_name])
except KeyError:
pass
|
Update a config property value with a new property value
Property name must be equal to 'Section_option' of config property
:param section: config section
:param option: config option
:param new_properties: dict with new properties values
|
def handle(self, *args, **kwargs):
    """Run the executor listener. This method never returns.

    Blocks on a fresh asyncio event loop until the listener is terminated
    by SIGINT or SIGTERM.
    """
    listener = ExecutorListener(redis_params=getattr(settings, 'FLOW_MANAGER', {}).get('REDIS_CONNECTION', {}))
    def _killer(signum, frame):
        """Kill the listener on receipt of a signal."""
        listener.terminate()
    # Shut down gracefully on Ctrl-C or a TERM signal.
    signal(SIGINT, _killer)
    signal(SIGTERM, _killer)
    async def _runner():
        """Run the listener instance."""
        # Optionally drop queued commands before starting.
        if kwargs['clear_queue']:
            await listener.clear_queue()
        # Entering the async context starts the listener; it runs until
        # terminate() is called by the signal handler.
        async with listener:
            pass
    loop = asyncio.new_event_loop()
    loop.run_until_complete(_runner())
    loop.close()
|
Run the executor listener. This method never returns.
|
def _check_cats(cats, vtypes, df, prep, callers):
    """Only include categories in the final output if they have values.

    A category is kept when the mean of its chart values across all
    variant types exceeds 2. If no category qualifies, the full input
    list is returned unchanged.
    """
    out = []
    for cat in cats:
        all_vals = []
        for vtype in vtypes:
            vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers)
            all_vals.extend(vals)
        # Guard against an empty value list: the original divided by
        # len(all_vals) unconditionally and raised ZeroDivisionError.
        if all_vals and sum(all_vals) / float(len(all_vals)) > 2:
            out.append(cat)
    return out if out else cats
|
Only include categories in the final output if they have values.
|
def next_population(self, population, fitnesses):
    """Make a new population after each optimization iteration.

    The incoming population and fitnesses are ignored: a fresh random
    population is generated every iteration.

    Args:
        population: The population current population of solutions.
        fitnesses: The fitness associated with each solution in the population
    Returns:
        list; a list of solutions.
    """
    size = self._population_size
    return common.make_population(size, self._generate_solution)
|
Make a new population after each optimization iteration.
Args:
population: The population current population of solutions.
fitnesses: The fitness associated with each solution in the population
Returns:
list; a list of solutions.
|
def get_path(brain_or_object):
    """Calculate the physical path of this object
    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Physical path of the object
    :rtype: string
    """
    # Brains already carry their path; content objects must be resolved.
    if is_brain(brain_or_object):
        return brain_or_object.getPath()
    obj = get_object(brain_or_object)
    return "/".join(obj.getPhysicalPath())
|
Calculate the physical path of this object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Physical path of the object
:rtype: string
|
def plugins_post_process(self, retcode):
    """
    Call post_process() on all plugins.

    Each plugin may transform the return code; a plugin failure is logged
    (not propagated) and forces a nonzero return code.
    :param retcode: current process return code
    :return: the return code after all plugins have run
    """
    logger.info("Post-processing test...")
    self.publish("core", "stage", "post_process")
    for plugin in self.plugins.values():
        logger.debug("Post-process %s", plugin)
        try:
            logger.debug("RC before: %s", retcode)
            retcode = plugin.post_process(retcode)
            logger.debug("RC after: %s", retcode)
        except Exception:  # FIXME too broad exception clause
            # Deliberately best-effort: one failing plugin must not stop
            # the others, but it must surface as a failing return code.
            logger.error("Failed post-processing plugin %s", plugin, exc_info=True)
            if not retcode:
                retcode = 1
    return retcode
|
Call post_process() on all plugins
|
def _add_snps(
        self,
        snps,
        discrepant_snp_positions_threshold,
        discrepant_genotypes_threshold,
        save_output,
    ):
        """ Add SNPs to this Individual.

        Merges the incoming SNPs into the Individual's existing SNPs.
        Discrepant positions keep the original values; discrepant genotypes
        are nulled out. If either kind of discrepancy exceeds its threshold,
        the merge is aborted and nothing is added.

        Parameters
        ----------
        snps : SNPs
            SNPs to add
        discrepant_snp_positions_threshold : int
            max discrepant positions tolerated before the merge is aborted
        discrepant_genotypes_threshold : int
            max discrepant genotypes tolerated before the merge is aborted
        save_output
            truthy flag; when set, discrepancies are written to CSV files

        Returns
        -------
        discrepant_positions : pandas.DataFrame
        discrepant_genotypes : pandas.DataFrame
        """
        discrepant_positions = pd.DataFrame()
        discrepant_genotypes = pd.DataFrame()
        # Nothing to merge.
        if snps.snps is None:
            return discrepant_positions, discrepant_genotypes
        build = snps.build
        source = [s.strip() for s in snps.source.split(",")]
        if not snps.build_detected:
            print("build not detected, assuming build {}".format(snps.build))
        if self._build is None:
            self._build = build
        elif self._build != build:
            # Warn only; merging across builds would misalign positions.
            print(
                "build / assembly mismatch between current build of SNPs and SNPs being loaded"
            )
        # ensure there are always two X alleles
        snps = self._double_single_alleles(snps.snps, "X")
        if self._snps is None:
            # First load: adopt the incoming SNPs wholesale.
            self._source.extend(source)
            self._snps = snps
        else:
            # Compare only SNPs present in both sets (joined on rsid index).
            common_snps = self._snps.join(snps, how="inner", rsuffix="_added")
            discrepant_positions = common_snps.loc[
                (common_snps["chrom"] != common_snps["chrom_added"])
                | (common_snps["pos"] != common_snps["pos_added"])
            ]
            if 0 < len(discrepant_positions) < discrepant_snp_positions_threshold:
                print(
                    str(len(discrepant_positions)) + " SNP positions were discrepant; "
                    "keeping original positions"
                )
                if save_output:
                    self._discrepant_positions_file_count += 1
                    lineage.save_df_as_csv(
                        discrepant_positions,
                        self._output_dir,
                        self.get_var_name()
                        + "_discrepant_positions_"
                        + str(self._discrepant_positions_file_count)
                        + ".csv",
                    )
            elif len(discrepant_positions) >= discrepant_snp_positions_threshold:
                # Too many mismatches usually means a build mismatch; abort.
                print(
                    "too many SNPs differ in position; ensure same genome build is being used"
                )
                return discrepant_positions, discrepant_genotypes
            # remove null genotypes
            common_snps = common_snps.loc[
                ~common_snps["genotype"].isnull()
                & ~common_snps["genotype_added"].isnull()
            ]
            # discrepant genotypes are where alleles are not equivalent (i.e., alleles are not the
            # same and not swapped)
            discrepant_genotypes = common_snps.loc[
                (
                    (common_snps["genotype"].str.len() == 1)
                    & (common_snps["genotype_added"].str.len() == 1)
                    & ~(
                        common_snps["genotype"].str[0]
                        == common_snps["genotype_added"].str[0]
                    )
                )
                | (
                    (common_snps["genotype"].str.len() == 2)
                    & (common_snps["genotype_added"].str.len() == 2)
                    & ~(
                        (
                            common_snps["genotype"].str[0]
                            == common_snps["genotype_added"].str[0]
                        )
                        & (
                            common_snps["genotype"].str[1]
                            == common_snps["genotype_added"].str[1]
                        )
                    )
                    & ~(
                        (
                            common_snps["genotype"].str[0]
                            == common_snps["genotype_added"].str[1]
                        )
                        & (
                            common_snps["genotype"].str[1]
                            == common_snps["genotype_added"].str[0]
                        )
                    )
                )
            ]
            if 0 < len(discrepant_genotypes) < discrepant_genotypes_threshold:
                print(
                    str(len(discrepant_genotypes)) + " SNP genotypes were discrepant; "
                    "marking those as null"
                )
                if save_output:
                    self._discrepant_genotypes_file_count += 1
                    lineage.save_df_as_csv(
                        discrepant_genotypes,
                        self._output_dir,
                        self.get_var_name()
                        + "_discrepant_genotypes_"
                        + str(self._discrepant_genotypes_file_count)
                        + ".csv",
                    )
            elif len(discrepant_genotypes) >= discrepant_genotypes_threshold:
                # Many genotype mismatches suggests files from different people.
                print(
                    "too many SNPs differ in their genotype; ensure file is for same "
                    "individual"
                )
                return discrepant_positions, discrepant_genotypes
            # add new SNPs
            self._source.extend(source)
            self._snps = self._snps.combine_first(snps)
            # Null out the genotypes flagged as discrepant above.
            self._snps.loc[discrepant_genotypes.index, "genotype"] = np.nan
            # combine_first converts position to float64, so convert it back to int64
            self._snps["pos"] = self._snps["pos"].astype(np.int64)
        self._snps = sort_snps(self._snps)
        return discrepant_positions, discrepant_genotypes
|
Add SNPs to this Individual.
Parameters
----------
snps : SNPs
SNPs to add
discrepant_snp_positions_threshold : int
see above
discrepant_genotypes_threshold : int
see above
save_output
see above
Returns
-------
discrepant_positions : pandas.DataFrame
discrepant_genotypes : pandas.DataFrame
|
def t_heredoc(self, t):
        r'<<\S+\r?\n'
        # NOTE: the raw-string "docstring" above is the token's regular
        # expression (PLY-style lexer convention) -- it matches a heredoc
        # introducer such as "<<EOF\n" and must not be reworded.
        # Presumably marks the heredoc as not tab-indented -- TODO confirm.
        t.lexer.is_tabbed = False
        self._init_heredoc(t)
        # Switch the lexer into the 'heredoc' state until the terminator.
        t.lexer.begin('heredoc')
|
r'<<\S+\r?\n
|
def untlpy2etd_ms(untl_elements, **kwargs):
    """Convert the UNTL elements structure into an ETD_MS structure.

    kwargs can be passed to the function for certain effects.

    Walks the UNTL children, dispatching each tag through
    ETD_MS_CONVERSION_DISPATCH. Degree sub-elements are collected and
    emitted as a single degree node at the end; 'meta' elements with an
    'ark' qualifier yield an identifier child.
    """
    degree_children = {}
    date_exists = False
    seen_creation = False
    # Make the root element.
    etd_ms_root = ETD_MS_CONVERSION_DISPATCH['thesis']()
    for element in untl_elements.children:
        etd_ms_element = None
        # Convert the UNTL element to etd_ms where applicable.
        if element.tag in ETD_MS_CONVERSION_DISPATCH:
            # Create the etd_ms_element if the element's content
            # is stored in children nodes.
            if element.children:
                etd_ms_element = ETD_MS_CONVERSION_DISPATCH[element.tag](
                    qualifier=element.qualifier,
                    children=element.children,
                )
            # If we hit a degree element, make just that one.
            elif element.tag == 'degree':
                # Make a dict of the degree children information.
                if element.qualifier in ['name',
                                         'level',
                                         'discipline',
                                         'grantor']:
                    degree_children[element.qualifier] = element.content
            # For date elements, limit to first instance of creation date.
            elif element.tag == 'date':
                if element.qualifier == 'creation':
                    # NOTE(review): `del child` only unbinds the loop
                    # variable; it does NOT remove the node from
                    # etd_ms_root.children -- confirm intended behavior.
                    for child in etd_ms_root.children:
                        if child.tag == 'date':
                            del child
                    if not seen_creation:
                        date_exists = False
                    seen_creation = True
                if not date_exists:
                    # Create the etd_ms element.
                    etd_ms_element = ETD_MS_CONVERSION_DISPATCH[element.tag](
                        qualifier=element.qualifier,
                        content=element.content,
                    )
                    date_exists = True
            # It is a normal element.
            elif element.tag not in ['date', 'degree']:
                # Create the etd_ms_element.
                etd_ms_element = ETD_MS_CONVERSION_DISPATCH[element.tag](
                    qualifier=element.qualifier,
                    content=element.content,
                )
        # Add the element to the structure if the element exists.
        if etd_ms_element:
            etd_ms_root.add_child(etd_ms_element)
        if element.tag == 'meta':
            # Initialize ark to False because it may not exist yet.
            ark = False
            # Iterate through children and look for ark.
            for i in etd_ms_root.children:
                if i.tag == 'identifier' and i.content.startswith(
                    'http://digital.library.unt.edu/'
                ):
                    ark = True
            # If the ark doesn't yet exist, try and create it.
            if not ark:
                # Reset for future tests.
                ark = False
                if element.qualifier == 'ark':
                    ark = element.content
                # NOTE(review): when the qualifier is not 'ark', ark stays
                # False and `False is not None` is True, so an identifier
                # with ark=False is still created -- confirm this is intended.
                if ark is not None:
                    # Create the ark identifier element and add it.
                    ark_identifier = ETD_MS_CONVERSION_DISPATCH['identifier'](
                        ark=ark,
                    )
                    etd_ms_root.add_child(ark_identifier)
    # If children exist for the degree, make a degree element.
    if degree_children:
        degree_element = ETD_MS_CONVERSION_DISPATCH['degree']()
        # When we have all the elements stored, add the children to the
        # degree node.
        degree_child_element = None
        # .items() works on both Python 2 and 3; .iteritems() is Py2-only.
        for k, v in degree_children.items():
            # Create the individual classes for degrees.
            degree_child_element = ETD_MS_DEGREE_DISPATCH[k](
                content=v,
            )
            # If the keys in degree_children are valid,
            # add it to the child.
            if degree_child_element:
                degree_element.add_child(degree_child_element)
        etd_ms_root.add_child(degree_element)
    return etd_ms_root
|
Convert the UNTL elements structure into an ETD_MS structure.
kwargs can be passed to the function for certain effects.
|
def set_restricted(self, obj, restricted):
    """Mark or unmark the given object as restricted.

    Use this to signal that a certain function is restricted; query the
    flag later with :meth:`Reftrack.is_restricted`.

    :param obj: a hashable object
    :param restricted: True to restrict the object, False to lift the restriction.
    :type restricted: :class:`bool`
    :returns: None
    :rtype: None
    :raises: None
    """
    if restricted:
        self._restricted.add(obj)
    else:
        # discard() is a no-op when obj is absent, matching the original
        # membership-checked remove().
        self._restricted.discard(obj)
|
Set the restriction on the given object.
You can use this to signal that a certain function is restricted.
Then you can query the restriction later with :meth:`Reftrack.is_restricted`.
:param obj: a hashable object
:param restricted: True, if you want to restrict the object.
:type restricted: :class:`bool`
:returns: None
:rtype: None
:raises: None
|
def update_col(self, column_name, series):
    """
    Add or replace a column in the underlying DataFrame.

    Parameters
    ----------
    column_name : str
        Column to add or replace.
    series : pandas.Series or sequence
        Column data.
    """
    message = 'updating column {!r} in table {!r}'.format(column_name, self.name)
    logger.debug(message)
    self.local[column_name] = series
|
Add or replace a column in the underlying DataFrame.
Parameters
----------
column_name : str
Column to add or replace.
series : pandas.Series or sequence
Column data.
|
def setOverlayTransformOverlayRelative(self, ulOverlayHandle, ulOverlayHandleParent):
        """Sets the transform to relative to the transform of the specified overlay. This overlays visibility will also track the parents visibility"""
        fn = self.function_table.setOverlayTransformOverlayRelative
        # NOTE(review): the matrix is zero-initialized here and passed by
        # reference; this generated wrapper is getter-shaped (no transform
        # argument is accepted) -- confirm against the OpenVR IVROverlay API.
        pmatParentOverlayToOverlayTransform = HmdMatrix34_t()
        result = fn(ulOverlayHandle, ulOverlayHandleParent, byref(pmatParentOverlayToOverlayTransform))
        # Return the native result code together with the matrix struct.
        return result, pmatParentOverlayToOverlayTransform
|
Sets the transform to relative to the transform of the specified overlay. This overlays visibility will also track the parents visibility
|
async def teardown_conn(self, context):
    """Tear down a connection from a client.

    Removes the client's entry from ``self.clients``; logs a warning when
    the client was never registered.
    """
    client_id = context.user_data
    self._logger.info("Tearing down client connection: %s", client_id)
    if client_id in self.clients:
        del self.clients[client_id]
    else:
        self._logger.warning("client_id %s did not exist in teardown_conn", client_id)
|
Teardown a connection from a client.
|
def train_model(best_processed_path, weight_path='../weight/model_weight.h5', verbose=2):
    """
    Train the CNN word-beginning model on a processed BEST dataset,
    checkpointing the best weights to ``weight_path``.

    Input
    =====
    best_processed_path: str, path to processed BEST dataset
    weight_path: str, path to weight file to checkpoint into
    verbose: int, verbosity option for training the Keras model

    Output
    ======
    model: keras model, keras model for tokenize prediction
    """
    x_train_char, x_train_type, y_train = prepare_feature(best_processed_path, option='train')
    x_test_char, x_test_type, y_test = prepare_feature(best_processed_path, option='test')

    # Use a validation split only when the processed dataset provides one.
    has_validation = os.path.isdir(os.path.join(best_processed_path, 'val'))
    if has_validation:
        x_val_char, x_val_type, y_val = prepare_feature(best_processed_path, option='val')

    # Make sure the directory for the weight checkpoint exists.
    weight_dir = os.path.dirname(weight_path)
    if not os.path.isdir(weight_dir):
        os.makedirs(weight_dir)

    callbacks_list = [
        ReduceLROnPlateau(),
        ModelCheckpoint(
            weight_path,
            save_best_only=True,
            save_weights_only=True,
            monitor='val_loss',
            mode='min',
            verbose=1
        )
    ]

    # Train in stages with progressively larger batch sizes.
    model = get_convo_nn2()
    for epochs, batch_size in [(10, 256), (3, 512), (3, 2048), (3, 4096), (3, 8192)]:
        print("train with {} epochs and {} batch size".format(epochs, batch_size))
        fit_kwargs = dict(
            epochs=epochs,
            batch_size=batch_size,
            verbose=verbose,
            callbacks=callbacks_list,
        )
        if has_validation:
            fit_kwargs['validation_data'] = ([x_val_char, x_val_type], y_val)
        model.fit([x_train_char, x_train_type], y_train, **fit_kwargs)
    return model
|
Given path to processed BEST dataset,
train CNN model for words beginning alongside with
character label encoder and character type label encoder
Input
=====
best_processed_path: str, path to processed BEST dataset
weight_path: str, path to weight path file
verbose: int, verbosity option for training Keras model
Output
======
model: keras model, keras model for tokenize prediction
|
def get_args(
    self,
    keep_blank_values: bool = False,
    strict_parsing: bool = False,
    encoding: str = "utf-8",
    errors: str = "replace",
) -> RequestParameters:
    """
    Parse ``query_string`` using ``urllib.parse.parse_qs`` and cache the
    result per parameter combination.

    This method is used by the ``args`` property; call it directly if you
    need to change the default parameters.

    :param keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.
        A true value indicates that blanks should be retained as blank
        strings. The default false value indicates that blank values
        are to be ignored and treated as if they were not included.
    :type keep_blank_values: bool
    :param strict_parsing: flag indicating what to do with parsing errors.
        If false (the default), errors are silently ignored. If true,
        errors raise a ValueError exception.
    :type strict_parsing: bool
    :param encoding: specify how to decode percent-encoded sequences
        into Unicode characters, as accepted by the bytes.decode() method.
    :type encoding: str
    :param errors: specify how to decode percent-encoded sequences
        into Unicode characters, as accepted by the bytes.decode() method.
    :type errors: str
    :return: RequestParameters
    """
    # The cache is keyed by the full parameter combination.
    cache_key = (keep_blank_values, strict_parsing, encoding, errors)
    if not self.parsed_args[cache_key] and self.query_string:
        self.parsed_args[cache_key] = RequestParameters(
            parse_qs(
                qs=self.query_string,
                keep_blank_values=keep_blank_values,
                strict_parsing=strict_parsing,
                encoding=encoding,
                errors=errors,
            )
        )
    return self.parsed_args[cache_key]
|
Method to parse `query_string` using `urllib.parse.parse_qs`.
This methods is used by `args` property.
Can be used directly if you need to change default parameters.
:param keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
:type keep_blank_values: bool
:param strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
:type strict_parsing: bool
:param encoding: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
:type encoding: str
:param errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
:type errors: str
:return: RequestParameters
|
def wrap(self, row: Union[Mapping[str, Any], Sequence[Any]]):
    """Build a row tuple (dataclass instance) from a mapping or sequence row."""
    if isinstance(row, Mapping):
        # Mapping rows are looked up by their configured column names.
        kwargs = {
            ident: row[column_name]
            for ident, column_name in self.ids_and_column_names.items()
        }
    else:
        # Positional rows line up with the declared identifier order.
        kwargs = dict(zip(self.ids_and_column_names.keys(), row))
    return self.dataclass(**kwargs)
|
Return row tuple for row.
|
def do_bc(self, arg):
        """
        [~process] bc <address> - clear a code breakpoint
        [~thread] bc <address> - clear a hardware breakpoint
        [~process] bc <address-address> - clear a memory breakpoint
        [~process] bc <address> <size> - clear a memory breakpoint
        """
        # One or two tokens: an address (or range) plus an optional size.
        token_list = self.split_tokens(arg, 1, 2)
        pid, tid, address, size = self.input_breakpoint(token_list)
        debug = self.debug
        found = False
        if size is None:
            # No size given: clear a hardware breakpoint on the thread
            # and/or a code breakpoint in the process, whichever exists.
            if tid is not None:
                if debug.has_hardware_breakpoint(tid, address):
                    debug.dont_watch_variable(tid, address)
                    found = True
            if pid is not None:
                if debug.has_code_breakpoint(pid, address):
                    debug.dont_break_at(pid, address)
                    found = True
        else:
            # A size (or address range) identifies a memory/page breakpoint.
            if debug.has_page_breakpoint(pid, address):
                debug.dont_watch_buffer(pid, address, size)
                found = True
        if not found:
            print("Error: breakpoint not found.")
|
[~process] bc <address> - clear a code breakpoint
[~thread] bc <address> - clear a hardware breakpoint
[~process] bc <address-address> - clear a memory breakpoint
[~process] bc <address> <size> - clear a memory breakpoint
|
def set_authoring_nodes(self, editor):
    """
    Sets the Model authoring Nodes using given editor.

    Registers the editor's file under the default project Node, then
    registers the editor under the resulting file Node.

    :param editor: Editor to set.
    :type editor: Editor
    :return: Method success.
    :rtype: bool
    """
    file_node = self.register_file(editor.file, self.default_project_node)
    self.register_editor(editor, file_node)
    return True
|
Sets the Model authoring Nodes using given editor.
:param editor: Editor to set.
:type editor: Editor
:return: Method success.
:rtype: bool
|
def _boundary(self):
""" Returns a random string to use as the boundary for a message.
Returns:
string. Boundary
"""
boundary = None
try:
import uuid
boundary = uuid.uuid4().hex
except ImportError:
import random, sha
bits = random.getrandbits(160)
boundary = sha.new(str(bits)).hexdigest()
return boundary
|
Returns a random string to use as the boundary for a message.
Returns:
string. Boundary
|
def payload(self):
    """
    Renders the resource payload.

    :returns: a dict representing the object to be used as payload for a request
    """
    data = {
        'type': self.resource_type(),
        'attributes': self.attributes,
    }
    # Only persisted resources carry an id.
    if self.id:
        data['id'] = self.id
    return data
|
Renders the resource payload.
:returns: a dict representing the object to be used as payload for a request
|
def _print_napps(cls, napp_list):
    """Format the NApp list with install/enable status flags and print it."""
    mgr = NAppsManager()
    enabled = mgr.get_enabled()
    installed = mgr.get_installed()
    rows = []
    for napp, desc in sorted(napp_list):
        # Two-character status: 'i' = installed, 'e' = enabled.
        flags = ('i' if napp in installed else '-') \
            + ('e' if napp in enabled else '-')
        rows.append(('[{}]'.format(flags), '{}/{}'.format(*napp), desc))
    cls.print_napps(rows)
|
Format the NApp list to be printed.
|
def get_2d_local_memory_v2(x, query_shape, memory_flange):
  """Gathering memory blocks around query blocks. flange is half of query .

  Only works if memory flanges are half of query sizes.

  Args:
    x: a [batch, height, width, depth tensor]
    query_shape: 2-d integer list of query shape
    memory_flange: 2-d integer list of memory flanges

  Returns:
    x: A [batch, num_h_blocks, num_w_blocks,
      query_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]]
      tensor.
  """
  (_, height, width, depth_x) = common_layers.shape_list(x)
  # add extra padding to x so that we can extract the memory region
  # around the center
  paddings = [[0, 0], [memory_flange[0], memory_flange[0]],
              [memory_flange[1], memory_flange[1]], [0, 0]]
  padded_x = tf.pad(x, paddings)
  padded_x.set_shape([None, height+2*memory_flange[0],
                      width+2*memory_flange[1], depth_x])
  # With flange == query/2, the padding adds exactly one extra
  # query-sized block along each spatial axis.
  num_h_memory_blocks = height//query_shape[0] + 1
  num_w_memory_blocks = width//query_shape[1] + 1
  # Cut the padded tensor into query-sized blocks.
  x_memory_blocks = _extract_blocks(padded_x,
                                    query_shape[0], query_shape[1])
  # Concatenate each block with its right neighbor along width, then each
  # with its bottom neighbor along height, so every output block covers
  # its query region plus the surrounding flange.
  x_width_blocks = tf.split(x_memory_blocks, num_w_memory_blocks,
                            2)
  x_left_width = tf.concat(x_width_blocks[:num_w_memory_blocks - 1], axis=2)
  x_right_width = tf.concat(x_width_blocks[1:], axis=2)
  x_memory_blocks = tf.concat([x_left_width, x_right_width], axis=4)
  x_height_blocks = tf.split(x_memory_blocks, num_h_memory_blocks, 1)
  x_top_height = tf.concat(x_height_blocks[:num_h_memory_blocks - 1], axis=1)
  x_bottom_height = tf.concat(x_height_blocks[1:], axis=1)
  x = tf.concat([x_top_height, x_bottom_height], axis=3)
  return x
|
Gathering memory blocks around query blocks. flange is half of query .
Only works if memory flanges are half of query sizes.
Args:
x: a [batch, height, width, depth tensor]
query_shape: 2-d integer list of query shape
memory_flange: 2-d integer list of memory flanges
Returns:
x: A [batch, num_h_blocks, num_w_blocks,
query_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]]
tensor.
|
def close(self):
    """Close the widget and unregister the IPython post-run hook (idempotent)."""
    if self.closed:
        return
    self._ipython.events.unregister('post_run_cell', self._fill)
    self._box.close()
    self.closed = True
|
Close and remove hooks.
|
def visit(self, node):
    """Visit a node.

    This method is largely modelled after the ast.NodeTransformer class.

    Args:
        node: The node to visit.

    Returns:
        A tuple of the primal and adjoint, each of which is a node or a list of
        nodes.
    """
    method_name = 'visit_' + node.__class__.__name__
    if not hasattr(self, method_name):
        raise ValueError('Unknown node type: %s' % node.__class__.__name__)
    visitor = getattr(self, method_name)
    # If this node is a statement, inform all child nodes what the active
    # variables in this statement are.
    if anno.hasanno(node, 'active_in'):
        self.active_variables = anno.getanno(node, 'active_in')
    pri, adj = visitor(node)

    def _annotate(target, key, value):
        # Annotate a single node, or each node in a list of nodes.
        if isinstance(target, gast.AST):
            anno.setdefaultanno(target, key, value)
        else:
            for sub in target:
                anno.setdefaultanno(sub, key, value)

    # Cross-link primal and adjoint statements.
    _annotate(pri, 'adj', adj)
    _annotate(adj, 'pri', pri)
    return pri, adj
|
Visit a node.
This method is largely modelled after the ast.NodeTransformer class.
Args:
node: The node to visit.
Returns:
A tuple of the primal and adjoint, each of which is a node or a list of
nodes.
|
def get(self, session):
    '''taobao.aftersale.get -- query the user's after-sale service templates.

    Returns only the title and id of each template the user configured.
    '''
    request = TOPRequest('taobao.aftersale.get')
    response = self.execute(request, session)
    self.create(response)
    return self.after_sales
|
taobao.aftersale.get 查询用户售后服务模板
查询用户设置的售后服务模板,仅返回标题和id
|
def subtract(self, years=0, months=0, weeks=0, days=0):
    """
    Remove duration from the instance.

    :param years: The number of years
    :type years: int
    :param months: The number of months
    :type months: int
    :param weeks: The number of weeks
    :type weeks: int
    :param days: The number of days
    :type days: int

    :rtype: Date
    """
    # Delegate to add() with every component negated.
    return self.add(
        years=-years,
        months=-months,
        weeks=-weeks,
        days=-days,
    )
|
Remove duration from the instance.
:param years: The number of years
:type years: int
:param months: The number of months
:type months: int
:param weeks: The number of weeks
:type weeks: int
:param days: The number of days
:type days: int
:rtype: Date
|
def rewrite_update(clauseelement, multiparams, params):
    """ change the params to enable partial updates

    sqlalchemy by default only supports updates of complex types in the form of

        "col = ?", ({"x": 1, "y": 2},)

    but crate supports

        "col['x'] = ?, col['y'] = ?", (1, 2)

    by using the `Craty` (`MutableDict`) type.
    The update statement is only rewritten if an item of the MutableDict was
    changed.
    """
    newmultiparams = []
    _multiparams = multiparams[0]
    if len(_multiparams) == 0:
        return clauseelement, multiparams, params
    for _params in _multiparams:
        newparams = {}
        for key, val in _params.items():
            if (
                not isinstance(val, MutableDict) or
                (not any(val._changed_keys) and not any(val._deleted_keys))
            ):
                # Plain values and untouched MutableDicts pass through as-is.
                newparams[key] = val
                continue
            # Expand each changed key into its own "col['key']" parameter...
            for subkey, subval in val.items():
                if subkey in val._changed_keys:
                    newparams["{0}['{1}']".format(key, subkey)] = subval
            # ...and null out deleted keys.
            for subkey in val._deleted_keys:
                newparams["{0}['{1}']".format(key, subkey)] = None
        newmultiparams.append(newparams)
    _multiparams = (newmultiparams, )
    # NOTE(review): only the first rewritten param set is baked into the
    # clause via .values(); the rest travel through the returned
    # _multiparams -- confirm executemany-style updates handle this.
    clause = clauseelement.values(newmultiparams[0])
    # Flag lets the Crate dialect recognize the rewritten clause.
    clause._crate_specific = True
    return clause, _multiparams, params
|
change the params to enable partial updates
sqlalchemy by default only supports updates of complex types in the form of
"col = ?", ({"x": 1, "y": 2}
but crate supports
"col['x'] = ?, col['y'] = ?", (1, 2)
by using the `Craty` (`MutableDict`) type.
The update statement is only rewritten if an item of the MutableDict was
changed.
|
def copy(self):
    '''Create a copy of the current instance.

    :returns: A safely-editable copy of the current sequence.
    :rtype: coral.DNA
    '''
    # Copy features individually so edits to the copy cannot mutate ours.
    duplicated_features = [feat.copy() for feat in self.features]
    # run_checks=False skips the alphabet check for a significant
    # performance improvement -- the sequence was already validated.
    return type(self)(self.top.seq, circular=self.circular,
                      features=duplicated_features, name=self.name,
                      bottom=self.bottom.seq, run_checks=False)
|
Create a copy of the current instance.
:returns: A safely-editable copy of the current sequence.
:rtype: coral.DNA
|
def sort_item(iterable, number, reverse=False):
    """Sort the iterable by the item found at the given index.

    :param iterable: iterable of indexable items (tuples, lists, ...)
    :param number: index of the item to sort by
    :param reverse: sort in descending order when True
    :return: a new sorted list
    """
    return sorted(iterable, key=lambda item: item[number], reverse=reverse)
|
Sort the iterable according to the item at the given index.
|
def b58decode(val, charset=DEFAULT_CHARSET):
    """Decode base58check encoded input to original raw bytes.

    :param bytes val: The value to base58check decode.
    :param bytes charset: (optional) The character set to use for decoding.
    :return: the decoded bytes.
    :rtype: bytes

    Usage::

      >>> import base58check
      >>> base58check.b58decode('1BoatSLRHtKNngkdXEeobR76b53LETtpyT')
      b'\x00v\x80\xad\xec\x8e\xab\xca\xba\xc6v\xbe\x9e\x83\x85J\xde\x0b\xd2,\xdb\x0b\xb9`\xde'
    """
    def _b58decode_int(val):
        # Horner's method: fold the base-58 digits into a single integer.
        output = 0
        for char in val:
            output = output * base + charset.index(char)
        return output
    # Normalize both inputs to bytes so indexing behaves consistently.
    if isinstance(val, str):
        val = val.encode()
    if isinstance(charset, str):
        charset = charset.encode()
    base = len(charset)
    if not base == 58:
        raise ValueError('charset base must be 58, not %s' % base)
    # Each leading "zero digit" (first charset char) encodes one 0x00 byte.
    pad_len = len(val)
    val = val.lstrip(bytes([charset[0]]))
    pad_len -= len(val)
    acc = _b58decode_int(val)
    # Peel the integer apart into big-endian bytes.
    result = deque()
    while acc > 0:
        acc, mod = divmod(acc, 256)
        result.appendleft(mod)
    prefix = b'\0' * pad_len
    return prefix + bytes(result)
|
Decode base58check encoded input to original raw bytes.
:param bytes val: The value to base58cheeck decode.
:param bytes charset: (optional) The character set to use for decoding.
:return: the decoded bytes.
:rtype: bytes
Usage::
>>> import base58check
>>> base58check.b58decode('\x00v\x80\xad\xec\x8e\xab\xca\xba\xc6v\xbe'
... '\x9e\x83\x85J\xde\x0b\xd2,\xdb\x0b\xb9`\xde')
b'1BoatSLRHtKNngkdXEeobR76b53LETtpyT'
|
def Validate(self, value, **_):
    """Check that value is a valid boolean and coerce it to RDFBool."""
    # None is allowed and passes through without validation.
    if value is None:
      return
    # Delegate validation to the parent class, then wrap the result.
    return rdfvalue.RDFBool(super(ProtoBoolean, self).Validate(value))
|
Check that value is a valid enum.
|
def autobuild_docproject():
    """Autobuild a project that only contains documentation"""
    try:
        # Even documentation-only projects need release information built
        # before the docs themselves.
        family = utilities.get_family('module_settings.json')
        autobuild_release(family)
        autobuild_documentation(family.tile)
    except unit_test.IOTileException as exc:
        print(exc.format())
        Exit(1)
|
Autobuild a project that only contains documentation
|
def merge_rest_api_config(configs):
    """
    Merge a list of RestApiConfig objects into a single RestApiConfig,
    giving priority in the order of the configs (first has highest
    priority).

    Args:
        configs: iterable of config objects exposing the RestApiConfig
            attributes, where None means "not set".

    Returns:
        RestApiConfig: for each field, the first non-None value found
        scanning the configs in priority order.
    """
    fields = (
        'bind',
        'connect',
        'timeout',
        'opentsdb_url',
        'opentsdb_db',
        'opentsdb_username',
        'opentsdb_password',
        'client_max_size',
    )
    merged = {name: None for name in fields}
    # Iterate lowest-priority first so higher-priority values overwrite.
    for config in reversed(configs):
        for name in fields:
            value = getattr(config, name)
            if value is not None:
                merged[name] = value
    return RestApiConfig(**merged)
|
Given a list of PathConfig objects, merges them into a single PathConfig,
giving priority in the order of the configs (first has highest priority).
|
def sanitize(self):
        '''
        Check if the current settings conform to the LISP specifications and
        fix them where possible.

        :raises ValueError: if a field cannot be made to conform.
        '''
        super(MapRegisterMessage, self).sanitize()
        # P: This is the proxy-map-reply bit, when set to 1 an ETR sends a Map-
        #    Register message requesting for the Map-Server to proxy Map-Reply.
        #    The Map-Server will send non-authoritative Map-Replies on behalf
        #    of the ETR.  Details on this usage can be found in [LISP-MS].
        if not isinstance(self.proxy_map_reply, bool):
            raise ValueError('Proxy Map Reply flag must be a boolean')
        # The third bit after the Type field in the Map-Register message is
        # allocated as "I" bit. I bit indicates that a 128 bit xTR-ID and a 64
        # bit site-ID field is present at the end of the Map-Register message.
        # If an xTR is configured with an xTR-ID or site-ID, it MUST set the I
        # bit to 1 and include its xTR-ID and site-ID in the Map-Register
        # messages it generates, if either the xTR-ID or site-ID is not
        # configured an unspecified value is encoded for the ID not configured.
        # If the R bit in the Map-Register is set to 1, the I bit must also be
        # set to 1, and an xTR-ID must be included in the Map-Register message
        # sent to an RTR.
        #
        # xTR-ID is a 128 bit field at the end of the Map-Register message,
        # starting after the final Record in the message. The xTR-ID is used
        # to identify the intended recipient xTR for a Map-Notify message,
        # especially in the case where a site has more than one xTR. A value
        # of all zeros indicate that an xTR-ID is not specified, though encoded
        # in the message. This is useful in the case where a site-ID is
        # specified, but no xTR-ID is configured. When a Map-Server receives a
        # Map-Register with an xTR-ID specified (I bit set and xTR-ID has a
        # non-zero value), it MUST copy the XTR-ID from the Map-Register to the
        # associated Map-Notify message. When a Map-Server is sending an
        # unsolicited Map-Notify to an xTR to notify the xTR of a change in
        # locators, the Map-Server must include the xTR-ID for the intended
        # recipient xTR, if it has one stored locally.
        if not isinstance(self.xtr_id, numbers.Integral) \
        or self.xtr_id < 0 or self.xtr_id >= 2 ** 128:
            raise ValueError('Invalid xTR-ID')
        # site-ID is a 64 bit field at the end of the Map-Register message,
        # following the xTR-ID. The site-ID is used by the Map-Server
        # receiving the Map-Register message to identify which xTRs belong to
        # the same site. A value of 0 indicate that a site-ID is not
        # specified, though encoded in the message. When a Map-Server receives
        # a Map-Regeter with a site-ID specified (I bit set and site-ID has
        # non-zero value), it must copy the site-ID from the Map-Register to
        # the associated Map-Notify message. When a Map-Server is sending an
        # unsolicited Map-Notify to an xTR to notify the xTR of a change in
        # locators, the Map-Server must include the site-ID for the intended
        # recipient xTR, if it has one stored locally.
        if not isinstance(self.site_id, numbers.Integral) \
        or self.site_id < 0 or self.site_id >= 2 ** 64:
            raise ValueError('Invalid site-ID')
        # The fourth bit after the Type field in the Map-Register message is
        # allocated as "R" bit. R bit indicates that the Map-Register is built
        # for an RTR. R bit must be set in a Map-Register that a LISP device
        # sends to an RTR.
        if not isinstance(self.for_rtr, bool):
            raise ValueError('For-RTR flag must be a boolean')
        # M: This is the want-map-notify bit, when set to 1 an ETR is
        #    requesting for a Map-Notify message to be returned in response to
        #    sending a Map-Register message.  The Map-Notify message sent by a
        #    Map-Server is used to an acknowledge receipt of a Map-Register
        #    message.
        if not isinstance(self.want_map_notify, bool):
            raise ValueError('Want Map Notify flag must be a boolean')
        # Nonce:  This 8-octet Nonce field is set to 0 in Map-Register
        #    messages.  Since the Map-Register message is authenticated, the
        #    nonce field is not currently used for any security function but
        #    may be in the future as part of an anti-replay solution.
        if self.nonce != '\x00\x00\x00\x00\x00\x00\x00\x00':
            # Cisco devices seems to fill it in even if it should be 0
            # NOTE(review): if self.nonce is bytes (Python 3), comparing it
            # against a str is always unequal, so this branch always runs --
            # harmless while it is a no-op, but confirm before enabling the
            # commented-out ValueError below.
            pass
            # raise ValueError('Invalid nonce (must be 0 for Map-Register): {0}'.format(self.nonce.encode('hex')))
        # Key ID:  A configured ID to find the configured Message
        #    Authentication Code (MAC) algorithm and key value used for the
        #    authentication function.  See Section 14.4 for codepoint
        #    assignments.
        if self.key_id not in (KEY_ID_NONE, KEY_ID_HMAC_SHA_1_96,
                               KEY_ID_HMAC_SHA_256_128):
            raise ValueError('Invalid Key ID')
        # Authentication Data:  The message digest used from the output of the
        #    Message Authentication Code (MAC) algorithm.  The entire Map-
        #    Register payload is authenticated with this field preset to 0.
        #    After the MAC is computed, it is placed in this field.
        #    Implementations of this specification MUST include support for
        #    HMAC-SHA-1-96 [RFC2404] and support for HMAC-SHA-256-128 [RFC6234]
        #    is RECOMMENDED.
        if not isinstance(self.authentication_data, bytes):
            raise ValueError('Invalid authentication data')
        # Map-Reply Record:  When the M bit is set, this field is the size of a
        #    single "Record" in the Map-Reply format.  This Map-Reply record
        #    contains the EID-to-RLOC mapping entry associated with the Source
        #    EID.  This allows the ETR which will receive this Map-Request to
        #    cache the data if it chooses to do so.
        # Each record validates itself.
        for record in self.records:
            if not isinstance(record, MapRegisterRecord):
                raise ValueError('Invalid record')
            record.sanitize()
Check if the current settings conform to the LISP specifications and
fix them where possible.
|
def addAEMOD(rh):
    """
    Send an Activation Modification Script to the virtual machine.
    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'AEMOD'
          userid      - userid of the virtual machine
          parms['aeScript']   - File specification of the AE script
          parms['invparms']   - invparms operand
    Output:
       Request Handle updated with the results.
       Return code - 0: ok
       Return code - 4: input error, rs - 11 AE script not found
    """
    rh.printSysLog("Enter changeVM.addAEMOD")
    invokeScript = "invokeScript.sh"
    trunkFile = "aemod.doscript"
    fileClass = "X"
    tempDir = tempfile.mkdtemp()

    try:
        if os.path.isfile(rh.parms['aeScript']):
            # Copy the AE modifier script into the temp directory under its
            # base name.  basename() handles both absolute and relative
            # specifications, including relative paths with a directory
            # component (which the old rindex('/') logic mishandled).
            baseName = os.path.basename(rh.parms['aeScript'])
            shutil.copyfile(rh.parms['aeScript'], tempDir + "/" + baseName)

            # Create the invocation script that runs the AE script with the
            # caller-supplied parameters.
            # NOTE(review): the docstring names the key 'invparms' but the
            # code reads 'invParms' -- confirm which spelling callers set.
            with open(tempDir + "/" + invokeScript, "w") as fh:
                fh.write("#!/bin/bash \n")
                fh.write("/bin/bash %s %s \n" % (baseName,
                                                 rh.parms['invParms']))

            # Generate the tar package for punch.  Skip the archive file
            # itself: tarfile.open(..., "w") has already created it in this
            # same directory, so without the guard the partially written tar
            # would be added to its own contents.
            with tarfile.open(tempDir + "/" + trunkFile, "w") as tar:
                for file in os.listdir(tempDir):
                    if file == trunkFile:
                        continue
                    tar.add(tempDir + "/" + file, arcname=file)

            # Punch file to reader
            punch2reader(rh, rh.userid, tempDir + "/" + trunkFile, fileClass)
        else:
            # AE script does not exist.
            msg = msgs.msg['0400'][1] % (modId, rh.parms['aeScript'])
            rh.printLn("ES", msg)
            rh.updateResults(msgs.msg['0400'][0])
    finally:
        # Always remove the temporary work directory, even on error.
        shutil.rmtree(tempDir)

    rh.printSysLog("Exit changeVM.addAEMOD, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
|
Send an Activation Modification Script to the virtual machine.
Input:
Request Handle with the following properties:
function - 'CHANGEVM'
subfunction - 'AEMOD'
userid - userid of the virtual machine
parms['aeScript'] - File specification of the AE script
parms['invparms'] - invparms operand
Output:
Request Handle updated with the results.
Return code - 0: ok
Return code - 4: input error, rs - 11 AE script not found
|
def set_axis_labels(self, x_var=None, y_var=None):
    """Set axis labels on the left column and bottom row of the grid."""
    def _label_axis(var, axis):
        # Apply the label for one axis; a None var means "leave untouched".
        if var is None:
            return
        setter = self.set_xlabels if axis == "x" else self.set_ylabels
        if var in self.data.coords:
            # Remember the coordinate backing this axis and derive the
            # label text from its attributes.
            setattr(self, "_%s_var" % axis, var)
            setter(label_from_attrs(self.data[var]))
        else:
            # Plain string label, used verbatim.
            setter(var)

    _label_axis(x_var, "x")
    _label_axis(y_var, "y")
    return self
|
Set axis labels on the left column and bottom row of the grid.
|
def tabular(client, records):
    """Format dataset files with a tabular output.
    :param client: LocalClient instance.
    :param records: Filtered collection.
    """
    from renku.models._tabulate import tabulate

    # Column spec: attribute name -> display header (None keeps the name).
    columns = OrderedDict((
        ('added', None),
        ('authors_csv', 'authors'),
        ('dataset', None),
        ('full_path', 'path'),
    ))
    echo_via_pager(tabulate(records, headers=columns))
|
Format dataset files with a tabular output.
:param client: LocalClient instance.
:param records: Filtered collection.
|
def CreateReply(self, **attributes):
    """Build a new packet that replies to this one.

    The id, secret, authenticator and dictionary of the current packet
    are carried over into the reply; extra keyword arguments are passed
    through to the new Packet.
    """
    return Packet(
        id=self.id,
        secret=self.secret,
        authenticator=self.authenticator,
        dict=self.dict,
        **attributes)
|
Create a new packet as a reply to this one. This method
makes sure the authenticator and secret are copied over
to the new instance.
|
def clone(self, run=True):
    """
    Clone task
    :param run: run task after cloning
    :return: Task object.
    """
    # The server starts the cloned task immediately when action=run is set.
    query_params = {'action': 'run'} if run else {}
    logger.info('Cloning task', extra={
        'resource': self.__class__.__name__,
        'query': {'id': self.id, 'run': run},
    })
    response = self._api.post(
        url=self._URL['clone'].format(id=self.id), params=query_params)
    return Task(api=self._api, **response.json())
|
Clone task
:param run: run task after cloning
:return: Task object.
|
def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):
    """Download ALL mapped experimental structures to the protein structures directory.
    Args:
        outdir (str): Path to output directory, if protein structures directory not set or other output directory is
            desired
        pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
        force_rerun (bool): If files should be re-downloaded if they already exist
    Returns:
        list: List of PDB IDs that were downloaded
    Todo:
        * Parse mmtf or PDB file for header information, rather than always getting the cif file for header info
    """
    # Fall back to the configured structures directory when no explicit
    # output directory is given.
    if not outdir:
        outdir = self.structure_dir
        if not outdir:
            raise ValueError('Output directory must be specified')
    if not pdb_file_type:
        pdb_file_type = self.pdb_file_type

    # Nothing mapped -- return an empty list (not None) so the documented
    # "Returns: list" contract holds on every path and callers can iterate
    # the result unconditionally.
    if self.num_structures_experimental == 0:
        log.debug('{}: no structures available - nothing will be downloaded'.format(self.id))
        return []

    downloaded_pdb_ids = []
    for s in self.get_experimental_structures():
        log.debug('{}: downloading structure file from the PDB...'.format(s.id))
        s.download_structure_file(outdir=outdir, file_type=pdb_file_type,
                                  force_rerun=force_rerun, load_header_metadata=True)
        downloaded_pdb_ids.append(s.id)
    return downloaded_pdb_ids
|
Download ALL mapped experimental structures to the protein structures directory.
Args:
outdir (str): Path to output directory, if protein structures directory not set or other output directory is
desired
pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
force_rerun (bool): If files should be re-downloaded if they already exist
Returns:
list: List of PDB IDs that were downloaded
Todo:
* Parse mmtf or PDB file for header information, rather than always getting the cif file for header info
|
def superclasses(self, inherited=False):
    """Iterate over the superclasses of the class.

    This function is the Python equivalent
    of the CLIPS class-superclasses command.
    """
    result = clips.data.DataObject(self._env)
    # The C API fills the DataObject with a multifield of class names;
    # int(inherited) selects direct-only vs. transitive superclasses.
    lib.EnvClassSuperclasses(
        self._env, self._cls, result.byref, int(inherited))
    yield from classes(self._env, result.value)
|
Iterate over the superclasses of the class.
This function is the Python equivalent
of the CLIPS class-superclasses command.
|
def pngout(ext_args):
    """Run the external program pngout on the file."""
    cmd = list(_PNGOUT_ARGS)
    cmd.append(ext_args.old_filename)
    cmd.append(ext_args.new_filename)
    extern.run_ext(cmd)
    return _PNG_FORMAT
|
Run the external program pngout on the file.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.