code | docstring
|---|---|
def sync(self):
'Update state of folder from Jottacloud server'
log.info("syncing %r" % self.path)
self.folder = self.jfs.get(self.path)
self.synced = True
|
Update state of folder from Jottacloud server
|
def get_submissions_multiple_assignments_by_sis_id(
self, is_section, sis_id, students=None, assignments=None,
**params):
"""
List submissions for multiple assignments by course/section sis id and
optionally student
https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.for_students
"""
if is_section:
return self.get_submissions_multiple_assignments(
is_section, self._sis_id(sis_id, 'section'), students,
assignments, **params)
else:
return self.get_submissions_multiple_assignments(
is_section, self._sis_id(sis_id, 'course'), students,
assignments, **params)
|
List submissions for multiple assignments by course/section sis id and
optionally student
https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.for_students
|
def is_group(value):
"""
Check whether groupname or gid as argument exists.
If this function receives a group name, it is converted to a gid and then validated.
"""
if isinstance(value, str):
try:
entry = grp.getgrnam(value)
value = entry.gr_gid
except KeyError:
err_message = ('{0}: No such group.'.format(value))
raise validate.VdtValueError(err_message)
return value
elif isinstance(value, int):
try:
grp.getgrgid(value)
except KeyError:
err_message = ('{0}: No such group.'.format(value))
raise validate.VdtValueError(err_message)
return value
else:
err_message = 'Please use str or int for the "group" parameter.'
raise validate.VdtTypeError(err_message)
|
Check whether groupname or gid as argument exists.
If this function receives a group name, it is converted to a gid and then validated.
|
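A quick usage sketch (hedged: `grp` is POSIX-only, `validate` is assumed to be the configobj validation module, and 'wheel' is a hypothetical group name that may not exist on every system):

# Hypothetical calls illustrating the three branches above.
is_group('wheel')        # returns the numeric gid looked up via grp.getgrnam
is_group(0)              # returns 0 after validating it via grp.getgrgid
is_group('no-such-grp')  # raises validate.VdtValueError
is_group(3.5)            # raises validate.VdtTypeError (neither str nor int)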
def configure(logstash_host=None, logstash_port=None, logdir=None):
'''Configuration settings.'''
if not (logstash_host or logstash_port or logdir):
raise ValueError('you must specify at least one parameter')
config.logstash.host = logstash_host or config.logstash.host
config.logstash.port = logstash_port or config.logstash.port
config.logdir = logdir or config.logdir
create_logdir(config.logdir)
|
Configuration settings.
|
def blockreplace(path,
marker_start='#-- start managed zone --',
marker_end='#-- end managed zone --',
content='',
append_if_not_found=False,
prepend_if_not_found=False,
backup='.bak',
dry_run=False,
show_changes=True,
append_newline=False,
insert_before_match=None,
insert_after_match=None):
'''
.. versionadded:: 2014.1.0
Replace content of a text block in a file, delimited by line markers
A block of content delimited by comments can help you manage several lines
of entries without worrying about removing old entries.
.. note::
This function will store two copies of the file in-memory (the original
version and the edited version) in order to detect changes and only
edit the targeted file if necessary.
path
Filesystem path to the file to be edited
marker_start
The line content identifying a line as the start of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output
marker_end
The line content identifying the end of the content block. As of
versions 2017.7.5 and 2018.3.1, everything up to the text matching the
marker will be replaced, so it's important to ensure that your marker
includes the beginning of the text you wish to replace.
content
The content to be used between the two lines identified by marker_start
and marker_end.
append_if_not_found : False
If markers are not found and this option is set to ``True``, the markers and
content will be appended to the file.
prepend_if_not_found : False
If markers are not found and this option is set to ``True``, the markers and
content will be prepended to the file.
insert_before_match
If markers are not found, this parameter can be set to a regex which will
insert the block before the first found occurrence in the file.
.. versionadded:: Neon
insert_after_match
If markers are not found, this parameter can be set to a regex which will
insert the block after the first found occurrence in the file.
.. versionadded:: Neon
backup
The file extension to use for a backup of the file if any edit is made.
Set to ``False`` to skip making a backup.
dry_run : False
If ``True``, do not make any edits to the file and simply return the
changes that *would* be made.
show_changes : True
Controls how changes are presented. If ``True``, this function will
return a unified diff of the changes made. If False, then it will
return a boolean (``True`` if any changes were made, otherwise
``False``).
append_newline : False
Controls whether or not a newline is appended to the content block. If
the value of this argument is ``True`` then a newline will be added to
the content block. If it is ``False``, then a newline will *not* be
added to the content block. If it is ``None`` then a newline will only
be added to the content block if it does not already end in a newline.
.. versionadded:: 2016.3.4
.. versionchanged:: 2017.7.5,2018.3.1
New behavior added when value is ``None``.
.. versionchanged:: 2019.2.0
The default value of this argument will change to ``None`` to match
the behavior of the :py:func:`file.blockreplace state
<salt.states.file.blockreplace>`
CLI Example:
.. code-block:: bash
salt '*' file.blockreplace /etc/hosts '#-- start managed zone foobar : DO NOT EDIT --' \\
'#-- end managed zone foobar --' $'10.0.1.1 foo.foobar\\n10.0.1.2 bar.foobar' True
'''
exclusive_params = [append_if_not_found, prepend_if_not_found, bool(insert_before_match), bool(insert_after_match)]
if sum(exclusive_params) > 1:
raise SaltInvocationError(
'Only one of append_if_not_found, prepend_if_not_found,'
' insert_before_match, and insert_after_match is permitted'
)
path = os.path.expanduser(path)
if not os.path.exists(path):
raise SaltInvocationError('File not found: {0}'.format(path))
try:
file_encoding = __utils__['files.get_encoding'](path)
except CommandExecutionError:
file_encoding = None
if __utils__['files.is_binary'](path):
if not file_encoding:
raise SaltInvocationError(
'Cannot perform string replacements on a binary file: {0}'
.format(path)
)
if insert_before_match or insert_after_match:
if insert_before_match:
if not isinstance(insert_before_match, six.string_types):
raise CommandExecutionError(
'RegEx expected in insert_before_match parameter.'
)
elif insert_after_match:
if not isinstance(insert_after_match, six.string_types):
raise CommandExecutionError(
'RegEx expected in insert_after_match parameter.'
)
if append_newline is None and not content.endswith((os.linesep, '\n')):
append_newline = True
# Split the content into a list of lines, removing newline characters. To
# ensure that we handle both Windows and POSIX newlines, first split on
# Windows newlines, and then split on POSIX newlines.
split_content = []
for win_line in content.split('\r\n'):
for content_line in win_line.split('\n'):
split_content.append(content_line)
line_count = len(split_content)
has_changes = False
orig_file = []
new_file = []
in_block = False
block_found = False
linesep = None
def _add_content(linesep, lines=None, include_marker_start=True,
end_line=None):
if lines is None:
lines = []
include_marker_start = True
if end_line is None:
end_line = marker_end
end_line = end_line.rstrip('\r\n') + linesep
if include_marker_start:
lines.append(marker_start + linesep)
if split_content:
for index, content_line in enumerate(split_content, 1):
if index != line_count:
lines.append(content_line + linesep)
else:
# We're on the last line of the content block
if append_newline:
lines.append(content_line + linesep)
lines.append(end_line)
else:
lines.append(content_line + end_line)
else:
lines.append(end_line)
return lines
# We do not use in-place editing to avoid file attrs modifications when
# no changes are required and to avoid any file access on a partially
# written file.
try:
fi_file = io.open(path, mode='r', encoding=file_encoding, newline='')
for line in fi_file:
write_line_to_new_file = True
if linesep is None:
# Auto-detect line separator
if line.endswith('\r\n'):
linesep = '\r\n'
elif line.endswith('\n'):
linesep = '\n'
else:
# No newline(s) in file, fall back to system's linesep
linesep = os.linesep
if marker_start in line:
# We've entered the content block
in_block = True
else:
if in_block:
# We're not going to write the lines from the old file to
# the new file until we have exited the block.
write_line_to_new_file = False
marker_end_pos = line.find(marker_end)
if marker_end_pos != -1:
# End of block detected
in_block = False
# We've found and exited the block
block_found = True
_add_content(linesep, lines=new_file,
include_marker_start=False,
end_line=line[marker_end_pos:])
# Save the line from the original file
orig_file.append(line)
if write_line_to_new_file:
new_file.append(line)
except (IOError, OSError) as exc:
raise CommandExecutionError(
'Failed to read from {0}: {1}'.format(path, exc)
)
finally:
if linesep is None:
# If the file was empty, we will not have set linesep yet. Assume
# the system's line separator. This is needed for when we
# prepend/append later on.
linesep = os.linesep
try:
fi_file.close()
except Exception:
pass
if in_block:
# unterminated block => bad, always fail
raise CommandExecutionError(
'Unterminated marked block. End of file reached before marker_end.'
)
if not block_found:
if prepend_if_not_found:
# add the markers and content at the beginning of file
prepended_content = _add_content(linesep)
prepended_content.extend(new_file)
new_file = prepended_content
block_found = True
elif append_if_not_found:
# Make sure we have a newline at the end of the file
if new_file:
if not new_file[-1].endswith(linesep):
new_file[-1] += linesep
# add the markers and content at the end of file
_add_content(linesep, lines=new_file)
block_found = True
elif insert_before_match or insert_after_match:
match_regex = insert_before_match or insert_after_match
match_idx = [i for i, item in enumerate(orig_file) if re.search(match_regex, item)]
if match_idx:
match_idx = match_idx[0]
for line in _add_content(linesep):
if insert_after_match:
match_idx += 1
new_file.insert(match_idx, line)
if insert_before_match:
match_idx += 1
block_found = True
if not block_found:
raise CommandExecutionError(
'Cannot edit marked block. Markers were not found in file.'
)
diff = __utils__['stringutils.get_diff'](orig_file, new_file)
has_changes = diff != ''
if has_changes and not dry_run:
# changes detected
# backup file attrs
perms = {}
perms['user'] = get_user(path)
perms['group'] = get_group(path)
perms['mode'] = salt.utils.files.normalize_mode(get_mode(path))
# backup old content
if backup is not False:
backup_path = '{0}{1}'.format(path, backup)
shutil.copy2(path, backup_path)
# copy2 does not preserve ownership
check_perms(backup_path,
None,
perms['user'],
perms['group'],
perms['mode'])
# write new content in the file while avoiding partial reads
try:
fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
for line in new_file:
fh_.write(salt.utils.stringutils.to_bytes(line, encoding=file_encoding))
finally:
fh_.close()
# this may have overwritten file attrs
check_perms(path,
None,
perms['user'],
perms['group'],
perms['mode'])
if show_changes:
return diff
return has_changes
|
.. versionadded:: 2014.1.0
Replace content of a text block in a file, delimited by line markers
A block of content delimited by comments can help you manage several lines
of entries without worrying about removing old entries.
.. note::
This function will store two copies of the file in-memory (the original
version and the edited version) in order to detect changes and only
edit the targeted file if necessary.
path
Filesystem path to the file to be edited
marker_start
The line content identifying a line as the start of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output
marker_end
The line content identifying the end of the content block. As of
versions 2017.7.5 and 2018.3.1, everything up to the text matching the
marker will be replaced, so it's important to ensure that your marker
includes the beginning of the text you wish to replace.
content
The content to be used between the two lines identified by marker_start
and marker_end.
append_if_not_found : False
If markers are not found and this option is set to ``True``, the markers and
content will be appended to the file.
prepend_if_not_found : False
If markers are not found and this option is set to ``True``, the markers and
content will be prepended to the file.
insert_before_match
If markers are not found, this parameter can be set to a regex which will
insert the block before the first found occurrence in the file.
.. versionadded:: Neon
insert_after_match
If markers are not found, this parameter can be set to a regex which will
insert the block after the first found occurrence in the file.
.. versionadded:: Neon
backup
The file extension to use for a backup of the file if any edit is made.
Set to ``False`` to skip making a backup.
dry_run : False
If ``True``, do not make any edits to the file and simply return the
changes that *would* be made.
show_changes : True
Controls how changes are presented. If ``True``, this function will
return a unified diff of the changes made. If False, then it will
return a boolean (``True`` if any changes were made, otherwise
``False``).
append_newline : False
Controls whether or not a newline is appended to the content block. If
the value of this argument is ``True`` then a newline will be added to
the content block. If it is ``False``, then a newline will *not* be
added to the content block. If it is ``None`` then a newline will only
be added to the content block if it does not already end in a newline.
.. versionadded:: 2016.3.4
.. versionchanged:: 2017.7.5,2018.3.1
New behavior added when value is ``None``.
.. versionchanged:: 2019.2.0
The default value of this argument will change to ``None`` to match
the behavior of the :py:func:`file.blockreplace state
<salt.states.file.blockreplace>`
CLI Example:
.. code-block:: bash
salt '*' file.blockreplace /etc/hosts '#-- start managed zone foobar : DO NOT EDIT --' \\
'#-- end managed zone foobar --' $'10.0.1.1 foo.foobar\\n10.0.1.2 bar.foobar' True
|
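The heart of `blockreplace` is the marker scan: lines outside the markers are copied through, the old block body is dropped, and the new content is emitted once between the markers. A minimal standalone sketch of that idea (not Salt's implementation; no backup, encoding, newline, or permission handling):

def replace_block(lines, content_lines,
                  start='#-- start managed zone --',
                  end='#-- end managed zone --'):
    # Copy everything outside the markers; swap the body between them.
    out, in_block = [], False
    for line in lines:
        if start in line:
            in_block = True
            out.append(line)            # keep the start marker line
            out.extend(content_lines)   # emit the managed content once
        elif in_block and end in line:
            in_block = False
            out.append(line)            # keep the end marker line
        elif not in_block:
            out.append(line)            # lines inside the old block are dropped
    return out

# replace_block(['a', '#-- start managed zone --', 'old', '#-- end managed zone --', 'b'],
#               ['new 1', 'new 2'])
# -> ['a', '#-- start managed zone --', 'new 1', 'new 2', '#-- end managed zone --', 'b']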
def add_sync_methods(cls):
"""Class decorator to add synchronous methods corresponding to async methods.
This modifies the class in place, adding additional methods to it.
If a synchronous method of a given name already exists it is not
replaced.
Args:
cls: A class.
Returns:
The same class, modified in place.
"""
for name in list(cls.__dict__.keys()):
if name.endswith('_async'):
sync_name = name[:-6]
if not hasattr(cls, sync_name):
setattr(cls, sync_name, _make_sync_method(name))
return cls
|
Class decorator to add synchronous methods corresponding to async methods.
This modifies the class in place, adding additional methods to it.
If a synchronous method of a given name already exists it is not
replaced.
Args:
cls: A class.
Returns:
The same class, modified in place.
|
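A hedged illustration of what the decorator does. `_make_sync_method` is not shown in the snippet above; the toy stand-in below assumes the `*_async` method returns a future-like object with `get_result()`, which the generated synchronous wrapper blocks on.

def _make_sync_method(async_name):
    # Toy stand-in (assumption): call the *_async method and block on its result.
    def sync(self, *args, **kwargs):
        return getattr(self, async_name)(*args, **kwargs).get_result()
    return sync

class _Future(object):
    def __init__(self, value):
        self._value = value
    def get_result(self):
        return self._value

@add_sync_methods
class Client(object):
    def fetch_async(self, key):
        return _Future('value for %s' % key)

# The decorator adds a generated Client.fetch, so:
# Client().fetch('k') == Client().fetch_async('k').get_result() == 'value for k'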
def to_download():
"""
Build interval of urls to download.
We always get the first file of the next day.
Ex: 2013-01-01 => 2013-01-02.0000
"""
first_day = parse(interval_first)
last_day = parse(interval_last)
format_change = parse('2010-06-14')
one_day = datetime.timedelta(1)
cur_day = first_day
url_list = []
while cur_day < last_day:
fname = filename.format(day=cur_day.strftime("%Y%m%d"))
if cur_day > format_change:
cur_day += one_day
url = base_url.format(year_month=cur_day.strftime("%Y.%m"),
file_day=cur_day.strftime("%Y%m%d"))
else:
url = base_url_old.format(year_month=cur_day.strftime("%Y.%m"),
file_day=cur_day.strftime("%Y%m%d"))
cur_day += one_day
url_list.append((fname, url))
return sorted(url_list, key=lambda tup: tup[0], reverse=True)
|
Build interval of urls to download.
We always get the first file of the next day.
Ex: 2013-01-01 => 2013-01-02.0000
|
def project_new_folder(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /project-xxxx/newFolder API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FnewFolder
"""
return DXHTTPRequest('/%s/newFolder' % object_id, input_params, always_retry=always_retry, **kwargs)
|
Invokes the /project-xxxx/newFolder API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FnewFolder
|
def parent_frame_arguments():
"""Returns parent frame arguments.
When called inside a function, returns a dictionary with the caller's function
arguments. These are positional arguments and keyword arguments (**kwargs),
while variable arguments (*varargs) are excluded.
When called at global scope, this will return an empty dictionary, since there
are no arguments.
WARNING: If caller function argument names are overloaded before invoking
this method, then values will reflect the overloaded value. For this reason,
we recommend calling `parent_frame_arguments` at the beginning of the
function.
"""
# All arguments and the names used for *varargs, and **kwargs
arg_names, variable_arg_name, keyword_arg_name, local_vars = (
tf_inspect._inspect.getargvalues( # pylint: disable=protected-access
# Get the first frame of the caller of this method.
tf_inspect._inspect.stack()[1][0])) # pylint: disable=protected-access
# Remove the *varargs, and flatten the **kwargs. Both are
# nested lists.
local_vars.pop(variable_arg_name, {})
keyword_args = local_vars.pop(keyword_arg_name, {})
final_args = {}
# Copy over arguments and their values. In general, local_vars
# may contain more than just the arguments, since this method
# can be called anywhere in a function.
for arg_name in arg_names:
final_args[arg_name] = local_vars.pop(arg_name)
final_args.update(keyword_args)
return final_args
|
Returns parent frame arguments.
When called inside a function, returns a dictionary with the caller's function
arguments. These are positional arguments and keyword arguments (**kwargs),
while variable arguments (*varargs) are excluded.
When called at global scope, this will return an empty dictionary, since there
are no arguments.
WARNING: If caller function argument names are overloaded before invoking
this method, then values will reflect the overloaded value. For this reason,
we recommend calling `parent_frame_arguments` at the beginning of the
function.
|
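The same idea can be sketched with the standard library's `inspect` module alone (a simplified stand-in for the `tf_inspect` wrapper used above; the helper name `_caller_arguments` is hypothetical):

import inspect

def _caller_arguments():
    # Inspect the caller's frame and collect its named arguments plus **kwargs.
    arg_names, _varargs_name, kwargs_name, local_vars = inspect.getargvalues(
        inspect.stack()[1][0])
    args = {name: local_vars[name] for name in arg_names}
    args.update(local_vars.get(kwargs_name) or {})
    return args

def example(a, b=2, **kwargs):
    return _caller_arguments()

# example(1, flag=True) -> {'a': 1, 'b': 2, 'flag': True}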
def _generate_annotation_type_class(self, ns, annotation_type):
# type: (ApiNamespace, AnnotationType) -> None
"""Defines a Python class that represents an annotation type in Stone."""
self.emit('class {}(bb.AnnotationType):'.format(
class_name_for_annotation_type(annotation_type, ns)))
with self.indent():
if annotation_type.has_documented_type_or_params():
self.emit('"""')
if annotation_type.doc:
self.emit_wrapped_text(
self.process_doc(annotation_type.doc, self._docf))
if annotation_type.has_documented_params():
self.emit()
for param in annotation_type.params:
if not param.doc:
continue
self.emit_wrapped_text(':ivar {}: {}'.format(
fmt_var(param.name, True),
self.process_doc(param.doc, self._docf)),
subsequent_prefix=' ')
self.emit('"""')
self.emit()
self._generate_annotation_type_class_slots(annotation_type)
self._generate_annotation_type_class_init(ns, annotation_type)
self._generate_annotation_type_class_properties(ns, annotation_type)
self.emit()
|
Defines a Python class that represents an annotation type in Stone.
|
def G(self, T):
"""Calculate the heat capacity of the compound phase at the specified
temperature.
:param T: [K] temperature
:returns: [J/mol] The Gibbs free energy of the compound phase.
"""
h = self.DHref
s = self.Sref
for Tmax in sorted([float(TT) for TT in self._Cp_records.keys()]):
h = h + self._Cp_records[str(Tmax)].H(T)
s = s + self._Cp_records[str(Tmax)].S(T)
if T <= Tmax:
return h - T * s + self.G_mag(T)
# Extrapolate beyond the upper limit by using a constant heat capacity.
Tmax = max([float(TT) for TT in self._Cp_records.keys()])
h = h + self.Cp(Tmax)*(T - Tmax)
s = s + self.Cp(Tmax)*math.log(T / Tmax)
return h - T * s + self.G_mag(T)
|
Calculate the Gibbs free energy of the compound phase at the specified
temperature.
:param T: [K] temperature
:returns: [J/mol] The Gibbs free energy of the compound phase.
|
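In equation form, the loop above assembles the standard Gibbs relation, assuming H(T) and S(T) accumulate the contributions of the Cp ranges up to the one containing T, with constant-Cp extrapolation above the highest tabulated temperature T_max:

G(T) = H(T) - T\,S(T) + G_{mag}(T), \qquad
H(T) = \Delta H_{ref} + \sum_i H_i(T), \qquad
S(T) = S_{ref} + \sum_i S_i(T)

and, for T > T_{max}:

H(T) = H(T_{max}) + C_p(T_{max})\,(T - T_{max}), \qquad
S(T) = S(T_{max}) + C_p(T_{max})\,\ln\frac{T}{T_{max}}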
def create(ctx, name, description, tags, private, init):
"""Create a new project.
Uses [Caching](/references/polyaxon-cli/#caching)
Example:
\b
```bash
$ polyaxon project create --name=cats-vs-dogs --description="Image Classification with DL"
```
"""
try:
tags = tags.split(',') if tags else None
project_dict = dict(name=name, description=description, is_public=not private, tags=tags)
project_config = ProjectConfig.from_dict(project_dict)
except ValidationError:
Printer.print_error('Project name should contain only alpha numerical, "-", and "_".')
sys.exit(1)
try:
_project = PolyaxonClient().project.create_project(project_config)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not create project `{}`.'.format(name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("Project `{}` was created successfully.".format(_project.name))
if init:
ctx.obj = {}
ctx.invoke(init_project, project=name)
|
Create a new project.
Uses [Caching](/references/polyaxon-cli/#caching)
Example:
\b
```bash
$ polyaxon project create --name=cats-vs-dogs --description="Image Classification with DL"
```
|
def _write_source_data(self, sources):
"""
See src/jjk/measure3
"""
for source in sources:
self._write_source(source)
|
See src/jjk/measure3
|
def join_pretty_tensors(tensors, output, join_function=None, name='join'):
"""Joins the list of pretty_tensors and sets head of output_pretty_tensor.
Args:
tensors: A sequence of Layers or SequentialLayerBuilders to join.
output: A pretty_tensor to set the head with the result.
join_function: A function to join the tensors, defaults to concat on the
last dimension.
name: A name that is used for the name_scope
Returns:
The result of calling with_tensor on output
Raises:
ValueError: if pretty_tensors is None or empty.
"""
if not tensors:
raise ValueError('pretty_tensors must be a non-empty sequence.')
with output.g.name_scope(name):
if join_function is None:
# Use depth concat
last_dim = len(tensors[0].shape) - 1
return output.with_tensor(tf.concat(tensors, last_dim))
else:
return output.with_tensor(join_function(tensors))
|
Joins the list of pretty_tensors and sets head of output_pretty_tensor.
Args:
tensors: A sequence of Layers or SequentialLayerBuilders to join.
output: A pretty_tensor to set the head with the result.
join_function: A function to join the tensors, defaults to concat on the
last dimension.
name: A name that is used for the name_scope
Returns:
The result of calling with_tensor on output
Raises:
ValueError: if pretty_tensors is None or empty.
|
def _add_error(self, *args, **kwargs): # type: () -> None
"""Convenience function to add an error to this object, with line numbers
An error title or description should not accidentally leak self._value, for privacy/redaction purposes.
:rtype: None
"""
if kwargs.get('node', None):
# if node specified and not none
error = ConfigError.create_from_yaml_node(
*args,
**kwargs
)
elif self._value_node:
# default to using the node if we have one
error = ConfigError.create_from_yaml_node(
node=self._value_node,
*args,
**kwargs
)
else:
# no nodes or error_obj to attach
error = ConfigError(*args, **kwargs)
self._errors.append(error)
|
Convenience function to add an error to this object, with line numbers
An error title or description should not accidentally leak self._value, for privacy/redaction purposes.
:rtype: None
|
def set_weather_from_metar(
metar: typing.Union[Metar.Metar, str],
in_file: typing.Union[str, Path],
out_file: typing.Union[str, Path] = None
) -> typing.Tuple[typing.Union[str, None], typing.Union[str, None]]:
"""
Applies the weather from a METAR object to a MIZ file
Args:
metar: metar object
in_file: path to MIZ file
out_file: path to output MIZ file (will default to in_file)
Returns: tuple of error, success
"""
error, metar = custom_metar.CustomMetar.get_metar(metar)
if error:
return error, None
if metar:
LOGGER.debug('METAR: %s', metar.code)
in_file = elib.path.ensure_file(in_file)
if out_file is None:
out_file = in_file
else:
out_file = elib.path.ensure_file(out_file, must_exist=False)
LOGGER.debug('applying metar: %s -> %s', in_file, out_file)
try:
LOGGER.debug('building MissionWeather')
_mission_weather = mission_weather.MissionWeather(metar)
with Miz(str(in_file)) as miz:
_mission_weather.apply_to_miz(miz)
miz.zip(str(out_file))
return None, f'successfully applied METAR to {in_file}'
except ValueError:
error = f'Unable to apply METAR string to the mission.\n' \
f'This is most likely due to a freak value, this feature is still experimental.\n' \
f'I will fix it ASAP !'
return error, None
|
Applies the weather from a METAR object to a MIZ file
Args:
metar: metar object
in_file: path to MIZ file
out_file: path to output MIZ file (will default to in_file)
Returns: tuple of error, success
|
def SensorsTriggersNotificationsDelete(self, sensor_id, trigger_id, notification_id):
"""
Disconnect a notification from a sensor-trigger combination.
@param sensor_id (int) - Sensor id of the sensor-trigger combination.
@param trigger_id (int) - Trigger id of the sensor-trigger combination.
@param notification_id (int) - Notification id of the notification to disconnect.
@return (bool) - Boolean indicating whether SensorsTriggersNotificationsDelete was successful.
"""
if self.__SenseApiCall__('/sensors/{0}/triggers/{1}/notifications/{2}.json'.format(sensor_id, trigger_id, notification_id), 'DELETE'):
return True
else:
self.__error__ = "api call unsuccessful"
return False
|
Disconnect a notification from a sensor-trigger combination.
@param sensor_id (int) - Sensor id of the sensor-trigger combination.
@param trigger_id (int) - Trigger id of the sensor-trigger combination.
@param notification_id (int) - Notification id of the notification to disconnect.
@return (bool) - Boolean indicating whether SensorsTriggersNotificationsDelete was successful.
|
def write_users(dburl):
"""Write users to the DB."""
data = {
'username': 'admin',
'realname': 'Website Administrator',
'email': 'coil@example.com',
'password':
r'$bcrypt-sha256$2a,12$NNtd2TC9mZO6.EvLwEwlLO$axojD34/iE8x'
r'QitQnCCOGPhofgmjNdq',
}
for p in PERMISSIONS:
data[p] = '1'
db = redis.StrictRedis.from_url(dburl)
db.hmset('user:1', data)
db.hset('users', 'admin', '1')
if not db.exists('last_uid'):
db.incr('last_uid')
print("Username: admin")
print("Password: admin")
return 0
|
Write users to the DB.
|
def to_dict(self):
"""Return a dictionary containing Atom data."""
data = {'aid': self.aid, 'number': self.number, 'element': self.element}
for coord in {'x', 'y', 'z'}:
if getattr(self, coord) is not None:
data[coord] = getattr(self, coord)
if self.charge != 0:
data['charge'] = self.charge
return data
|
Return a dictionary containing Atom data.
|
def _disappeared(self, fd, path, **params):
"""
Called when an open path is no longer acessible. This will either
move the path to pending (if the 'missing' param is set for the
file), or fire an exception.
"""
log = self._getparam('log', self._discard, **params)
log.debug("Path %r removed or renamed, handling removal", path)
self._close(fd)
if self._mode == WF_POLLING and fd in self._poll_stat:
del self._poll_stat[fd]
if self._mode == WF_INOTIFYX and path in self._inx_inode:
del self._inx_inode[path]
del self.fds_open[fd]
del self.paths_open[path]
if self.paths[path]:
try:
if self._add_file(path, **params):
log.debug("Path %r immediately reappeared, pending transition skipped", path)
return
except Exception as e:
log.debug("Path %r reappearance check failed -- %s", path, e)
log.debug("Path %r marked as pending", path)
self.paths_pending[path] = True
else:
del self.paths[path]
raise Exception("Path %r has been removed or renamed" % path)
|
Called when an open path is no longer accessible. This will either
move the path to pending (if the 'missing' param is set for the
file), or fire an exception.
|
def set_timestamp_to_current(self):
"""
Set timestamp to current time utc
:rtype: None
"""
# Good form to add tzinfo
self.timestamp = pytz.UTC.localize(datetime.datetime.utcnow())
|
Set timestamp to current time utc
:rtype: None
|
def seat_button_count(self):
"""The total number of buttons pressed on all devices on
the associated seat after the the event was triggered.
For events that are not of type
:attr:`~libinput.constant.EventType.TABLET_TOOL_BUTTON`, this property
raises :exc:`AttributeError`.
Returns:
int: The seat wide pressed button count for the key of this event.
"""
if self.type != EventType.TABLET_TOOL_BUTTON:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_tool_get_seat_button_count(
self._handle)
|
The total number of buttons pressed on all devices on
the associated seat after the event was triggered.
For events that are not of type
:attr:`~libinput.constant.EventType.TABLET_TOOL_BUTTON`, this property
raises :exc:`AttributeError`.
Returns:
int: The seat wide pressed button count for the key of this event.
|
def replace_pipe(self, name, component):
"""Replace a component in the pipeline.
name (unicode): Name of the component to replace.
component (callable): Pipeline component.
DOCS: https://spacy.io/api/language#replace_pipe
"""
if name not in self.pipe_names:
raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
self.pipeline[self.pipe_names.index(name)] = (name, component)
|
Replace a component in the pipeline.
name (unicode): Name of the component to replace.
component (callable): Pipeline component.
DOCS: https://spacy.io/api/language#replace_pipe
|
def sendNotification(snmpDispatcher, authData, transportTarget,
notifyType, *varBinds, **options):
"""Creates a generator to send SNMP notification.
When iterator gets advanced by :py:mod:`asyncio` main loop,
SNMP TRAP or INFORM notification is sent (:RFC:`1905#section-4.2.6`).
The iterator yields :py:class:`asyncio.Future` which gets done whenever
response arrives or error occurs.
Parameters
----------
snmpDispatcher: :py:class:`~pysnmp.hlapi.v1arch.asyncore.SnmpDispatcher`
Class instance representing asyncio-based asynchronous event loop and
associated state information.
authData: :py:class:`~pysnmp.hlapi.v1arch.CommunityData`
Class instance representing SNMPv1/v2c credentials.
transportTarget: :py:class:`~pysnmp.hlapi.v1arch.asyncio.UdpTransportTarget` or
:py:class:`~pysnmp.hlapi.v1arch.asyncio.Udp6TransportTarget` Class instance representing
transport type along with SNMP peer address.
notifyType : str
Indicates type of notification to be sent. Recognized literal
values are *trap* or *inform*.
\*varBinds: :class:`tuple` of OID-value pairs or :py:class:`~pysnmp.smi.rfc1902.ObjectType` or :py:class:`~pysnmp.smi.rfc1902.NotificationType`
One or more objects representing MIB variables to place
into SNMP notification. It could be tuples of OID-values
or :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
of :py:class:`~pysnmp.smi.rfc1902.NotificationType` objects.
Besides user variable-bindings, SNMP Notification PDU requires at
least two variable-bindings to be present:
0. SNMPv2-MIB::sysUpTime.0 = <agent uptime>
1. SNMPv2-SMI::snmpTrapOID.0 = <notification ID>
When sending SNMPv1 TRAP, more variable-bindings could be present:
2. SNMP-COMMUNITY-MIB::snmpTrapAddress.0 = <agent-IP>
3. SNMP-COMMUNITY-MIB::snmpTrapCommunity.0 = <snmp-community-name>
4. SNMP-COMMUNITY-MIB::snmpTrapEnterprise.0 = <enterprise-OID>
If user does not supply some or any of the above variable-bindings or
if they are at the wrong positions, the system will add/reorder the
missing ones automatically.
On top of that, some notification types imply including some additional
variable-bindings providing additional details on the event being
reported. Therefore it is generally easier to use
:py:class:`~pysnmp.smi.rfc1902.NotificationType` object which will
help adding relevant variable-bindings.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `False`,
unless :py:class:`~pysnmp.smi.rfc1902.ObjectType` or
:py:class:`~pysnmp.smi.rfc1902.NotificationType` is present
among `varBinds` in which case `lookupMib` gets automatically
enabled.
Yields
------
errorIndication: str
True value indicates SNMP engine error.
errorStatus: str
True value indicates SNMP PDU error.
errorIndex: int
Non-zero value refers to `varBinds[errorIndex-1]`
varBinds: tuple
A sequence of OID-value pairs in form of base SNMP types (if
`lookupMib` is `False`) or :py:class:`~pysnmp.smi.rfc1902.ObjectType`
class instances (if `lookupMib` is `True`) representing MIB variables
returned in SNMP response.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Examples
--------
>>> import asyncio
>>> from pysnmp.hlapi.asyncio import *
>>>
>>> @asyncio.coroutine
... def run():
... errorIndication, errorStatus, errorIndex, varBinds = yield from sendNotification(
... SnmpDispatcher(),
... CommunityData('public'),
... UdpTransportTarget(('demo.snmplabs.com', 162)),
... 'trap',
... NotificationType(ObjectIdentity('IF-MIB', 'linkDown')))
... print(errorIndication, errorStatus, errorIndex, varBinds)
...
>>> asyncio.get_event_loop().run_until_complete(run())
(None, 0, 0, [])
>>>
"""
sysUpTime = v2c.apiTrapPDU.sysUpTime
snmpTrapOID = v2c.apiTrapPDU.snmpTrapOID
def _ensureVarBinds(varBinds):
# Add sysUpTime if not present already
if not varBinds or varBinds[0][0] != sysUpTime:
varBinds.insert(0, (v2c.ObjectIdentifier(sysUpTime), v2c.TimeTicks(0)))
# Search for and reposition sysUpTime if it's elsewhere
for idx, varBind in enumerate(varBinds[1:]):
if varBind[0] == sysUpTime:
varBinds[0] = varBind
del varBinds[idx + 1]
break
if len(varBinds) < 2:
raise error.PySnmpError('SNMP notification PDU requires '
'SNMPv2-MIB::snmpTrapOID.0 to be present')
# Search for and reposition snmpTrapOID if it's elsewhere
for idx, varBind in enumerate(varBinds[2:]):
if varBind[0] == snmpTrapOID:
del varBinds[idx + 2]
if varBinds[1][0] == snmpTrapOID:
varBinds[1] = varBind
else:
varBinds.insert(1, varBind)
break
# Fail on missing snmpTrapOID
if varBinds[1][0] != snmpTrapOID:
raise error.PySnmpError('SNMP notification PDU requires '
'SNMPv2-MIB::snmpTrapOID.0 to be present')
return varBinds
def _cbFun(snmpDispatcher, stateHandle, errorIndication, rspPdu, _cbCtx):
if future.cancelled():
return
errorStatus = v2c.apiTrapPDU.getErrorStatus(rspPdu)
errorIndex = v2c.apiTrapPDU.getErrorIndex(rspPdu)
varBinds = v2c.apiTrapPDU.getVarBinds(rspPdu)
try:
varBindsUnmade = VB_PROCESSOR.unmakeVarBinds(snmpDispatcher.cache, varBinds,
lookupMib)
except Exception as e:
future.set_exception(e)
else:
future.set_result(
(errorIndication, errorStatus, errorIndex, varBindsUnmade)
)
lookupMib = options.get('lookupMib')
if not lookupMib and any(isinstance(x, (NotificationType, ObjectType))
for x in varBinds):
lookupMib = True
if lookupMib:
varBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, varBinds)
if notifyType == 'trap':
reqPdu = v2c.TrapPDU()
else:
reqPdu = v2c.InformRequestPDU()
v2c.apiTrapPDU.setDefaults(reqPdu)
v2c.apiTrapPDU.setVarBinds(reqPdu, varBinds)
varBinds = v2c.apiTrapPDU.getVarBinds(reqPdu)
v2c.apiTrapPDU.setVarBinds(reqPdu, _ensureVarBinds(varBinds))
if authData.mpModel == 0:
reqPdu = rfc2576.v2ToV1(reqPdu)
future = asyncio.Future()
snmpDispatcher.sendPdu(authData, transportTarget, reqPdu, cbFun=_cbFun)
if notifyType == 'trap':
def __trapFun(future):
if future.cancelled():
return
future.set_result((None, 0, 0, []))
loop = asyncio.get_event_loop()
loop.call_soon(__trapFun, future)
return future
|
Creates a generator to send SNMP notification.
When iterator gets advanced by :py:mod:`asyncio` main loop,
SNMP TRAP or INFORM notification is sent (:RFC:`1905#section-4.2.6`).
The iterator yields :py:class:`asyncio.Future` which gets done whenever
response arrives or error occurs.
Parameters
----------
snmpDispatcher: :py:class:`~pysnmp.hlapi.v1arch.asyncore.SnmpDispatcher`
Class instance representing asyncio-based asynchronous event loop and
associated state information.
authData: :py:class:`~pysnmp.hlapi.v1arch.CommunityData`
Class instance representing SNMPv1/v2c credentials.
transportTarget: :py:class:`~pysnmp.hlapi.v1arch.asyncio.UdpTransportTarget` or
:py:class:`~pysnmp.hlapi.v1arch.asyncio.Udp6TransportTarget` Class instance representing
transport type along with SNMP peer address.
notifyType : str
Indicates type of notification to be sent. Recognized literal
values are *trap* or *inform*.
\*varBinds: :class:`tuple` of OID-value pairs or :py:class:`~pysnmp.smi.rfc1902.ObjectType` or :py:class:`~pysnmp.smi.rfc1902.NotificationType`
One or more objects representing MIB variables to place
into SNMP notification. It could be tuples of OID-values
or :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
of :py:class:`~pysnmp.smi.rfc1902.NotificationType` objects.
Besides user variable-bindings, SNMP Notification PDU requires at
least two variable-bindings to be present:
0. SNMPv2-MIB::sysUpTime.0 = <agent uptime>
1. SNMPv2-SMI::snmpTrapOID.0 = <notification ID>
When sending SNMPv1 TRAP, more variable-bindings could be present:
2. SNMP-COMMUNITY-MIB::snmpTrapAddress.0 = <agent-IP>
3. SNMP-COMMUNITY-MIB::snmpTrapCommunity.0 = <snmp-community-name>
4. SNMP-COMMUNITY-MIB::snmpTrapEnterprise.0 = <enterprise-OID>
If user does not supply some or any of the above variable-bindings or
if they are at the wrong positions, the system will add/reorder the
missing ones automatically.
On top of that, some notification types imply including some additional
variable-bindings providing additional details on the event being
reported. Therefore it is generally easier to use
:py:class:`~pysnmp.smi.rfc1902.NotificationType` object which will
help adding relevant variable-bindings.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `False`,
unless :py:class:`~pysnmp.smi.rfc1902.ObjectType` or
:py:class:`~pysnmp.smi.rfc1902.NotificationType` is present
among `varBinds` in which case `lookupMib` gets automatically
enabled.
Yields
------
errorIndication: str
True value indicates SNMP engine error.
errorStatus: str
True value indicates SNMP PDU error.
errorIndex: int
Non-zero value refers to `varBinds[errorIndex-1]`
varBinds: tuple
A sequence of OID-value pairs in form of base SNMP types (if
`lookupMib` is `False`) or :py:class:`~pysnmp.smi.rfc1902.ObjectType`
class instances (if `lookupMib` is `True`) representing MIB variables
returned in SNMP response.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Examples
--------
>>> import asyncio
>>> from pysnmp.hlapi.asyncio import *
>>>
>>> @asyncio.coroutine
... def run():
... errorIndication, errorStatus, errorIndex, varBinds = yield from sendNotification(
... SnmpDispatcher(),
... CommunityData('public'),
... UdpTransportTarget(('demo.snmplabs.com', 162)),
... 'trap',
... NotificationType(ObjectIdentity('IF-MIB', 'linkDown')))
... print(errorIndication, errorStatus, errorIndex, varBinds)
...
>>> asyncio.get_event_loop().run_until_complete(run())
(None, 0, 0, [])
>>>
|
def swo_set_emu_buffer_size(self, buf_size):
"""Sets the size of the buffer used by the J-Link to collect SWO data.
Args:
self (JLink): the ``JLink`` instance
buf_size (int): the new size of the emulator buffer
Returns:
``None``
Raises:
JLinkException: on error
"""
buf = ctypes.c_uint32(buf_size)
res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.SET_BUFFERSIZE_EMU,
ctypes.byref(buf))
if res < 0:
raise errors.JLinkException(res)
return None
|
Sets the size of the buffer used by the J-Link to collect SWO data.
Args:
self (JLink): the ``JLink`` instance
buf_size (int): the new size of the emulator buffer
Returns:
``None``
Raises:
JLinkException: on error
|
async def fetch(self, *args, timeout=None):
r"""Execute the statement and return a list of :class:`Record` objects.
:param args: Query arguments
:param float timeout: Optional timeout value in seconds.
:return: A list of :class:`Record` instances.
"""
data = await self.__bind_execute(args, 0, timeout)
return data
|
r"""Execute the statement and return a list of :class:`Record` objects.
:param args: Query arguments
:param float timeout: Optional timeout value in seconds.
:return: A list of :class:`Record` instances.
|
def _get_crawled_urls(self, handle, request):
"""
Main method where the crawled HTML content is parsed with
Beautiful Soup and the URLs are extracted from the DOM.
"""
try:
content = six.text_type(handle.open(request).read(), "utf-8",
errors="replace")
soup = BeautifulSoup(content, "html.parser")
tags = soup('a')
for tag in tqdm(tags):
href = tag.get("href")
if href is not None:
url = urllib.parse.urljoin(self.url, escape(href))
if url not in self:
self.urls.append(url)
except urllib.request.HTTPError as error:
if error.code == 404:
logger.warning("ERROR: %s -> %s for %s" % (error, error.url, self.url))
else:
logger.warning("ERROR: %s for %s" % (error, self.url))
except urllib.request.URLError as error:
logger.warning("ERROR: %s for %s" % (error, self.url))
raise urllib.request.URLError("URL entered is Incorrect")
|
Main method where the crawled HTML content is parsed with
Beautiful Soup and the URLs are extracted from the DOM.
|
def get_account_db_class(cls) -> Type[BaseAccountDB]:
"""
Return the :class:`~eth.db.account.BaseAccountDB` class that the
state class uses.
"""
if cls.account_db_class is None:
raise AttributeError("No account_db_class set for {0}".format(cls.__name__))
return cls.account_db_class
|
Return the :class:`~eth.db.account.BaseAccountDB` class that the
state class uses.
|
def encodeValue(value):
"""
Encode a value (or a list/tuple of values) as a list of AttributeValue
objects with string_value set.
"""
if isinstance(value, (list, tuple)):
return [common.AttributeValue(string_value=str(v)) for v in value]
else:
return [common.AttributeValue(string_value=str(value))]
|
Encode a value (or a list/tuple of values) as a list of AttributeValue
objects with string_value set.
|
def breadth_first(problem, graph_search=False, viewer=None):
'''
Breadth first search.
If graph_search=True, will avoid exploring repeated states.
Requires: SearchProblem.actions, SearchProblem.result, and
SearchProblem.is_goal.
'''
return _search(problem,
FifoList(),
graph_search=graph_search,
viewer=viewer)
|
Breadth first search.
If graph_search=True, will avoid exploring repeated states.
Requires: SearchProblem.actions, SearchProblem.result, and
SearchProblem.is_goal.
|
def convert_contentbody_to_new_type(self, content_data, old_representation, new_representation, callback=None):
"""
Converts between content body representations.
Not all representations can be converted to/from other formats. Supported conversions:
Source Representation | Destination Representation Supported
--------------------------------------------------------------
"storage" | "view","export_view","editor"
"editor" | "storage"
"view" | None
"export_view" | None
:param content_data (string): The content data to transform.
:param old_representation (string): The representation to convert from.
:param new_representation (string): The representation to convert to.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the contentbody/convert/{to} endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
"""
assert {old_representation, new_representation} < {"storage", "editor", "view", "export_view"}
# TODO: Enforce conversion rules better here.
request_data = {"value": str(content_data), "representation": old_representation}
return self._service_post_request("rest/api/contentbody/convert/{to}".format(to=new_representation),
data=json.dumps(request_data),
headers={"Content-Type": "application/json"}, callback=callback)
|
Converts between content body representations.
Not all representations can be converted to/from other formats. Supported conversions:
Source Representation | Destination Representation Supported
--------------------------------------------------------------
"storage" | "view","export_view","editor"
"editor" | "storage"
"view" | None
"export_view" | None
:param content_data (string): The content data to transform.
:param old_representation (string): The representation to convert from.
:param new_representation (string): The representation to convert to.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the contentbody/convert/{to} endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
|
def type_to_string(f, map_types):
"""
Convert type info to pretty names, based on numbers from FieldDescriptorProto
https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.descriptor.pb
"""
if f.type in [1]:
return "double"
elif f.type in [2]:
return "float"
elif f.type in [3]:
return "long"
elif f.type in [4]:
return "uint64"
elif f.type in [5]:
return "integer"
elif f.type in [6]:
return "fixed64"
elif f.type in [7]:
return "fixed32"
elif f.type in [8]:
return "boolean"
elif f.type in [9]:
return "string"
# missing type 10 - Group
elif f.type in [11, 14]:
ref_name = f.ref_type
if ref_name in map_types:
ref_fields = map_types[ref_name]
return {
"type": "map",
"key": " %s "% type_to_string(ref_fields["key"], map_types),
"value": " %s "% type_to_string(ref_fields["value"], map_types)
}
else:
kind = ":protobuf:message:`%s`" % simplify_name(f.ref_type)
if f.label == 3: # LABEL_REPEATED
return "list of " + kind
else:
return kind
elif f.type in [12]:
return "bytes"
elif f.type in [13]:
return "uint32"
elif f.type in [15]:
return "sfixed32"
elif f.type in [16]:
return "sfixed64"
elif f.type in [17]:
return "sint32"
elif f.type in [18]:
return "sint64"
else:
raise Exception(f.type)
|
Convert type info to pretty names, based on numbers from FieldDescriptorProto
https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.descriptor.pb
|
def filter_labels(sent: Sequence[str], labels: Set[str] = None) -> List[str]:
""" Returns only the tokens present in the sentence that are in labels."""
if labels:
return [tok for tok in sent if tok in labels]
return list(sent)
|
Returns only the tokens present in the sentence that are in labels.
|
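For example (hypothetical NER-style tags):

filter_labels(['B-PER', 'O', 'I-PER', 'O'], {'B-PER', 'I-PER'})  # -> ['B-PER', 'I-PER']
filter_labels(['B-PER', 'O'], None)                              # -> ['B-PER', 'O'] (a copy of sent)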
def update_function_code(FunctionName, ZipFile=None, S3Bucket=None, S3Key=None,
S3ObjectVersion=None, Publish=False,
region=None, key=None, keyid=None, profile=None):
'''
Upload the given code to the named lambda function.
Returns {updated: true} if the function was updated and returns
{updated: False} if the function was not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_lamba.update_function_code my_function ZipFile=function.zip
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
if ZipFile:
if S3Bucket or S3Key or S3ObjectVersion:
raise SaltInvocationError('Either ZipFile must be specified, or '
'S3Bucket and S3Key must be provided.')
r = conn.update_function_code(FunctionName=FunctionName,
ZipFile=_filedata(ZipFile),
Publish=Publish)
else:
if not S3Bucket or not S3Key:
raise SaltInvocationError('Either ZipFile must be specified, or '
'S3Bucket and S3Key must be provided.')
args = {
'S3Bucket': S3Bucket,
'S3Key': S3Key,
}
if S3ObjectVersion:
args['S3ObjectVersion'] = S3ObjectVersion
r = conn.update_function_code(FunctionName=FunctionName,
Publish=Publish, **args)
if r:
keys = ('FunctionName', 'Runtime', 'Role', 'Handler', 'CodeSha256',
'CodeSize', 'Description', 'Timeout', 'MemorySize',
'FunctionArn', 'LastModified', 'VpcConfig', 'Environment')
return {'updated': True, 'function': dict([(k, r.get(k)) for k in keys])}
else:
log.warning('Function was not updated')
return {'updated': False}
except ClientError as e:
return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
|
Upload the given code to the named lambda function.
Returns {updated: true} if the function was updated and returns
{updated: False} if the function was not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_lamba.update_function_code my_function ZipFile=function.zip
|
def _validate_min(self, proposal):
"""Enforce min <= value <= max"""
min = proposal['value']
if min > self.max:
raise TraitError('Setting min > max')
if min > self.value:
self.value = min
return min
|
Enforce min <= value <= max
|
def wait_for_logs_matching(self, matcher, timeout=10, encoding='utf-8',
**logs_kwargs):
"""
Wait for logs matching the given matcher.
"""
wait_for_logs_matching(
self.inner(), matcher, timeout=timeout, encoding=encoding,
**logs_kwargs)
|
Wait for logs matching the given matcher.
|
def _to_chimera(M, N, L, q):
"Converts a qubit's linear index to chimera coordinates."
return (q // N // L // 2, (q // L // 2) % N, (q // L) % 2, q % L)
|
Converts a qubit's linear index to chimera coordinates.
|
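A worked example, assuming the usual Chimera linearization q = 2*N*L*i + 2*L*j + L*u + k (that layout is inferred from the arithmetic above, not stated in the snippet):

# With N=16 columns and L=4 qubits per shore:
#   q = 137 = 2*16*4*1 + 2*4*1 + 4*0 + 1
assert _to_chimera(16, 16, 4, 137) == (1, 1, 0, 1)  # (row i, column j, shore u, index k)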
def from_credentials(cls: Type[SigningKeyType], salt: Union[str, bytes], password: Union[str, bytes],
scrypt_params: Optional[ScryptParams] = None) -> SigningKeyType:
"""
Create a SigningKey object from credentials
:param salt: Secret salt passphrase credential
:param password: Secret password credential
:param scrypt_params: ScryptParams instance
"""
if scrypt_params is None:
scrypt_params = ScryptParams()
salt = ensure_bytes(salt)
password = ensure_bytes(password)
seed = scrypt(password, salt, scrypt_params.N, scrypt_params.r, scrypt_params.p, scrypt_params.seed_length)
return cls(seed)
|
Create a SigningKey object from credentials
:param salt: Secret salt passphrase credential
:param password: Secret password credential
:param scrypt_params: ScryptParams instance
|
def _write_entries(self, stream, entries, converter, properties=None):
"""Write iterable of entries as YAML object to stream.
Args:
stream: File-like object.
entries: Iterable of entries.
converter: Conversion function from entry to YAML object.
properties: Set of compartment properties to output (or None to
output all).
"""
def iter_entries():
for c in entries:
entry = converter(c)
if entry is None:
continue
if properties is not None:
entry = OrderedDict(
(key, value) for key, value in iteritems(entry)
if key == 'id' or key in properties)
yield entry
self._dump(stream, list(iter_entries()))
|
Write iterable of entries as YAML object to stream.
Args:
stream: File-like object.
entries: Iterable of entries.
converter: Conversion function from entry to YAML object.
properties: Set of compartment properties to output (or None to
output all).
|
async def scroll(self, value, mode='relative'):
"""Scroll the cursor in the result set to a new position
according to mode. Same as :meth:`Cursor.scroll`, but moves the cursor
on the server side one row at a time. If you want to move 20 rows forward,
scroll will make 20 queries to move the cursor. Currently only forward
scrolling is supported.
:param int value: move cursor to next position according to mode.
:param str mode: scroll mode, possible modes: `relative` and `absolute`
"""
self._check_executed()
if mode == 'relative':
if value < 0:
raise NotSupportedError("Backwards scrolling not supported "
"by this cursor")
for _ in range(value):
await self._read_next()
self._rownumber += value
elif mode == 'absolute':
if value < self._rownumber:
raise NotSupportedError(
"Backwards scrolling not supported by this cursor")
end = value - self._rownumber
for _ in range(end):
await self._read_next()
self._rownumber = value
else:
raise ProgrammingError("unknown scroll mode %s" % mode)
|
Scroll the cursor in the result set to a new position
according to mode. Same as :meth:`Cursor.scroll`, but moves the cursor
on the server side one row at a time. If you want to move 20 rows forward,
scroll will make 20 queries to move the cursor. Currently only forward
scrolling is supported.
:param int value: move cursor to next position according to mode.
:param str mode: scroll mode, possible modes: `relative` and `absolute`
|
def var_added(self, v):
"""
var was added in the bot while it ran, possibly
by livecoding
:param v:
:return:
"""
self.add_variable(v)
self.window.set_size_request(400, 35 * len(self.widgets.keys()))
self.window.show_all()
|
var was added in the bot while it ran, possibly
by livecoding
:param v:
:return:
|
def mmGetCellActivityPlot(self, title="", showReset=False,
resetShading=0.25, activityType="activeCells"):
"""
Returns plot of the cell activity.
@param title (string) an optional title for the figure
@param showReset (bool) if true, the first set of cell activities
after a reset will have a gray background
@param resetShading (float) if showReset is true, this float specifies the
intensity of the reset background with 0.0
being white and 1.0 being black
@param activityType (string) The type of cell activity to display. Valid
types include "activeCells"
@return (Plot) plot
"""
cellTrace = copy.deepcopy(self._mmTraces[activityType].data)
for i in xrange(len(cellTrace)):
cellTrace[i] = self.getCellIndices(cellTrace[i])
return self.mmGetCellTracePlot(cellTrace, self.numberOfCells(),
activityType, title, showReset,
resetShading)
|
Returns plot of the cell activity.
@param title (string) an optional title for the figure
@param showReset (bool) if true, the first set of cell activities
after a reset will have a gray background
@param resetShading (float) if showReset is true, this float specifies the
intensity of the reset background with 0.0
being white and 1.0 being black
@param activityType (string) The type of cell activity to display. Valid
types include "activeCells"
@return (Plot) plot
|
def get_matching_indexes(self, possible_hash, possible_range):
"""
Get all indexes that could be queried on using a set of keys.
If any indexes match both hash AND range keys, indexes that only match
the hash key will be excluded from the result.
Parameters
----------
possible_hash : set
The names of fields that could be used as the hash key
possible_range : set
The names of fields that could be used as the range key
"""
matches = [
index
for index in self.iter_query_indexes()
if index.hash_key in possible_hash
]
range_matches = [
index for index in matches if index.range_key in possible_range
]
if range_matches:
return range_matches
return matches
|
Get all indexes that could be queried on using a set of keys.
If any indexes match both hash AND range keys, indexes that only match
the hash key will be excluded from the result.
Parameters
----------
possible_hash : set
The names of fields that could be used as the hash key
possible_range : set
The names of fields that could be used as the range key
|
def do_transform(self):
"""Apply the transformation (if it exists) to the latest_value"""
if not self.transform:
return
try:
self.latest_value = utils.Transform(
expr=self.transform, value=self.latest_value,
timedelta=self.time_between_updates().total_seconds()).result()
except (TypeError, ValueError):
logger.warn("Invalid transformation '%s' for metric %s",
self.transfrom, self.pk)
self.transform = ''
|
Apply the transformation (if it exists) to the latest_value
|
def links(xmrs):
"""Return the list of Links for the *xmrs*."""
# Links exist for every non-intrinsic argument that has a variable
# that is the intrinsic variable of some other predicate, as well
# as for label equalities when no argument link exists (even
# considering transitivity).
links = []
prelinks = []
_eps = xmrs._eps
_hcons = xmrs._hcons
_vars = xmrs._vars
lsh = xmrs.labelset_heads
lblheads = {v: lsh(v) for v, vd in _vars.items() if 'LBL' in vd['refs']}
top = xmrs.top
if top is not None:
prelinks.append((0, top, None, top, _vars[top]))
for nid, ep in _eps.items():
for role, val in ep[3].items():
if role == IVARG_ROLE or val not in _vars:
continue
prelinks.append((nid, ep[2], role, val, _vars[val]))
for src, srclbl, role, val, vd in prelinks:
if IVARG_ROLE in vd['refs']:
tgtnids = [n for n in vd['refs'][IVARG_ROLE]
if not _eps[n].is_quantifier()]
if len(tgtnids) == 0:
continue # maybe some bad MRS with a lonely quantifier
tgt = tgtnids[0] # what do we do if len > 1?
tgtlbl = _eps[tgt][2]
post = EQ_POST if srclbl == tgtlbl else NEQ_POST
elif val in _hcons:
lbl = _hcons[val][2]
if lbl not in lblheads or len(lblheads[lbl]) == 0:
continue # broken MRS; log this?
tgt = lblheads[lbl][0] # sorted list; first item is most "heady"
post = H_POST
elif 'LBL' in vd['refs']:
if val not in lblheads or len(lblheads[val]) == 0:
continue # broken MRS; log this?
tgt = lblheads[val][0] # again, should be sorted already
post = HEQ_POST
else:
continue # CARGs, maybe?
links.append(Link(src, tgt, role, post))
# now EQ links unattested by arg links
for lbl, heads in lblheads.items():
# I'm pretty sure this does what we want
if len(heads) > 1:
first = heads[0]
for other in heads[1:]:
links.append(Link(other, first, BARE_EQ_ROLE, EQ_POST))
# If not, something like this is more explicit
# lblset = self.labelset(lbl)
# sg = g.subgraph(lblset)
# ns = [nid for nid, deg in sg.degree(lblset).items() if deg == 0]
# head = self.labelset_head(lbl)
# for n in ns:
# links.append(Link(head, n, post=EQ_POST))
def _int(x):
try:
return int(x)
except ValueError:
return 0
return sorted(
links,
key=lambda link: (_int(link.start), _int(link.end), link.rargname)
)
|
Return the list of Links for the *xmrs*.
|
def brightness_prob(self, clip=True):
"""The brightest water may have Band 5 reflectance
as high as 0.11
Equation 10 (Zhu and Woodcock, 2012)
Parameters
----------
nir: ndarray
clip: boolean
Output
------
ndarray:
brightness probability, constrained 0..1
"""
thresh = 0.11
bp = np.minimum(thresh, self.nir) / thresh
if clip:
bp[bp > 1] = 1
bp[bp < 0] = 0
return bp
|
The brightest water may have Band 5 reflectance
as high as 0.11
Equation 10 (Zhu and Woodcock, 2012)
Parameters
----------
nir: ndarray
clip: boolean
Output
------
ndarray:
brightness probability, constrained 0..1
|
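A minimal standalone sketch of the same computation (Equation 10), assuming a NIR reflectance array; values at or above the 0.11 threshold saturate at 1.0 and negative reflectance clips to 0.0.
import numpy as np
nir = np.array([0.02, 0.11, 0.30, -0.01])   # made-up reflectance values
thresh = 0.11
bp = np.minimum(thresh, nir) / thresh
bp = np.clip(bp, 0.0, 1.0)
print(bp)  # -> [0.1818...  1.  1.  0.]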
def put( self, path_or_tuple, folder_id='me/skydrive',
overwrite=None, downsize=None, bits_api_fallback=True ):
'''Upload a file (object), possibly overwriting (default behavior)
a file with the same "name" attribute, if it exists.
First argument can be either path to a local file or tuple
of "(name, file)", where "file" can be either a file-like object
or just a string of bytes.
overwrite option can be set to False to allow two identically-named
files or "ChooseNewName" to let OneDrive derive some similar
unique name. Behavior of this option mimics underlying API.
downsize is a true/false API flag, similar to overwrite.
bits_api_fallback can be either True/False or an integer (number of
bytes), and determines whether method will fall back to using BITS API
(as implemented by "put_bits" method) for large files. Default "True"
(bool) value will use non-BITS file size limit (api_put_max_bytes, ~100 MiB)
as a fallback threshold, passing False will force using single-request uploads.'''
api_overwrite = self._translate_api_flag(overwrite, 'overwrite', ['ChooseNewName'])
api_downsize = self._translate_api_flag(downsize, 'downsize')
name, src = self._process_upload_source(path_or_tuple)
if not isinstance(bits_api_fallback, (int, float, long)):
bits_api_fallback = bool(bits_api_fallback)
if bits_api_fallback is not False:
if bits_api_fallback is True: bits_api_fallback = self.api_put_max_bytes
src.seek(0, os.SEEK_END)
if src.tell() >= bits_api_fallback:
if bits_api_fallback > 0: # not really a "fallback" in this case
log.info(
'Falling-back to using BITS API due to file size (%.1f MiB > %.1f MiB)',
*((float(v) / 2**20) for v in [src.tell(), bits_api_fallback]) )
if overwrite is not None and api_overwrite != 'true':
raise NoAPISupportError( 'Passed "overwrite" flag (value: {!r})'
' is not supported by the BITS API (always "true" there)'.format(overwrite) )
if downsize is not None:
log.info( 'Passed "downsize" flag (value: %r) will not'
' be used with BITS API, as it is not supported there', downsize )
file_id = self.put_bits(path_or_tuple, folder_id=folder_id) # XXX: overwrite/downsize
return self.info(file_id)
# PUT seem to have better support for unicode
# filenames and is recommended in the API docs, see #19.
# return self( self._api_url_join(folder_id, 'files'),
# dict(overwrite=api_overwrite, downsize_photo_uploads=api_downsize),
# method='post', files=dict(file=(name, src)) )
return self( self._api_url_join(folder_id, 'files', name),
dict(overwrite=api_overwrite, downsize_photo_uploads=api_downsize),
data=src, method='put', auth_header=True )
|
Upload a file (object), possibly overwriting (default behavior)
a file with the same "name" attribute, if it exists.
First argument can be either path to a local file or tuple
of "(name, file)", where "file" can be either a file-like object
or just a string of bytes.
overwrite option can be set to False to allow two identically-named
files or "ChooseNewName" to let OneDrive derive some similar
unique name. Behavior of this option mimics underlying API.
downsize is a true/false API flag, similar to overwrite.
bits_api_fallback can be either True/False or an integer (number of
bytes), and determines whether method will fall back to using BITS API
(as implemented by "put_bits" method) for large files. Default "True"
(bool) value will use non-BITS file size limit (api_put_max_bytes, ~100 MiB)
as a fallback threshold, passing False will force using single-request uploads.
|
def export_image3d(input, output, size=(800, 600), pcb_rotate=(0, 0, 0), timeout=20, showgui=False):
'''
Exporting eagle .brd file into 3D image file
using Eagle3D and povray.
GUI is not displayed if ``pyvirtualdisplay`` is installed.
If the export is blocked somehow (e.g. a popup window is displayed) then the operation is canceled with an exception after the timeout.
The problem can be investigated by setting the 'showgui' flag.
:param input: eagle .brd file name
:param output: image file name (.png)
:param timeout: operation is canceled after this timeout (sec)
:param showgui: eagle GUI is displayed
:param size: tuple(width, size), image size
:rtype: None
'''
input = norm_path(input)
output = norm_path(output)
ext = os.path.splitext(input)[1]
if ext not in ['.brd']:
raise ValueError('Input extension is not ".brd", brd=' + str(input))
commands = []
eagle3d = Path(__file__).dirname() / 'eagle3d'
ulp = (eagle3d / '3d50.ulp').abspath()
commands += ['RUN ' + ulp]
commands += ['QUIT']
def render(dir, f):
# povray has strange file access policy,
# better to generate under tmp
# cli doc:
# http://library.thinkquest.org/3285/language/cmdln.html
templ = '#local pcb_rotate_%s = %s'
pov = Path(f.replace('.brd', '.pov'))
if pcb_rotate != (0, 0, 0):
s = pov.bytes()
s = s.replace(templ % ('x', 0), templ % ('x', pcb_rotate[0]))
s = s.replace(templ % ('y', 0), templ % ('y', pcb_rotate[1]))
s = s.replace(templ % ('z', 0), templ % ('z', pcb_rotate[2]))
pov.write_bytes(s)
fpng = Path(f.replace('.brd', '.png'))
cmd = []
cmd += ["povray"]
cmd += ["-d"] # no display
cmd += ["-a"] # anti-aliasing
cmd += ['+W' + str(size[0])] # width
cmd += ['+H' + str(size[1])] # height
cmd += ['-o' + fpng]
cmd += ['-L' + eagle3d]
cmd += [pov]
p = Proc(cmd).call()
if not fpng.exists():
raise EagleError('povray error, proc=%s' % p)
fpng.copy(output)
command_eagle(input=input, timeout=timeout, commands=commands,
showgui=showgui, callback=render)
|
Exporting eagle .brd file into 3D image file
using Eagle3D and povray.
GUI is not displayed if ``pyvirtualdisplay`` is installed.
If the export is blocked somehow (e.g. a popup window is displayed) then the operation is canceled with an exception after the timeout.
The problem can be investigated by setting the 'showgui' flag.
:param input: eagle .brd file name
:param output: image file name (.png)
:param timeout: operation is canceled after this timeout (sec)
:param showgui: eagle GUI is displayed
:param size: tuple(width, size), image size
:rtype: None
|
def stdout_avail(self):
"""Data is available in stdout, let's empty the queue and write it!"""
data = self.interpreter.stdout_write.empty_queue()
if data:
self.write(data)
|
Data is available in stdout, let's empty the queue and write it!
|
def removeOutliers(points, radius):
"""
Remove outliers from a cloud of points within the specified `radius` search.
.. hint:: |clustering| |clustering.py|_
"""
isactor = False
if isinstance(points, vtk.vtkActor):
isactor = True
poly = points.GetMapper().GetInput()
else:
src = vtk.vtkPointSource()
src.SetNumberOfPoints(len(points))
src.Update()
vpts = src.GetOutput().GetPoints()
for i, p in enumerate(points):
vpts.SetPoint(i, p)
poly = src.GetOutput()
removal = vtk.vtkRadiusOutlierRemoval()
removal.SetInputData(poly)
removal.SetRadius(radius)
removal.SetNumberOfNeighbors(5)
removal.GenerateOutliersOff()
removal.Update()
rpoly = removal.GetOutput()
print("# of removed outlier points: ",
removal.GetNumberOfPointsRemoved(), '/', poly.GetNumberOfPoints())
outpts = []
for i in range(rpoly.GetNumberOfPoints()):
outpts.append(list(rpoly.GetPoint(i)))
outpts = np.array(outpts)
if not isactor:
return outpts
actor = vs.Points(outpts)
return actor
|
Remove outliers from a cloud of points within the specified `radius` search.
.. hint:: |clustering| |clustering.py|_
|
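A hedged usage sketch, assuming vtk is installed and the module containing removeOutliers (with its vs.Points helper) is importable; passing a plain array returns the filtered array, while passing a vtkActor returns a new actor.
import numpy as np
pts = np.random.randn(500, 3)                   # dense cluster of points
pts = np.vstack([pts, [[50.0, 50.0, 50.0]]])    # one obvious outlier
kept = removeOutliers(pts, radius=1.0)          # isolated points are dropped
print(pts.shape[0], '->', kept.shape[0])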
def get(self, key, env=None):
"""
Returns the config setting for the specified environment. If no
environment is specified, the value for the current environment is
returned. If an unknown key or environment is passed, None is returned.
"""
if env is None:
env = self.environment
try:
ret = self._settings[env][key]
except KeyError:
ret = None
if ret is None:
# See if it's set in the environment
if key == "identity_class":
# This is defined via the identity_type
env_var = self.env_dct.get("identity_type")
ityp = os.environ.get(env_var)
if ityp:
return _import_identity(ityp)
else:
env_var = self.env_dct.get(key)
if env_var is not None:
ret = os.environ.get(env_var)
return ret
|
Returns the config setting for the specified environment. If no
environment is specified, the value for the current environment is
returned. If an unknown key or environment is passed, None is returned.
|
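A minimal sketch of the environment-variable fallback only (the real settings object and env_dct mapping are not reproduced here); when a key is absent from _settings, the mapped environment variable is consulted.
import os
env_dct = {'region': 'APP_REGION'}              # hypothetical mapping
os.environ['APP_REGION'] = 'DFW'
key = 'region'
env_var = env_dct.get(key)
value = os.environ.get(env_var) if env_var is not None else None
print(value)  # -> 'DFW'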
async def strings(self, request: Optional['Request']=None) \
-> List[Tuple[Text, ...]]:
"""
For the given request, find the list of strings of that intent. If the
intent does not exist, it will raise a KeyError.
"""
if request:
locale = await request.get_locale()
else:
locale = None
return self.db.get(self.key, locale)
|
For the given request, find the list of strings of that intent. If the
intent does not exist, it will raise a KeyError.
|
def collect(self):
"""
Walks self.migration_home and load all potential migration modules
"""
for root, dirname, files in walk(self.migration_home):
for file_name in file_filter(files, "*.py"):
file_name = file_name.replace('.py', '')
file = None
try:
if file_name == '__init__':
continue
file, pathname, description = find_module(
file_name, [root])
load_module(file_name, file, pathname, description)
finally:
if file is not None:
file.close()
|
Walks self.migration_home and load all potential migration modules
|
def result(self,num):
"""result(N) -> return the result of job N."""
try:
return self.all[num].result
except KeyError:
error('Job #%s not found' % num)
|
result(N) -> return the result of job N.
|
def reporter(self):
"""
Creates the metadata report by pulling specific attributes from the metadata objects
"""
logging.info('Creating summary report')
header = '{}\n'.format(','.join(self.headers))
# Create a string to store all the results
data = str()
for sample in self.metadata:
# Add the value of the appropriate attribute to the results string
data += GenObject.returnattr(sample, 'name')
# SampleName
data += GenObject.returnattr(sample.run, 'SamplePlate')
# Genus
data += GenObject.returnattr(sample.general, 'closestrefseqgenus')
# SequencingDate
data += GenObject.returnattr(sample.run, 'Date')
# Analyst
data += GenObject.returnattr(sample.run, 'InvestigatorName')
# SamplePurity
data += GenObject.returnattr(sample.confindr, 'contam_status')
# N50
n50 = GenObject.returnattr(sample.quality_features_polished, 'n50',
number=True)
if n50 != '-,':
data += n50
else:
data += '0,'
# NumContigs
data += GenObject.returnattr(sample.quality_features_polished, 'num_contigs',
number=True)
# TotalLength
data += GenObject.returnattr(sample.quality_features_polished, 'genome_length',
number=True)
# MeanInsertSize
data += GenObject.returnattr(sample.mapping, 'MeanInsertSize',
number=True)
# InsertSizeSTD
data += GenObject.returnattr(sample.mapping, 'StdInsertSize',
number=True)
# AverageCoverageDepth
data += GenObject.returnattr(sample.mapping, 'MeanCoveragedata',
number=True)
# CoverageDepthSTD
data += GenObject.returnattr(sample.mapping, 'StdCoveragedata',
number=True)
# PercentGC
data += GenObject.returnattr(sample.quality_features_polished, 'gc',
number=True)
# MASH_ReferenceGenome
data += GenObject.returnattr(sample.mash, 'closestrefseq')
# MASH_NumMatchingHashes
data += GenObject.returnattr(sample.mash, 'nummatches')
# 16S_result
data += GenObject.returnattr(sample.sixteens_full, 'sixteens_match')
# rMLST_Result
try:
# If the number of matches to the closest reference profile is 53, return the profile number
if sample.rmlst.matches == 53:
rmlst_seq_type = GenObject.returnattr(sample.rmlst, 'sequencetype')
rmlst_seq_type = rmlst_seq_type if rmlst_seq_type != 'ND,' else 'new,'
data += rmlst_seq_type
else:
# Otherwise the profile is set to new
data += 'new,'
except AttributeError:
data += 'new,'
# MLST_Result
try:
if sample.mlst.matches == 7:
data += GenObject.returnattr(sample.mlst, 'sequencetype')
else:
data += 'new,'
# # Create a set of all the genes present in the results (gene name split from allele)
# mlst_gene_set = {gene.split('_')[0] for gene in sample.mlst.results}
# # If there are all the genes present, but no perfect match to a reference profile, state that
# # the profile is new
# if len(mlst_gene_set) == 7:
# data += 'new,'
# # Otherwise indicate that the profile is ND
# else:
# data += 'ND,'
except AttributeError:
data += 'new,'
# MLST_gene_X_alleles
try:
# Create a set of all the genes present in the results (gene name split from allele)
gene_set = {gene.split('_')[0] for gene in sample.mlst.results}
for gene in sorted(gene_set):
allele_list = list()
# Determine all the alleles that are present for each gene
for allele in sample.mlst.results:
if gene in allele:
allele_list.append(allele)
# If there is more than one allele in the sample, add both to the string separated by a ';'
if len(allele_list) > 1:
data += '{},'.format(';'.join(allele_list))
# Otherwise add the only allele
else:
data += allele_list[0] + ','
# If there are fewer than seven matching alleles, add a ND for each missing result
if len(gene_set) < 7:
data += (7 - len(gene_set)) * 'ND,'
except AttributeError:
# data += '-,-,-,-,-,-,-,'
data += 'ND,ND,ND,ND,ND,ND,ND,'
# CoreGenesPresent
data += GenObject.returnattr(sample.coregenome, 'coreresults')
# E_coli_Serotype
try:
# If no O-type was found, set the output to be O-untypeable
if ';'.join(sample.serosippr.o_set) == '-':
otype = 'O-untypeable'
else:
otype = '{oset} ({opid})'.format(oset=';'.join(sample.serosippr.o_set),
opid=sample.serosippr.best_o_pid)
# Same as above for the H-type
if ';'.join(sample.serosippr.h_set) == '-':
htype = 'H-untypeable'
else:
htype = '{hset} ({hpid})'.format(hset=';'.join(sample.serosippr.h_set),
hpid=sample.serosippr.best_h_pid)
serotype = '{otype}:{htype},'.format(otype=otype,
htype=htype)
# Add the serotype to the data string unless neither O-type not H-type were found; add ND instead
data += serotype if serotype != 'O-untypeable:H-untypeable,' else 'ND,'
except AttributeError:
data += 'ND,'
# SISTR_serovar_antigen
data += GenObject.returnattr(sample.sistr, 'serovar_antigen').rstrip(';')
# SISTR_serovar_cgMLST
data += GenObject.returnattr(sample.sistr, 'serovar_cgmlst')
# SISTR_serogroup
data += GenObject.returnattr(sample.sistr, 'serogroup')
# SISTR_h1
data += GenObject.returnattr(sample.sistr, 'h1').rstrip(';')
# SISTR_h2
data += GenObject.returnattr(sample.sistr, 'h2').rstrip(';')
# SISTR_serovar
data += GenObject.returnattr(sample.sistr, 'serovar')
# GeneSeekr_Profile
try:
if sample.genesippr.report_output:
data += ';'.join(sample.genesippr.report_output) + ','
else:
data += 'ND,'
except AttributeError:
data += 'ND,'
# Vtyper_Profile
data += GenObject.returnattr(sample.legacy_vtyper, 'toxinprofile')
# AMR_Profile and resistant/sensitive status
if sample.resfinder_assembled.pipelineresults:
# Profile
for resistance, resistance_set in sorted(sample.resfinder_assembled.pipelineresults.items()):
data += '{res}({r_set});'.format(res=resistance.replace(',', ';'),
r_set=';'.join(sorted(list(resistance_set))))
data += ','
# Resistant/Sensitive
data += 'Resistant,'
else:
# Profile
data += 'ND,'
# Resistant/Sensitive
data += 'Sensitive,'
# Plasmid Result
if sample.mobrecon.pipelineresults:
for plasmid, details in sorted(sample.mobrecon.pipelineresults.items()):
data += '{plasmid}({details});'.format(plasmid=plasmid,
details=details)
data += ','
else:
data += 'ND,'
# TotalPredictedGenes
data += GenObject.returnattr(sample.prodigal, 'predictedgenestotal',
number=True)
# PredictedGenesOver3000bp
data += GenObject.returnattr(sample.prodigal, 'predictedgenesover3000bp',
number=True)
# PredictedGenesOver1000bp
data += GenObject.returnattr(sample.prodigal, 'predictedgenesover1000bp',
number=True)
# PredictedGenesOver500bp
data += GenObject.returnattr(sample.prodigal, 'predictedgenesover500bp',
number=True)
# PredictedGenesUnder500bp
data += GenObject.returnattr(sample.prodigal, 'predictedgenesunder500bp',
number=True)
# NumClustersPF
data += GenObject.returnattr(sample.run, 'NumberofClustersPF')
# Percent of reads mapping to PhiX control
data += GenObject.returnattr(sample.run, 'phix_aligned')
# Error rate calculated from PhiX control
data += GenObject.returnattr(sample.run, 'error_rate')
# LengthForwardRead
data += GenObject.returnattr(sample.run, 'forwardlength',
number=True)
# LengthReverseRead
data += GenObject.returnattr(sample.run, 'reverselength',
number=True)
# Real time strain
data += GenObject.returnattr(sample.run, 'Description')
# Flowcell
data += GenObject.returnattr(sample.run, 'flowcell')
# MachineName
data += GenObject.returnattr(sample.run, 'instrument')
# PipelineVersion
data += self.commit + ','
# AssemblyDate
data += datetime.now().strftime('%Y-%m-%d')
# Append a new line to the end of the results for this sample
data += '\n'
# Replace any NA values with ND
cleandata = data.replace('NA', 'ND')
with open(os.path.join(self.reportpath, 'combinedMetadata.csv'), 'w') as metadatareport:
metadatareport.write(header)
metadatareport.write(cleandata)
|
Creates the metadata report by pulling specific attributes from the metadata objects
|
def cfg_to_dot(self, filename):
"""
Export the function to a dot file
Args:
filename (str)
"""
with open(filename, 'w', encoding='utf8') as f:
f.write('digraph{\n')
for node in self.nodes:
f.write('{}[label="{}"];\n'.format(node.node_id, str(node)))
for son in node.sons:
f.write('{}->{};\n'.format(node.node_id, son.node_id))
f.write("}\n")
|
Export the function to a dot file
Args:
filename (str)
|
def _and_join(self, terms):
""" Joins terms using AND operator.
Args:
terms (list): terms to join
Examples:
self._and_join(['term1']) -> 'term1'
self._and_join(['term1', 'term2']) -> 'term1 AND term2'
self._and_join(['term1', 'term2', 'term3']) -> 'term1 AND term2 AND term3'
Returns:
str
"""
if len(terms) > 1:
return ' AND '.join([self._or_join(t) for t in terms])
else:
return self._or_join(terms[0])
|
Joins terms using AND operator.
Args:
terms (list): terms to join
Examples:
self._and_join(['term1']) -> 'term1'
self._and_join(['term1', 'term2']) -> 'term1 AND term2'
self._and_join(['term1', 'term2', 'term3']) -> 'term1 AND term2 AND term3'
Returns:
str
|
def log_source(self, loglevel='INFO'):
"""Logs and returns the entire html source of the current page or frame.
The `loglevel` argument defines the used log level. Valid log levels are
`WARN`, `INFO` (default), `DEBUG`, `TRACE` and `NONE` (no logging).
"""
ll = loglevel.upper()
if ll == 'NONE':
return ''
else:
if "run_keyword_and_ignore_error" not in [check_error_ignored[3] for check_error_ignored in inspect.stack()]:
source = self._current_application().page_source
self._log(source, ll)
return source
else:
return ''
|
Logs and returns the entire html source of the current page or frame.
The `loglevel` argument defines the used log level. Valid log levels are
`WARN`, `INFO` (default), `DEBUG`, `TRACE` and `NONE` (no logging).
|
def _population_load_script(work_bams, names, chrom, pairmode, items):
"""Prepare BAMs for assessing CNVs in a population.
"""
bed_file = _get_regional_bed_file(items[0])
if bed_file:
return _population_prep_targeted.format(bam_file_str=",".join(work_bams), names_str=",".join(names),
chrom=chrom, num_cores=0, pairmode=pairmode, bed_file=bed_file)
else:
return _population_prep.format(bam_file_str=",".join(work_bams), names_str=",".join(names),
chrom=chrom, num_cores=0, pairmode=pairmode)
|
Prepare BAMs for assessing CNVs in a population.
|
def _chain_future(source, dest):
"""Chain two futures so that when one completes, so does the other.
The result (or exception) of source will be copied to destination.
If destination is cancelled, source gets cancelled too.
Compatible with both asyncio.Future and concurrent.futures.Future.
"""
if not isinstance(source, (asyncio.Future, concurrent.futures.Future)):
raise TypeError('A future is required for source argument')
if not isinstance(dest, (asyncio.Future, concurrent.futures.Future)):
raise TypeError('A future is required for destination argument')
source_loop = source._loop if isinstance(source, asyncio.Future) else None
dest_loop = dest._loop if isinstance(dest, asyncio.Future) else None
def _set_state(future, other):
if isinstance(future, asyncio.Future):
_copy_future_state(other, future)
else:
_set_concurrent_future_state(future, other)
def _call_check_cancel(destination):
if destination.cancelled():
if source_loop is None or source_loop is dest_loop:
source.cancel()
else:
source_loop.call_soon_threadsafe(source.cancel)
def _call_set_state(source):
if dest_loop is None or dest_loop is source_loop:
_set_state(dest, source)
else:
dest_loop.call_soon_threadsafe(_set_state, dest, source)
dest.add_done_callback(_call_check_cancel)
source.add_done_callback(_call_set_state)
|
Chain two futures so that when one completes, so does the other.
The result (or exception) of source will be copied to destination.
If destination is cancelled, source gets cancelled too.
Compatible with both asyncio.Future and concurrent.futures.Future.
|
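A hedged usage sketch, assuming _chain_future and its _copy_future_state helper are importable from the same module; completing the source future propagates the result to the destination on the next loop iteration.
import asyncio

async def demo():
    loop = asyncio.get_running_loop()
    source, dest = loop.create_future(), loop.create_future()
    _chain_future(source, dest)
    source.set_result(42)
    await asyncio.sleep(0)      # let the done-callback run
    print(dest.result())        # -> 42

asyncio.run(demo())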
def shutdown(self):
"""
Called by the server to commence a graceful shutdown.
"""
if self.cycle is None or self.cycle.response_complete:
self.transport.close()
else:
self.cycle.keep_alive = False
|
Called by the server to commence a graceful shutdown.
|
def vq_gating(x,
num_experts,
k,
bneck,
hparams=None,
name="vq_gating"):
"""VQ gating.
Args:
x: input Tensor with shape [batch_size, input_size]
num_experts: an integer
k: an integer - number of experts per example
bneck: a bottleneck object
hparams: optional hparams
name: an optional string
Returns:
gates: a Tensor with shape [batch_size, num_experts]
load: a Tensor with shape [num_experts]
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
if hparams.use_scales:
scales = tf.get_variable(
"scales", [num_experts],
tf.float32,
initializer=tf.ones_initializer())
scales = tf.nn.softmax(scales)
hparams.scales = scales
input_size = x.get_shape().as_list()[-1]
batch_size = common_layers.shape_list(x)[0]
if k > 1:
# first project into two dense layers, chop and discretize, and gate
# TODO(avaswani): Maybe scale the embeddings flowing out of the experts.
# We might want to do this to match the computation being done by topk
x = tf.layers.dense(x, input_size * k)
# x goes from [batch_size, input_size*k] to [batch_size*k, input_size]
x = tf.reshape(x, [batch_size * k, input_size])
inputs = tf.expand_dims(x, axis=1)
inputs = tf.expand_dims(inputs, axis=1)
# VQ hparams
hparams.z_size = int(math.log(num_experts, 2))
hparams.hidden_size = input_size
hparams.top_k = k
d = bneck.discrete_bottleneck(inputs)
centroids = None
exp_discrete = d["discrete"]
embed_lookup = d["embed"]
extra_loss = d["loss"]
if hparams.residual_centroids:
centroids = embed_lookup(exp_discrete) # gives the centroids
top_k_indices = tf.squeeze(exp_discrete, axis=1)
tf.summary.histogram("discrete_counts", top_k_indices)
# if k > 1, then we need to reshape top_k_indices from [batch_size*k, 1]
# to [batch_size, k]
if k > 1:
top_k_indices = tf.reshape(top_k_indices, [batch_size, k])
# get the top k gates
top_k_gates = tf.ones([batch_size, k])
# This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the
# positions corresponding to all but the top k experts per example.
gates = _rowwise_unsorted_segment_sum(top_k_gates, top_k_indices,
num_experts)
# Compute count per expert from the gates.
# gates has shape [batch_size, num_experts]
# count per expert has shape [num_experts, 1]
count_per_expert = tf.reduce_sum(gates, axis=0)
if hparams.use_scales:
scale_loss = tf.reduce_mean(tf.to_float(count_per_expert) * scales)
extra_loss += scale_loss
if common_layers.should_generate_summaries():
tf.summary.histogram("vq_loss", extra_loss)
tf.summary.histogram("scale_loss", scale_loss)
return gates, extra_loss, centroids
|
VQ gating.
Args:
x: input Tensor with shape [batch_size, input_size]
num_experts: an integer
k: an integer - number of experts per example
bneck: a bottleneck object
hparams: optional hparams
name: an optional string
Returns:
gates: a Tensor with shape [batch_size, num_experts]
load: a Tensor with shape [num_experts]
|
def _set_pg(self, v, load=False):
"""
Setter method for pg, mapped from YANG variable /rbridge_id/ag/pg (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_pg is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_pg() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("pgid",pg.pg, yang_name="pg", rest_name="pg", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pgid', extensions={u'tailf-common': {u'info': u'Creates a new port group.', u'cli-full-command': None, u'callpoint': u'pg_callpoint', u'cli-mode-name': u'config-rbridge-id-ag-pg-$(pgid)'}}), is_container='list', yang_name="pg", rest_name="pg", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Creates a new port group.', u'cli-full-command': None, u'callpoint': u'pg_callpoint', u'cli-mode-name': u'config-rbridge-id-ag-pg-$(pgid)'}}, namespace='urn:brocade.com:mgmt:brocade-ag', defining_module='brocade-ag', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """pg must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("pgid",pg.pg, yang_name="pg", rest_name="pg", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pgid', extensions={u'tailf-common': {u'info': u'Creates a new port group.', u'cli-full-command': None, u'callpoint': u'pg_callpoint', u'cli-mode-name': u'config-rbridge-id-ag-pg-$(pgid)'}}), is_container='list', yang_name="pg", rest_name="pg", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Creates a new port group.', u'cli-full-command': None, u'callpoint': u'pg_callpoint', u'cli-mode-name': u'config-rbridge-id-ag-pg-$(pgid)'}}, namespace='urn:brocade.com:mgmt:brocade-ag', defining_module='brocade-ag', yang_type='list', is_config=True)""",
})
self.__pg = t
if hasattr(self, '_set'):
self._set()
|
Setter method for pg, mapped from YANG variable /rbridge_id/ag/pg (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_pg is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_pg() directly.
|
def topology_mdtraj(traj):
'''Generate topology spec for the MolecularViewer from mdtraj.
:param mdtraj.Trajectory traj: the trajectory
:return: A chemview-compatible dictionary corresponding to the topology defined in mdtraj.
'''
import mdtraj as md
top = {}
top['atom_types'] = [a.element.symbol for a in traj.topology.atoms]
top['atom_names'] = [a.name for a in traj.topology.atoms]
top['bonds'] = [(a.index, b.index) for a, b in traj.topology.bonds]
top['secondary_structure'] = md.compute_dssp(traj[0])[0]
top['residue_types'] = [r.name for r in traj.topology.residues ]
top['residue_indices'] = [ [a.index for a in r.atoms] for r in traj.topology.residues ]
return top
|
Generate topology spec for the MolecularViewer from mdtraj.
:param mdtraj.Trajectory traj: the trajectory
:return: A chemview-compatible dictionary corresponding to the topology defined in mdtraj.
|
def bulk_history_create(self, objs, batch_size=None):
"""Bulk create the history for the objects specified by objs"""
historical_instances = [
self.model(
history_date=getattr(instance, "_history_date", now()),
history_user=getattr(instance, "_history_user", None),
history_change_reason=getattr(instance, "changeReason", ""),
history_type="+",
**{
field.attname: getattr(instance, field.attname)
for field in instance._meta.fields
if field.name not in self.model._history_excluded_fields
}
)
for instance in objs
]
return self.model.objects.bulk_create(
historical_instances, batch_size=batch_size
)
|
Bulk create the history for the objects specified by objs
|
def get_changed_files(self) -> List[str]:
"""Get the files changed on one git branch vs another.
Returns:
List[str]: File paths of changed files, relative to the git repo
root.
"""
out = shell_tools.output_of(
'git',
'diff',
'--name-only',
self.compare_commit_id,
self.actual_commit_id,
'--',
cwd=self.destination_directory)
return [e for e in out.split('\n') if e.strip()]
|
Get the files changed on one git branch vs another.
Returns:
List[str]: File paths of changed files, relative to the git repo
root.
|
def stage_http_response1(self, conn_id, version, status, reason, headers):
"""Set response http info including headers, status, etc.
conn_id unused here. Used in log"""
# pylint: disable=attribute-defined-outside-init
self._http_response_version = version
self._http_response_status = status
self._http_response_reason = reason
self._http_response_headers = headers
|
Set response http info including headers, status, etc.
conn_id unused here. Used in log
|
def str_numerator(self):
"""Returns the numerator with formatting."""
if self.undefined:
return None
unit_numerator, unit = self._unit_class(self.numerator).auto
formatter = '%d' if unit_numerator == self.numerator else '%0.2f'
numerator = locale.format(formatter, unit_numerator, grouping=True)
return '{0} {1}'.format(numerator, unit)
|
Returns the numerator with formatting.
|
def mount(name=None, **kwargs):
'''
Mounts ZFS file systems
name : string
name of the filesystem, having this set to None will mount all filesystems. (this is the default)
overlay : boolean
perform an overlay mount.
options : string
optional comma-separated list of mount options to use temporarily for
the duration of the mount.
.. versionadded:: 2016.3.0
.. versionchanged:: 2018.3.1
.. warning::
Passing '-a' as name is deprecated and will be removed in Sodium.
CLI Example:
.. code-block:: bash
salt '*' zfs.mount
salt '*' zfs.mount myzpool/mydataset
salt '*' zfs.mount myzpool/mydataset options=ro
'''
## Configure command
# NOTE: initialize the defaults
flags = []
opts = {}
# NOTE: set extra config from kwargs
if kwargs.get('overlay', False):
flags.append('-O')
if kwargs.get('options', False):
opts['-o'] = kwargs.get('options')
if name in [None, '-a']:
# NOTE: the new way to mount all filesystems is to have name
# set to ```None```. We still accept the old '-a' until
# Sodium. After Sodium we can update the if statement
# to ```if not name:```
if name == '-a':
salt.utils.versions.warn_until(
'Sodium',
'Passing \'-a\' as name is deprecated as of Salt 2019.2.0. This '
'warning will be removed in Salt Sodium. Please pass name as '
'\'None\' instead to mount all filesystems.')
flags.append('-a')
name = None
## Mount filesystem
res = __salt__['cmd.run_all'](
__utils__['zfs.zfs_command'](
command='mount',
flags=flags,
opts=opts,
target=name,
),
python_shell=False,
)
return __utils__['zfs.parse_command_result'](res, 'mounted')
|
Mounts ZFS file systems
name : string
name of the filesystem, having this set to None will mount all filesystems. (this is the default)
overlay : boolean
perform an overlay mount.
options : string
optional comma-separated list of mount options to use temporarily for
the duration of the mount.
.. versionadded:: 2016.3.0
.. versionchanged:: 2018.3.1
.. warning::
Passing '-a' as name is deprecated and will be removed in Sodium.
CLI Example:
.. code-block:: bash
salt '*' zfs.mount
salt '*' zfs.mount myzpool/mydataset
salt '*' zfs.mount myzpool/mydataset options=ro
|
def _act(self, utterance: str) -> list:
"""Infers DeepPavlov agent with raw user input extracted from Alexa request.
Args:
utterance: Raw user input extracted from Alexa request.
Returns:
response: DeepPavlov agent response.
"""
if self.stateful:
utterance = [[utterance], [self.key]]
else:
utterance = [[utterance]]
agent_response: list = self.agent(*utterance)
return agent_response
|
Infers DeepPavlov agent with raw user input extracted from Alexa request.
Args:
utterance: Raw user input extracted from Alexa request.
Returns:
response: DeepPavlov agent response.
|
def find_by_tag(self, tag, params={}, **options):
"""Returns the compact task records for all tasks with the given tag.
Parameters
----------
tag : {Id} The tag in which to search for tasks.
[params] : {Object} Parameters for the request
"""
path = "/tags/%s/tasks" % (tag)
return self.client.get_collection(path, params, **options)
|
Returns the compact task records for all tasks with the given tag.
Parameters
----------
tag : {Id} The tag in which to search for tasks.
[params] : {Object} Parameters for the request
|
def urlvoid_check(name, api_key):
"""Checks URLVoid.com for info on a domain"""
if not is_fqdn(name):
return None
url = 'http://api.urlvoid.com/api1000/{key}/host/{name}'.format(key=api_key, name=name)
response = requests.get(url)
tree = ET.fromstring(response.text)
if tree.find('./detections/engines'):
return [e.text for e in tree.find('./detections/engines')]
else:
return None
|
Checks URLVoid.com for info on a domain
|
def _validate_readonly(self, readonly, field, value):
""" {'type': 'boolean'} """
if readonly:
if not self._is_normalized:
self._error(field, errors.READONLY_FIELD)
# If the document was normalized (and therefore already been
# checked for readonly fields), we still have to return True
# if an error was filed.
has_error = errors.READONLY_FIELD in \
self.document_error_tree.fetch_errors_from(
self.document_path + (field,))
if self._is_normalized and has_error:
self._drop_remaining_rules()
|
{'type': 'boolean'}
|
def remove_sub(self, sub):
"""
Remove all references to a specific Subject ID
:param sub: A Subject ID
"""
for _sid in self.get('sub2sid', sub):
self.remove('sid2sub', _sid, sub)
self.delete('sub2sid', sub)
|
Remove all references to a specific Subject ID
:param sub: A Subject ID
|
def convert_pronouns( mrf_lines ):
''' Converts pronouns (analysis lines with '_P_') from Filosoft's mrf to
syntactic analyzer's mrf format;
Uses the set of predefined pronoun conversion rules from _pronConversions;
_pronConversions should be a list of lists, where each outer list stands
for a single conversion rule and inner list contains a pair of elements:
first is the regexp pattern and the second is the replacement, used in
re.sub( pattern, replacement, line )
Returns the input mrf list, with the lines converted from one format
to another;
'''
i = 0
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if '_P_' in line: # only consider lines containing pronoun analyses
for [pattern, replacement] in _pronConversions:
lastline = line
line = re.sub(pattern, replacement, line)
if lastline != line:
mrf_lines[i] = line
break
i += 1
return mrf_lines
|
Converts pronouns (analysis lines with '_P_') from Filosoft's mrf to
syntactic analyzer's mrf format;
Uses the set of predefined pronoun conversion rules from _pronConversions;
_pronConversions should be a list of lists, where each outer list stands
for a single conversion rule and inner list contains a pair of elements:
first is the regexp pattern and the second is the replacement, used in
re.sub( pattern, replacement, line )
Returns the input mrf list, with the lines converted from one format
to another;
|
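A hedged sketch of the rule mechanism with a made-up conversion table (the real _pronConversions rules are not shown here); each rule is a (pattern, replacement) pair applied with re.sub, and only lines containing '_P_' are touched.
import re
_demo_rules = [[r'\bdem\b', 'dem #InfP']]       # hypothetical rule
lines = ['see+0 //_P_ dem sg nom //', 'auto+0 //_S_ com sg nom //']
for i, line in enumerate(lines):
    if '_P_' in line:
        for pattern, replacement in _demo_rules:
            new = re.sub(pattern, replacement, line)
            if new != line:
                lines[i] = new
                break
print(lines)  # only the '_P_' line is rewritten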
def K_value(P=None, Psat=None, phi_l=None, phi_g=None, gamma=None, Poynting=1):
r'''Calculates the equilibrium K-value assuming Raoult's law,
or an equation of state model, or an activity coefficient model,
or a combined equation of state-activity model.
The calculation procedure will use the most advanced approach with the
provided inputs:
* If `P`, `Psat`, `phi_l`, `phi_g`, and `gamma` are provided, use the
combined approach.
* If `P`, `Psat`, and `gamma` are provided, use the modified Raoult's
law.
* If `phi_l` and `phi_g` are provided, use the EOS only method.
* If `P` and `Psat` are provided, use Raoult's law.
Definitions:
.. math::
K_i=\frac{y_i}{x_i}
Raoult's law:
.. math::
K_i = \frac{P_{i}^{sat}}{P}
Activity coefficient, no EOS (modified Raoult's law):
.. math::
K_i = \frac{\gamma_i P_{i}^{sat}}{P}
Equation of state only:
.. math::
K_i = \frac{\phi_i^l}{\phi_i^v} = \frac{f_i^l}{f_i^v}
Combined approach (liquid reference fugacity coefficient is normally
calculated the saturation pressure for it as a pure species; vapor fugacity
coefficient calculated normally):
.. math::
K_i = \frac{\gamma_i P_i^{sat} \phi_i^{l,ref}}{\phi_i^v P}
Combined approach, with Poynting Correction Factor (liquid molar volume in
the integral is for i as a pure species only):
.. math::
K_i = \frac{\gamma_i P_i^{sat} \phi_i^{l, ref} \exp\left[\frac{
\int_{P_i^{sat}}^P V_i^l dP}{RT}\right]}{\phi_i^v P}
Parameters
----------
P : float
System pressure, optional
Psat : float
Vapor pressure of species i, [Pa]
phi_l : float
Fugacity coefficient of species i in the liquid phase, either
at the system conditions (EOS-only case) or at the saturation pressure
of species i as a pure species (reference condition for the combined
approach), optional [-]
phi_g : float
Fugacity coefficient of species i in the vapor phase at the system
conditions, optional [-]
gamma : float
Activity coefficient of species i in the liquid phase, optional [-]
Poynting : float
Poynting correction factor, optional [-]
Returns
-------
K : float
Equilibrium K value of component i, calculated with an approach
depending on the provided inputs [-]
Notes
-----
The Poynting correction factor is normally simplified as follows, due to
a liquid's low pressure dependency:
.. math::
K_i = \frac{\gamma_i P_i^{sat} \phi_i^{l, ref} \exp\left[\frac{V_l
(P-P_i^{sat})}{RT}\right]}{\phi_i^v P}
Examples
--------
Raoult's law:
>>> K_value(101325, 3000.)
0.029607698001480384
Modified Raoult's law:
>>> K_value(P=101325, Psat=3000, gamma=0.9)
0.026646928201332347
EOS-only approach:
>>> K_value(phi_l=1.6356, phi_g=0.88427)
1.8496613025433408
Gamma-phi combined approach:
>>> K_value(P=1E6, Psat=1938800, phi_l=1.4356, phi_g=0.88427, gamma=0.92)
2.8958055544121137
Gamma-phi combined approach with a Poynting factor:
>>> K_value(P=1E6, Psat=1938800, phi_l=1.4356, phi_g=0.88427, gamma=0.92,
... Poynting=0.999)
2.8929097488577016
References
----------
.. [1] Gmehling, Jurgen, Barbel Kolbe, Michael Kleiber, and Jurgen Rarey.
Chemical Thermodynamics for Process Simulation. 1st edition. Weinheim:
Wiley-VCH, 2012.
.. [2] Skogestad, Sigurd. Chemical and Energy Process Engineering. 1st
edition. Boca Raton, FL: CRC Press, 2008.
'''
try:
if gamma:
if phi_l:
return gamma*Psat*phi_l*Poynting/(phi_g*P)
return gamma*Psat*Poynting/P
elif phi_l:
return phi_l/phi_g
return Psat/P
except TypeError:
raise Exception('Input must consist of one set from (P, Psat, phi_l, \
phi_g, gamma), (P, Psat, gamma), (phi_l, phi_g), (P, Psat)')
|
r'''Calculates the equilibrium K-value assuming Raoult's law,
or an equation of state model, or an activity coefficient model,
or a combined equation of state-activity model.
The calculation procedure will use the most advanced approach with the
provided inputs:
* If `P`, `Psat`, `phi_l`, `phi_g`, and `gamma` are provided, use the
combined approach.
* If `P`, `Psat`, and `gamma` are provided, use the modified Raoult's
law.
* If `phi_l` and `phi_g` are provided, use the EOS only method.
* If `P` and `Psat` are provided, use Raoult's law.
Definitions:
.. math::
K_i=\frac{y_i}{x_i}
Raoult's law:
.. math::
K_i = \frac{P_{i}^{sat}}{P}
Activity coefficient, no EOS (modified Raoult's law):
.. math::
K_i = \frac{\gamma_i P_{i}^{sat}}{P}
Equation of state only:
.. math::
K_i = \frac{\phi_i^l}{\phi_i^v} = \frac{f_i^l}{f_i^v}
Combined approach (liquid reference fugacity coefficient is normally
calculated the saturation pressure for it as a pure species; vapor fugacity
coefficient calculated normally):
.. math::
K_i = \frac{\gamma_i P_i^{sat} \phi_i^{l,ref}}{\phi_i^v P}
Combined approach, with Poynting Correction Factor (liquid molar volume in
the integral is for i as a pure species only):
.. math::
K_i = \frac{\gamma_i P_i^{sat} \phi_i^{l, ref} \exp\left[\frac{
\int_{P_i^{sat}}^P V_i^l dP}{RT}\right]}{\phi_i^v P}
Parameters
----------
P : float
System pressure, optional
Psat : float
Vapor pressure of species i, [Pa]
phi_l : float
Fugacity coefficient of species i in the liquid phase, either
at the system conditions (EOS-only case) or at the saturation pressure
of species i as a pure species (reference condition for the combined
approach), optional [-]
phi_g : float
Fugacity coefficient of species i in the vapor phase at the system
conditions, optional [-]
gamma : float
Activity coefficient of species i in the liquid phase, optional [-]
Poynting : float
Poynting correction factor, optional [-]
Returns
-------
K : float
Equilibrium K value of component i, calculated with an approach
depending on the provided inputs [-]
Notes
-----
The Poynting correction factor is normally simplified as follows, due to
a liquid's low pressure dependency:
.. math::
K_i = \frac{\gamma_i P_i^{sat} \phi_i^{l, ref} \exp\left[\frac{V_l
(P-P_i^{sat})}{RT}\right]}{\phi_i^v P}
Examples
--------
Raoult's law:
>>> K_value(101325, 3000.)
0.029607698001480384
Modified Raoult's law:
>>> K_value(P=101325, Psat=3000, gamma=0.9)
0.026646928201332347
EOS-only approach:
>>> K_value(phi_l=1.6356, phi_g=0.88427)
1.8496613025433408
Gamma-phi combined approach:
>>> K_value(P=1E6, Psat=1938800, phi_l=1.4356, phi_g=0.88427, gamma=0.92)
2.8958055544121137
Gamma-phi combined approach with a Poynting factor:
>>> K_value(P=1E6, Psat=1938800, phi_l=1.4356, phi_g=0.88427, gamma=0.92,
... Poynting=0.999)
2.8929097488577016
References
----------
.. [1] Gmehling, Jurgen, Barbel Kolbe, Michael Kleiber, and Jurgen Rarey.
Chemical Thermodynamics for Process Simulation. 1st edition. Weinheim:
Wiley-VCH, 2012.
.. [2] Skogestad, Sigurd. Chemical and Energy Process Engineering. 1st
edition. Boca Raton, FL: CRC Press, 2008.
|
def run(self, i_str, start_count=0, start_chunk_time=None):
'''Run the pipeline.
This runs all of the steps described in the pipeline constructor,
reading from some input and writing to some output.
:param str i_str: name of the input file, or other reader-specific
description of where to get input
:param int start_count: index of the first stream item
:param int start_chunk_time: timestamp for the first stream item
'''
try:
if not os.path.exists(self.tmp_dir_path):
os.makedirs(self.tmp_dir_path)
if start_chunk_time is None:
start_chunk_time = time.time()
## the reader returns generators of StreamItems
i_chunk = self.reader(i_str)
## t_path points to the currently in-progress temp chunk
t_path = None
## loop over all docs in the chunk processing and cutting
## smaller chunks if needed
len_clean_visible = 0
sources = set()
next_idx = 0
## how many have we input and actually done processing on?
input_item_count = 0
for si in i_chunk:
# TODO: break out a _process_stream_item function?
next_idx += 1
## yield to the gevent hub to allow other things to run
if gevent:
gevent.sleep(0)
## skip forward until we reach start_count
if next_idx <= start_count:
continue
if next_idx % self.rate_log_interval == 0:
## indexing is zero-based, so next_idx corresponds
## to length of list of SIs processed so far
elapsed = time.time() - start_chunk_time
if elapsed > 0:
rate = float(next_idx) / elapsed
logger.info('%d in %.1f --> %.1f per sec on '
'(pre-partial_commit) %s',
next_idx - start_count, elapsed, rate,
i_str)
if not self.t_chunk:
## make a temporary chunk at a temporary path
# (Lazy allocation after we've read an item that might get processed out to the new chunk file)
# TODO: make this EVEN LAZIER by not opening the t_chunk until inside _run_incremental_transforms when the first output si is ready
t_path = os.path.join(self.tmp_dir_path,
't_chunk-%s' % uuid.uuid4().hex)
self.t_chunk = streamcorpus.Chunk(path=t_path, mode='wb')
assert self.t_chunk.message == streamcorpus.StreamItem_v0_3_0, self.t_chunk.message
# TODO: a set of incremental transforms is equivalent
# to a batch transform. Make the pipeline explicitly
# configurable as such:
#
# batch_transforms: [[incr set 1], batch op, [incr set 2], ...]
#
# OR: for some list of transforms (mixed incremental
# and batch) pipeline can detect and batchify as needed
## incremental transforms populate t_chunk
## let the incremental transforms destroy the si by
## returning None
si = self._run_incremental_transforms(
si, self.incremental_transforms)
## insist that every chunk has only one source string
if si:
sources.add(si.source)
if self.assert_single_source and len(sources) != 1:
raise InvalidStreamItem(
'stream item %r had source %r, not %r '
'(set assert_single_source: false to suppress)' %
(si.stream_id, si.source, sources))
if si and si.body and si.body.clean_visible:
len_clean_visible += len(si.body.clean_visible)
## log binned clean_visible lengths, for quick stats estimates
#logger.debug('len(si.body.clean_visible)=%d' % int(10 * int(math.floor(float(len(si.body.clean_visible)) / 2**10)/10)))
#logger.debug('len(si.body.clean_visible)=%d' % len(si.body.clean_visible))
if ((self.output_chunk_max_count is not None and
len(self.t_chunk) == self.output_chunk_max_count)):
logger.info('reached output_chunk_max_count (%d) at: %d',
len(self.t_chunk), next_idx)
self._process_output_chunk(
start_count, next_idx, sources, i_str, t_path)
start_count = next_idx
elif (self.output_chunk_max_clean_visible_bytes is not None and
len_clean_visible >=
self.output_chunk_max_clean_visible_bytes):
logger.info(
'reached output_chunk_max_clean_visible_bytes '
'(%d) at: %d',
self.output_chunk_max_clean_visible_bytes,
len_clean_visible)
len_clean_visible = 0
self._process_output_chunk(
start_count, next_idx, sources, i_str, t_path)
start_count = next_idx
input_item_count += 1
if (((self.input_item_limit is not None) and
(input_item_count > self.input_item_limit))):
break
if self.t_chunk is not None:
self._process_output_chunk(
start_count, next_idx, sources, i_str, t_path)
## return how many stream items we processed
return next_idx
finally:
if self.t_chunk is not None:
self.t_chunk.close()
for transform in self.batch_transforms:
transform.shutdown()
if self.cleanup_tmp_files:
rmtree(self.tmp_dir_path)
|
Run the pipeline.
This runs all of the steps described in the pipeline constructor,
reading from some input and writing to some output.
:param str i_str: name of the input file, or other reader-specific
description of where to get input
:param int start_count: index of the first stream item
:param int start_chunk_time: timestamp for the first stream item
|
def next(self) -> Future:
"""Returns a `.Future` that will yield the next available result.
Note that this `.Future` will not be the same object as any of
the inputs.
"""
self._running_future = Future()
if self._finished:
self._return_result(self._finished.popleft())
return self._running_future
|
Returns a `.Future` that will yield the next available result.
Note that this `.Future` will not be the same object as any of
the inputs.
|
def all_after_notification(self, model, prop_name, info):
""" The method logs all changes that notified recursively trough the hierarchies of the states after the change
occurs in the rafcon.core object. The method register as observer of observable
StateMachineModel.state_machine of any observed StateMachineModel.
:param model: StateMachineModel that is represents the state_machine which has been changed
:param prop_name: Name of property that notifies -> here always 'state_machine'
:param info: Dictionary that hold recursive notification information like models, property and method names
:return:
"""
self.logger.debug(NotificationOverview(info))
|
The method logs all changes that are notified recursively through the hierarchies of the states after a change
occurs in the rafcon.core object. The method registers as an observer of the observable
StateMachineModel.state_machine of any observed StateMachineModel.
:param model: StateMachineModel that represents the state_machine which has been changed
:param prop_name: Name of property that notifies -> here always 'state_machine'
:param info: Dictionary that holds recursive notification information like models, property and method names
:return:
|
def alien_filter(name, location, size, unsize):
"""Fix to avoid packages include in slackbuilds folder
"""
(fname, flocation, fsize, funsize) = ([] for i in range(4))
for n, l, s, u in zip(name, location, size, unsize):
if "slackbuilds" != l:
fname.append(n)
flocation.append(l)
fsize.append(s)
funsize.append(u)
return [fname, flocation, fsize, funsize]
|
Fix to avoid packages included in the slackbuilds folder
|
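A tiny illustration with made-up package metadata; entries whose location is 'slackbuilds' are dropped from all four parallel lists.
name = ['pkg-a', 'pkg-b']
location = ['patches', 'slackbuilds']
size = ['10 K', '20 K']
unsize = ['40 K', '80 K']
print(alien_filter(name, location, size, unsize))
# -> [['pkg-a'], ['patches'], ['10 K'], ['40 K']]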
def _check_triple(self, triple):
"""compare triple to ontology, return error or None"""
subj, pred, obj = triple
if self._should_ignore_predicate(pred):
log.info("Ignoring triple with predicate '{}'"
.format(self._field_name_from_uri(pred)))
return
classes = []
log.warning("Possible member %s found" % pred)
pred = self._expand_qname(pred)
if self._namespace_from_uri(pred) not in self.allowed_namespaces:
log.info("Member %s does not use an allowed namespace", pred)
return
instanceof = self._is_instance((subj, pred, obj))
if type(instanceof) == rt.URIRef:
instanceof = self._expand_qname(instanceof)
if hasattr(self.schema_def, "attributes_by_class") and \
not self.schema_def.attributes_by_class:
log.info("Parsed ontology not found. Parsing...")
self.schema_def.parse_ontology()
class_invalid = self._validate_class(instanceof)
if class_invalid:
log.warning("Invalid class %s" % instanceof)
return class_invalid
# TODO - the above sometimes fails when a single object has more than
# one rdfa type (eg <span property="schema:creator rnews:creator"
# typeof="schema:Person rnews:Person">
# Graph chooses the type in an arbitrary order, so it's unreliable
# eg: http://semanticweb.com/the-impact-of-rdfa_b35003
classes = self._superclasses_for_subject(self.graph, instanceof)
classes.append(instanceof)
member_invalid = self._validate_member(pred, classes, instanceof)
if member_invalid:
log.warning("Invalid member of class")
return member_invalid
dupe_invalid = self._validate_duplication((subj, pred), instanceof)
if dupe_invalid:
log.warning("Duplication found")
return dupe_invalid
# collect a list of checked attributes
self.checked_attributes.append((subj, pred))
log.warning("successfully validated triple, no errors")
return
|
compare triple to ontology, return error or None
|
def _get_pos_name(pos_code, names='parent', english=True, pos_map=POS_MAP):
"""Gets the part of speech name for *pos_code*."""
pos_code = pos_code.lower() # Issue #10
if names not in ('parent', 'child', 'all'):
raise ValueError("names must be one of 'parent', 'child', or "
"'all'; not '{}'".format(names))
logger.debug("Getting {} POS name for '{}' formatted as '{}'.".format(
'English' if english else 'Chinese', pos_code, names))
for i in range(1, len(pos_code) + 1):
try:
pos_key = pos_code[0:i]
pos_entry = pos_map[pos_key]
break
except KeyError:
if i == len(pos_code):
logger.warning("part of speech not recognized: '{}'".format(
pos_code))
return None # Issue #20
pos = (pos_entry[1 if english else 0], )
if names == 'parent':
logger.debug("Part of speech name found: '{}'".format(pos[0]))
return pos[0]
if len(pos_entry) == 3 and pos_key != pos_code:
sub_map = pos_entry[2]
logger.debug("Found parent part of speech name '{}'. Descending to "
"look for child name for '{}'".format(
pos_entry[1], pos_code))
sub_pos = _get_pos_name(pos_code, names, english, sub_map)
if names == 'all':
# sub_pos can be None sometimes (e.g. for a word '甲')
pos = pos + sub_pos if sub_pos else pos
else:
pos = (sub_pos, )
name = pos if names == 'all' else pos[-1]
logger.debug("Part of speech name found: '{}'".format(name))
return name
|
Gets the part of speech name for *pos_code*.
|
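A hedged sketch of the longest-prefix lookup using a small hypothetical pos_map (the library's real POS_MAP is not reproduced here, and the function's module-level logger is assumed importable); the parent entry is found first and a nested map resolves the child name.
demo_map = {'n': ('名词', 'noun', {'nr': ('人名', 'personal name')})}
print(_get_pos_name('nr', names='parent', pos_map=demo_map))  # -> 'noun'
print(_get_pos_name('nr', names='all', pos_map=demo_map))     # -> ('noun', 'personal name')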
def keys_present(name, number, save_dir, region=None, key=None, keyid=None, profile=None,
save_format="{2}\n{0}\n{3}\n{1}\n"):
'''
.. versionadded:: 2015.8.0
Ensure the IAM access keys are present.
name (string)
The name of the new user.
number (int)
Number of keys that user should have.
save_dir (string)
The directory in which the key/keys will be saved. Keys are saved to a file named according
to the username provided.
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
save_format (dict)
Save format is repeated for each key. Default format is
"{2}\\n{0}\\n{3}\\n{1}\\n", where {0} and {1} are placeholders for new
key_id and key respectively, whereas {2} and {3} are "key_id-{number}"
and 'key-{number}' strings kept for compatibility.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if not __salt__['boto_iam.get_user'](name, region, key, keyid, profile):
ret['result'] = False
ret['comment'] = 'IAM User {0} does not exist.'.format(name)
return ret
if not isinstance(number, int):
ret['comment'] = 'The number of keys must be an integer.'
ret['result'] = False
return ret
if not os.path.isdir(save_dir):
ret['comment'] = 'The directory {0} does not exist.'.format(save_dir)
ret['result'] = False
return ret
keys = __salt__['boto_iam.get_all_access_keys'](user_name=name, region=region, key=key,
keyid=keyid, profile=profile)
if isinstance(keys, six.string_types):
log.debug('keys are : false %s', keys)
error, message = _get_error(keys)
ret['comment'] = 'Could not get keys.\n{0}\n{1}'.format(error, message)
ret['result'] = False
return ret
keys = keys['list_access_keys_response']['list_access_keys_result']['access_key_metadata']
log.debug('Keys are : %s.', keys)
if len(keys) >= number:
ret['comment'] = 'The number of keys exist for user {0}'.format(name)
ret['result'] = True
return ret
if __opts__['test']:
ret['comment'] = 'Access key is set to be created for {0}.'.format(name)
ret['result'] = None
return ret
new_keys = {}
for i in range(number-len(keys)):
created = __salt__['boto_iam.create_access_key'](name, region, key, keyid, profile)
if isinstance(created, six.string_types):
error, message = _get_error(created)
ret['comment'] = 'Could not create keys.\n{0}\n{1}'.format(error, message)
ret['result'] = False
return ret
log.debug('Created is : %s', created)
response = 'create_access_key_response'
result = 'create_access_key_result'
new_keys[six.text_type(i)] = {}
new_keys[six.text_type(i)]['key_id'] = created[response][result]['access_key']['access_key_id']
new_keys[six.text_type(i)]['secret_key'] = created[response][result]['access_key']['secret_access_key']
try:
with salt.utils.files.fopen('{0}/{1}'.format(save_dir, name), 'a') as _wrf:
for key_num, key in new_keys.items():
key_id = key['key_id']
secret_key = key['secret_key']
_wrf.write(salt.utils.stringutils.to_str(
save_format.format(
key_id,
secret_key,
'key_id-{0}'.format(key_num),
'key-{0}'.format(key_num)
)
))
ret['comment'] = 'Keys have been written to file {0}/{1}.'.format(save_dir, name)
ret['result'] = True
ret['changes'] = new_keys
return ret
except IOError:
ret['comment'] = 'Could not write to file {0}/{1}.'.format(save_dir, name)
ret['result'] = False
return ret
|
.. versionadded:: 2015.8.0
Ensure the IAM access keys are present.
name (string)
The name of the new user.
number (int)
Number of keys that user should have.
save_dir (string)
The directory in which the key/keys will be saved. Keys are saved to a file named according
to the username provided.
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
save_format (string)
Save format is repeated for each key. Default format is
"{2}\\n{0}\\n{3}\\n{1}\\n", where {0} and {1} are placeholders for new
key_id and key respectively, whereas {2} and {3} are "key_id-{number}"
and 'key-{number}' strings kept for compatibility.
|
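A minimal standalone sketch (plain Python, not a Salt state) of how the default save_format string is rendered for one new key; the key id and secret below are made-up placeholders.

# Hypothetical illustration of the default save_format "{2}\n{0}\n{3}\n{1}\n".
save_format = "{2}\n{0}\n{3}\n{1}\n"
key_num = '0'
key_id = 'AKIAEXAMPLEKEYID'      # placeholder, not a real access key id
secret_key = 'exampleSecretKey'  # placeholder, not a real secret
print(save_format.format(
    key_id,
    secret_key,
    'key_id-{0}'.format(key_num),
    'key-{0}'.format(key_num),
))
# key_id-0
# AKIAEXAMPLEKEYID
# key-0
# exampleSecretKey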
def get_installed_distributions(local_only=True, skip=('setuptools', 'pip', 'python')):
"""
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to ('setuptools', 'pip', 'python'). [FIXME also
skip virtualenv?]
"""
if local_only:
local_test = dist_is_local
else:
local_test = lambda d: True
return [d for d in pkg_resources.working_set if local_test(d) and d.key not in skip]
|
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to ('setuptools', 'pip', 'python'). [FIXME also
skip virtualenv?]
|
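A short usage sketch, assuming pkg_resources and a pip-style dist_is_local helper are available in the surrounding module:

# Print every installed distribution outside the default skip set.
for dist in get_installed_distributions(local_only=False):
    print(dist.key, dist.version)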
def draw_points_heatmap_array(self, image_shape, alpha=1.0,
size=1, raise_if_out_of_image=False):
"""
Draw the points of the line string as a heatmap array.
Parameters
----------
image_shape : tuple of int
The shape of the image onto which to draw the point mask.
alpha : float, optional
            Opacity of the line string points. Higher values denote more
            visible points.
size : int, optional
Size of the points in pixels.
raise_if_out_of_image : bool, optional
Whether to raise an error if the line string is fully
outside of the image. If set to False, no error will be raised and
only the parts inside the image will be drawn.
Returns
-------
ndarray
Float array of shape `image_shape` (no channel axis) with drawn
line string points. All values are in the interval ``[0.0, 1.0]``.
"""
assert len(image_shape) == 2 or (
len(image_shape) == 3 and image_shape[-1] == 1), (
"Expected (H,W) or (H,W,1) as image_shape, got %s." % (
image_shape,))
arr = self.draw_points_on_image(
np.zeros(image_shape, dtype=np.uint8),
color=255, alpha=alpha, size=size,
raise_if_out_of_image=raise_if_out_of_image
)
return arr.astype(np.float32) / 255.0
|
Draw the points of the line string as a heatmap array.
Parameters
----------
image_shape : tuple of int
The shape of the image onto which to draw the point mask.
alpha : float, optional
    Opacity of the line string points. Higher values denote more
    visible points.
size : int, optional
Size of the points in pixels.
raise_if_out_of_image : bool, optional
Whether to raise an error if the line string is fully
outside of the image. If set to False, no error will be raised and
only the parts inside the image will be drawn.
Returns
-------
ndarray
Float array of shape `image_shape` (no channel axis) with drawn
line string points. All values are in the interval ``[0.0, 1.0]``.
|
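A hedged usage sketch, assuming this method belongs to imgaug's LineString class (the import path is an assumption):

from imgaug.augmentables.lines import LineString  # assumed import path

ls = LineString([(10, 10), (30, 25), (50, 50)])
heatmap = ls.draw_points_heatmap_array(image_shape=(64, 64), size=3)
print(heatmap.shape, heatmap.dtype, float(heatmap.max()))  # expected roughly: (64, 64) float32 1.0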
def validate_args(f):
"""
Ensures that *args consist of a consistent type
:param f: any client method with *args parameter
:return: function f
"""
def wrapper(self, args):
arg_types = set([type(arg) for arg in args])
if len(arg_types) > 1:
raise TypeError("Mixed input types are not allowed")
        elif arg_types and list(arg_types)[0] not in (dict, str):
raise TypeError("Only dict and str types accepted")
return f(self, args)
return wrapper
|
Ensures that *args consist of a consistent type
:param f: any client method with *args parameter
:return: function f
|
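A small sketch of applying the decorator to a hypothetical client method:

class Client(object):
    @validate_args
    def send(self, args):
        return list(args)

c = Client()
c.send(["a", "b"])              # accepted: all str
c.send([{"k": 1}, {"k": 2}])    # accepted: all dict
# c.send(["a", {"k": 1}])       # raises TypeError: Mixed input types are not allowed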
def get_tmaster(self, topologyName, callback=None):
""" get tmaster """
isWatching = False
# Temp dict used to return result
# if callback is not provided.
ret = {
"result": None
}
if callback:
isWatching = True
else:
def callback(data):
"""
Custom callback to get the topologies right now.
"""
ret["result"] = data
self._get_tmaster_with_watch(topologyName, callback, isWatching)
# The topologies are now populated with the data.
return ret["result"]
|
get tmaster
|
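The synchronous path works by capturing the watch result in a closure when no callback is supplied. A minimal standalone sketch of that pattern, with the asynchronous lookup simulated by a direct call:

def fetch_async(callback):
    # Simulated asynchronous lookup that invokes the callback with its result.
    callback({"host": "tmaster-host", "port": 1234})

def fetch(callback=None):
    ret = {"result": None}
    if callback is None:
        def callback(data):
            ret["result"] = data
    fetch_async(callback)
    return ret["result"]

print(fetch())  # {'host': 'tmaster-host', 'port': 1234}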
def to_primitive(self, value, context=None):
""" Schematics serializer override
If epoch_date is true then convert the `datetime.datetime`
object into an epoch `int`.
"""
if context and context.get('epoch_date'):
epoch = dt(1970, 1, 1)
value = (value - epoch).total_seconds()
return int(value)
elif context and context.get('datetime_date'):
return value
else:
return super(Type, self).to_primitive(value, context)
|
Schematics serializer override
If epoch_date is true then convert the `datetime.datetime`
object into an epoch `int`.
|
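A standalone illustration of the epoch arithmetic used when epoch_date is set (naive UTC datetimes assumed):

from datetime import datetime as dt

value = dt(2015, 8, 1, 12, 0, 0)
epoch = dt(1970, 1, 1)
print(int((value - epoch).total_seconds()))  # 1438430400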
def configure(self):
"""Configure the device.
Send the device configuration saved inside the MCP342x object to the target device."""
    logger.debug('Configuring %s ch: %s res: %s gain: %s',
                 hex(self.get_address()), self.get_channel(),
                 self.get_resolution(), self.get_gain())
self.bus.write_byte(self.address, self.config)
|
Configure the device.
Send the device configuration saved inside the MCP342x object to the target device.
|
def get_id(date=None, project: str = 'sip',
instance_id: int = None) -> str:
"""Get a SBI Identifier.
Args:
date (str or datetime.datetime, optional): UTC date of the SBI
project (str, optional ): Project Name
instance_id (int, optional): SBI instance identifier
Returns:
str, Scheduling Block Instance (SBI) ID.
"""
if date is None:
date = datetime.datetime.utcnow()
if isinstance(date, datetime.datetime):
date = date.strftime('%Y%m%d')
if instance_id is None:
instance_id = randint(0, 9999)
return 'SBI-{}-{}-{:04d}'.format(date, project, instance_id)
|
Get a SBI Identifier.
Args:
date (str or datetime.datetime, optional): UTC date of the SBI
project (str, optional ): Project Name
instance_id (int, optional): SBI instance identifier
Returns:
str, Scheduling Block Instance (SBI) ID.
|
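A short usage sketch; with an explicit instance_id the result is deterministic:

import datetime

sbi_id = get_id(date=datetime.datetime(2018, 3, 1), project='sip', instance_id=42)
print(sbi_id)  # SBI-20180301-sip-0042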
def evaluate(
loop_hparams, planner_hparams, policy_dir, model_dir, eval_metrics_dir,
agent_type, eval_mode, eval_with_learner, log_every_steps, debug_video_path,
num_debug_videos=1, random_starts_step_limit=None,
report_fn=None, report_metric=None
):
"""Evaluate."""
if eval_with_learner:
assert agent_type == "policy"
if report_fn:
assert report_metric is not None
eval_metrics_writer = tf.summary.FileWriter(eval_metrics_dir)
video_writers = ()
kwargs = {}
if eval_mode in ["agent_real", "agent_simulated"]:
if not eval_with_learner:
if debug_video_path:
tf.gfile.MakeDirs(debug_video_path)
video_writers = [
common_video.WholeVideoWriter( # pylint: disable=g-complex-comprehension
fps=10,
output_path=os.path.join(debug_video_path, "{}.avi".format(i)),
file_format="avi",
)
for i in range(num_debug_videos)
]
kwargs["eval_fn"] = make_eval_fn_with_agent(
agent_type, eval_mode, planner_hparams, model_dir,
log_every_steps=log_every_steps,
video_writers=video_writers,
random_starts_step_limit=random_starts_step_limit
)
eval_metrics = rl_utils.evaluate_all_configs(
loop_hparams, policy_dir, **kwargs
)
else:
eval_metrics = evaluate_world_model(
agent_type, loop_hparams, planner_hparams, model_dir, policy_dir,
random_starts_step_limit, debug_video_path, log_every_steps
)
rl_utils.summarize_metrics(eval_metrics_writer, eval_metrics, 0)
for video_writer in video_writers:
video_writer.finish_to_disk()
# Report metrics
if report_fn:
if report_metric == "mean_reward":
metric_name = rl_utils.get_metric_name(
sampling_temp=loop_hparams.eval_sampling_temps[0],
max_num_noops=loop_hparams.eval_max_num_noops,
clipped=False
)
report_fn(eval_metrics[metric_name], 0)
else:
report_fn(eval_metrics[report_metric], 0)
return eval_metrics
|
Evaluate.
|
def get_word_id (root):
"""
    lookup/assign a unique identifier for each word root
"""
global UNIQ_WORDS
# in practice, this should use a microservice via some robust
# distributed cache, e.g., Redis, Cassandra, etc.
if root not in UNIQ_WORDS:
UNIQ_WORDS[root] = len(UNIQ_WORDS)
return UNIQ_WORDS[root]
|
lookup/assign a unique identifier for each word root
|
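A minimal sketch of the incremental-id behaviour, assuming the module-level UNIQ_WORDS dict starts empty:

UNIQ_WORDS = {}
print(get_word_id("run"))   # 0
print(get_word_id("jump"))  # 1
print(get_word_id("run"))   # 0, already assigned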
def user_has_permission(self, user, name):
""" verify user has permission """
targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
if not targetRecord:
return False
for group in targetRecord.groups:
if self.has_permission(group.role, name):
return True
return False
|
verify user has permission
|
def _handle_union(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_union.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling union")
union_cls = StructUnionDef("union", self, node)
return union_cls
|
Handle a union node by constructing a StructUnionDef for it.
:node: the AST node of the union declaration
:scope: the current scope
:ctxt: the current context
:stream: the input data stream
:returns: the StructUnionDef class representing the union
|
def generate_all(sumlevel, d):
"""Generate a dict that includes all of the available geoid values, with keys
for the most common names for those values. """
from geoid.civick import GVid
from geoid.tiger import TigerGeoid
from geoid.acs import AcsGeoid
sumlevel = int(sumlevel)
d = dict(d.items())
# Map common name variants
if 'cousub' in d:
d['cosub'] = d['cousub']
del d['cousub']
if 'blkgrp' in d:
d['blockgroup'] = d['blkgrp']
del d['blkgrp']
if 'zcta5' in d:
d['zcta'] = d['zcta5']
del d['zcta5']
gvid_class = GVid.resolve_summary_level(sumlevel)
if not gvid_class:
return {}
geoidt_class = TigerGeoid.resolve_summary_level(sumlevel)
geoid_class = AcsGeoid.resolve_summary_level(sumlevel)
    return dict(
        gvid=str(gvid_class(**d)),
        geoid=str(geoid_class(**d)),
        geoidt=str(geoidt_class(**d))
    )
|
Generate a dict that includes all of the available geoid values, with keys
for the most common names for those values.
|
def plate_exchanger_identifier(self):
'''Method to create an identifying string in format 'L' + wavelength +
'A' + amplitude + 'B' + chevron angle-chevron angle. Wavelength and
amplitude are specified in units of mm and rounded to two decimal places.
'''
s = ('L' + str(round(self.wavelength*1000, 2))
+ 'A' + str(round(self.amplitude*1000, 2))
+ 'B' + '-'.join([str(i) for i in self.chevron_angles]))
return s
|
Method to create an identifying string in format 'L' + wavelength +
'A' + amplitude + 'B' + chevron angle-chevron angle. Wavelength and
amplitude are specified in units of mm and rounded to two decimal places.
|
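A standalone illustration of the identifier format with made-up geometry values (wavelength and amplitude in metres, chevron angles in degrees):

wavelength = 0.0096
amplitude = 0.002
chevron_angles = (45, 35)
s = ('L' + str(round(wavelength * 1000, 2))
     + 'A' + str(round(amplitude * 1000, 2))
     + 'B' + '-'.join(str(i) for i in chevron_angles))
print(s)  # L9.6A2.0B45-35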
def _process_wave_param(self, pval):
"""Process individual model parameter representing wavelength."""
return self._process_generic_param(
pval, self._internal_wave_unit, equivalencies=u.spectral())
|
Process individual model parameter representing wavelength.
|
def safe_int_conv(number):
"""Safely convert a single number to integer."""
try:
return int(np.array(number).astype(int, casting='safe'))
except TypeError:
raise ValueError('cannot safely convert {} to integer'.format(number))
|
Safely convert a single number to integer.
|
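A usage sketch; numpy's 'safe' casting rule accepts integer-typed input and rejects lossy float-to-int conversion:

print(safe_int_conv(7))     # 7
print(safe_int_conv(True))  # 1, bool casts safely to int
# safe_int_conv(2.5) raises ValueError: float64 -> int is not a 'safe' cast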
def is_for_driver_task(self):
"""See whether this function descriptor is for a driver or not.
Returns:
True if this function descriptor is for driver tasks.
"""
return all(
len(x) == 0
for x in [self.module_name, self.class_name, self.function_name])
|
See whether this function descriptor is for a driver or not.
Returns:
True if this function descriptor is for driver tasks.
|
def list(self, **filters):
""" Returns a queryset filtering object by user permission. If you want,
you can specify filter arguments.
See https://docs.djangoproject.com/en/dev/ref/models/querysets/#filter for more details
"""
LOG.debug(u'Querying %s by filters=%s', self.model_class.__name__, filters)
query = self.__queryset__()
perm = build_permission_name(self.model_class, 'view')
LOG.debug(u"Checking if user %s has_perm %s" % (self.user, perm))
query_with_permission = filter(lambda o: self.user.has_perm(perm, obj=o), query)
ids = map(lambda o: o.pk, query_with_permission)
# FIXME: Return to query again without use database
queryset = self.__queryset__().filter(pk__in=ids)
related = getattr(self, 'select_related', None)
if related:
queryset = queryset.select_related(*related)
return queryset
|
Returns a queryset filtering object by user permission. If you want,
you can specify filter arguments.
See https://docs.djangoproject.com/en/dev/ref/models/querysets/#filter for more details
|