| code | docstring |
|---|---|
def container_start(name, remote_addr=None,
cert=None, key=None, verify_cert=True):
'''
Start a container
name :
Name of the container to start
remote_addr :
        A URL to a remote server. You must also provide cert and key if
        remote_addr is a TCP address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
        Whether to verify the cert. This is True by default,
        but in most cases you will want to turn it off, as LXD
        normally uses self-signed certificates.
'''
container = container_get(
name, remote_addr, cert, key, verify_cert, _raw=True
)
container.start(wait=True)
return _pylxd_model_to_dict(container)
|
Start a container
name :
Name of the container to start
remote_addr :
    A URL to a remote server. You must also provide cert and key if
    remote_addr is a TCP address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
    Whether to verify the cert. This is True by default,
    but in most cases you will want to turn it off, as LXD
    normally uses self-signed certificates.
|
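A hypothetical call using the example values from the docstring (the container name `web01` is a placeholder):

```python
result = container_start(
    'web01',
    remote_addr='https://myserver.lan:8443',
    cert='~/.config/lxc/client.crt',
    key='~/.config/lxc/client.key',
    verify_cert=False,  # LXD commonly uses self-signed certificates
)
```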
def get_command(self, version=2):
"""Return the SSH protocol specific command to connect."""
try:
options = _C['options']
options_str = " -o ".join(options)
if options_str:
options_str = "-o " + options_str + " "
except KeyError:
options_str = ""
if self.username:
            # "-o ConnectTimeout={}" is not supported on SunOS,
            # so no connect-timeout option is added here.
command = "ssh {}" \
"-{} " \
"-p {} {}@{}".format(options_str, version, self.port, self.username, self.hostname)
else:
command = "ssh {} " \
"-{} " \
"-p {} {}".format(options_str, version, self.port, self.hostname)
return command
|
Return the SSH protocol specific command to connect.
|
def get_many(self, content_ids, feature_names=None):
'''Returns an iterable of feature collections.
This efficiently retrieves multiple FCs corresponding to the
list of ids given. Tuples of identifier and feature collection
are yielded. If the feature collection for a given id does not
exist, then ``None`` is returned as the second element of the
tuple.
:param [str] content_ids: List of content ids.
:param [str] feature_names:
A list of feature names to retrieve. When ``None``, all
features are retrieved. Wildcards are allowed.
:rtype: Iterable of ``(content_id, FC)``
'''
try:
            resp = self.conn.mget(index=self.index, doc_type=self.type,
                                  _source=self._source(feature_names),
                                  # list() keeps the body JSON-serializable on Python 3
                                  body={'ids': list(map(eid, content_ids))})
except TransportError:
return
for doc in resp['docs']:
fc = None
if doc['found']:
fc = self.fc_from_dict(doc['_source']['fc'])
yield did(doc['_id']), fc
|
Returns an iterable of feature collections.
This efficiently retrieves multiple FCs corresponding to the
list of ids given. Tuples of identifier and feature collection
are yielded. If the feature collection for a given id does not
exist, then ``None`` is returned as the second element of the
tuple.
:param [str] content_ids: List of content ids.
:param [str] feature_names:
A list of feature names to retrieve. When ``None``, all
features are retrieved. Wildcards are allowed.
:rtype: Iterable of ``(content_id, FC)``
|
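A hypothetical usage sketch, assuming `store` is a configured instance of this Elasticsearch-backed store:

```python
for content_id, fc in store.get_many(['doc-1', 'doc-2']):
    if fc is None:
        print('no feature collection for', content_id)
    else:
        print('retrieved', content_id)
```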
def reStructuredText_to_html(input, output, css_file):
"""
Outputs a reStructuredText file to html.
:param input: Input reStructuredText file to convert.
:type input: unicode
:param output: Output html file.
:type output: unicode
    :param css_file: CSS file.
:type css_file: unicode
:return: Definition success.
:rtype: bool
"""
LOGGER.info("{0} | Converting '{1}' reStructuredText file to html!".format(
reStructuredText_to_html.__name__, input))
os.system("{0} --stylesheet-path='{1}' '{2}' > '{3}'".format(RST2HTML,
os.path.join(os.path.dirname(__file__), css_file),
input,
output))
LOGGER.info("{0} | Formatting html file!".format("Tidy"))
os.system("tidy -config {0} -m '{1}'".format(os.path.join(os.path.dirname(__file__), TIDY_SETTINGS_FILE), output))
file = File(output)
file.cache()
LOGGER.info("{0} | Replacing spaces with tabs!".format(reStructuredText_to_html.__name__))
file.content = [line.replace(" " * 4, "\t") for line in file.content]
file.write()
return True
|
Outputs a reStructuredText file to html.
:param input: Input reStructuredText file to convert.
:type input: unicode
:param output: Output html file.
:type output: unicode
    :param css_file: CSS file.
:type css_file: unicode
:return: Definition success.
:rtype: bool
|
def _forward(X, s=1.1, gamma=1., k=5):
"""
Forward dynamic algorithm for burstness automaton HMM, from `Kleinberg
(2002) <http://www.cs.cornell.edu/home/kleinber/bhs.pdf>`_.
Parameters
----------
X : list
A series of time-gaps between events.
s : float
        (default: 1.1) Scaling parameter (> 1.) that controls the graininess of
burst detection. Lower values make the model more sensitive.
gamma : float
(default: 1.0) Parameter that controls the 'cost' of higher burst
states. Higher values make it more 'difficult' to achieve a higher
burst state.
k : int
(default: 5) Number of states. Higher values increase computational
cost of the algorithm. A maximum of 25 is suggested by the literature.
Returns
-------
states : list
Optimal state sequence.
"""
X = list(X)
def alpha(i):
return (n/T)*(s**i)
def tau(i, j):
if j > i:
return (j-i)*gamma*log(n)
return 0.
def f(j, x):
return alpha(j) * exp(-1. * alpha(j) * x)
def C(j, t):
if j == 0 and t == 0:
return 0.
elif t == 0:
return float("inf")
C_tau = min([C_values[l][t-1] + tau(l, j) for l in xrange(k)])
return (-1. * log(f(j,X[t]))) + C_tau
T = sum(X)
n = len(X)
    # C() reads C_values[l][t-1], so fill the table one time step at a time
    # (t outer, j inner) so that column t-1 is complete before column t is
    # computed. Cells are pre-initialized so the list indexing is valid.
    C_values = [[0. for t in xrange(len(X))] for j in xrange(k)]
    for t in xrange(len(X)):
        for j in xrange(k):
            C_values[j][t] = C(j, t)
# Find the optimal state sequence.
states = [argmin([c[t] for c in C_values]) for t in xrange(n)]
return states
|
Forward dynamic algorithm for burstness automaton HMM, from `Kleinberg
(2002) <http://www.cs.cornell.edu/home/kleinber/bhs.pdf>`_.
Parameters
----------
X : list
A series of time-gaps between events.
s : float
    (default: 1.1) Scaling parameter (> 1.) that controls the graininess of
burst detection. Lower values make the model more sensitive.
gamma : float
(default: 1.0) Parameter that controls the 'cost' of higher burst
states. Higher values make it more 'difficult' to achieve a higher
burst state.
k : int
(default: 5) Number of states. Higher values increase computational
cost of the algorithm. A maximum of 25 is suggested by the literature.
Returns
-------
states : list
Optimal state sequence.
|
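A small usage sketch, assuming the Python 2-era names this function relies on (`log`, `exp`, `argmin`, `xrange`) are importable in its module:

```python
# Time-gaps between events; short gaps suggest a burst.
gaps = [10.0, 9.0, 1.0, 0.5, 0.8, 12.0, 11.0]
states = _forward(gaps, s=1.1, gamma=1.0, k=5)
# `states` assigns each gap a burst level; higher values mark burstier periods.
```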
def seq_dup_levels_plot (self):
""" Create the HTML for the Sequence Duplication Levels plot """
data = dict()
max_dupval = 0
for s_name in self.fastqc_data:
try:
thisdata = {}
for d in self.fastqc_data[s_name]['sequence_duplication_levels']:
thisdata[d['duplication_level']] = d['percentage_of_total']
max_dupval = max(max_dupval, d['percentage_of_total'])
data[s_name] = OrderedDict()
for k in self.dup_keys:
try:
data[s_name][k] = thisdata[k]
except KeyError:
pass
except KeyError:
pass
if len(data) == 0:
            log.debug('sequence_duplication_levels not found in FastQC reports')
return None
pconfig = {
'id': 'fastqc_sequence_duplication_levels_plot',
'title': 'FastQC: Sequence Duplication Levels',
'categories': True,
'ylab': '% of Library',
'xlab': 'Sequence Duplication Level',
'ymax': 100 if max_dupval <= 100.0 else None,
'ymin': 0,
'yMinTickInterval': 0.1,
'colors': self.get_status_cols('sequence_duplication_levels'),
'tt_label': '<b>{point.x}</b>: {point.y:.1f}%',
}
self.add_section (
name = 'Sequence Duplication Levels',
anchor = 'fastqc_sequence_duplication_levels',
description = 'The relative level of duplication found for every sequence.',
helptext = '''
From the [FastQC Help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/8%20Duplicate%20Sequences.html):
_In a diverse library most sequences will occur only once in the final set.
A low level of duplication may indicate a very high level of coverage of the
target sequence, but a high level of duplication is more likely to indicate
some kind of enrichment bias (eg PCR over amplification). This graph shows
the degree of duplication for every sequence in a library: the relative
number of sequences with different degrees of duplication._
_Only sequences which first appear in the first 100,000 sequences
in each file are analysed. This should be enough to get a good impression
for the duplication levels in the whole file. Each sequence is tracked to
the end of the file to give a representative count of the overall duplication level._
_The duplication detection requires an exact sequence match over the whole length of
the sequence. Any reads over 75bp in length are truncated to 50bp for this analysis._
_In a properly diverse library most sequences should fall into the far left of the
plot in both the red and blue lines. A general level of enrichment, indicating broad
oversequencing in the library will tend to flatten the lines, lowering the low end
and generally raising other categories. More specific enrichments of subsets, or
the presence of low complexity contaminants will tend to produce spikes towards the
right of the plot._
''',
plot = linegraph.plot(data, pconfig)
)
|
Create the HTML for the Sequence Duplication Levels plot
|
def make_file_path(project_dir, project_name, root, name):
"""
Generates the target path for a file
"""
return path.join(make_dir_path(project_dir, root, project_name), name)
|
Generates the target path for a file
|
def make_mutant_tuples(example_protos, original_feature, index_to_mutate,
viz_params):
"""Return a list of `MutantFeatureValue`s and a list of mutant Examples.
Args:
example_protos: The examples to mutate.
original_feature: A `OriginalFeatureList` that encapsulates the feature to
mutate.
index_to_mutate: The index of the int64_list or float_list to mutate.
viz_params: A `VizParams` object that contains the UI state of the request.
Returns:
A list of `MutantFeatureValue`s and a list of mutant examples.
"""
mutant_features = make_mutant_features(original_feature, index_to_mutate,
viz_params)
mutant_examples = []
for example_proto in example_protos:
for mutant_feature in mutant_features:
copied_example = copy.deepcopy(example_proto)
feature_name = mutant_feature.original_feature.feature_name
try:
feature_list = proto_value_for_feature(copied_example, feature_name)
if index_to_mutate is None:
new_values = mutant_feature.mutant_value
else:
new_values = list(feature_list)
new_values[index_to_mutate] = mutant_feature.mutant_value
del feature_list[:]
feature_list.extend(new_values)
mutant_examples.append(copied_example)
except (ValueError, IndexError):
# If the mutant value can't be set, still add the example to the
# mutant_example even though no change was made. This is necessary to
# allow for computation of global PD plots when not all examples have
# the same number of feature values for a feature.
mutant_examples.append(copied_example)
return mutant_features, mutant_examples
|
Return a list of `MutantFeatureValue`s and a list of mutant Examples.
Args:
example_protos: The examples to mutate.
original_feature: A `OriginalFeatureList` that encapsulates the feature to
mutate.
index_to_mutate: The index of the int64_list or float_list to mutate.
viz_params: A `VizParams` object that contains the UI state of the request.
Returns:
A list of `MutantFeatureValue`s and a list of mutant examples.
|
def setup(self, services):
"""Service setup."""
super(SchedulerService, self).setup(services)
# Register filesystem event handlers on an FSEventService instance.
self._fs_event_service.register_all_files_handler(self._enqueue_fs_event)
# N.B. We compute the invalidating fileset eagerly at launch with an assumption that files
# that exist at startup are the only ones that can affect the running daemon.
if self._invalidation_globs:
self._invalidating_snapshot = self._get_snapshot()
self._invalidating_files = self._invalidating_snapshot.files
self._logger.info('watching invalidating files: {}'.format(self._invalidating_files))
if self._pantsd_pidfile:
self._fs_event_service.register_pidfile_handler(self._pantsd_pidfile, self._enqueue_fs_event)
|
Service setup.
|
def imagetransformer_base_8l_8h_big_cond_dr03_dan():
"""big 1d model for conditional image generation.2.99 on cifar10."""
hparams = imagetransformer_sep_channels_8l()
hparams.block_width = 256
hparams.block_length = 256
hparams.hidden_size = 512
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 4
hparams.max_length = 3075
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.num_decoder_layers = 8
hparams.layer_prepostprocess_dropout = 0.3
return hparams
|
Big 1d model for conditional image generation. 2.99 on cifar10.
|
def make_processitem_arguments(arguments, condition='contains', negate=False, preserve_case=False):
"""
Create a node for ProcessItem/arguments
    :return: An IndicatorItem represented as an Element node
"""
document = 'ProcessItem'
search = 'ProcessItem/arguments'
content_type = 'string'
content = arguments
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
negate=negate, preserve_case=preserve_case)
return ii_node
|
Create a node for ProcessItem/arguments
:return: An IndicatorItem represented as an Element node
|
def normalize_url(url):
"""
Returns the given URL with all query keys properly escaped.
Args:
url (str): The URL to normalize.
Returns:
str: The normalized URL.
"""
uri = urlparse(url)
query = uri.query or ""
pairs = parse_qsl(query)
    decoded_pairs = [(unquote(key), value) for key, value in pairs]
    # urlencode() escapes keys and values itself; quoting the keys first
    # would double-encode them (e.g. "a b" would become "a%2520b").
    normalized_query = urlencode(decoded_pairs)
return ParseResult(
scheme=uri.scheme,
netloc=uri.netloc,
path=uri.path,
params=uri.params,
query=normalized_query,
fragment=uri.fragment).geturl()
|
Returns the given URL with all query keys properly escaped.
Args:
url (str): The URL to normalize.
Returns:
str: The normalized URL.
|
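A quick usage sketch with an illustrative URL (with a single round of escaping, a space in a query key comes back as `+`):

```python
normalize_url('https://example.com/search?a b=1&q=x')
# -> 'https://example.com/search?a+b=1&q=x'
```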
def get_interfaces(self):
"""
Return a list of sham.network.interfaces.NetworkInterface
describing all the interfaces this VM has
"""
interfaces = self.xml.find('devices').iter('interface')
iobjs = []
for interface in interfaces:
_type = interface.attrib['type']
mac = interface.find('mac').attrib['address']
source = interface.find('source').attrib[_type]
model = interface.find('model').attrib['type']
iobjs.append(NetworkInterface(_type, mac, source, model))
return iobjs
|
Return a list of sham.network.interfaces.NetworkInterface
describing all the interfaces this VM has
|
def get_checksum(self, encoder=base64.b64encode, hasher=hashlib.sha256):
"""Return the b64 encoded sha256 checksum of the archive."""
assert self._closed, "Archive not closed"
with open(self._temp_archive_file.name, 'rb') as fh:
return encoder(checksum(fh, hasher())).decode('ascii')
|
Return the b64 encoded sha256 checksum of the archive.
|
def render(self, template_name, __data=None, **kw):
        '''Render the named template with the given data and
        return the result as a string.'''
return self.template.render(template_name,
**self._vars(__data, **kw))
|
Render the named template with the given data and
return the result as a string.
|
def _group_by_sample_and_batch(samples):
"""Group samples split by QC method back one per sample-batch.
"""
out = collections.defaultdict(list)
for data in samples:
out[(dd.get_sample_name(data), dd.get_align_bam(data), tuple(_get_batches(data)))].append(data)
return [xs[0] for xs in out.values()]
|
Group samples that were split by QC method back to one per sample-batch.
|
def add_file(self, **args):
'''
Adds a file's information to the set of files to be
published in this dataset.
:param file_name: Mandatory. The file name (string).
This information will simply be included in the
PID record, but not used for anything.
:param file_handle: Mandatory. The handle (PID) of
this file (string). It is included in the file's netcdf
header. It must bear the prefix that this library
(or rather, the consuming servlet that will consume
this library's requests), has write access to.
:param file_size: Mandatory. The file size (as string or
integer. Will be transformed to integer). This
information will be included in the handle record
and used for consistency checks during republications
of files with the same handle.
:param checksum: Mandatory. The file's checksum. This
information will be included in the handle record
and used for consistency checks during republications
of files with the same handle.
:param checksum_type: Mandatory. The checksum type/method
(string), e.g. "MD5" or "SHA256". This information will
be included in the handle record and used for consistency
checks during republications of files with the same handle.
:param publish_path: Mandatory. The THREDDS publish path as
a string. This is part of the URL for accessing the file,
which will be part of the handle record. It will not be
accessed, neither by the library nor by the consumer.
The URL consists of the dataset's "data_node", the dataset's
"thredds_service_path", and this "publish_path". Redundant
slashes are removed. If the URL does not start with "http",
"http://" is added.
:param file_version: Mandatory. Any string. File versions
are not managed in the PID. This information will simply be
included in the PID record, but not used for any reasoning.
'''
# Check if allowed:
self.__check_if_adding_files_allowed_right_now()
# Check if args ok:
mandatory_args = ['file_name', 'file_handle', 'file_size',
'checksum', 'publish_path', 'checksum_type',
'file_version']
utils.check_presence_of_mandatory_args(args, mandatory_args)
self.__enforce_integer_file_size(args)
self.__enforce_string_file_version(args)
# Add file:
self.__check_and_correct_handle_syntax(args)
self.__add_file(**args)
|
Adds a file's information to the set of files to be
published in this dataset.
:param file_name: Mandatory. The file name (string).
This information will simply be included in the
PID record, but not used for anything.
:param file_handle: Mandatory. The handle (PID) of
this file (string). It is included in the file's netcdf
header. It must bear the prefix that this library
(or rather, the consuming servlet that will consume
this library's requests), has write access to.
:param file_size: Mandatory. The file size (as string or
integer. Will be transformed to integer). This
information will be included in the handle record
and used for consistency checks during republications
of files with the same handle.
:param checksum: Mandatory. The file's checksum. This
information will be included in the handle record
and used for consistency checks during republications
of files with the same handle.
:param checksum_type: Mandatory. The checksum type/method
(string), e.g. "MD5" or "SHA256". This information will
be included in the handle record and used for consistency
checks during republications of files with the same handle.
:param publish_path: Mandatory. The THREDDS publish path as
a string. This is part of the URL for accessing the file,
which will be part of the handle record. It will not be
    accessed by either the library or the consumer.
The URL consists of the dataset's "data_node", the dataset's
"thredds_service_path", and this "publish_path". Redundant
slashes are removed. If the URL does not start with "http",
"http://" is added.
:param file_version: Mandatory. Any string. File versions
are not managed in the PID. This information will simply be
included in the PID record, but not used for any reasoning.
|
def createAllShaders(self):
"Purpose: Creates all the shaders used by HelloVR SDL"
self.m_unSceneProgramID = self.compileGLShader(
"Scene",
# Vertex Shader
dedent("""\
#version 410
uniform mat4 matrix;
layout(location = 0) in vec4 position;
layout(location = 1) in vec2 v2UVcoordsIn;
layout(location = 2) in vec3 v3NormalIn;
out vec2 v2UVcoords;
void main()
{
v2UVcoords = v2UVcoordsIn;
gl_Position = matrix * position;
}
"""),
# Fragment Shader
dedent("""\
#version 410 core
uniform sampler2D mytexture;
in vec2 v2UVcoords;
out vec4 outputColor;
void main()
{
outputColor = texture(mytexture, v2UVcoords);
}
""")
)
self.m_nSceneMatrixLocation = glGetUniformLocation( self.m_unSceneProgramID, "matrix" )
if self.m_nSceneMatrixLocation == -1:
dprintf( "Unable to find matrix uniform in scene shader\n" )
return False
self.m_unControllerTransformProgramID = self.compileGLShader(
"Controller",
# vertex shader
dedent("""\
#version 410
uniform mat4 matrix;
layout(location = 0) in vec4 position;
layout(location = 1) in vec3 v3ColorIn;
out vec4 v4Color;
void main()
{
v4Color.xyz = v3ColorIn;
v4Color.a = 1.0;
gl_Position = matrix * position;
}
"""),
# fragment shader
dedent("""\
#version 410
in vec4 v4Color;
out vec4 outputColor;
void main()
{
outputColor = v4Color;
}
""") )
self.m_nControllerMatrixLocation = glGetUniformLocation( self.m_unControllerTransformProgramID, "matrix" )
if self.m_nControllerMatrixLocation == -1:
dprintf( "Unable to find matrix uniform in controller shader\n" )
return False
self.m_unRenderModelProgramID = self.compileGLShader(
"render model",
# vertex shader
dedent("""\
#version 410
uniform mat4 matrix;
layout(location = 0) in vec4 position;
layout(location = 1) in vec3 v3NormalIn;
layout(location = 2) in vec2 v2TexCoordsIn;
out vec2 v2TexCoord;
void main()
{
v2TexCoord = v2TexCoordsIn;
gl_Position = matrix * vec4(position.xyz, 1);
}
"""),
#fragment shader
dedent("""\
#version 410 core
uniform sampler2D diffuse;
in vec2 v2TexCoord;
out vec4 outputColor;
void main()
{
outputColor = texture( diffuse, v2TexCoord);
}
""") )
self.m_nRenderModelMatrixLocation = glGetUniformLocation( self.m_unRenderModelProgramID, "matrix" )
if self.m_nRenderModelMatrixLocation == -1:
dprintf( "Unable to find matrix uniform in render model shader\n" )
return False
self.m_unLensProgramID = self.compileGLShader(
"Distortion",
# vertex shader
dedent("""\
#version 410 core
layout(location = 0) in vec4 position;
layout(location = 1) in vec2 v2UVredIn;
layout(location = 2) in vec2 v2UVGreenIn;
layout(location = 3) in vec2 v2UVblueIn;
noperspective out vec2 v2UVred;
noperspective out vec2 v2UVgreen;
noperspective out vec2 v2UVblue;
void main()
{
v2UVred = v2UVredIn;
v2UVgreen = v2UVGreenIn;
v2UVblue = v2UVblueIn;
gl_Position = position;
}
"""),
# fragment shader
dedent("""\
#version 410 core
uniform sampler2D mytexture;
noperspective in vec2 v2UVred;
noperspective in vec2 v2UVgreen;
noperspective in vec2 v2UVblue;
out vec4 outputColor;
void main()
{
float fBoundsCheck = (
(dot( vec2( lessThan( v2UVgreen.xy, vec2(0.05, 0.05)) ), vec2(1.0, 1.0))
+ dot( vec2( greaterThan( v2UVgreen.xy, vec2( 0.95, 0.95)) ), vec2(1.0, 1.0)))
);
if( fBoundsCheck > 1.0 ) {
outputColor = vec4( 0, 0, 0, 1.0 );
}
else {
float red = texture(mytexture, v2UVred).x;
float green = texture(mytexture, v2UVgreen).y;
float blue = texture(mytexture, v2UVblue).z;
outputColor = vec4( red, green, blue, 1.0 );
}
}
""") )
return self.m_unSceneProgramID != 0 and self.m_unControllerTransformProgramID != 0 and self.m_unRenderModelProgramID != 0 and self.m_unLensProgramID != 0
|
Purpose: Creates all the shaders used by HelloVR SDL
|
def collect(self):
"""
Create some concurrent workers that process the tasks simultaneously.
"""
collected = super(Command, self).collect()
if self.faster:
self.worker_spawn_method()
self.post_processor()
return collected
|
Create some concurrent workers that process the tasks simultaneously.
|
def generate_hash(filepath):
"""Public function that reads a local file and generates a SHA256 hash digest for it"""
fr = FileReader(filepath)
data = fr.read_bin()
return _calculate_sha256(data)
|
Public function that reads a local file and generates a SHA256 hash digest for it
|
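A quick usage sketch (the path is a placeholder; the digest format is whatever `_calculate_sha256` produces):

```python
digest = generate_hash('/tmp/example.bin')
print(digest)  # SHA-256 digest of the file contents
```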
def get_portchannel_info_by_intf_output_lacp_receive_machine_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_portchannel_info_by_intf = ET.Element("get_portchannel_info_by_intf")
config = get_portchannel_info_by_intf
output = ET.SubElement(get_portchannel_info_by_intf, "output")
lacp = ET.SubElement(output, "lacp")
receive_machine_state = ET.SubElement(lacp, "receive-machine-state")
receive_machine_state.text = kwargs.pop('receive_machine_state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def setStyles(self, styleUpdatesDict):
'''
setStyles - Sets one or more style params.
This all happens in one shot, so it is much much faster than calling setStyle for every value.
To remove a style, set its value to empty string.
When all styles are removed, the "style" attribute will be nullified.
@param styleUpdatesDict - Dictionary of attribute : value styles.
@return - String of current value of "style" after change is made.
'''
setStyleMethod = self.setStyle
for newName, newValue in styleUpdatesDict.items():
setStyleMethod(newName, newValue)
return self.style
|
setStyles - Sets one or more style params.
This all happens in one shot, so it is much much faster than calling setStyle for every value.
To remove a style, set its value to empty string.
When all styles are removed, the "style" attribute will be nullified.
@param styleUpdatesDict - Dictionary of attribute : value styles.
@return - String of current value of "style" after change is made.
|
def calculate(self, batch_info):
""" Calculate value of a metric """
value = self._value_function(batch_info)
self.buffer += value
|
Calculate value of a metric
|
def get_event(self, *etypes, timeout=None):
"""
Return a single event object or block until an event is
received and return it.
- etypes(str): If defined, Slack event type(s) not matching
the filter will be ignored. See https://api.slack.com/events for
a listing of valid event types.
- timeout(int): Max time, in seconds, to block waiting for new event
"""
self._validate_etypes(*etypes)
start = time.time()
e = self._eventq.get(timeout=timeout)
if isinstance(e, Exception):
raise e
self._stats['events_recieved'] += 1
if etypes and e.type not in etypes:
if timeout:
timeout -= time.time() - start
log.debug('ignoring filtered event: {}'.format(e.json))
self._stats['events_dropped'] += 1
return self.get_event(*etypes, timeout=timeout)
return e
|
Return a single event object or block until an event is
received and return it.
- etypes(str): If defined, Slack event type(s) not matching
the filter will be ignored. See https://api.slack.com/events for
a listing of valid event types.
- timeout(int): Max time, in seconds, to block waiting for new event
|
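A hypothetical usage sketch, assuming `client` is a connected instance:

```python
# Block for up to 10 seconds waiting for the next 'message' event;
# events of other types are dropped and the wait continues.
event = client.get_event('message', timeout=10)
print(event.type)
```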
def qteKillMiniApplet(self):
"""
Remove the mini applet.
If a different applet is to be restored/focused then call
``qteMakeAppletActive`` for that applet *after* calling this
method.
|Args|
* **None**
|Returns|
* **None**
|Raises|
* **None**
"""
# Sanity check: is the handle valid?
if self._qteMiniApplet is None:
return
# Sanity check: is it really a mini applet?
if not self.qteIsMiniApplet(self._qteMiniApplet):
msg = ('Mini applet does not have its mini applet flag set.'
' Ignored.')
self.qteLogger.warning(msg)
if self._qteMiniApplet not in self._qteAppletList:
# Something is wrong because the mini applet is not part
# of the applet list.
msg = 'Custom mini applet not in applet list --> Bug.'
self.qteLogger.warning(msg)
else:
# Inform the mini applet that it is about to be killed.
try:
self._qteMiniApplet.qteToBeKilled()
except Exception:
msg = 'qteToBeKilledRoutine is faulty'
self.qteLogger.exception(msg, exc_info=True, stack_info=True)
        # Shorthand for the calling window.
win = self._qteMiniApplet._qteCallingWindow
# We need to move the focus from the mini applet back to a
# regular applet. Therefore, first look for the next
# visible applet in the current window (ie. the last one
# that was made active).
app = self.qteNextApplet(windowObj=win)
if app is not None:
# Found another (visible or invisible) applet --> make
# it active/visible.
self.qteMakeAppletActive(app)
else:
# No visible applet available in this window --> look
# for an invisible one.
app = self.qteNextApplet(skipInvisible=False, skipVisible=True)
if app is not None:
# Found an invisible applet --> make it
# active/visible.
self.qteMakeAppletActive(app)
else:
# There is no other visible applet in this window.
# The focus manager will therefore make a new applet
# active.
self._qteActiveApplet = None
self._qteAppletList.remove(self._qteMiniApplet)
# Close the mini applet applet and schedule it for deletion.
self._qteMiniApplet.close()
self._qteMiniApplet.deleteLater()
# Clear the handle to the mini applet.
self._qteMiniApplet = None
|
Remove the mini applet.
If a different applet is to be restored/focused then call
``qteMakeAppletActive`` for that applet *after* calling this
method.
|Args|
* **None**
|Returns|
* **None**
|Raises|
* **None**
|
def cdf_single(z, N, normalization, dH=1, dK=3):
"""Cumulative distribution for the Lomb-Scargle periodogram
Compute the expected cumulative distribution of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
the periodogram value
N : int
the number of data points from which the periodogram was computed
normalization : string
The periodogram normalization. Must be one of
['standard', 'model', 'log', 'psd']
dH, dK : integers (optional)
The number of parameters in the null hypothesis and the model
Returns
-------
cdf : np.ndarray
The expected cumulative distribution function
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
return 1 - fap_single(z, N, normalization=normalization, dH=dH, dK=dK)
|
Cumulative distribution for the Lomb-Scargle periodogram
Compute the expected cumulative distribution of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
the periodogram value
N : int
the number of data points from which the periodogram was computed
normalization : string
The periodogram normalization. Must be one of
['standard', 'model', 'log', 'psd']
dH, dK : integers (optional)
The number of parameters in the null hypothesis and the model
Returns
-------
cdf : np.ndarray
The expected cumulative distribution function
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
|
def update(self, resource, id_, updates):
"""Update document in index."""
args = self._es_args(resource, refresh=True)
if self._get_retry_on_conflict():
args['retry_on_conflict'] = self._get_retry_on_conflict()
updates.pop('_id', None)
updates.pop('_type', None)
self._update_parent_args(resource, args, updates)
return self.elastic(resource).update(id=id_, body={'doc': updates}, **args)
|
Update document in index.
|
def waitForVMState(rh, userid, desiredState, maxQueries=90, sleepSecs=5):
"""
Wait for the virtual machine to go into the indicated state.
Input:
Request Handle
userid whose state is to be monitored
Desired state, 'on' or 'off', case sensitive
Maximum attempts to wait for desired state before giving up
Sleep duration between waits
Output:
Dictionary containing the following:
overallRC - overall return code, 0: success, non-zero: failure
rc - RC returned from SMCLI if overallRC = 0.
rs - RS returned from SMCLI if overallRC = 0.
Note:
"""
rh.printSysLog("Enter vmUtils.waitForVMState, userid: " + userid +
" state: " + desiredState +
" maxWait: " + str(maxQueries) +
" sleepSecs: " + str(sleepSecs))
results = {}
cmd = ["sudo", "/sbin/vmcp", "query", "user", userid]
strCmd = " ".join(cmd)
stateFnd = False
for i in range(1, maxQueries + 1):
rh.printSysLog("Invoking: " + strCmd)
try:
out = subprocess.check_output(
cmd,
close_fds=True,
stderr=subprocess.STDOUT)
if isinstance(out, bytes):
out = bytes.decode(out)
if desiredState == 'on':
stateFnd = True
break
except CalledProcessError as e:
            match = re.search(r'(^HCP\w\w\w045E|^HCP\w\w\w361E)', e.output)
if match:
# Logged off
if desiredState == 'off':
stateFnd = True
break
else:
# Abnormal failure
out = e.output
rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
e.returncode, out))
results = msgs.msg['0415'][0]
results['rs'] = e.returncode
break
except Exception as e:
# All other exceptions.
rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
type(e).__name__, str(e)))
results = msgs.msg['0421'][0]
if i < maxQueries:
# Sleep a bit before looping.
time.sleep(sleepSecs)
if stateFnd is True:
results = {
'overallRC': 0,
'rc': 0,
'rs': 0,
}
else:
maxWait = maxQueries * sleepSecs
rh.printLn("ES", msgs.msg['0414'][1] % (modId, userid,
desiredState, maxWait))
results = msgs.msg['0414'][0]
rh.printSysLog("Exit vmUtils.waitForVMState, rc: " +
str(results['overallRC']))
return results
|
Wait for the virtual machine to go into the indicated state.
Input:
Request Handle
userid whose state is to be monitored
Desired state, 'on' or 'off', case sensitive
Maximum attempts to wait for desired state before giving up
Sleep duration between waits
Output:
Dictionary containing the following:
overallRC - overall return code, 0: success, non-zero: failure
rc - RC returned from SMCLI if overallRC = 0.
rs - RS returned from SMCLI if overallRC = 0.
Note:
|
def loads(self, string):
"Decompress the passed-in compact script and return the result."
script_class = self.get_script_class()
script = self._load(BytesIO(string), self._protocol, self._version)
return script_class(script)
|
Decompress the passed-in compact script and return the result.
|
def ProgChunks(list_, chunksize, nInput=None, **kwargs):
"""
    Yields an iterator in chunks and computes progress
Progress version of ut.ichunks
Args:
list_ (list):
chunksize (?):
nInput (None): (default = None)
Kwargs:
length, freq
Returns:
ProgressIter: progiter_
CommandLine:
python -m utool.util_progress ProgChunks --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_progress import * # NOQA
>>> import utool as ut
>>> list_ = range(100)
>>> chunksize = 10
>>> nInput = None
>>> progiter_ = ProgChunks(list_, chunksize, nInput)
>>> iter_ = iter(progiter_)
>>> chunk = six.next(iter_)
>>> assert len(chunk) == 10
>>> rest = ut.flatten(list(progiter_))
>>> assert len(rest) == 90
"""
if nInput is None:
nInput = len(list_)
n_chunks = get_num_chunks(nInput, chunksize)
kwargs['length'] = n_chunks
if 'freq' not in kwargs:
kwargs['freq'] = 1
chunk_iter = util_iter.ichunks(list_, chunksize)
progiter_ = ProgressIter(chunk_iter, **kwargs)
return progiter_
|
Yields an iterator in chunks and computes progress
Progress version of ut.ichunks
Args:
list_ (list):
chunksize (?):
nInput (None): (default = None)
Kwargs:
length, freq
Returns:
ProgressIter: progiter_
CommandLine:
python -m utool.util_progress ProgChunks --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_progress import * # NOQA
>>> import utool as ut
>>> list_ = range(100)
>>> chunksize = 10
>>> nInput = None
>>> progiter_ = ProgChunks(list_, chunksize, nInput)
>>> iter_ = iter(progiter_)
>>> chunk = six.next(iter_)
>>> assert len(chunk) == 10
>>> rest = ut.flatten(list(progiter_))
>>> assert len(rest) == 90
|
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):
"""Strided 2-D convolution with explicit padding."""
# The padding is consistent and is based only on `kernel_size`, not on the
# dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
inputs_for_logging = inputs
if strides > 1:
inputs = fixed_padding(inputs, kernel_size, data_format)
outputs = tf.layers.conv2d(
inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,
kernel_initializer=tf.variance_scaling_initializer(
distribution="truncated_normal"),
data_format=data_format)
resnet_log_helper.log_conv2d(
input_tensor=inputs_for_logging, output_tensor=outputs, stride=strides,
filters=filters, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
return outputs
|
Strided 2-D convolution with explicit padding.
|
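The `fixed_padding` helper called above is not shown in this snippet; a minimal sketch consistent with its usage here (and with the standard ResNet reference implementation) pads the spatial dimensions by a total of `kernel_size - 1`, independent of input size:

```python
import tensorflow as tf

def fixed_padding(inputs, kernel_size, data_format):
  """Pad spatial dims by kernel_size - 1 in total, so a strided 'VALID'
  conv behaves like explicit 'SAME' padding."""
  pad_total = kernel_size - 1
  pad_beg = pad_total // 2
  pad_end = pad_total - pad_beg
  if data_format == 'channels_first':
    return tf.pad(inputs, [[0, 0], [0, 0],
                           [pad_beg, pad_end], [pad_beg, pad_end]])
  return tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
                         [pad_beg, pad_end], [0, 0]])
```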
def read_file(self, filename):
"""
Guess the filetype and read the file into row sets
"""
#print("Reading file", filename)
try:
fh = open(filename, 'rb')
table_set = any_tableset(fh) # guess the type...
    except Exception:
#traceback.print_exc()
# Cannot find the schema.
table_set = None
return table_set
|
Guess the filetype and read the file into row sets
|
def shell(command, **kwargs):
"""
Runs 'command' on the underlying shell and keeps the stdout and
stderr stream separate.
Returns [stdout, stderr, exitCode]
"""
b_stdoutflush = False
b_stderrflush = False
b_waitForChild = True
for key, val in kwargs.items():
if key == 'stdoutflush': b_stdoutflush = val
if key == 'stderrflush': b_stderrflush = val
if key == 'waitForChild': b_waitForChild = val
child = popen2.Popen3(command, 1) # capture stdout and stderr from command
child.tochild.close() # don't need to talk to child
outfile = child.fromchild
outfd = outfile.fileno()
errfile = child.childerr
errfd = errfile.fileno()
makeNonBlocking(outfd) # don't deadlock!
makeNonBlocking(errfd)
outdata = errdata = ''
outeof = erreof = 0
while b_waitForChild:
ready = select.select([outfd,errfd],[],[]) # wait for input
if outfd in ready[0]:
outchunk = outfile.read()
if b_stdoutflush: sys.stdout.write(outchunk)
if outchunk == '': outeof = 1
outdata = outdata + outchunk
if errfd in ready[0]:
errchunk = errfile.read()
if b_stderrflush: sys.stderr.write(errchunk)
if errchunk == '': erreof = 1
errdata = errdata + errchunk
if outeof and erreof: break
select.select([],[],[],.1) # give a little time for buffers to fill
err = child.wait()
return outdata, errdata, err
|
Runs 'command' on the underlying shell and keeps the stdout and
stderr stream separate.
Returns [stdout, stderr, exitCode]
|
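For reference, a minimal modern equivalent using `subprocess` (a sketch, not the source's implementation; it drops the incremental stdout/stderr flushing and the Python 2-only `popen2` module):

```python
import subprocess

def shell_subprocess(command):
    """Run `command` in a shell; return (stdout, stderr, exit_code)."""
    proc = subprocess.run(command, shell=True, capture_output=True, text=True)
    return proc.stdout, proc.stderr, proc.returncode
```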
def set_sgr_code(self, params):
""" Set attributes based on SGR (Select Graphic Rendition) codes.
Parameters
----------
params : sequence of ints
A list of SGR codes for one or more SGR commands. Usually this
sequence will have one element per command, although certain
            xterm-specific commands require multiple elements.
"""
# Always consume the first parameter.
if not params:
return
code = params.pop(0)
if code == 0:
self.reset_sgr()
elif code == 1:
if self.bold_text_enabled:
self.bold = True
else:
self.intensity = 1
elif code == 2:
self.intensity = 0
elif code == 3:
self.italic = True
elif code == 4:
self.underline = True
elif code == 22:
self.intensity = 0
self.bold = False
elif code == 23:
self.italic = False
elif code == 24:
self.underline = False
elif code >= 30 and code <= 37:
self.foreground_color = code - 30
elif code == 38 and params and params.pop(0) == 5:
# xterm-specific: 256 color support.
if params:
self.foreground_color = params.pop(0)
elif code == 39:
self.foreground_color = None
elif code >= 40 and code <= 47:
self.background_color = code - 40
elif code == 48 and params and params.pop(0) == 5:
# xterm-specific: 256 color support.
if params:
self.background_color = params.pop(0)
elif code == 49:
self.background_color = None
# Recurse with unconsumed parameters.
self.set_sgr_code(params)
|
Set attributes based on SGR (Select Graphic Rendition) codes.
Parameters
----------
params : sequence of ints
A list of SGR codes for one or more SGR commands. Usually this
sequence will have one element per command, although certain
xterm-specific commands require multiple elements.
|
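A small usage sketch, assuming `screen` is an instance of the class this method belongs to:

```python
# ESC[1;31m -> bold/high-intensity red foreground.
screen.set_sgr_code([1, 31])
# ESC[0m -> reset all attributes.
screen.set_sgr_code([0])
```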
def Negative(other_param, mode="invert", reroll_count_max=2):
"""
Converts another parameter's results to negative values.
Parameters
----------
other_param : imgaug.parameters.StochasticParameter
        Other parameter whose sampled values are to be
        modified.
mode : {'invert', 'reroll'}, optional
How to change the signs. Valid values are ``invert`` and ``reroll``.
``invert`` means that wrong signs are simply flipped.
``reroll`` means that all samples with wrong signs are sampled again,
optionally many times, until they randomly end up having the correct
sign.
reroll_count_max : int, optional
If `mode` is set to ``reroll``, this determines how often values may
be rerolled before giving up and simply flipping the sign (as in
``mode="invert"``). This shouldn't be set too high, as rerolling is
expensive.
Examples
--------
>>> param = Negative(Normal(0, 1), mode="reroll")
Generates a normal distribution that has only negative values.
"""
return ForceSign(
other_param=other_param,
positive=False,
mode=mode,
reroll_count_max=reroll_count_max
)
|
Converts another parameter's results to negative values.
Parameters
----------
other_param : imgaug.parameters.StochasticParameter
    Other parameter whose sampled values are to be
    modified.
mode : {'invert', 'reroll'}, optional
How to change the signs. Valid values are ``invert`` and ``reroll``.
``invert`` means that wrong signs are simply flipped.
``reroll`` means that all samples with wrong signs are sampled again,
optionally many times, until they randomly end up having the correct
sign.
reroll_count_max : int, optional
If `mode` is set to ``reroll``, this determines how often values may
be rerolled before giving up and simply flipping the sign (as in
``mode="invert"``). This shouldn't be set too high, as rerolling is
expensive.
Examples
--------
>>> param = Negative(Normal(0, 1), mode="reroll")
Generates a normal distribution that has only negative values.
|
def _send_command(self, command):
""" Send a command to the Chromecast on media channel. """
if self.status is None or self.status.media_session_id is None:
self.logger.warning(
"%s command requested but no session is active.",
command[MESSAGE_TYPE])
return
command['mediaSessionId'] = self.status.media_session_id
self.send_message(command, inc_session_id=True)
|
Send a command to the Chromecast on media channel.
|
def make_importfrom_alias(queue, body, context, name):
"""
Make an ast.alias node for the names list of an ast.ImportFrom.
Parameters
----------
queue : deque
Instruction Queue
body : list
Current body.
context : DecompilationContext
name : str
Expected name of the IMPORT_FROM node to be popped.
Returns
-------
alias : ast.alias
Side Effects
------------
Consumes IMPORT_FROM and STORE_NAME instructions from queue.
"""
import_from, store = queue.popleft(), queue.popleft()
expect(import_from, instrs.IMPORT_FROM, "after IMPORT_NAME")
    if import_from.arg != name:
raise DecompilationError(
"IMPORT_FROM name mismatch. Expected %r, but got %s." % (
name, import_from,
)
)
return ast.alias(
name=name,
asname=store.arg if store.arg != name else None,
)
|
Make an ast.alias node for the names list of an ast.ImportFrom.
Parameters
----------
queue : deque
Instruction Queue
body : list
Current body.
context : DecompilationContext
name : str
Expected name of the IMPORT_FROM node to be popped.
Returns
-------
alias : ast.alias
Side Effects
------------
Consumes IMPORT_FROM and STORE_NAME instructions from queue.
|
def cal(self, opttype, strike, exp1, exp2):
"""
Metrics for evaluating a calendar spread.
Parameters
------------
opttype : str ('call' or 'put')
Type of option on which to collect data.
strike : numeric
Strike price.
exp1 : date or date str (e.g. '2015-01-01')
Earlier expiration date.
exp2 : date or date str (e.g. '2015-01-01')
Later expiration date.
Returns
------------
metrics : DataFrame
Metrics for evaluating spread.
"""
assert pd.Timestamp(exp1) < pd.Timestamp(exp2)
_row1 = _relevant_rows(self.data, (strike, exp1, opttype,),
"No key for {} strike {} {}".format(exp1, strike, opttype))
_row2 = _relevant_rows(self.data, (strike, exp2, opttype,),
"No key for {} strike {} {}".format(exp2, strike, opttype))
_price1 = _getprice(_row1)
_price2 = _getprice(_row2)
_eq = _row1.loc[:, 'Underlying_Price'].values[0]
_qt = _row1.loc[:, 'Quote_Time'].values[0]
_index = ['Near', 'Far', 'Debit', 'Underlying_Price', 'Quote_Time']
_vals = np.array([_price1, _price2, _price2 - _price1, _eq, _qt])
return pd.DataFrame(_vals, index=_index, columns=['Value'])
|
Metrics for evaluating a calendar spread.
Parameters
------------
opttype : str ('call' or 'put')
Type of option on which to collect data.
strike : numeric
Strike price.
exp1 : date or date str (e.g. '2015-01-01')
Earlier expiration date.
exp2 : date or date str (e.g. '2015-01-01')
Later expiration date.
Returns
------------
metrics : DataFrame
Metrics for evaluating spread.
|
def embedding_lookup(self, x, means):
"""Compute nearest neighbors and loss for training the embeddings.
Args:
      x: Batch of encoder continuous latent states sliced/projected into
        shape [-1, num_blocks, block_dim].
      means: Embedding means.
    Returns:
      The nearest neighbor in one hot form, the nearest neighbor itself,
      the commitment loss, and the embedding training loss.
"""
x_means_hot = self.nearest_neighbor(x, means)
x_means_hot_flat = tf.reshape(
x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size])
x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means)
x_means = tf.transpose(x_means, [1, 0, 2])
q_loss = tf.reduce_mean(
tf.squared_difference(tf.stop_gradient(x), x_means))
e_loss = tf.reduce_mean(
tf.squared_difference(x, tf.stop_gradient(x_means)))
return x_means_hot, x_means, q_loss, e_loss
|
Compute nearest neighbors and loss for training the embeddings.
Args:
x: Batch of encoder continuous latent states sliced/projected into
shape [-1, num_blocks, block_dim].
means: Embedding means.
Returns:
The nearest neighbor in one hot form, the nearest neighbor itself,
the commitment loss, and the embedding training loss.
|
def parse(url_or_path, encoding=None, handler_class=DrillHandler):
"""
:param url_or_path: A file-like object, a filesystem path, a URL, or a string containing XML
:rtype: :class:`XmlElement`
"""
handler = handler_class()
parser = expat.ParserCreate(encoding)
parser.buffer_text = 1
parser.StartElementHandler = handler.start_element
parser.EndElementHandler = handler.end_element
parser.CharacterDataHandler = handler.characters
if isinstance(url_or_path, basestring):
if '://' in url_or_path[:20]:
with contextlib.closing(url_lib.urlopen(url_or_path)) as f:
parser.ParseFile(f)
elif url_or_path[:100].strip().startswith('<'):
if isinstance(url_or_path, unicode):
if encoding is None:
encoding = 'utf-8'
url_or_path = url_or_path.encode(encoding)
parser.Parse(url_or_path, True)
else:
with open(url_or_path, 'rb') as f:
parser.ParseFile(f)
elif PY3 and isinstance(url_or_path, bytes):
parser.ParseFile(bytes_io(url_or_path))
else:
parser.ParseFile(url_or_path)
return handler.root
|
:param url_or_path: A file-like object, a filesystem path, a URL, or a string containing XML
:rtype: :class:`XmlElement`
|
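A quick sketch of the accepted input forms (all values illustrative):

```python
root = parse('<books><book id="1">Drill</book></books>')  # raw XML string
# root = parse('catalog.xml')                             # filesystem path
# root = parse('http://example.com/feed.xml')             # URL
```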
def increment(self, counter_name, delta):
"""Increment counter value.
Args:
counter_name: counter name as String.
delta: increment delta as Integer.
Returns:
new counter value.
"""
current_value = self.counters.get(counter_name, 0)
new_value = current_value + delta
self.counters[counter_name] = new_value
return new_value
|
Increment counter value.
Args:
counter_name: counter name as String.
delta: increment delta as Integer.
Returns:
new counter value.
|
def route(self, path_regex, methods=['GET'], doc=True):
"""
Decorator to register a handler
Parameters:
* path_regex: Request path regex to match against for running the handler
* methods: HTTP methods to use this handler for
* doc: Add to internal doc structure
"""
def register_func(func):
"""
Decorator implementation
"""
if doc:
self.env['doc'].append({'url': path_regex, 'methods': ', '.join(methods), 'doc': func.__doc__})
for method in methods:
self._handlers[method].append((re.compile(path_regex), func))
return func # Return the original function
return register_func
|
Decorator to register a handler
Parameters:
* path_regex: Request path regex to match against for running the handler
* methods: HTTP methods to use this handler for
* doc: Add to internal doc structure
|
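A hypothetical registration using this decorator (the `app` instance and handler are placeholders):

```python
@app.route(r'^/hello$', methods=['GET'])
def hello(request):
    """Say hello."""
    return 'Hello, world!'
```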
def _handle_option_deprecations(options):
"""Issue appropriate warnings when deprecated options are present in the
options dictionary. Removes deprecated option key, value pairs if the
options dictionary is found to also have the renamed option."""
undeprecated_options = _CaseInsensitiveDictionary()
for key, value in iteritems(options):
optname = str(key).lower()
if optname in URI_OPTIONS_DEPRECATION_MAP:
renamed_key = URI_OPTIONS_DEPRECATION_MAP[optname]
if renamed_key.lower() in options:
warnings.warn("Deprecated option '%s' ignored in favor of "
"'%s'." % (str(key), renamed_key))
continue
warnings.warn("Option '%s' is deprecated, use '%s' instead." % (
str(key), renamed_key))
undeprecated_options[str(key)] = value
return undeprecated_options
|
Issue appropriate warnings when deprecated options are present in the
options dictionary. Removes deprecated option key, value pairs if the
options dictionary is found to also have the renamed option.
|
def waitStarted(self):
"""wait until name server is started."""
ns = None
while not ns:
try:
time.sleep(3)
ns = Pyro.naming.NameServerLocator(
identification=self.identification).getNS()
            except Pyro.errors.NamingError:
                pass
|
wait until name server is started.
|
def _list_fields(self):
"""
Get the current settings of the model. The keys depend on the type of
model.
Returns
-------
out : list
A list of fields that can be queried using the ``get`` method.
"""
response = self.__proxy__.list_fields()
return [s for s in response['value'] if not s.startswith("_")]
|
Get the current settings of the model. The keys depend on the type of
model.
Returns
-------
out : list
A list of fields that can be queried using the ``get`` method.
|
def mount_iso_image(self, image, image_name, ins_file_name):
"""
Upload an ISO image and associate it to this Partition
using the HMC operation 'Mount ISO Image'.
When the partition already has an ISO image associated,
the newly uploaded image replaces the current one.
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Partition Details" task.
Parameters:
image (:term:`byte string` or file-like object):
The content of the ISO image.
Images larger than 2GB cannot be specified as a Byte string; they
must be specified as a file-like object.
File-like objects must have opened the file in binary mode.
image_name (:term:`string`): The displayable name of the image.
This value must be a valid Linux file name without directories,
must not contain blanks, and must end with '.iso' in lower case.
This value will be shown in the 'boot-iso-image-name' property of
this partition.
ins_file_name (:term:`string`): The path name of the INS file within
the file system of the ISO image.
This value will be shown in the 'boot-iso-ins-file' property of
this partition.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
query_parms_str = '?image-name={}&ins-file-name={}'. \
format(quote(image_name, safe=''), quote(ins_file_name, safe=''))
self.manager.session.post(
self.uri + '/operations/mount-iso-image' + query_parms_str,
body=image)
|
Upload an ISO image and associate it to this Partition
using the HMC operation 'Mount ISO Image'.
When the partition already has an ISO image associated,
the newly uploaded image replaces the current one.
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Partition Details" task.
Parameters:
image (:term:`byte string` or file-like object):
The content of the ISO image.
Images larger than 2GB cannot be specified as a Byte string; they
must be specified as a file-like object.
File-like objects must have opened the file in binary mode.
image_name (:term:`string`): The displayable name of the image.
This value must be a valid Linux file name without directories,
must not contain blanks, and must end with '.iso' in lower case.
This value will be shown in the 'boot-iso-image-name' property of
this partition.
ins_file_name (:term:`string`): The path name of the INS file within
the file system of the ISO image.
This value will be shown in the 'boot-iso-ins-file' property of
this partition.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
|
def rename_scored_calls(self, change):
    """
    Change the names of scored calls. The input dictionary `change` has
    {<current name>: <new name>} format; a new name must not already exist.
Args:
change (dict): a dictionary of current name keys and new name values
Returns:
CellDataFrame: The CellDataFrame modified.
"""
output = self.copy()
output['scored_calls'] = output.apply(lambda x:
_dict_rename(x['scored_calls'],change)
,1)
return output
|
Change the names of scored calls. The input dictionary `change` has {<current name>: <new name>} format; a new name must not already exist.
Args:
change (dict): a dictionary of current name keys and new name values
Returns:
CellDataFrame: The CellDataFrame modified.
|
def get_graphs(self, run_key, debug=False):
"""Get the runtime GraphDef protos associated with a run key.
Args:
      run_key: A Session.run key.
      debug: Whether the debugger-decorated graph is to be retrieved.
Returns:
A `dict` mapping device name to `GraphDef` protos.
"""
graph_dict = (self._run_key_to_debug_graphs if debug else
self._run_key_to_original_graphs)
graph_wrappers = graph_dict.get(run_key, {})
graph_defs = dict()
for device_name, wrapper in graph_wrappers.items():
graph_defs[device_name] = wrapper.graph_def
return graph_defs
|
Get the runtime GraphDef protos associated with a run key.
Args:
run_key: A Session.run key.
debug: Whether the debugger-decorated graph is to be retrieved.
Returns:
A `dict` mapping device name to `GraphDef` protos.
|
def pixel_to_utm(row, column, transform):
""" Convert pixel coordinate to UTM coordinate given a transform
:param row: row pixel coordinate
:type row: int or float
:param column: column pixel coordinate
:type column: int or float
:param transform: georeferencing transform of the image, e.g. `(x_upper_left, res_x, 0, y_upper_left, 0, -res_y)`
:type transform: tuple or list
:return: east, north UTM coordinates
:rtype: float, float
"""
east = transform[0] + column * transform[1]
north = transform[3] + row * transform[5]
return east, north
|
Convert pixel coordinate to UTM coordinate given a transform
:param row: row pixel coordinate
:type row: int or float
:param column: column pixel coordinate
:type column: int or float
:param transform: georeferencing transform of the image, e.g. `(x_upper_left, res_x, 0, y_upper_left, 0, -res_y)`
:type transform: tuple or list
:return: east, north UTM coordinates
:rtype: float, float
|
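A worked example with an illustrative transform (10 m pixels, upper-left corner at easting 300000, northing 5000000):

```python
transform = (300000, 10, 0, 5000000, 0, -10)
east, north = pixel_to_utm(row=2, column=3, transform=transform)
# east  = 300000  + 3 * 10    = 300030
# north = 5000000 + 2 * (-10) = 4999980
```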
def sys_mmap_pgoff(self, address, size, prot, flags, fd, offset):
"""Wrapper for mmap2"""
return self.sys_mmap2(address, size, prot, flags, fd, offset)
|
Wrapper for mmap2
|
def get_domain(url):
"""
Get domain part of an url.
For example: https://www.python.org/doc/ -> https://www.python.org
"""
parse_result = urlparse(url)
domain = "{schema}://{netloc}".format(
schema=parse_result.scheme, netloc=parse_result.netloc)
return domain
|
Get domain part of an url.
For example: https://www.python.org/doc/ -> https://www.python.org
|
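Usage matches the docstring's example:

```python
>>> get_domain('https://www.python.org/doc/')
'https://www.python.org'
```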
def syllabifyTextgrid(isleDict, tg, wordTierName, phoneTierName,
skipLabelList=None, startT=None, stopT=None):
'''
Given a textgrid, syllabifies the phones in the textgrid
skipLabelList allows you to skip labels without generating warnings
(e.g. '', 'sp', etc.)
The textgrid must have a word tier and a phone tier.
Returns a textgrid with only two tiers containing syllable information
(syllabification of the phone tier and a tier marking word-stress).
'''
minT = tg.minTimestamp
maxT = tg.maxTimestamp
wordTier = tg.tierDict[wordTierName]
phoneTier = tg.tierDict[phoneTierName]
if skipLabelList is None:
skipLabelList = []
syllableEntryList = []
tonicSEntryList = []
tonicPEntryList = []
if startT is not None or stopT is not None:
if startT is None:
startT = minT
if stopT is None:
stopT = maxT
wordTier = wordTier.crop(startT, stopT, "truncated", False)
for start, stop, word in wordTier.entryList:
if word in skipLabelList:
continue
subPhoneTier = phoneTier.crop(start, stop, "strict", False)
# entry = (start, stop, phone)
phoneList = [entry[2] for entry in subPhoneTier.entryList
if entry[2] != '']
phoneList = [phoneList, ]
try:
sylTmp = pronunciationtools.findBestSyllabification(isleDict,
word,
phoneList)
except isletool.WordNotInISLE:
print("Word ('%s') not is isle -- skipping syllabification" % word)
continue
except (pronunciationtools.NullPronunciationError):
print("Word ('%s') has no provided pronunciation" % word)
continue
except AssertionError:
print("Unable to syllabify '%s'" % word)
continue
for syllabificationResultList in sylTmp:
stressI = syllabificationResultList[0]
stressJ = syllabificationResultList[1]
syllableList = syllabificationResultList[2]
stressedPhone = None
if stressI is not None and stressJ is not None:
stressedPhone = syllableList[stressI][stressJ]
syllableList[stressI][stressJ] += u"ˈ"
i = 0
# print(syllableList)
for k, syllable in enumerate(syllableList):
# Create the syllable tier entry
j = len(syllable)
stubEntryList = subPhoneTier.entryList[i:i + j]
i += j
# The whole syllable was deleted
if len(stubEntryList) == 0:
continue
syllableStart = stubEntryList[0][0]
syllableEnd = stubEntryList[-1][1]
label = "-".join([entry[2] for entry in stubEntryList])
syllableEntryList.append((syllableStart, syllableEnd, label))
# Create the tonic syllable tier entry
if k == stressI:
tonicSEntryList.append((syllableStart, syllableEnd, 'T'))
# Create the tonic phone tier entry
if k == stressI:
syllablePhoneTier = phoneTier.crop(syllableStart,
syllableEnd,
"strict", False)
phoneList = [entry for entry in syllablePhoneTier.entryList
if entry[2] != '']
justPhones = [phone for _, _, phone in phoneList]
cvList = pronunciationtools._prepPronunciation(justPhones)
try:
tmpStressJ = cvList.index('V')
except ValueError:
for char in [u'r', u'n', u'l']:
if char in cvList:
tmpStressJ = cvList.index(char)
break
phoneStart, phoneEnd = phoneList[tmpStressJ][:2]
tonicPEntryList.append((phoneStart, phoneEnd, 'T'))
# Create a textgrid with the two syllable-level tiers
syllableTier = tgio.IntervalTier('syllable', syllableEntryList,
minT, maxT)
tonicSTier = tgio.IntervalTier('tonicSyllable', tonicSEntryList,
minT, maxT)
tonicPTier = tgio.IntervalTier('tonicVowel', tonicPEntryList,
minT, maxT)
syllableTG = tgio.Textgrid()
syllableTG.addTier(syllableTier)
syllableTG.addTier(tonicSTier)
syllableTG.addTier(tonicPTier)
return syllableTG
|
Given a textgrid, syllabifies the phones in the textgrid
skipLabelList allows you to skip labels without generating warnings
(e.g. '', 'sp', etc.)
The textgrid must have a word tier and a phone tier.
Returns a textgrid with only two tiers containing syllable information
(syllabification of the phone tier and a tier marking word-stress).
|
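A hypothetical driver for the function above, assuming the pysle and praatio packages it builds on; the file paths and tier names are placeholders.
from praatio import tgio
from pysle import isletool

isleDict = isletool.LexicalTool('ISLEdict.txt')  # placeholder dictionary path
tg = tgio.openTextgrid('utterance.TextGrid')     # placeholder TextGrid path
sylTG = syllabifyTextgrid(isleDict, tg, 'word', 'phone',
                          skipLabelList=['', 'sp'])
sylTG.save('utterance_syllables.TextGrid')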
def get_comment(self, name):
"""
Banana banana
"""
comment = self.__comments.get(name)
if comment:
return comment
aliases = self.__get_aliases(name)
for alias in aliases:
comment = self.__comments.get(alias)
if comment:
return comment
return None
|
Banana banana
|
def formatted_command(self):
"""Build and return the formatted command for this `Link`.
This is exactly the command as called from the Unix command line.
"""
# FIXME, this isn't really great as it forces you to have all the arguments
command_template = self.command_template()
format_dict = self.args.copy()
for key, value in format_dict.items():
# protect whitespace
if isinstance(value, list):
outstr = ""
if key == 'args':
outkey = ""
else:
outkey = "--%s "
for lval in value:
outstr += ' '
outstr += outkey
outstr += lval
format_dict[key] = '"%s"' % outstr
elif isinstance(value, str) and value.find(' ') >= 0 and key != 'args':
format_dict[key] = '"%s"' % value
elif value is None:
format_dict[key] = 'none'
command = command_template.format(**format_dict)
return command
|
Build and return the formatted command for this `Link`.
This is exactly the command as called from the Unix command line.
|
def _clone_node(self) -> 'Tag':
"""Need to copy class, not tag.
So need to re-implement copy.
"""
clone = type(self)()
for attr in self.attributes:
clone.setAttribute(attr, self.getAttribute(attr))
for c in self.classList:
clone.addClass(c)
clone.style.update(self.style)
# TODO: should clone event listeners???
return clone
|
Need to copy class, not tag.
So need to re-implement copy.
|
def list_dvs(kwargs=None, call=None):
'''
List all the distributed virtual switches for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_dvs my-vmware-config
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_dvs function must be called with '
'-f or --function.'
)
return {'Distributed Virtual Switches': salt.utils.vmware.list_dvs(_get_si())}
|
List all the distributed virtual switches for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_dvs my-vmware-config
|
def all(guideids=None, filter=None, order=None):
'''
Fetch all guides.
:param iterable guideids: Only return Guides corresponding to these ids.
:param string filter: Only return guides of this type. Choices:
installation, repair, disassembly, teardown,
technique, maintenance.
:param string order: Instead of ordering by guideid, order alphabetically.
Choices: ASC, DESC.
:rtype: generator of :class:`pyfixit.guide.Guide` objects.
'''
parameters = []
if guideids:
parameters.append('guideids=%s' % ','.join(map(str, guideids)))
if filter:
parameters.append('filter=%s' % filter)
if order:
parameters.append('order=%s' % order)
parameters = '&'.join(parameters)
offset = 0
limit = 5 # Tune this to balance memory vs. frequent network trips.
guideJSONs = []
while True:
if not guideJSONs:
url = '%s/guides?offset=%s&limit=%s&%s' \
% (API_BASE_URL, offset, limit, parameters)
response = requests.get(url)
guideJSONs = response.json()
# Are we at the end of pagination?
if not guideJSONs:
return
offset += limit
yield Guide(guideJSONs.pop(0)['guideid'])
|
Fetch all guides.
:param iterable guideids: Only return Guides corresponding to these ids.
:param string filter: Only return guides of this type. Choices:
installation, repair, disassembly, teardown,
technique, maintenance.
:param string order: Instead of ordering by guideid, order alphabetically.
Choices: ASC, DESC.
:rtype: generator of :class:`pyfixit.guide.Guide` objects.
|
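A small usage sketch for the generator above, assuming the surrounding pyfixit module is importable.
from itertools import islice

# Print the first three repair guides, ordered alphabetically.
for guide in islice(all(filter='repair', order='ASC'), 3):
    print(guide)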
def get_provider(self, provider_name='default'):
"""Fetch provider with the name specified in Configuration file"""
try:
if self._providers is None:
self._providers = self._initialize_providers()
return self._providers[provider_name]
except KeyError:
raise AssertionError(f'No Provider registered with name {provider_name}')
|
Fetch provider with the name specified in Configuration file
|
def _validate_features(features, column_type_map, valid_types, label):
"""
Identify the subset of desired `features` that are valid for the Kmeans
model. A warning is emitted for each feature that is excluded.
Parameters
----------
features : list[str]
Desired feature names.
column_type_map : dict[str, type]
Dictionary mapping each column name to the type of values in the
column.
valid_types : list[type]
Exclude features whose type is not in this list.
label : str
Name of the row label column.
Returns
-------
valid_features : list[str]
Names of features to include in the model.
"""
if not isinstance(features, list):
raise TypeError("Input 'features' must be a list, if specified.")
if len(features) == 0:
raise ValueError("If specified, input 'features' must contain " +
"at least one column name.")
## Remove duplicates
num_original_features = len(features)
features = set(features)
if len(features) < num_original_features:
_logging.warning("Duplicates have been removed from the list of features")
## Remove the row label
if label in features:
features.remove(label)
_logging.warning("The row label has been removed from the list of features.")
## Check the type of each feature against the list of valid types
valid_features = []
for ftr in features:
if not isinstance(ftr, str):
_logging.warning("Feature '{}' excluded. ".format(ftr) +
"Features must be specified as strings " +
"corresponding to column names in the input dataset.")
elif ftr not in column_type_map.keys():
_logging.warning("Feature '{}' excluded because ".format(ftr) +
"it is not in the input dataset.")
elif column_type_map[ftr] not in valid_types:
_logging.warning("Feature '{}' excluded because of its type. ".format(ftr) +
"Kmeans features must be int, float, dict, or array.array type.")
else:
valid_features.append(ftr)
if len(valid_features) == 0:
raise _ToolkitError("All specified features have been excluded. " +
"Please specify valid features.")
return valid_features
|
Identify the subset of desired `features` that are valid for the Kmeans
model. A warning is emitted for each feature that is excluded.
Parameters
----------
features : list[str]
Desired feature names.
column_type_map : dict[str, type]
Dictionary mapping each column name to the type of values in the
column.
valid_types : list[type]
Exclude features whose type is not in this list.
label : str
Name of the row label column.
Returns
-------
valid_features : list[str]
Names of features to include in the model.
|
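A hypothetical call illustrating the validation rules above; the column names and types are invented.
import array

column_type_map = {'id': str, 'x': float, 'y': int, 'tags': list}
valid = _validate_features(['x', 'y', 'tags', 'x'], column_type_map,
                           [int, float, dict, array.array], label='id')
# valid holds 'x' and 'y' (in set order): the duplicate 'x' is dropped and
# 'tags' is excluded for its type, each exclusion with a logged warning.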
def _convert_reftype_to_jaeger_reftype(ref):
"""Convert opencensus reference types to jaeger reference types."""
if ref == link_module.Type.CHILD_LINKED_SPAN:
return jaeger.SpanRefType.CHILD_OF
if ref == link_module.Type.PARENT_LINKED_SPAN:
return jaeger.SpanRefType.FOLLOWS_FROM
return None
|
Convert opencensus reference types to jaeger reference types.
|
def resolve_aonly(self,tables_dict,table_ctor):
"circular depends on pgmock.Table. refactor."
for alias,selectx in self.aonly.items():
table = table_ctor(alias,infer_columns(selectx,tables_dict),None)
table.rows = run_select(selectx,tables_dict,table_ctor)
self.aonly[alias] = table
self.aonly_resolved = True
|
Circular dependency on pgmock.Table; refactor.
|
def os_application_version_set(package):
'''Set version of application for Juju 2.0 and later'''
application_version = get_upstream_version(package)
# NOTE(jamespage) if not able to figure out package version, fallback to
# openstack codename version detection.
if not application_version:
application_version_set(os_release(package))
else:
application_version_set(application_version)
|
Set version of application for Juju 2.0 and later
|
def plot_eq(fignum, DIblock, s):
"""
plots directions on eqarea projection
Parameters
__________
fignum : matplotlib figure number
DIblock : nested list of dec/inc pairs
s : specimen name
"""
# make the stereonet
plt.figure(num=fignum)
if len(DIblock) < 1:
return
# plt.clf()
if not isServer:
plt.figtext(.02, .01, version_num)
plot_net(fignum)
#
# put on the directions
#
plot_di(fignum, DIblock) # plot directions
plt.axis("equal")
plt.text(-1.1, 1.15, s)
plt.draw()
|
plots directions on eqarea projection
Parameters
__________
fignum : matplotlib figure number
DIblock : nested list of dec/inc pairs
s : specimen name
|
def version_check(self):
"""
Check if the version entry is in the proper format
"""
try:
version_info = self['Version']
except KeyError:
raise ValidateError('Config file has to have a Version section')
try:
float(version_info['version'])
except KeyError:
raise ValidateError("Version section has to have a 'version' entry")
except ValueError:
raise ValidateError('Version has to be a float.')
try:
version_info['name']
except KeyError:
raise ValidateError("Config file has to have a name")
return
|
Check if the version entry is in the proper format
|
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD. Using xrange
is recommended if the input represents a range for performance.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, xrange):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
# it's an empty iterator here but we need this line for triggering the
# logic of signal handling in FramedSerializer.load_stream, for instance,
# SpecialLengths.END_OF_DATA_SECTION in _read_with_length. Since
# FramedSerializer.load_stream produces a generator, the control should
# at least be in that function once. Here we do it by explicitly converting
# the empty iterator to a list, thus make sure worker reuse takes effect.
# See more details in SPARK-26549.
assert len(list(iterator)) == 0
return xrange(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
def reader_func(temp_filename):
return self._jvm.PythonRDD.readRDDFromFile(self._jsc, temp_filename, numSlices)
def createRDDServer():
return self._jvm.PythonParallelizeServer(self._jsc.sc(), numSlices)
jrdd = self._serialize_to_jvm(c, serializer, reader_func, createRDDServer)
return RDD(jrdd, self, serializer)
|
Distribute a local Python collection to form an RDD. Using xrange
is recommended if the input represents a range for performance.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
|
def pick(rest):
"Pick between a few options"
question = rest.strip()
choices = util.splitem(question)
if len(choices) == 1:
return "I can't pick if you give me only one choice!"
else:
pick = random.choice(choices)
certainty = random.sample(phrases.certainty_opts, 1)[0]
return "%s... %s %s" % (pick, certainty, pick)
|
Pick between a few options
|
def append(self, node):
"""To append a new child."""
if node.parent == self.key and not self.elapsed_time:
self.children.append(node)
else:
# Recursive call
for child in self.children:
if not child.elapsed_time:
child.append(node)
|
To append a new child.
|
def _email(name, *, allow_unverified=False):
"""
This decorator is used to turn a function into an email sending function!
The name parameter is the name of the email we're going to be sending (used to
locate the templates on the file system).
The allow_unverified kwarg flags whether we will send this email to an unverified
email or not. We generally do not want to do this, but some emails are important
enough or have special requirements that require it.
Functions that are decorated by this need to accept two positional arguments, the
first argument is the Pyramid request object, and the second argument is either
a single User, or a list of Users. These users represent the recipients of this
email. Additional keyword arguments are supported, but are not otherwise restricted.
Functions decorated by this must return a mapping of context variables that will
ultimately be returned, but which will also be used to render the templates for
the emails.
Thus this function can decorate functions with a signature like so:
def foo(
request: Request, user_or_users: Union[User, List[User]]
) -> Mapping[str, Any]:
...
Finally, if the email needs to be sent to an address *other* than the user's primary
email address, instead of a User object, a tuple of (User, Email) objects may be
used in place of a User object.
"""
def inner(fn):
@functools.wraps(fn)
def wrapper(request, user_or_users, **kwargs):
if isinstance(user_or_users, (list, set)):
recipients = user_or_users
else:
recipients = [user_or_users]
context = fn(request, user_or_users, **kwargs)
msg = EmailMessage.from_template(name, context, request=request)
for recipient in recipients:
if isinstance(recipient, tuple):
user, email = recipient
else:
user, email = recipient, None
_send_email_to_user(
request, user, msg, email=email, allow_unverified=allow_unverified
)
return context
return wrapper
return inner
|
This decorator is used to turn a function into an email sending function!
The name parameter is the name of the email we're going to be sending (used to
locate the templates on the file system).
The allow_unverified kwarg flags whether we will send this email to an unverified
email or not. We generally do not want to do this, but some emails are important
enough or have special requirements that require it.
Functions that are decorated by this need to accept two positional arguments, the
first argument is the Pyramid request object, and the second argument is either
a single User, or a list of Users. These users represent the recipients of this
email. Additional keyword arguments are supported, but are not otherwise restricted.
Functions decorated by this must return a mapping of context variables that will
ultimately be returned, but which will also be used to render the templates for
the emails.
Thus this function can decorate functions with a signature like so:
def foo(
request: Request, user_or_users: Union[User, List[User]]
) -> Mapping[str, Any]:
...
Finally, if the email needs to be sent to an address *other* than the user's primary
email address, instead of a User object, a tuple of (User, Email) objects may be
used in place of a User object.
|
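A hypothetical email function in the shape the decorator expects; the template name, context keys, and keyword argument are invented for illustration.
@_email("password-reset")
def send_password_reset_email(request, user, *, reset_url):
    # The returned mapping renders the "password-reset" templates and is
    # also handed back to the caller.
    return {"username": user.username, "reset_url": reset_url}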
def enabled(name,
skip_verify=False,
**kwargs):
'''
Ensure that the service is enabled on boot, only use this state if you
don't want to manage the running process. Remember that if you want to
enable a running service, use the enable: True option for the running
or dead function.
name
The name of the init or rc script used to manage the service
skip_verify
Skip verifying that the service is available before enabling it.
``True`` will skip the verification. The default is ``False``,
which will ensure the service is available before enabling it.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
ret.update(_enable(name, None, skip_verify=skip_verify, **kwargs))
return ret
|
Ensure that the service is enabled on boot, only use this state if you
don't want to manage the running process. Remember that if you want to
enable a running service, use the enable: True option for the running
or dead function.
name
The name of the init or rc script used to manage the service
skip_verify
Skip verifying that the service is available before enabling it.
``True`` will skip the verification. The default is ``False``,
which will ensure the service is available before enabling it.
|
def expand_param_list(self, paramlist):
""" expands the parameters list according to one of these schemes:
grid: every list item is combined with every other list item
list: the n-th items of the parameter lists are combined
"""
# for one single experiment, still wrap it in list
if type(paramlist) == types.DictType:
paramlist = [paramlist]
# get all options that are iterable and build all combinations (grid) or tuples (list)
iparamlist = []
for params in paramlist:
if ('experiment' in params and params['experiment'] == 'single'):
iparamlist.append(params)
else:
iterparams = [p for p in params if hasattr(params[p], '__iter__')]
if len(iterparams) > 0:
# write intermediate config file
self.mkdir(os.path.join(params['path'], params['name']))
self.write_config_file(params, os.path.join(params['path'], params['name']))
# create sub experiments (check if grid or list is requested)
if 'experiment' in params and params['experiment'] == 'list':
iterfunc = itertools.izip
elif ('experiment' not in params) or ('experiment' in params and params['experiment'] == 'grid'):
iterfunc = itertools.product
else:
raise SystemExit("unexpected value '%s' for parameter 'experiment'. Use 'grid', 'list' or 'single'."%params['experiment'])
for il in iterfunc(*[params[p] for p in iterparams]):
par = params.copy()
converted = str(zip(iterparams, map(convert_param_to_dirname, il)))
par['name'] = par['name'] + '/' + re.sub("[' \[\],()]", '', converted)
for i, ip in enumerate(iterparams):
par[ip] = il[i]
iparamlist.append(par)
else:
iparamlist.append(params)
return iparamlist
|
expands the parameters list according to one of these schemes:
grid: every list item is combined with every other list item
list: the n-th items of the parameter lists are combined
|
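A sketch of the grid expansion (Python 2, matching the izip/DictType usage above); the parameter names are invented, and note the method also writes an intermediate config file under path/name as a side effect.
params = {'name': 'exp', 'path': '/tmp', 'experiment': 'grid',
          'lr': [0.1, 0.01], 'layers': [2, 3]}
# expand_param_list([params]) yields four sub-experiments covering
# (lr=0.1, layers=2), (lr=0.1, layers=3),
# (lr=0.01, layers=2), (lr=0.01, layers=3),
# each with a 'name' derived from the parameter values.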
def auth_timeout(self):
"""Handle legacy authentication timeout.
[client only]"""
self.lock.acquire()
try:
self.__logger.debug("Timeout while waiting for jabber:iq:auth result")
if self._auth_methods_left:
self._auth_methods_left.pop(0)
finally:
self.lock.release()
|
Handle legacy authentication timeout.
[client only]
|
def fly(cls,
conf_path,
docname,
source,
maxdepth=1): # pragma: no cover
"""
Generate toctree directive for rst file.
:param conf_path: conf.py file absolute path
:param docname: the rst file relpath from conf.py directory.
:param source: rst content.
:param maxdepth: int, max toc tree depth.
"""
msg = ("``.. articles::`` directive is going to be deprecated. "
"use ``.. autodoctree`` instead.")
warnings.warn(msg, FutureWarning)
directive_pattern = ".. articles::"
if directive_pattern not in source:
return source
af = ArticleFolder(
dir_path=Path(Path(conf_path).parent, docname).parent.abspath)
toc_directive = af.toc_directive(maxdepth)
lines = list()
for line in source.split("\n"):
if directive_pattern in line.strip():
if line.strip().startswith(directive_pattern):
line = line.replace(directive_pattern, toc_directive, 1)
lines.append(line)
continue
lines.append(line)
return "\n".join(lines)
|
Generate toctree directive for rst file.
:param conf_path: conf.py file absolute path
:param docname: the rst file relpath from conf.py directory.
:param source: rst content.
:param maxdepth: int, max toc tree depth.
|
def get_tok(self, tok):
'''
Return the token data associated with the token, or an empty dict if
the token is not valid
'''
tdata = self.tokens["{0}.get_token".format(self.opts['eauth_tokens'])](self.opts, tok)
if not tdata:
return {}
rm_tok = False
if 'expire' not in tdata:
# invalid token, delete it!
rm_tok = True
if tdata.get('expire', 0) < time.time():
rm_tok = True
if rm_tok:
self.rm_token(tok)
return tdata
|
Return the token data associated with the token, or an empty dict if
the token is not valid
|
def get_response_structure(name):
"""
Returns the response structure for a known list of create context
responses.
:param name: The constant value above
:return: The response structure or None if unknown
"""
return {
CreateContextName.SMB2_CREATE_DURABLE_HANDLE_REQUEST:
SMB2CreateDurableHandleResponse(),
CreateContextName.SMB2_CREATE_DURABLE_HANDLE_RECONNECT:
SMB2CreateDurableHandleReconnect(),
CreateContextName.SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST:
SMB2CreateQueryMaximalAccessResponse(),
CreateContextName.SMB2_CREATE_REQUEST_LEASE:
SMB2CreateResponseLease(),
CreateContextName.SMB2_CREATE_QUERY_ON_DISK_ID:
SMB2CreateQueryOnDiskIDResponse(),
CreateContextName.SMB2_CREATE_REQUEST_LEASE_V2:
SMB2CreateResponseLeaseV2(),
CreateContextName.SMB2_CREATE_DURABLE_HANDLE_REQUEST_V2:
SMB2CreateDurableHandleResponseV2(),
CreateContextName.SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2:
SMB2CreateDurableHandleReconnectV2(),
CreateContextName.SMB2_CREATE_APP_INSTANCE_ID:
SMB2CreateAppInstanceId(),
CreateContextName.SMB2_CREATE_APP_INSTANCE_VERSION:
SMB2CreateAppInstanceVersion()
}.get(name, None)
|
Returns the response structure for a known list of create context
responses.
:param name: The constant value above
:return: The response structure or None if unknown
|
def trees_to_dataframe(self, fmap=''):
"""Parse a boosted tree model text dump into a pandas DataFrame structure.
This feature is only defined when the decision tree model is chosen as base
learner (`booster in {gbtree, dart}`). It is not defined for other base learner
types, such as linear learners (`booster=gblinear`).
Parameters
----------
fmap: str (optional)
The name of feature map file.
"""
# pylint: disable=too-many-locals
if not PANDAS_INSTALLED:
raise Exception(('pandas must be available to use this method. '
'Install pandas before calling again.'))
if getattr(self, 'booster', None) is not None and self.booster not in {'gbtree', 'dart'}:
raise ValueError('This method is not defined for Booster type {}'
.format(self.booster))
tree_ids = []
node_ids = []
fids = []
splits = []
y_directs = []
n_directs = []
missings = []
gains = []
covers = []
trees = self.get_dump(fmap, with_stats=True)
for i, tree in enumerate(trees):
for line in tree.split('\n'):
arr = line.split('[')
# Leaf node
if len(arr) == 1:
# Last element of line.split is an empty string
if arr == ['']:
continue
# parse string
parse = arr[0].split(':')
stats = re.split('=|,', parse[1])
# append to lists
tree_ids.append(i)
node_ids.append(int(re.findall(r'\b\d+\b', parse[0])[0]))
fids.append('Leaf')
splits.append(float('NAN'))
y_directs.append(float('NAN'))
n_directs.append(float('NAN'))
missings.append(float('NAN'))
gains.append(float(stats[1]))
covers.append(float(stats[3]))
# Not a Leaf Node
else:
# parse string
fid = arr[1].split(']')
parse = fid[0].split('<')
stats = re.split('=|,', fid[1])
# append to lists
tree_ids.append(i)
node_ids.append(int(re.findall(r'\b\d+\b', arr[0])[0]))
fids.append(parse[0])
splits.append(float(parse[1]))
str_i = str(i)
y_directs.append(str_i + '-' + stats[1])
n_directs.append(str_i + '-' + stats[3])
missings.append(str_i + '-' + stats[5])
gains.append(float(stats[7]))
covers.append(float(stats[9]))
ids = [str(t_id) + '-' + str(n_id) for t_id, n_id in zip(tree_ids, node_ids)]
df = DataFrame({'Tree': tree_ids, 'Node': node_ids, 'ID': ids,
'Feature': fids, 'Split': splits, 'Yes': y_directs,
'No': n_directs, 'Missing': missings, 'Gain': gains,
'Cover': covers})
if callable(getattr(df, 'sort_values', None)):
# pylint: disable=no-member
return df.sort_values(['Tree', 'Node']).reset_index(drop=True)
# pylint: disable=no-member
return df.sort(['Tree', 'Node']).reset_index(drop=True)
|
Parse a boosted tree model text dump into a pandas DataFrame structure.
This feature is only defined when the decision tree model is chosen as base
learner (`booster in {gbtree, dart}`). It is not defined for other base learner
types, such as linear learners (`booster=gblinear`).
Parameters
----------
fmap: str (optional)
The name of feature map file.
|
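A usage sketch, assuming an already trained xgboost Booster bst (for example from xgb.train); the column names follow the code above.
df = bst.trees_to_dataframe()
print(df[['Tree', 'Node', 'Feature', 'Split', 'Gain']].head())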
def get_token(self):
"""
Gets the authorization token
"""
payload = {'grant_type': 'client_credentials', 'client_id': self.client_id, 'client_secret': self.client_secret}
r = requests.post(OAUTH_ENDPOINT, data=json.dumps(payload), headers={'content-type': 'application/json'})
response = r.json()
if r.status_code != 200 and ERROR_KEY not in response:
raise GfycatClientError('Error fetching the OAUTH URL', r.status_code)
elif ERROR_KEY in response:
raise GfycatClientError(response[ERROR_KEY], r.status_code)
self.token_type = response['token_type']
self.access_token = response['access_token']
self.expires_in = response['expires_in']
self.expires_at = time.time() + self.expires_in - 5
self.headers = {'content-type': 'application/json','Authorization': self.token_type + ' ' + self.access_token}
|
Gets the authorization token
|
def _delete_element(name, element_type, data, server=None):
'''
Delete an element
'''
_api_delete('{0}/{1}'.format(element_type, quote(name, safe='')), data, server)
return name
|
Delete an element
|
def mask_from_embedding(emb):
"""Input embeddings -> padding mask.
We have hacked symbol_modality to return all-zero embeddings for padding.
Returns a mask with 0.0 in the padding positions and 1.0 elsewhere.
Args:
emb: a Tensor with shape [batch, width, height, depth].
Returns:
a 0.0/1.0 Tensor with shape [batch, width, height, 1].
"""
return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keepdims=True))
|
Input embeddings -> padding mask.
We have hacked symbol_modality to return all-zero embeddings for padding.
Returns a mask with 0.0 in the padding positions and 1.0 elsewhere.
Args:
emb: a Tensor with shape [batch, width, height, depth].
Returns:
a 0.0/1.0 Tensor with shape [batch, width, height, 1].
|
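A minimal sketch in TF1 style, matching the code above (weights_nonzero comes from the same module): batch 1, width 3, height 1, depth 2, with a padding position in the middle.
import tensorflow as tf

emb = tf.constant([[[[0.5, -1.0]],
                    [[0.0,  0.0]],   # padding: all-zero embedding
                    [[2.0,  0.1]]]])
mask = mask_from_embedding(emb)
# shape [1, 3, 1, 1]; values 1.0, 0.0, 1.0 along the width axis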
def run_loop(self):
"keep rendering until the user says quit"
self.running = True
event = SDL_Event()
try:
while self.running:
while SDL_PollEvent(ctypes.byref(event)) != 0:
f = self._sdl_event_handlers.get(event.type)
if f is not None:
f ( event )
self.render_scene()
except SdlAppQuit as e:
pass
|
keep rendering until the user says quit
|
def in6_iseui64(x):
"""
Return True if provided address has an interface identifier part
created in modified EUI-64 format (meaning it matches *::*:*ff:fe*:*).
Otherwise, False is returned. Address must be passed in printable
format.
"""
eui64 = inet_pton(socket.AF_INET6, '::ff:fe00:0')
x = in6_and(inet_pton(socket.AF_INET6, x), eui64)
return x == eui64
|
Return True if provided address has an interface identifier part
created in modified EUI-64 format (meaning it matches *::*:*ff:fe*:*).
Otherwise, False is returned. Address must be passed in printable
format.
|
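Two quick checks of the pattern described above: the first interface identifier embeds ff:fe in the modified EUI-64 position, the second does not.
print(in6_iseui64('fe80::211:22ff:fe33:4455'))  # True
print(in6_iseui64('fe80::1'))                   # False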
def get_degenerate_statements(self):
"""Get all degenerate BEL statements.
Stores the results of the query in self.degenerate_stmts.
"""
logger.info("Checking for 'degenerate' statements...\n")
# Get rules of type protein X -> activity Y
q_stmts = prefixes + """
SELECT ?stmt
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasSubject ?subj .
?stmt belvoc:hasObject ?obj .
{
{ ?stmt belvoc:hasRelationship belvoc:DirectlyIncreases . }
UNION
{ ?stmt belvoc:hasRelationship belvoc:DirectlyDecreases . }
}
{
{ ?subj a belvoc:ProteinAbundance . }
UNION
{ ?subj a belvoc:ModifiedProteinAbundance . }
}
?subj belvoc:hasConcept ?xName .
{
{
?obj a belvoc:ProteinAbundance .
?obj belvoc:hasConcept ?yName .
}
UNION
{
?obj a belvoc:ModifiedProteinAbundance .
?obj belvoc:hasChild ?proteinY .
?proteinY belvoc:hasConcept ?yName .
}
UNION
{
?obj a belvoc:AbundanceActivity .
?obj belvoc:hasChild ?objChild .
?objChild a belvoc:ProteinAbundance .
?objChild belvoc:hasConcept ?yName .
}
}
FILTER (?xName != ?yName)
}
"""
res_stmts = self.g.query(q_stmts)
logger.info("Protein -> Protein/Activity statements:")
logger.info("---------------------------------------")
for stmt in res_stmts:
stmt_str = strip_statement(stmt[0])
logger.info(stmt_str)
self.degenerate_stmts.append(stmt_str)
|
Get all degenerate BEL statements.
Stores the results of the query in self.degenerate_stmts.
|
def fetch(self, method, params=None):
"""
Fetch a URL.
"""
# Encode params if they exist
if params:
params = urllib.parse.urlencode(params, doseq=True).encode("utf-8")
content = self._make_request(
self.BASE_URI + method,
params,
)
# Convert its JSON to a Python dictionary and return
return json.loads(content.decode("utf-8"))
|
Fetch a URL.
|
def fit_zyz(target_gate):
"""
Tensorflow eager mode example. Given an arbitrary one-qubit gate, use
gradient descent to find corresponding parameters of a universal ZYZ
gate.
"""
assert bk.BACKEND == 'eager'
tf = bk.TL
tfe = bk.tfe
steps = 4000
dev = '/gpu:0' if bk.DEVICE == 'gpu' else '/cpu:0'
with tf.device(dev):
t = tfe.Variable(np.random.normal(size=[3]), name='t')
def loss_fn():
"""Loss"""
gate = qf.ZYZ(t[0], t[1], t[2])
ang = qf.fubini_study_angle(target_gate.vec, gate.vec)
return ang
loss_and_grads = tfe.implicit_value_and_gradients(loss_fn)
# opt = tf.train.GradientDescentOptimizer(learning_rate=0.005)
opt = tf.train.AdamOptimizer(learning_rate=0.001)
# train = opt.minimize(ang, var_list=[t])
for step in range(steps):
loss, grads_and_vars = loss_and_grads()
sys.stdout.write('\r')
sys.stdout.write("step: {:3d} loss: {:10.9f}".format(step,
loss.numpy()))
if loss < 0.0001:
break
opt.apply_gradients(grads_and_vars)
print()
return bk.evaluate(t)
|
Tensorflow eager mode example. Given an arbitrary one-qubit gate, use
gradient descent to find corresponding parameters of a universal ZYZ
gate.
|
def check_gap(xpub, api_key):
"""Call the 'v2/receive/checkgap' endpoint and returns the callback log
for a given callback URI with parameters.
:param str xpub: extended public key
:param str api_key: Blockchain.info API V2 key
:return: an int
"""
params = {'key': api_key, 'xpub': xpub}
resource = 'v2/receive/checkgap?' + util.urlencode(params)
resp = util.call_api(resource, base_url='https://api.blockchain.info/')
json_resp = json.loads(resp)
return json_resp['gap']
|
Call the 'v2/receive/checkgap' endpoint and return the address gap
for the given xpub.
:param str xpub: extended public key
:param str api_key: Blockchain.info API V2 key
:return: an int
|
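A usage sketch; the xpub and API key below are placeholders.
gap = check_gap('xpub6...', api_key='your-api-v2-key')  # placeholder credentials
print('Consecutive unused receive addresses:', gap)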
def query_all_issues(after):
"""Hits the github API for all closed issues after the given date, returns the data."""
page = count(1)
data = []
while True:
page_data = query_issues(next(page), after)
if not page_data:
break
data.extend(page_data)
return data
|
Hits the github API for all closed issues after the given date, returns the data.
|
def remove_filter_set(self, filter_name):
"""
Remove filter set by name
:param filter_name: str
"""
if filter_name in self._filter_sets:
del self._filter_sets[filter_name]
else:
raise ValueError('Unknown filter set name.')
|
Remove filter set by name
:param filter_name: str
|
def _call_multi(self, clients, command, *args):
""" Call multi """
responses, errors = {}, {}
for addr, client in clients.items():
res, err = self._call_single(client, command, *args)
responses[addr] = res
errors[addr] = err
return responses, errors
|
Call multi
|
def from_rotation_matrix(rot, nonorthogonal=True):
"""Convert input 3x3 rotation matrix to unit quaternion
By default, if scipy.linalg is available, this function uses
Bar-Itzhack's algorithm to allow for non-orthogonal matrices.
[J. Guidance, Vol. 23, No. 6, p. 1085 <http://dx.doi.org/10.2514/2.4654>]
This will almost certainly be quite a bit slower than simpler versions,
though it will be more robust to numerical errors in the rotation matrix.
Also note that Bar-Itzhack uses some pretty weird conventions. The last
component of the quaternion appears to represent the scalar, and the
quaternion itself is conjugated relative to the convention used
throughout this module.
If scipy.linalg is not available or if the optional
`nonorthogonal` parameter is set to `False`, this function falls
back to the possibly faster, but less robust, algorithm of Markley
[J. Guidance, Vol. 31, No. 2, p. 440
<http://dx.doi.org/10.2514/1.31730>].
Parameters
----------
rot: (...Nx3x3) float array
Each 3x3 matrix represents a rotation by multiplying (from the left)
a column vector to produce a rotated column vector. Note that this
input may actually have ndims>3; it is just assumed that the last
two dimensions have size 3, representing the matrix.
nonorthogonal: bool, optional
If scipy.linalg is available, use the more robust algorithm of
Bar-Itzhack. Default value is True.
Returns
-------
q: array of quaternions
Unit quaternions resulting in rotations corresponding to input
rotations. Output shape is rot.shape[:-2].
Raises
------
LinAlgError
If any of the eigenvalue solutions does not converge
"""
try:
from scipy import linalg
except ImportError:
linalg = False
rot = np.array(rot, copy=False)
shape = rot.shape[:-2]
if linalg and nonorthogonal:
from operator import mul
from functools import reduce
K3 = np.empty(shape+(4, 4))
K3[..., 0, 0] = (rot[..., 0, 0] - rot[..., 1, 1] - rot[..., 2, 2])/3.0
K3[..., 0, 1] = (rot[..., 1, 0] + rot[..., 0, 1])/3.0
K3[..., 0, 2] = (rot[..., 2, 0] + rot[..., 0, 2])/3.0
K3[..., 0, 3] = (rot[..., 1, 2] - rot[..., 2, 1])/3.0
K3[..., 1, 0] = K3[..., 0, 1]
K3[..., 1, 1] = (rot[..., 1, 1] - rot[..., 0, 0] - rot[..., 2, 2])/3.0
K3[..., 1, 2] = (rot[..., 2, 1] + rot[..., 1, 2])/3.0
K3[..., 1, 3] = (rot[..., 2, 0] - rot[..., 0, 2])/3.0
K3[..., 2, 0] = K3[..., 0, 2]
K3[..., 2, 1] = K3[..., 1, 2]
K3[..., 2, 2] = (rot[..., 2, 2] - rot[..., 0, 0] - rot[..., 1, 1])/3.0
K3[..., 2, 3] = (rot[..., 0, 1] - rot[..., 1, 0])/3.0
K3[..., 3, 0] = K3[..., 0, 3]
K3[..., 3, 1] = K3[..., 1, 3]
K3[..., 3, 2] = K3[..., 2, 3]
K3[..., 3, 3] = (rot[..., 0, 0] + rot[..., 1, 1] + rot[..., 2, 2])/3.0
if not shape:
q = zero.copy()
eigvals, eigvecs = linalg.eigh(K3.T, eigvals=(3, 3))
q.components[0] = eigvecs[-1]
q.components[1:] = -eigvecs[:-1].flatten()
return q
else:
q = np.empty(shape+(4,), dtype=float)
for flat_index in range(reduce(mul, shape)):
multi_index = np.unravel_index(flat_index, shape)
eigvals, eigvecs = linalg.eigh(K3[multi_index], eigvals=(3, 3))
q[multi_index+(0,)] = eigvecs[-1]
q[multi_index+(slice(1,None),)] = -eigvecs[:-1].flatten()
return as_quat_array(q)
else: # No scipy.linalg or not `nonorthogonal`
diagonals = np.empty(shape+(4,))
diagonals[..., 0] = rot[..., 0, 0]
diagonals[..., 1] = rot[..., 1, 1]
diagonals[..., 2] = rot[..., 2, 2]
diagonals[..., 3] = rot[..., 0, 0] + rot[..., 1, 1] + rot[..., 2, 2]
indices = np.argmax(diagonals, axis=-1)
q = diagonals # reuse storage space
indices_i = (indices == 0)
if np.any(indices_i):
if indices_i.shape == ():
indices_i = Ellipsis
rot_i = rot[indices_i, :, :]
q[indices_i, 0] = rot_i[..., 2, 1] - rot_i[..., 1, 2]
q[indices_i, 1] = 1 + rot_i[..., 0, 0] - rot_i[..., 1, 1] - rot_i[..., 2, 2]
q[indices_i, 2] = rot_i[..., 0, 1] + rot_i[..., 1, 0]
q[indices_i, 3] = rot_i[..., 0, 2] + rot_i[..., 2, 0]
indices_i = (indices == 1)
if np.any(indices_i):
if indices_i.shape == ():
indices_i = Ellipsis
rot_i = rot[indices_i, :, :]
q[indices_i, 0] = rot_i[..., 0, 2] - rot_i[..., 2, 0]
q[indices_i, 1] = rot_i[..., 1, 0] + rot_i[..., 0, 1]
q[indices_i, 2] = 1 - rot_i[..., 0, 0] + rot_i[..., 1, 1] - rot_i[..., 2, 2]
q[indices_i, 3] = rot_i[..., 1, 2] + rot_i[..., 2, 1]
indices_i = (indices == 2)
if np.any(indices_i):
if indices_i.shape == ():
indices_i = Ellipsis
rot_i = rot[indices_i, :, :]
q[indices_i, 0] = rot_i[..., 1, 0] - rot_i[..., 0, 1]
q[indices_i, 1] = rot_i[..., 2, 0] + rot_i[..., 0, 2]
q[indices_i, 2] = rot_i[..., 2, 1] + rot_i[..., 1, 2]
q[indices_i, 3] = 1 - rot_i[..., 0, 0] - rot_i[..., 1, 1] + rot_i[..., 2, 2]
indices_i = (indices == 3)
if np.any(indices_i):
if indices_i.shape == ():
indices_i = Ellipsis
rot_i = rot[indices_i, :, :]
q[indices_i, 0] = 1 + rot_i[..., 0, 0] + rot_i[..., 1, 1] + rot_i[..., 2, 2]
q[indices_i, 1] = rot_i[..., 2, 1] - rot_i[..., 1, 2]
q[indices_i, 2] = rot_i[..., 0, 2] - rot_i[..., 2, 0]
q[indices_i, 3] = rot_i[..., 1, 0] - rot_i[..., 0, 1]
q /= np.linalg.norm(q, axis=-1)[..., np.newaxis]
return as_quat_array(q)
|
Convert input 3x3 rotation matrix to unit quaternion
By default, if scipy.linalg is available, this function uses
Bar-Itzhack's algorithm to allow for non-orthogonal matrices.
[J. Guidance, Vol. 23, No. 6, p. 1085 <http://dx.doi.org/10.2514/2.4654>]
This will almost certainly be quite a bit slower than simpler versions,
though it will be more robust to numerical errors in the rotation matrix.
Also note that Bar-Itzhack uses some pretty weird conventions. The last
component of the quaternion appears to represent the scalar, and the
quaternion itself is conjugated relative to the convention used
throughout this module.
If scipy.linalg is not available or if the optional
`nonorthogonal` parameter is set to `False`, this function falls
back to the possibly faster, but less robust, algorithm of Markley
[J. Guidance, Vol. 31, No. 2, p. 440
<http://dx.doi.org/10.2514/1.31730>].
Parameters
----------
rot: (...Nx3x3) float array
Each 3x3 matrix represents a rotation by multiplying (from the left)
a column vector to produce a rotated column vector. Note that this
input may actually have ndims>3; it is just assumed that the last
two dimensions have size 3, representing the matrix.
nonorthogonal: bool, optional
If scipy.linalg is available, use the more robust algorithm of
Bar-Itzhack. Default value is True.
Returns
-------
q: array of quaternions
Unit quaternions resulting in rotations corresponding to input
rotations. Output shape is rot.shape[:-2].
Raises
------
LinAlgError
If any of the eigenvalue solutions does not converge
|
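A sanity check under this module's conventions: the identity rotation maps to the unit quaternion, possibly up to a sign flip from the eigenvector solver.
import numpy as np

q = from_rotation_matrix(np.eye(3))
print(q)  # quaternion(1, 0, 0, 0), possibly negated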
def tags(self):
"""Creates a list of all the tags of the contained items
# Returns
`list [str]`
> A list of all the tags
"""
tags = set()
for i in self:
tags |= set(i.keys())
return tags
|
Creates a set of all the tags of the contained items
# Returns
`set [str]`
> A set of all the tags
|
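The same key-union logic on a plain list of dicts, for illustration; the container class is assumed to iterate over such items.
items = [{'title': 'A', 'year': 1999}, {'title': 'B', 'doi': 'x'}]
tags = set()
for i in items:
    tags |= set(i.keys())
print(tags)  # {'title', 'year', 'doi'}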
def norm(A):
"""computes the L2-norm along axis 1 (e.g. genes or embedding dimensions) equivalent to np.linalg.norm(A, axis=1)
"""
return np.sqrt(A.multiply(A).sum(1).A1) if issparse(A) else np.sqrt(np.einsum('ij, ij -> i', A, A))
|
computes the L2-norm along axis 1 (e.g. genes or embedding dimensions) equivalent to np.linalg.norm(A, axis=1)
|
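A dense example of the row-wise L2 norm computed above.
import numpy as np

A = np.array([[3.0, 4.0], [0.0, 2.0]])
print(norm(A))  # [5. 2.]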
def ordered_by_replica(self, request_key):
"""
Should be called by each replica when request is ordered or replica is removed.
"""
state = self.get(request_key)
if not state:
return
state.unordered_by_replicas_num -= 1
|
Should be called by each replica when request is ordered or replica is removed.
|
def add(self, member, score):
"""Add the specified member to the sorted set, or update the score
if it already exist."""
return self.client.zadd(self.name, member, score)
|
Add the specified member to the sorted set, or update the score
if it already exists.
|
def ComplementEquivalence(*args, **kwargs):
"""Change x != y to not(x == y)."""
return ast.Complement(
ast.Equivalence(*args, **kwargs), **kwargs)
|
Change x != y to not(x == y).
|
def validate_xml(file):
"""Validate an XML file."""
max_file_size = current_app.config.get(
'PREVIEWER_MAX_FILE_SIZE_BYTES', 1 * 1024 * 1024)
if file.size > max_file_size:
return False
with file.open() as fp:
try:
content = fp.read().decode('utf-8')
xml.dom.minidom.parseString(content)
return True
except Exception:
return False
|
Validate an XML file.
|
def _set_status_data(self, userdata):
"""Set status properties from userdata response.
Response values:
d3: On Mask
d4: Off Mask
d5: X10 House Code
d6: X10 Unit
d7: Ramp Rate
d8: On-Level
d9: LED Brightness
d10: Non-Toggle Mask
d11: LED Bit Mask
d12: X10 ALL Bit Mask
d13: On/Off Bit Mask
d14: Trigger Group Bit Mask
"""
self._on_mask = userdata['d3']
self._off_mask = userdata['d4']
self._x10_house_code = userdata['d5']
self._x10_unit = userdata['d6']
self._ramp_rate = userdata['d7']
self._on_level = userdata['d8']
self._led_brightness = userdata['d9']
self._non_toggle_mask = userdata['d10']
self._led_bit_mask = userdata['d11']
self._x10_all_bit_mask = userdata['d12']
self._on_off_bit_mask = userdata['d13']
self._trigger_group_bit_mask = userdata['d14']
|
Set status properties from userdata response.
Response values:
d3: On Mask
d4: Off Mask
d5: X10 House Code
d6: X10 Unit
d7: Ramp Rate
d8: On-Level
d9: LED Brightness
d10: Non-Toggle Mask
d11: LED Bit Mask
d12: X10 ALL Bit Mask
d13: On/Off Bit Mask
d14: Trigger Group Bit Mask
|
def generate_terms(self, ref, root, file_type=None):
"""An generator that yields term objects, handling includes and argument
children.
:param file_type:
:param doc:
:param root:
:param ref:
"""
last_section = root
t = None
if isinstance(ref, Source):
row_gen = ref
ref_path = row_gen.__class__.__name__
else:
row_gen = get_generator(ref)
ref_path = ref.path
try:
for line_n, row in enumerate(row_gen, 1):
if not row or not row[0] or not row[0].strip() or row[0].strip().startswith('#'):
continue
tt = Term(row[0], None) # Just to get the qualified name constructed properly
term_class = self.get_term_class(tt.join_lc)
t = term_class(tt.join_lc,
row[1] if len(row) > 1 else '',
row[2:] if len(row) > 2 else [],
row=line_n,
col=1,
file_name=ref_path, file_type=file_type, doc=self.doc)
# Why did we remove comments from values? It strips out Markdown
#if t.value and str(t.value).startswith('#'): # Comments are ignored
# continue
if t.term_is('include') or t.term_is('declare'):
if t.term_is('include'):
resolved = self.find_include_doc(dirname(ref_path), t.value.strip())
else:
resolved = self.find_declare_doc(dirname(ref_path), t.value.strip())
if row_gen.ref == resolved:
raise IncludeError("Include loop for '{}' ".format(resolved))
yield t
try:
sub_gen = get_generator(resolved.get_resource().get_target())
for t in self.generate_terms(sub_gen, root, file_type=t.record_term_lc):
yield t
if last_section:
yield last_section # Re-assert the last section
except IncludeError as e:
e.term = t
raise
except (OSError, FileNotFoundError, GenerateError, DownloadError) as e:
e = IncludeError("Failed to Include; {}".format(e))
e.term = t
raise e
continue # Already yielded the include/declare term, and includes can't have children
elif t.term_is('section'):
# If there is already a section in the document, emit the existing section,
# rather than a new one.
try:
last_section = self.doc[t.name]
t = last_section
except (KeyError, TypeError): # TypeError -> self.doc is None
last_section = t
yield t
# Yield any child terms, from the term row arguments
if not t.term_is('section') and not t.term_is('header'):
for col, value in enumerate(t.args, 0):
if str(value).strip():
term_name = t.record_term_lc + '.' + str(col)
term_class = self.get_term_class(term_name)
yield term_class(term_name, str(value), [],
row=line_n,
col=col + 2, # The 0th argument starts in col 2
file_name=ref_path,
file_type=file_type,
parent=t) #,
#doc=None,
#section=last_section)
except IncludeError as e:
exc = IncludeError(str(e) + "; in '{}' ".format(ref_path))
exc.term = e.term if hasattr(e, 'term') else None
raise exc
|
A generator that yields term objects, handling includes and argument
children.
:param file_type:
:param doc:
:param root:
:param ref:
|
def get_assessment_part_item_design_session(self, proxy):
"""Gets the ``OsidSession`` associated with the assessment part item design service.
return: (osid.assessment.authoring.AssessmentPartItemDesignSession)
- an ``AssessmentPartItemDesignSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_assessment_part_item_design()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_part_lookup()`` is ``true``.*
"""
if not self.supports_assessment_part_lookup(): # This is kludgy, but only until Tom fixes spec
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.AssessmentPartItemDesignSession(proxy=proxy, runtime=self._runtime)
|
Gets the ``OsidSession`` associated with the assessment part item design service.
return: (osid.assessment.authoring.AssessmentPartItemDesignSession)
- an ``AssessmentPartItemDesignSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_assessment_part_item_design()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_part_lookup()`` is ``true``.*
|
def get_attribute_values_string(self):
"""Retrieves a comparable string of the attribute values.
Returns:
str: comparable string of the attribute values.
"""
attributes = []
for attribute_name, attribute_value in sorted(self.__dict__.items()):
# Not using startswith to improve performance.
if attribute_name[0] == '_' or attribute_value is None:
continue
if isinstance(attribute_value, dict):
attribute_value = sorted(attribute_value.items())
elif isinstance(attribute_value, six.binary_type):
attribute_value = repr(attribute_value)
attribute_string = '{0:s}: {1!s}'.format(attribute_name, attribute_value)
attributes.append(attribute_string)
return ', '.join(attributes)
|
Retrieves a comparable string of the attribute values.
Returns:
str: comparable string of the attribute values.
|
def gt_type(self):
"""The type of genotype, returns one of ``HOM_REF``, ``HOM_ALT``, and
``HET``.
"""
if not self.called:
return None # not called
elif all(a == 0 for a in self.gt_alleles):
return HOM_REF
elif len(set(self.gt_alleles)) == 1:
return HOM_ALT
else:
return HET
|
The type of genotype, returns one of ``HOM_REF``, ``HOM_ALT``, and
``HET``.
|
def begin_tag(self, name: str) -> bool:
"""Save the current index under the given name."""
# Check if we could attach tag cache to current rule_nodes scope
self.tag_cache[name] = Tag(self._stream, self._stream.index)
return True
|
Save the current index under the given name.
|