| code | docstring |
|---|---|
def get_supercell_matrix(self, supercell, struct):
"""
Returns the matrix for transforming struct to supercell. This
can be used for very distorted 'supercells' where the primitive cell
is impossible to find
"""
if self._primitive_cell:
raise ValueError("get_supercell_matrix cannot be used with the "
"primitive cell option")
struct, supercell, fu, s1_supercell = self._preprocess(struct,
supercell, False)
if not s1_supercell:
raise ValueError("The non-supercell must be put onto the basis"
" of the supercell, not the other way around")
match = self._match(struct, supercell, fu, s1_supercell, use_rms=True,
break_on_match=False)
if match is None:
return None
return match[2]
|
Returns the matrix for transforming struct to supercell. This
can be used for very distorted 'supercells' where the primitive cell
is impossible to find
|
def GetSOAPEnvUri(self, version):
"""Return the appropriate SOAP envelope uri for a given
human-friendly SOAP version string (e.g. '1.1')."""
attrname = 'NS_SOAP_ENV_%s' % '_'.join(version.split('.'))
value = getattr(self, attrname, None)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP version: %s' % version
)
|
Return the appropriate SOAP envelope uri for a given
human-friendly SOAP version string (e.g. '1.1').
|
def name(value, known_modules=[]):
'''Return a name that can be imported, to serialize/deserialize an object'''
if value is None:
return 'None'
if not isinstance(value, type): # Get the class name first
value = type(value)
tname = value.__name__
if hasattr(builtins, tname):
return tname
modname = value.__module__
if modname == '__main__':
return tname
if known_modules and modname in known_modules:
return tname
for kmod in known_modules:
if not kmod:
continue
module = importlib.import_module(kmod)
if hasattr(module, tname):
return tname
return '{}.{}'.format(modname, tname)
|
Return a name that can be imported, to serialize/deserialize an object
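A quick illustration of the naming rules above (a sketch only; it assumes the `name` helper is in scope, and uses `collections` to show the dotted fallback):
# Builtins resolve to their bare name; None is special-cased.
assert name(3) == 'int'
assert name(None) == 'None'
assert name([1, 2, 3]) == 'list'
# Non-builtin types fall back to 'module.ClassName' unless their module
# appears in known_modules.
import collections
assert name(collections.OrderedDict()) == 'collections.OrderedDict'
assert name(collections.OrderedDict(), known_modules=['collections']) == 'OrderedDict'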
|
def _generate_components(self, X):
"""Generate components of hidden layer given X"""
rs = check_random_state(self.random_state)
if (self._use_mlp_input):
self._compute_biases(rs)
self._compute_weights(X, rs)
if (self._use_rbf_input):
self._compute_centers(X, sp.issparse(X), rs)
self._compute_radii()
|
Generate components of hidden layer given X
|
def dispatch(self, command, app):
"""
Function runs the active command.
Args
----
command (glim.command.Command): the command object.
app (glim.app.App): the glim app object.
Note:
Exception handling should be done in Command class
itself. If not, an unhandled exception may result
in app crash!
"""
if self.is_glimcommand(command):
command.run(app)
else:
command.run()
|
Function runs the active command.
Args
----
command (glim.command.Command): the command object.
app (glim.app.App): the glim app object.
Note:
Exception handling should be done in Command class
itself. If not, an unhandled exception may result
in app crash!
|
def _dump_spec(spec):
"""Dump bel specification dictionary using YAML
Formats this with an extra indentation for lists to make it easier to
use code folding on the YAML version of the spec dictionary.
"""
with open("spec.yaml", "w") as f:
yaml.dump(spec, f, Dumper=MyDumper, default_flow_style=False)
|
Dump bel specification dictionary using YAML
Formats this with an extra indentation for lists to make it easier to
use code folding on the YAML version of the spec dictionary.
|
def _z2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_z2deriv
PURPOSE:
evaluate the second vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
the second vertical derivative
HISTORY:
2015-02-13 - Written - Trick (MPIA)
"""
l,n = bovy_coords.Rz_to_lambdanu (R,z,ac=self._ac,Delta=self._Delta)
jac = bovy_coords.Rz_to_lambdanu_jac(R,z, Delta=self._Delta)
hess = bovy_coords.Rz_to_lambdanu_hess(R,z, Delta=self._Delta)
dldz = jac[0,1]
dndz = jac[1,1]
d2ldz2 = hess[0,1,1]
d2ndz2 = hess[1,1,1]
return d2ldz2 * self._lderiv(l,n) + \
d2ndz2 * self._nderiv(l,n) + \
(dldz)**2 * self._l2deriv(l,n) + \
(dndz)**2 * self._n2deriv(l,n) + \
2.*dldz*dndz * self._lnderiv(l,n)
|
NAME:
_z2deriv
PURPOSE:
evaluate the second vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
the second vertical derivative
HISTORY:
2015-02-13 - Written - Trick (MPIA)
|
def mrca_matrix(self):
'''Return a dictionary storing all pairwise MRCAs. ``M[u][v]`` = MRCA of nodes ``u`` and ``v``. Excludes ``M[u][u]`` because the MRCA of a node and itself is the node itself
Returns:
``dict``: ``M[u][v]`` = MRCA of nodes ``u`` and ``v``
'''
M = dict()
leaves_below = dict()
for node in self.traverse_postorder():
leaves_below[node] = list()
if node.is_leaf():
leaves_below[node].append(node); M[node] = dict()
else:
for i in range(len(node.children)-1):
for l1 in leaves_below[node.children[i]]:
leaves_below[node].append(l1)
for j in range(i+1, len(node.children)):
for l2 in leaves_below[node.children[j]]:
M[l1][l2] = node; M[l2][l1] = node
if len(node.children) != 1:
for l2 in leaves_below[node.children[-1]]:
leaves_below[node].append(l2)
return M
|
Return a dictionary storing all pairwise MRCAs. ``M[u][v]`` = MRCA of nodes ``u`` and ``v``. Excludes ``M[u][u]`` because the MRCA of a node and itself is the node itself
Returns:
``dict``: ``M[u][v]`` = MRCA of nodes ``u`` and ``v``
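A small usage sketch, assuming a treeswift-style ``Tree`` built from a Newick string (the import and leaf-traversal helper are assumptions, not taken from the entry above):
from treeswift import read_tree_newick  # assumed treeswift-style API

tree = read_tree_newick('((A,B),C);')
M = tree.mrca_matrix()
leaves = {leaf.label: leaf for leaf in tree.traverse_leaves()}
# M[A][B] is the internal node joining A and B; M[A][C] is the root.
print(M[leaves['A']][leaves['B']])
print(M[leaves['A']][leaves['C']])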
|
def p_concat_list(p):
"""
concat_list : expr_list SEMI expr_list
| concat_list SEMI expr_list
"""
if p[1].__class__ == node.expr_list:
p[0] = node.concat_list([p[1], p[3]])
else:
p[0] = p[1]
p[0].append(p[3])
|
concat_list : expr_list SEMI expr_list
| concat_list SEMI expr_list
|
def param_projection(self, x_param, y_param, metric):
"""
Projects the grid search results onto 2 dimensions.
The wrapped GridSearch object is assumed to be fit already.
The display value is taken as the max over the non-displayed dimensions.
Parameters
----------
x_param : string
The name of the parameter to be visualized on the horizontal axis.
y_param : string
The name of the parameter to be visualized on the vertical axis.
metric : string (default 'mean_test_score')
The field from the grid search's `cv_results` that we want to display.
Returns
-------
unique_x_vals : list
The parameter values that will be used to label the x axis.
unique_y_vals: list
The parameter values that will be used to label the y axis.
best_scores: 2D numpy array (n_y by n_x)
Array of scores to be displayed for each parameter value pair.
"""
return param_projection(self.estimator.cv_results_, x_param, y_param, metric)
|
Projects the grid search results onto 2 dimensions.
The wrapped GridSearch object is assumed to be fit already.
The display value is taken as the max over the non-displayed dimensions.
Parameters
----------
x_param : string
The name of the parameter to be visualized on the horizontal axis.
y_param : string
The name of the parameter to be visualized on the vertical axis.
metric : string (default 'mean_test_score')
The field from the grid search's `cv_results` that we want to display.
Returns
-------
unique_x_vals : list
The parameter values that will be used to label the x axis.
unique_y_vals: list
The parameter values that will be used to label the y axis.
best_scores: 2D numpy array (n_y by n_x)
Array of scores to be displayed for each parameter value pair.
|
def is_driver(self):
"""Check whether the file is a Windows driver.
This will return true only if there are reliable indicators of the image
being a driver.
"""
# Checking that the ImageBase field of the OptionalHeader is above or
# equal to 0x80000000 (that is, whether it lies in the upper 2GB of
# the address space, normally belonging to the kernel) is not a
# reliable enough indicator. For instance, PEs that play the invalid
# ImageBase trick to get relocated could be incorrectly assumed to be
# drivers.
# This is not reliable either...
#
# if any( (section.Characteristics & SECTION_CHARACTERISTICS['IMAGE_SCN_MEM_NOT_PAGED']) for section in self.sections ):
# return True
if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
# If it imports from "ntoskrnl.exe" or other kernel components it should be a driver
#
if set( ('ntoskrnl.exe', 'hal.dll', 'ndis.sys', 'bootvid.dll', 'kdcom.dll' ) ).intersection( [ imp.dll.lower() for imp in self.DIRECTORY_ENTRY_IMPORT ] ):
return True
return False
|
Check whether the file is a Windows driver.
This will return true only if there are reliable indicators of the image
being a driver.
|
def thousands(x):
    """
    >>> thousands(12345)
    '12,345'
    """
    import locale
    try:
        locale.setlocale(locale.LC_ALL, "en_US.utf8")
    except Exception:
        locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
    # Group the digits manually into comma-separated thousands.
    s = '%d' % x
    groups = []
    while s and s[-1].isdigit():
        groups.append(s[-3:])
        s = s[:-3]
    return s + ','.join(reversed(groups))
|
>>> thousands(12345)
'12,345'
|
def minimal_raw_seqs(self):
    ''' m.minimal_raw_seqs() -- Return minimal list of seqs that represent consensus '''
    seqs = [[], []]
    for letter in self.oneletter:
        if letter in one2two:
            seqs[0].append(one2two[letter][0])
            seqs[1].append(one2two[letter][1])
        else:
            seqs[0].append(letter)
            seqs[1].append(letter)
    if ''.join(seqs[0]) == ''.join(seqs[1]):
        return [''.join(seqs[0])]
    else:
        return [''.join(seqs[0]), ''.join(seqs[1])]
|
m.minimal_raw_seqs() -- Return minimal list of seqs that represent consensus
|
def slice_slice(old_slice, applied_slice, size):
"""Given a slice and the size of the dimension to which it will be applied,
index it with another slice to return a new slice equivalent to applying
the slices sequentially
"""
step = (old_slice.step or 1) * (applied_slice.step or 1)
# For now, use the hack of turning old_slice into an ndarray to reconstruct
# the slice start and stop. This is not entirely ideal, but it is still
# definitely better than leaving the indexer as an array.
items = _expand_slice(old_slice, size)[applied_slice]
if len(items) > 0:
start = items[0]
stop = items[-1] + int(np.sign(step))
if stop < 0:
stop = None
else:
start = 0
stop = 0
return slice(start, stop, step)
|
Given a slice and the size of the dimension to which it will be applied,
index it with another slice to return a new slice equivalent to applying
the slices sequentially
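A quick NumPy check of the behaviour described above (illustrative only; it assumes `slice_slice` and its `_expand_slice` helper are importable from the same module):
import numpy as np

a = np.arange(20)
old, applied = slice(2, 18, 2), slice(1, 6, 2)
composed = slice_slice(old, applied, len(a))
# Applying the composed slice once gives the same result as applying
# the two slices one after another.
assert np.array_equal(a[old][applied], a[composed])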
|
def loaded(self, request, *args, **kwargs):
"""Return a list of loaded Packs.
"""
serializer = self.get_serializer(list(Pack.objects.all()),
many=True)
return Response(serializer.data)
|
Return a list of loaded Packs.
|
def _encrypt_asymmetric(self,
encryption_algorithm,
encryption_key,
plain_text,
padding_method,
hashing_algorithm=None):
"""
Encrypt data using asymmetric encryption.
Args:
encryption_algorithm (CryptographicAlgorithm): An enumeration
specifying the asymmetric encryption algorithm to use for
encryption. Required.
encryption_key (bytes): The bytes of the public key to use for
encryption. Required.
plain_text (bytes): The bytes to be encrypted. Required.
padding_method (PaddingMethod): An enumeration specifying the
padding method to use with the asymmetric encryption
algorithm. Required.
hashing_algorithm (HashingAlgorithm): An enumeration specifying
the hashing algorithm to use with the encryption padding
method. Required, if the padding method is OAEP. Optional
otherwise, defaults to None.
Returns:
dict: A dictionary containing the encrypted data, with at least
the following key/value field:
* cipher_text - the bytes of the encrypted data
Raises:
InvalidField: Raised when the algorithm is unsupported or the
length is incompatible with the algorithm.
CryptographicFailure: Raised when the key generation process
fails.
"""
if encryption_algorithm == enums.CryptographicAlgorithm.RSA:
if padding_method == enums.PaddingMethod.OAEP:
hash_algorithm = self._encryption_hash_algorithms.get(
hashing_algorithm
)
if hash_algorithm is None:
raise exceptions.InvalidField(
"The hashing algorithm '{0}' is not supported for "
"asymmetric encryption.".format(hashing_algorithm)
)
padding_method = asymmetric_padding.OAEP(
mgf=asymmetric_padding.MGF1(
algorithm=hash_algorithm()
),
algorithm=hash_algorithm(),
label=None
)
elif padding_method == enums.PaddingMethod.PKCS1v15:
padding_method = asymmetric_padding.PKCS1v15()
else:
raise exceptions.InvalidField(
"The padding method '{0}' is not supported for asymmetric "
"encryption.".format(padding_method)
)
backend = default_backend()
try:
public_key = backend.load_der_public_key(encryption_key)
except Exception:
try:
public_key = backend.load_pem_public_key(encryption_key)
except Exception:
raise exceptions.CryptographicFailure(
"The public key bytes could not be loaded."
)
cipher_text = public_key.encrypt(
plain_text,
padding_method
)
return {'cipher_text': cipher_text}
else:
raise exceptions.InvalidField(
"The cryptographic algorithm '{0}' is not supported for "
"asymmetric encryption.".format(encryption_algorithm)
)
|
Encrypt data using asymmetric encryption.
Args:
encryption_algorithm (CryptographicAlgorithm): An enumeration
specifying the asymmetric encryption algorithm to use for
encryption. Required.
encryption_key (bytes): The bytes of the public key to use for
encryption. Required.
plain_text (bytes): The bytes to be encrypted. Required.
padding_method (PaddingMethod): An enumeration specifying the
padding method to use with the asymmetric encryption
algorithm. Required.
hashing_algorithm (HashingAlgorithm): An enumeration specifying
the hashing algorithm to use with the encryption padding
method. Required, if the padding method is OAEP. Optional
otherwise, defaults to None.
Returns:
dict: A dictionary containing the encrypted data, with at least
the following key/value field:
* cipher_text - the bytes of the encrypted data
Raises:
InvalidField: Raised when the algorithm is unsupported or the
length is incompatible with the algorithm.
CryptographicFailure: Raised when the key generation process
fails.
|
def get_graphql_schema_from_orientdb_schema_data(schema_data, class_to_field_type_overrides=None,
hidden_classes=None):
"""Construct a GraphQL schema from an OrientDB schema.
Args:
schema_data: list of dicts describing the classes in the OrientDB schema. The following
format is the way the data is structured in OrientDB 2. See
the README.md file for an example of how to query this data.
Each dict has the following string fields:
- name: string, the name of the class.
- superClasses (optional): list of strings, the name of the class's
superclasses.
- superClass (optional): string, the name of the class's superclass. May be
used instead of superClasses if there is only one
superClass. Used for backwards compatibility with
OrientDB.
- customFields (optional): dict, string -> string, data defined on the class
instead of instances of the class.
- abstract: bool, true if the class is abstract.
- properties: list of dicts, describing the class's properties.
Each property dictionary has the following string fields:
- name: string, the name of the property.
- type: int, builtin OrientDB type ID of the property.
See schema_properties.py for the mapping.
- linkedType (optional): int, if the property is a
collection of builtin OrientDB
objects, then it indicates their
type ID.
- linkedClass (optional): string, if the property is a
collection of class instances,
then it indicates the name of
the class. If class is an edge
class, and the field name is
either 'in' or 'out', then it
describes the name of an
endpoint of the edge.
- defaultValue: string, the textual representation of the
default value for the property, as
returned by OrientDB's schema
introspection code, e.g., '{}' for
the embedded set type. Note that if the
property is a collection type, it must
have a default value.
class_to_field_type_overrides: optional dict, class name -> {field name -> field type},
(string -> {string -> GraphQLType}). Used to override the
type of a field in the class where it's first defined and all
the class's subclasses.
hidden_classes: optional set of strings, classes to not include in the GraphQL schema.
Returns:
tuple of (GraphQL schema object, GraphQL type equivalence hints dict).
The tuple is of type (GraphQLSchema, {GraphQLObjectType -> GraphQLUnionType}).
"""
if class_to_field_type_overrides is None:
class_to_field_type_overrides = dict()
if hidden_classes is None:
hidden_classes = set()
schema_graph = SchemaGraph(schema_data)
return get_graphql_schema_from_schema_graph(schema_graph, class_to_field_type_overrides,
hidden_classes)
|
Construct a GraphQL schema from an OrientDB schema.
Args:
schema_data: list of dicts describing the classes in the OrientDB schema. The following
format is the way the data is structured in OrientDB 2. See
the README.md file for an example of how to query this data.
Each dict has the following string fields:
- name: string, the name of the class.
- superClasses (optional): list of strings, the name of the class's
superclasses.
- superClass (optional): string, the name of the class's superclass. May be
used instead of superClasses if there is only one
superClass. Used for backwards compatibility with
OrientDB.
- customFields (optional): dict, string -> string, data defined on the class
instead of instances of the class.
- abstract: bool, true if the class is abstract.
- properties: list of dicts, describing the class's properties.
Each property dictionary has the following string fields:
- name: string, the name of the property.
- type: int, builtin OrientDB type ID of the property.
See schema_properties.py for the mapping.
- linkedType (optional): int, if the property is a
collection of builtin OrientDB
objects, then it indicates their
type ID.
- linkedClass (optional): string, if the property is a
collection of class instances,
then it indicates the name of
the class. If class is an edge
class, and the field name is
either 'in' or 'out', then it
describes the name of an
endpoint of the edge.
- defaultValue: string, the textual representation of the
default value for the property, as
returned by OrientDB's schema
introspection code, e.g., '{}' for
the embedded set type. Note that if the
property is a collection type, it must
have a default value.
class_to_field_type_overrides: optional dict, class name -> {field name -> field type},
(string -> {string -> GraphQLType}). Used to override the
type of a field in the class where it's first defined and all
the class's subclasses.
hidden_classes: optional set of strings, classes to not include in the GraphQL schema.
Returns:
tuple of (GraphQL schema object, GraphQL type equivalence hints dict).
The tuple is of type (GraphQLSchema, {GraphQLObjectType -> GraphQLUnionType}).
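An illustrative, minimal ``schema_data`` payload in the documented shape (class and property names are invented; the numeric property type follows OrientDB's built-in type IDs, assumed here to be 7 for STRING):
# Hypothetical input: a single non-abstract vertex class with one string property.
schema_data = [
    {
        'name': 'Animal',
        'abstract': False,
        'superClasses': ['V'],
        'customFields': {},
        'properties': [
            {'name': 'name', 'type': 7, 'defaultValue': None},
        ],
    },
]
schema, type_equivalence_hints = get_graphql_schema_from_orientdb_schema_data(schema_data)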
|
def _data_block(stream):
"""Process data block of ``CTfile``.
:param stream: Queue containing lines of text.
:type stream: :py:class:`collections.deque`
:return: Tuples of data.
:rtype: :class:`~ctfile.tokenizer.DataHeader` or :class:`~ctfile.tokenizer.DataItem`
"""
while len(stream) > 0:
line = stream.popleft()
if line.startswith('>'):
yield DataHeader(line[1:].strip())
else:
data_item = line.strip()
if data_item:
yield DataItem(line)
else:
continue
|
Process data block of ``CTfile``.
:param stream: Queue containing lines of text.
:type stream: :py:class:`collections.deque`
:return: Tuples of data.
:rtype: :class:`~ctfile.tokenizer.DataHeader` or :class:`~ctfile.tokenizer.DataItem`
|
def CompressStream(in_stream, length=None, compresslevel=2,
chunksize=16777216):
"""Compresses an input stream into a file-like buffer.
This reads from the input stream until either we've stored at least length
compressed bytes, or the input stream has been exhausted.
This supports streams of unknown size.
Args:
in_stream: The input stream to read from.
length: The target number of compressed bytes to buffer in the output
stream. If length is none, the input stream will be compressed
until it's exhausted.
The actual length of the output buffer can vary from the target.
If the input stream is exhausted, the output buffer may be smaller
than expected. If the data is incompressible, the maximum amount by
which the target length can be exceeded can be calculated as:
chunksize + 5 * (floor((chunksize - 1) / 16383) + 1) + 17
This accounts for additional header data gzip adds. For the default
16MiB chunksize, this results in the max size of the output buffer
being:
length + 16MiB + 5142 bytes
compresslevel: Optional, defaults to 2. The desired compression level.
chunksize: Optional, defaults to 16MiB. The chunk size used when
reading data from the input stream to write into the output
buffer.
Returns:
A file-like output buffer of compressed bytes, the number of bytes read
from the input stream, and a flag denoting if the input stream was
exhausted.
"""
in_read = 0
in_exhausted = False
out_stream = StreamingBuffer()
with gzip.GzipFile(mode='wb',
fileobj=out_stream,
compresslevel=compresslevel) as compress_stream:
# Read until we've written at least length bytes to the output stream.
while not length or out_stream.length < length:
data = in_stream.read(chunksize)
data_length = len(data)
compress_stream.write(data)
in_read += data_length
# If we read less than requested, the stream is exhausted.
if data_length < chunksize:
in_exhausted = True
break
return out_stream, in_read, in_exhausted
|
Compresses an input stream into a file-like buffer.
This reads from the input stream until either we've stored at least length
compressed bytes, or the input stream has been exhausted.
This supports streams of unknown size.
Args:
in_stream: The input stream to read from.
length: The target number of compressed bytes to buffer in the output
stream. If length is none, the input stream will be compressed
until it's exhausted.
The actual length of the output buffer can vary from the target.
If the input stream is exhausted, the output buffer may be smaller
than expected. If the data is incompressible, the maximum amount by
which the target length can be exceeded can be calculated as:
chunksize + 5 * (floor((chunksize - 1) / 16383) + 1) + 17
This accounts for additional header data gzip adds. For the default
16MiB chunksize, this results in the max size of the output buffer
being:
length + 16MiB + 5142 bytes
compresslevel: Optional, defaults to 2. The desired compression level.
chunksize: Optional, defaults to 16MiB. The chunk size used when
reading data from the input stream to write into the output
buffer.
Returns:
A file-like output buffer of compressed bytes, the number of bytes read
from the input stream, and a flag denoting if the input stream was
exhausted.
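The worst-case overshoot quoted above can be verified directly for the default 16 MiB chunk size (plain arithmetic, independent of the function itself):
import math

chunksize = 16 * 1024 * 1024  # default chunk size, 16 MiB
overhead = 5 * (math.floor((chunksize - 1) / 16383) + 1) + 17
print(overhead)               # 5142 -> per-chunk gzip/deflate framing bytes
print(chunksize + overhead)   # maximum bytes by which the target length can be exceeded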
|
def get_endpoint_map(self):
"""
returns API version and endpoint map
"""
log.debug("getting end points...")
cmd, url = DEVICE_URLS["get_endpoint_map"]
return self._exec(cmd, url)
|
returns API version and endpoint map
|
def joint_plot(x, y, marginalBins=50, gridsize=50, plotlimits=None, logscale_cmap=False, logscale_marginals=False, alpha_hexbin=0.75, alpha_marginals=0.75, cmap="inferno_r", marginalCol=None, figsize=(8, 8), fontsize=8, *args, **kwargs):
"""
Plots some x and y data using hexbins along with a colorbar
and marginal distributions (X and Y histograms).
Parameters
----------
x : ndarray
The x data
y : ndarray
The y data
marginalBins : int, optional
The number of bins to use in calculating the marginal
histograms of x and y
gridsize : int, optional
The grid size to be passed to matplotlib.pyplot.hexbin
which sets the gridsize in calculating the hexbins
plotlimits : float, optional
The limit of the plot in x and y (it produces a square
area centred on zero). Defaults to max range of data.
logscale_cmap : bool, optional
Sets whether to use a logscale for the colormap.
Defaults to False.
logscale_marginals : bool, optional
Sets whether to use a logscale for the marginals.
Defaults to False.
alpha_hexbin : float
Alpha value to use for hexbins and color map
alpha_marginals : float
Alpha value to use for marginal histograms
cmap : string, optional
Specifies the colormap to use, see
https://matplotlib.org/users/colormaps.html
for options. Defaults to 'inferno_r'
marginalCol : string, optional
Specifies color to use for marginals,
defaults to middle color of colormap
for a linear colormap and 70% for a
logarithmic colormap.
figsize : tuple of 2 values, optional
Sets the figsize, defaults to (8, 8)
fontsize : int, optional
Sets the fontsize for all text and axis ticks.
Defaults to 8.
*args, **kwargs : optional
args and kwargs passed to matplotlib.pyplot.hexbin
Returns
-------
fig : matplotlib.figure.Figure object
The figure object created to house the joint_plot
axHexBin : matplotlib.axes.Axes object
The axis for the hexbin plot
axHistx : matplotlib.axes.Axes object
The axis for the x marginal plot
axHisty : matplotlib.axes.Axes object
The axis for the y marginal plot
cbar : matplotlib.colorbar.Colorbar
The color bar object
"""
with _plt.rc_context({'font.size': fontsize,}):
# definitions for the axes
hexbin_marginal_seperation = 0.01
left, width = 0.2, 0.65-0.1 # left = left side of hexbin and hist_x
bottom, height = 0.1, 0.65-0.1 # bottom = bottom of hexbin and hist_y
bottom_h = height + bottom + hexbin_marginal_seperation
left_h = width + left + hexbin_marginal_seperation
cbar_pos = [0.03, bottom, 0.05, 0.02+width]
rect_hexbin = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
# start with a rectangular Figure
fig = _plt.figure(figsize=figsize)
axHexBin = _plt.axes(rect_hexbin)
axHistx = _plt.axes(rect_histx)
axHisty = _plt.axes(rect_histy)
axHisty.set_xticklabels(axHisty.xaxis.get_ticklabels(), y=0, rotation=-90)
# scale specific settings
if logscale_cmap == True:
hexbinscale = 'log'
else:
hexbinscale = None
if logscale_marginals == True:
scale='log'
else:
scale='linear'
# set up colors
cmapOb = _mpl.cm.get_cmap(cmap)
cmapOb.set_under(color='white')
if marginalCol == None:
if logscale_cmap == True:
marginalCol = cmapOb(0.7)
cbarlabel = 'log10(N)'
else:
marginalCol = cmapOb(0.5)
cbarlabel = 'N'
# set up limits
if plotlimits == None:
xmin = x.min()
xmax = x.max()
ymin = y.min()
ymax = y.max()
if xmax > ymax:
plotlimits = xmax * 1.1
else:
plotlimits = ymax * 1.1
# the hexbin plot:
hb = axHexBin.hexbin(x, y, gridsize=gridsize, bins=hexbinscale, cmap=cmap, alpha=alpha_hexbin, extent=(-plotlimits, plotlimits, -plotlimits, plotlimits), *args, **kwargs)
axHexBin.axis([-plotlimits, plotlimits, -plotlimits, plotlimits])
cbaraxes = fig.add_axes(cbar_pos) # This is the position for the colorbar
#cbar = _plt.colorbar(axp, cax = cbaraxes)
cbar = fig.colorbar(hb, cax = cbaraxes, drawedges=False) #, orientation="horizontal"
cbar.solids.set_edgecolor("face")
cbar.solids.set_rasterized(True)
cbar.solids.set_alpha(alpha_hexbin)
cbar.ax.set_yticklabels(cbar.ax.yaxis.get_ticklabels(), y=0, rotation=45)
cbar.set_label(cbarlabel, labelpad=-25, y=1.05, rotation=0)
axHexBin.set_xlim((-plotlimits, plotlimits))
axHexBin.set_ylim((-plotlimits, plotlimits))
# now determine bin size
binwidth = (2*plotlimits)/marginalBins
xymax = _np.max([_np.max(_np.fabs(x)), _np.max(_np.fabs(y))])
lim = plotlimits #(int(xymax/binwidth) + 1) * binwidth
bins = _np.arange(-lim, lim + binwidth, binwidth)
axHistx.hist(x, bins=bins, color=marginalCol, alpha=alpha_marginals, linewidth=0)
axHistx.set_yscale(value=scale)
axHisty.hist(y, bins=bins, orientation='horizontal', color=marginalCol, alpha=alpha_marginals, linewidth=0)
axHisty.set_xscale(value=scale)
_plt.setp(axHistx.get_xticklabels(), visible=False) # sets x ticks to be invisible while keeping gridlines
_plt.setp(axHisty.get_yticklabels(), visible=False) # sets x ticks to be invisible while keeping gridlines
axHistx.set_xlim(axHexBin.get_xlim())
axHisty.set_ylim(axHexBin.get_ylim())
return fig, axHexBin, axHistx, axHisty, cbar
|
Plots some x and y data using hexbins along with a colorbar
and marginal distributions (X and Y histograms).
Parameters
----------
x : ndarray
The x data
y : ndarray
The y data
marginalBins : int, optional
The number of bins to use in calculating the marginal
histograms of x and y
gridsize : int, optional
The grid size to be passed to matplotlib.pyplot.hexbin
which sets the gridsize in calculating the hexbins
plotlimits : float, optional
The limit of the plot in x and y (it produces a square
area centred on zero). Defaults to max range of data.
logscale_cmap : bool, optional
Sets whether to use a logscale for the colormap.
Defaults to False.
logscale_marginals : bool, optional
Sets whether to use a logscale for the marginals.
Defaults to False.
alpha_hexbin : float
Alpha value to use for hexbins and color map
alpha_marginals : float
Alpha value to use for marginal histograms
cmap : string, optional
Specifies the colormap to use, see
https://matplotlib.org/users/colormaps.html
for options. Defaults to 'inferno_r'
marginalCol : string, optional
Specifies color to use for marginals,
defaults to middle color of colormap
for a linear colormap and 70% for a
logarithmic colormap.
figsize : tuple of 2 values, optional
Sets the figsize, defaults to (8, 8)
fontsize : int, optional
Sets the fontsize for all text and axis ticks.
Defaults to 8.
*args, **kwargs : optional
args and kwargs passed to matplotlib.pyplot.hexbin
Returns
-------
fig : matplotlib.figure.Figure object
The figure object created to house the joint_plot
axHexBin : matplotlib.axes.Axes object
The axis for the hexbin plot
axHistx : matplotlib.axes.Axes object
The axis for the x marginal plot
axHisty : matplotlib.axes.Axes object
The axis for the y marginal plot
cbar : matplotlib.colorbar.Colorbar
The color bar object
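A minimal call sketch with synthetic, correlated Gaussian data (the data and keyword choices are illustrative only):
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=5000)
y = 0.5 * x + rng.normal(scale=0.5, size=5000)

# Returns the figure, the three axes and the colorbar described above.
fig, axHexBin, axHistx, axHisty, cbar = joint_plot(x, y, gridsize=40, logscale_cmap=True)
fig.savefig('joint_plot_example.png', dpi=150)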
|
def get_training_data(batch_size):
""" helper function to get dataloader"""
return gluon.data.DataLoader(
CIFAR10(train=True, transform=transformer),
batch_size=batch_size, shuffle=True, last_batch='discard')
|
helper function to get dataloader
|
def GetRootFileEntry(self):
"""Retrieves the root file entry.
Returns:
TSKPartitionFileEntry: a file entry or None if not available.
"""
path_spec = tsk_partition_path_spec.TSKPartitionPathSpec(
location=self.LOCATION_ROOT, parent=self._path_spec.parent)
return self.GetFileEntryByPathSpec(path_spec)
|
Retrieves the root file entry.
Returns:
TSKPartitionFileEntry: a file entry or None if not available.
|
async def get_response_metadata(response: str) -> str:
"""
Parse transaction response to fetch metadata.
The important use case for this method is validating the freshness of a node's response.
Distributed ledgers can reply with outdated information for a read request that follows a write.
To reduce pool load, libindy sends read requests to one random node in the pool.
Consensus validation is performed based on the nodes' multi-signature for the current ledger Merkle trie root.
This multi-signature contains information about the latest ledger transaction's ordering time and sequence number, which this method returns.
If the node that returned the response is for some reason out of consensus and has an outdated ledger,
this can be caught by analysing the returned latest transaction ordering time and sequence number.
There are two ways to filter outdated responses:
1) based on "seqNo" - the sender knows the sequence number of a transaction that it considers fresh enough.
2) based on "txnTime" - the sender knows a timestamp that it considers fresh enough.
Note: response of GET_VALIDATOR_INFO request isn't supported
:param response: response of write or get request.
:return: Response Metadata.
{
"seqNo": Option<u64> - transaction sequence number,
"txnTime": Option<u64> - transaction ordering time,
"lastSeqNo": Option<u64> - the latest transaction seqNo for particular Node,
"lastTxnTime": Option<u64> - the latest transaction ordering time for particular Node
}
"""
logger = logging.getLogger(__name__)
logger.debug("get_response_metadata: >>> response: %r",
response)
if not hasattr(get_response_metadata, "cb"):
logger.debug("get_response_metadata: Creating callback")
get_response_metadata.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_response = c_char_p(response.encode('utf-8'))
response_metadata = await do_call('indy_get_response_metadata',
c_response,
get_response_metadata.cb)
res = response_metadata.decode()
logger.debug("get_response_metadata: <<< res: %r", res)
return res
|
Parse transaction response to fetch metadata.
The important use case for this method is validating the freshness of a node's response.
Distributed ledgers can reply with outdated information for a read request that follows a write.
To reduce pool load, libindy sends read requests to one random node in the pool.
Consensus validation is performed based on the nodes' multi-signature for the current ledger Merkle trie root.
This multi-signature contains information about the latest ledger transaction's ordering time and sequence number, which this method returns.
If the node that returned the response is for some reason out of consensus and has an outdated ledger,
this can be caught by analysing the returned latest transaction ordering time and sequence number.
There are two ways to filter outdated responses:
1) based on "seqNo" - the sender knows the sequence number of a transaction that it considers fresh enough.
2) based on "txnTime" - the sender knows a timestamp that it considers fresh enough.
Note: response of GET_VALIDATOR_INFO request isn't supported
:param response: response of write or get request.
:return: Response Metadata.
{
"seqNo": Option<u64> - transaction sequence number,
"txnTime": Option<u64> - transaction ordering time,
"lastSeqNo": Option<u64> - the latest transaction seqNo for particular Node,
"lastTxnTime": Option<u64> - the latest transaction ordering time for particular Node
}
|
def p_tag_ref(self, p):
'tag_ref : ID'
p[0] = AstTagRef(self.path, p.lineno(1), p.lexpos(1), p[1])
|
tag_ref : ID
|
def migrate(uri: str, archive_uri: str, case_id: str, dry: bool, force: bool):
"""Update all information that was manually annotated from a old instance."""
scout_client = MongoClient(uri)
scout_database = scout_client[uri.rsplit('/', 1)[-1]]
scout_adapter = MongoAdapter(database=scout_database)
scout_case = scout_adapter.case(case_id)
if not force and scout_case.get('is_migrated'):
print("case already migrated")
return
archive_client = MongoClient(archive_uri)
archive_database = archive_client[archive_uri.rsplit('/', 1)[-1]]
archive_case = archive_database.case.find_one({
'owner': scout_case['owner'],
'display_name': scout_case['display_name']
})
archive_data = archive_info(archive_database, archive_case)
if dry:
print(ruamel.yaml.safe_dump(archive_data))
else:
#migrate_case(scout_adapter, scout_case, archive_data)
pass
|
Update all information that was manually annotated from an old instance.
|
def handle_bind_iq_set(self, stanza):
"""Handler <iq type="set"/> for resource binding."""
# pylint: disable-msg=R0201
if not self.stream:
logger.error("Got bind stanza before stream feature has been set")
return False
if self.stream.initiator:
return False
peer = self.stream.peer
if peer.resource:
raise ResourceConstraintProtocolError(
u"Only one resource per client supported")
resource = stanza.get_payload(ResourceBindingPayload).resource
jid = None
if resource:
try:
jid = JID(peer.local, peer.domain, resource)
except JIDError:
pass
if jid is None:
resource = unicode(uuid.uuid4())
jid = JID(peer.local, peer.domain, resource)
response = stanza.make_result_response()
payload = ResourceBindingPayload(jid = jid)
response.set_payload(payload)
self.stream.peer = jid
self.stream.event(AuthorizedEvent(jid))
return response
|
Handler <iq type="set"/> for resource binding.
|
def get_layer_heights(heights, depth, *args, **kwargs):
"""Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer using
the heights only.
Parameters
----------
heights : array-like
Atmospheric heights
depth : `pint.Quantity`
The thickness of the layer
*args : array-like
Atmospheric variable(s) measured at the given heights
bottom : `pint.Quantity`, optional
The bottom of the layer
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
with_agl : bool, optional
Returns the heights as above ground level by subtracting the minimum height in the
provided heights. Defaults to False.
Returns
-------
`pint.Quantity, pint.Quantity`
The height and data variables of the layer
"""
bottom = kwargs.pop('bottom', None)
interpolate = kwargs.pop('interpolate', True)
with_agl = kwargs.pop('with_agl', False)
# Make sure heights and datavars are the same length
for datavar in args:
if len(heights) != len(datavar):
raise ValueError('Height and data variables must have the same length.')
# If we want things in AGL, subtract the minimum height from all height values
if with_agl:
sfc_height = np.min(heights)
heights = heights - sfc_height
# If the bottom is not specified, make it the surface
if bottom is None:
bottom = heights[0]
# Make heights and arguments base units
heights = heights.to_base_units()
bottom = bottom.to_base_units()
# Calculate the top of the layer
top = bottom + depth
ret = [] # returned data variables in layer
# Ensure heights are sorted in ascending order
sort_inds = np.argsort(heights)
heights = heights[sort_inds]
# Mask based on top and bottom
inds = _greater_or_close(heights, bottom) & _less_or_close(heights, top)
heights_interp = heights[inds]
# Interpolate heights at bounds if necessary and sort
if interpolate:
# If we don't have the bottom or top requested, append them
if top not in heights_interp:
heights_interp = np.sort(np.append(heights_interp, top)) * heights.units
if bottom not in heights_interp:
heights_interp = np.sort(np.append(heights_interp, bottom)) * heights.units
ret.append(heights_interp)
for datavar in args:
# Ensure that things are sorted in ascending order
datavar = datavar[sort_inds]
if interpolate:
# Interpolate for the possibly missing bottom/top values
datavar_interp = interpolate_1d(heights_interp, heights, datavar)
datavar = datavar_interp
else:
datavar = datavar[inds]
ret.append(datavar)
return ret
|
Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer using
the heights only.
Parameters
----------
heights : array-like
Atmospheric heights
depth : `pint.Quantity`
The thickness of the layer
*args : array-like
Atmospheric variable(s) measured at the given heights
bottom : `pint.Quantity`, optional
The bottom of the layer
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
with_agl : bool, optional
Returns the heights as above ground level by subtracting the minimum height in the
provided heights. Defaults to False.
Returns
-------
`pint.Quantity, pint.Quantity`
The height and data variables of the layer
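A short usage sketch with unit-tagged arrays (assumes a pint-style ``units`` registry such as MetPy's; the values are made up):
import numpy as np
from metpy.units import units  # assumed unit registry

heights = np.array([10, 250, 500, 1000, 1500, 2100]) * units.m
temperature = np.array([290, 288, 286, 283, 280, 276]) * units.K

# Keep the first kilometre above ground level; the 1000 m top is not an
# existing level, so it is interpolated.
layer_heights, layer_temp = get_layer_heights(heights, 1000 * units.m,
                                              temperature, with_agl=True)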
|
def list_messages(self):
"""Output full messages list documentation in ReST format. """
messages = sorted(self._messages_definitions.values(), key=lambda m: m.msgid)
for message in messages:
if not message.may_be_emitted():
continue
print(message.format_help(checkerref=False))
print("")
|
Output full messages list documentation in ReST format.
|
def fastqIterator(fn, verbose=False, allowNameMissmatch=False):
"""
A generator function which yields FastqSequence objects read from a file or
stream. This is a general function which wraps fastqIteratorSimple. In
future releases, we may allow dynamic switching of which base iterator is
used.
:param fn: A file-like stream or a string; if this is a
string, it's treated as a filename specifying
the location of an input fastq file, else it's
treated as a file-like object, which must have a
readline() method.
:param useMustableString: if True, construct sequences from lists of
chars, rather than python string objects, to
allow more efficient editing. Use with caution.
:param verbose: if True, print messages on progress to stderr.
:param debug: if True, print debugging messages to stderr.
:param sanger: if True, assume quality scores are in sanger
format. Otherwise, assume they're in Illumina
format.
:param allowNameMissmatch: don't throw error if name in sequence data and
quality data parts of a read don't match. Newer
version of CASVA seem to output data like this,
probably to save space.
"""
it = fastqIteratorSimple(fn, verbose=verbose,
allowNameMissmatch=allowNameMissmatch)
for s in it:
yield s
|
A generator function which yields FastqSequence objects read from a file or
stream. This is a general function which wraps fastqIteratorSimple. In
future releases, we may allow dynamic switching of which base iterator is
used.
:param fn: A file-like stream or a string; if this is a
string, it's treated as a filename specifying
the location of an input fastq file, else it's
treated as a file-like object, which must have a
readline() method.
:param useMustableString: if True, construct sequences from lists of
chars, rather than python string objects, to
allow more efficient editing. Use with caution.
:param verbose: if True, print messages on progress to stderr.
:param debug: if True, print debugging messages to stderr.
:param sanger: if True, assume quality scores are in sanger
format. Otherwise, assume they're in Illumina
format.
:param allowNameMissmatch: don't throw error if name in sequence data and
quality data parts of a read don't match. Newer
version of CASVA seem to output data like this,
probably to save space.
|
def curve_points(self, beginframe, endframe, framestep, birthframe, startframe, stopframe, deathframe,
filternone=True, noiseframe=None):
"""
returns a list of frames from beginframe to endframe, in steps of framestep
warning: the list of points may include "None" elements
:param beginframe: first frame to include in list of points
:param endframe: last frame to include in list of points
:param framestep: framestep
:param birthframe: frame before which animation always returns None
:param startframe: frame from which animation starts to evolve
:param stopframe: frame in which animation completed
:param deathframe: frame in which animation starts returning None
:param filternone: automatically remove None entries
:param noiseframe: for time varying noise, this represents the time for which the noise should be evaluated
:return: list of tweened values
"""
if endframe < beginframe and framestep > 0:
assert False, "infinite loop: beginframe = {0}, endframe = {1}, framestep = {2}".format(beginframe,
endframe, framestep)
if endframe > beginframe and framestep < 0:
assert False, "infinite loop: beginframe = {0}, endframe = {1}, framestep = {2}".format(beginframe,
endframe, framestep)
i = beginframe
result = [self.make_frame(i, birthframe, startframe, stopframe, deathframe, noiseframe)]
while i < endframe:
i += framestep
if i <= endframe:
result.append(self.make_frame(i, birthframe, startframe, stopframe, deathframe, noiseframe))
if filternone:
return filter_none(result)
else:
return result
|
returns a list of frames from beginframe to endframe, in steps of framestep
warning: the list of points may include "None" elements
:param beginframe: first frame to include in list of points
:param endframe: last frame to include in list of points
:param framestep: framestep
:param birthframe: frame before which animation always returns None
:param startframe: frame from which animation starts to evolve
:param stopframe: frame in which animation completed
:param deathframe: frame in which animation starts returning None
:param filternone: automatically remove None entries
:param noiseframe: for time varying noise, this represents the time for which the noise should be evaluated
:return: list of tweened values
|
def zip(self, destination: typing.Union[str, Path] = None, encode: bool = True) -> str:
"""
Write mission, dictionary etc. to a MIZ file
Args:
destination: target MIZ file (if none, defaults to source MIZ + "_EMIZ")
Returns: destination file
"""
if encode:
self._encode()
if destination is None:
destination_path = self.miz_path.parent.joinpath(f'{self.miz_path.stem}_EMIZ.miz')
else:
destination_path = elib.path.ensure_file(destination, must_exist=False)
LOGGER.debug('zipping mission to: %s', destination_path)
destination_path.write_bytes(dummy_miz)
with ZipFile(str(destination_path), mode='w', compression=8) as zip_file:
for root, _, items in os.walk(self.temp_dir.absolute()):
for item in items:
item_abs_path = Path(root, item).absolute()
item_rel_path = Path(item_abs_path).relative_to(self.temp_dir)
zip_file.write(item_abs_path, arcname=item_rel_path)
return str(destination_path)
|
Write mission, dictionary etc. to a MIZ file
Args:
destination: target MIZ file (if none, defaults to source MIZ + "_EMIZ")
Returns: destination file
|
def _loadFromHStream(self, dtype: HStream, bitAddr: int) -> int:
"""
Parse HStream type into this transaction template instance
:return: address of its end
"""
ch = TransTmpl(dtype.elmType, 0, parent=self, origin=self.origin)
self.children.append(ch)
return bitAddr + dtype.elmType.bit_length()
|
Parse HStream type into this transaction template instance
:return: address of its end
|
def add_to_manifest(self, manifest):
"""
Add useful details to the manifest about this service
so that it can be used in an application.
:param manifest: A predix.admin.app.Manifest object
instance that manages reading/writing manifest config
for a cloud foundry app.
"""
# Add this service to list of services
manifest.add_service(self.service.name)
# Add environment variables
manifest.add_env_var(self.__module__ + '.uri',
self.service.settings.data['url'])
manifest.add_env_var(self.__module__ + '.zone_id',
self.get_predix_zone_id())
manifest.write_manifest()
|
Add useful details to the manifest about this service
so that it can be used in an application.
:param manifest: A predix.admin.app.Manifest object
instance that manages reading/writing manifest config
for a cloud foundry app.
|
def create_checklist_item(self, card_id, checklist_id, checklistitem_json, **kwargs):
'''
Create a ChecklistItem object from JSON object
'''
return self.client.create_checklist_item(card_id, checklist_id, checklistitem_json, **kwargs)
|
Create a ChecklistItem object from JSON object
|
def iteritems(self, **options):
'''Return a query iterator with (id, object) pairs.'''
iter = self.query(**options)
while True:
obj = iter.next()
yield (obj.id, obj)
|
Return a query iterator with (id, object) pairs.
|
def getServiceDependenciesUIDs(self):
"""
This method returns a list with the UIDs of the service dependencies
:return: a list of uids
"""
deps = self.getServiceDependencies()
deps_uids = [service.UID() for service in deps]
return deps_uids
|
This method returns a list with the UIDs of the service dependencies
:return: a list of uids
|
def git_remote(self):
"""
If the distribution is installed via git, return the first URL of the
'origin' remote if one is configured for the repo, or else the first
URL of the lexicographically-first remote, or else None.
:return: origin or first remote URL
:rtype: :py:obj:`str` or :py:data:`None`
"""
if self._git_remotes is None or len(self._git_remotes) < 1:
return None
if 'origin' in self._git_remotes:
return self._git_remotes['origin']
k = sorted(self._git_remotes.keys())[0]
return self._git_remotes[k]
|
If the distribution is installed via git, return the first URL of the
'origin' remote if one is configured for the repo, or else the first
URL of the lexicographically-first remote, or else None.
:return: origin or first remote URL
:rtype: :py:obj:`str` or :py:data:`None`
|
def _search(self, mdb, query, filename, season_num, episode_num, auto=False):
""" Search the movie using all available datasources and let the user
select a result. Return the chosen datasource and produced movie dict.
If auto is enabled, directly returns the first movie found.
"""
choices = []
for datasource, movie in mdb.search(query, season=season_num, episode=episode_num):
if auto:
return datasource, movie
fmt = u'<b>{title}</b> - <b>{ep}</b> S{season:02d}E{episode:02d} [{datasource}]'
choices.append(option((datasource, movie), fmt, title=movie['title'],
ep=movie['episode_title'],
season=movie['season'],
episode=movie['episode'],
datasource=datasource.name))
if not choices:
printer.p('No results to display for the file: {fn}', fn=filename)
return None, None
choices.append(option(('manual', None), 'Enter information manually'))
choices.append(option(('abort', None), 'None of these'))
printer.p('Please choose the relevant result for the file: {fn}', fn=filename, end='\n\n')
return printer.choice(choices)
|
Search the movie using all available datasources and let the user
select a result. Return the chosen datasource and produced movie dict.
If auto is enabled, directly returns the first movie found.
|
def config(conf, confdefs):
'''
Initialize a config dict using the given confdef tuples.
'''
conf = conf.copy()
# for now just populate defval
for name, info in confdefs:
conf.setdefault(name, info.get('defval'))
return conf
|
Initialize a config dict using the given confdef tuples.
|
def hierarchical_match(d, k, default=None):
"""
Match a key against a dict, simplifying the key one element at a time
:param d: dict to match against
:param k: key, or a tuple/list of key components tried from most to least specific
:param default: value to return when no match is found
:return: hierarchically matched value or default
"""
if d is None:
return default
if type(k) != list and type(k) != tuple:
k = [k]
for n, _ in enumerate(k):
key = tuple(k[0:len(k)-n])
if len(key) == 1:
key = key[0]
try:
d[key]
except:
pass
else:
return d[key]
return default
|
Match a key against a dict, simplifying the key one element at a time
:param d: dict to match against
:param k: key, or a tuple/list of key components tried from most to least specific
:param default: value to return when no match is found
:return: hierarchically matched value or default
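A small illustration of the fallback order (the dictionary contents are made up): a tuple key is shortened from the right until something matches.
config = {
    ('db', 'prod', 'host'): 'prod-db.internal',
    ('db', 'prod'): {'port': 5432},
    'db': 'defaults',
}
print(hierarchical_match(config, ('db', 'prod', 'host')))           # exact 3-part match
print(hierarchical_match(config, ('db', 'prod', 'user'), 'admin'))  # falls back to ('db', 'prod')
print(hierarchical_match(config, ('db', 'staging', 'host')))        # falls back to 'db'
print(hierarchical_match(config, 'missing'))                        # no match -> default (None)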
|
def add_clause(self, clause, soft=False):
"""
The method for adding a new hard or soft clause to the problem
formula. Although the input formula is to be specified as an
argument of the constructor of :class:`LBX`, adding clauses may be
helpful when *enumerating* MCSes of the formula. This way, the
clauses are added incrementally, i.e. *on the fly*.
The clause to add can be any iterable over integer literals. The
additional Boolean parameter ``soft`` can be set to ``True``
meaning that the clause being added is soft (note that parameter
``soft`` is set to ``False`` by default).
:param clause: a clause to add
:param soft: whether or not the clause is soft
:type clause: iterable(int)
:type soft: bool
"""
# first, map external literals to internal literals
# introduce new variables if necessary
cl = list(map(lambda l: self._map_extlit(l), clause))
if not soft:
# the clause is hard, and so we simply add it to the SAT oracle
self.oracle.add_clause(cl)
else:
self.soft.append(cl)
# soft clauses should be augmented with a selector
sel = cl[0]
if len(cl) > 1 or cl[0] < 0:
self.topv += 1
sel = self.topv
self.oracle.add_clause(cl + [-sel])
self.sels.append(sel)
|
The method for adding a new hard or soft clause to the problem
formula. Although the input formula is to be specified as an
argument of the constructor of :class:`LBX`, adding clauses may be
helpful when *enumerating* MCSes of the formula. This way, the
clauses are added incrementally, i.e. *on the fly*.
The clause to add can be any iterable over integer literals. The
additional Boolean parameter ``soft`` can be set to ``True``
meaning that the clause being added is soft (note that parameter
``soft`` is set to ``False`` by default).
:param clause: a clause to add
:param soft: whether or not the clause is soft
:type clause: iterable(int)
:type soft: bool
|
def Delete(self):
"""Delete this source restriction and commit change to cloud.
>>> clc.v2.Server("WA1BTDIX01").PublicIPs().public_ips[0].source_restrictions[0].Delete().WaitUntilComplete()
0
"""
self.public_ip.source_restrictions = [o for o in self.public_ip.source_restrictions if o!=self]
return(self.public_ip.Update())
|
Delete this source restriction and commit change to cloud.
>>> clc.v2.Server("WA1BTDIX01").PublicIPs().public_ips[0].source_restrictions[0].Delete().WaitUntilComplete()
0
|
def annual_event_counts_card(kind='all', current_year=None):
"""
Displays years and the number of events per year.
kind is an Event kind (like 'cinema', 'gig', etc.) or 'all' (default).
current_year is an optional date object representing the year we're already
showing information about.
"""
if kind == 'all':
card_title = 'Events per year'
else:
card_title = '{} per year'.format(Event.get_kind_name_plural(kind))
return {
'card_title': card_title,
'kind': kind,
'years': annual_event_counts(kind=kind),
'current_year': current_year
}
|
Displays years and the number of events per year.
kind is an Event kind (like 'cinema', 'gig', etc.) or 'all' (default).
current_year is an optional date object representing the year we're already
showing information about.
|
def add_random_tile(self):
"""Adds a random tile to the grid. Assumes that it has empty fields."""
x_pos, y_pos = np.where(self._state == 0)
assert len(x_pos) != 0
empty_index = np.random.choice(len(x_pos))
value = np.random.choice([1, 2], p=[0.9, 0.1])
self._state[x_pos[empty_index], y_pos[empty_index]] = value
|
Adds a random tile to the grid. Assumes that it has empty fields.
|
def add_pool_member(self, name, port, pool_name):
'''
Add a node to a pool
'''
if not self.check_pool(pool_name):
raise CommandExecutionError(
'{0} pool does not exists'.format(pool_name)
)
members_seq = self.bigIP.LocalLB.Pool.typefactory.create(
'Common.IPPortDefinitionSequence'
)
members_seq.items = []
member = self.bigIP.LocalLB.Pool.typefactory.create(
'Common.IPPortDefinition'
)
member.address = name
member.port = port
members_seq.items.append(member)
try:
self.bigIP.LocalLB.Pool.add_member(pool_names=[pool_name],
members=[members_seq])
except Exception as e:
raise Exception(
'Unable to add `{0}` to `{1}`\n\n{2}'.format(name,
pool_name,
e)
)
return True
|
Add a node to a pool
|
def connect(self, cback, subscribers=None, instance=None):
"""Add a function or a method as an handler of this signal.
Any handler added can be a coroutine.
:param cback: the callback (or *handler*) to be added to the set
:returns: ``None`` or the value returned by the corresponding wrapper
"""
if subscribers is None:
subscribers = self.subscribers
# wrapper
if self._fconnect is not None:
def _connect(cback):
self._connect(subscribers, cback)
notify = partial(self._notify_one, instance)
if instance is not None:
result = self._fconnect(instance, cback, subscribers,
_connect, notify)
else:
result = self._fconnect(cback, subscribers, _connect, notify)
if inspect.isawaitable(result):
result = pull_result(result)
else:
self._connect(subscribers, cback)
result = None
return result
|
Add a function or a method as a handler of this signal.
Any handler added can be a coroutine.
:param cback: the callback (or *handler*) to be added to the set
:returns: ``None`` or the value returned by the corresponding wrapper
|
def getWmWindowType(self, win, str=False):
"""
Get the list of window types of the given window (property
_NET_WM_WINDOW_TYPE).
:param win: the window object
:param str: True to get a list of string types instead of int
:return: list of (int|str)
"""
types = self._getProperty('_NET_WM_WINDOW_TYPE', win) or []
if not str:
return types
return [self._getAtomName(t) for t in types]
|
Get the list of window types of the given window (property
_NET_WM_WINDOW_TYPE).
:param win: the window object
:param str: True to get a list of string types instead of int
:return: list of (int|str)
|
def map(self, func, *columns):
"""
Map a function to rows, or to given columns
"""
if not columns:
return map(func, self.rows)
else:
values = (self.values(column) for column in columns)
result = [map(func, v) for v in values]
if len(columns) == 1:
return result[0]
else:
return result
|
Map a function to rows, or to given columns
|
def reduce_dimensionality(self, data):
"""
Reduces the dimensionality of the provided Instance or Instances object.
:param data: the data to process
:type data: Instances
:return: the reduced dataset
:rtype: Instances
"""
if type(data) is Instance:
return Instance(
javabridge.call(
self.jobject, "reduceDimensionality",
"(Lweka/core/Instance;)Lweka/core/Instance;", data.jobject))
else:
return Instances(
javabridge.call(
self.jobject, "reduceDimensionality",
"(Lweka/core/Instances;)Lweka/core/Instances;", data.jobject))
|
Reduces the dimensionality of the provided Instance or Instances object.
:param data: the data to process
:type data: Instances
:return: the reduced dataset
:rtype: Instances
|
def read_end_of_message(self):
"""Read the b"\\r\\n" at the end of the message."""
read = self._file.read
last = read(1)
current = read(1)
while last != b'' and current != b'' and not \
(last == b'\r' and current == b'\n'):
last = current
current = read(1)
|
Read the b"\\r\\n" at the end of the message.
|
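The same consume-until-CRLF loop, run standalone over an in-memory buffer; io.BytesIO stands in for the wrapped file object and the payload bytes are placeholders.
import io

buf = io.BytesIO(b'payload bytes\r\nnext message')
read = buf.read
last, current = read(1), read(1)
while last != b'' and current != b'' and not (last == b'\r' and current == b'\n'):
    last, current = current, read(1)
print(buf.read())                                # b'next message'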
def find_obfuscatables(tokens, obfunc, ignore_length=False):
"""
Iterates over *tokens*, which must be an equivalent output to what
tokenize.generate_tokens() produces, calling *obfunc* on each with the
following parameters:
- **tokens:** The current list of tokens.
- **index:** The current position in the list.
*obfunc* is expected to return the token string if that token can be safely
obfuscated **or** one of the following optional values which will instruct
find_obfuscatables() how to proceed:
- **'__skipline__'** Keep skipping tokens until a newline is reached.
- **'__skipnext__'** Skip the next token in the sequence.
If *ignore_length* is ``True`` then single-character obfuscatables will
be obfuscated anyway (even though it wouldn't save any space).
"""
global keyword_args
keyword_args = analyze.enumerate_keyword_args(tokens)
global imported_modules
imported_modules = analyze.enumerate_imports(tokens)
#print("imported_modules: %s" % imported_modules)
skip_line = False
skip_next = False
obfuscatables = []
for index, tok in enumerate(tokens):
token_type = tok[0]
if token_type == tokenize.NEWLINE:
skip_line = False
if skip_line:
continue
result = obfunc(tokens, index, ignore_length=ignore_length)
if result:
if skip_next:
skip_next = False
elif result == '__skipline__':
skip_line = True
elif result == '__skipnext__':
skip_next = True
elif result in obfuscatables:
pass
else:
obfuscatables.append(result)
else: # If result is empty we need to reset skip_next so we don't
skip_next = False # accidentally skip the next identifier
return obfuscatables
|
Iterates over *tokens*, which must be an equivalent output to what
tokenize.generate_tokens() produces, calling *obfunc* on each with the
following parameters:
- **tokens:** The current list of tokens.
- **index:** The current position in the list.
*obfunc* is expected to return the token string if that token can be safely
obfuscated **or** one of the following optional values which will instruct
find_obfuscatables() how to proceed:
- **'__skipline__'** Keep skipping tokens until a newline is reached.
- **'__skipnext__'** Skip the next token in the sequence.
If *ignore_length* is ``True`` then single-character obfuscatables will
be obfuscated anyway (even though it wouldn't save any space).
|
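A small sketch of the callback contract described above; the toy obfunc and the simplified driver below are illustrative assumptions and do not reproduce pyminifier's real analysis.
import io
import tokenize

def toy_obfunc(tokens, index, ignore_length=False):
    # Return a NAME token string when it looks safe to rename.
    tok_type, tok_string = tokens[index][0], tokens[index][1]
    if tok_type == tokenize.NAME and tok_string == 'import':
        return '__skipline__'                    # never touch import lines
    if tok_type == tokenize.NAME and not tok_string.startswith('__'):
        if ignore_length or len(tok_string) > 1:
            return tok_string
    return None

source = "import os\nmy_variable = 1\nprint(my_variable)\n"
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
skip_line, found = False, []
for i, tok in enumerate(tokens):
    if tok[0] == tokenize.NEWLINE:
        skip_line = False
    if skip_line:
        continue
    result = toy_obfunc(tokens, i)
    if result == '__skipline__':
        skip_line = True
    elif result and result not in found:
        found.append(result)
print(found)                                     # ['my_variable', 'print']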
def size_container_folding(value):
"""
Convert value to ast expression if size is not too big.
Converter for sized container.
"""
if len(value) < MAX_LEN:
if isinstance(value, list):
return ast.List([to_ast(elt) for elt in value], ast.Load())
elif isinstance(value, tuple):
return ast.Tuple([to_ast(elt) for elt in value], ast.Load())
elif isinstance(value, set):
return ast.Set([to_ast(elt) for elt in value])
elif isinstance(value, dict):
keys = [to_ast(elt) for elt in value.keys()]
values = [to_ast(elt) for elt in value.values()]
return ast.Dict(keys, values)
elif isinstance(value, np.ndarray):
return ast.Call(func=ast.Attribute(
ast.Name(mangle('numpy'), ast.Load(), None),
'array',
ast.Load()),
args=[to_ast(totuple(value.tolist())),
ast.Attribute(
ast.Name(mangle('numpy'), ast.Load(), None),
value.dtype.name,
ast.Load())],
keywords=[])
else:
raise ConversionError()
else:
raise ToNotEval()
|
Convert value to ast expression if size is not too big.
Converter for sized container.
|
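A tiny standalone sketch of the folding idea, turning a Python list literal into an AST node and evaluating it; to_ast and MAX_LEN from the original are not needed here.
import ast

value = [1, 2, 3]
node = ast.List([ast.Constant(v) for v in value], ast.Load())
expr = ast.Expression(body=node)
ast.fix_missing_locations(expr)
print(eval(compile(expr, '<folded>', 'eval')))   # [1, 2, 3]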
def last_restapi_key_transformer(key, attr_desc, value):
"""A key transformer that returns the last RestAPI key.
:param str key: The attribute name
:param dict attr_desc: The attribute metadata
:param object value: The value
:returns: The last RestAPI key.
"""
key, value = full_restapi_key_transformer(key, attr_desc, value)
return (key[-1], value)
|
A key transformer that returns the last RestAPI key.
:param str key: The attribute name
:param dict attr_desc: The attribute metadata
:param object value: The value
:returns: The last RestAPI key.
|
def arange(start, stop=None, step=1.0, repeat=1, infer_range=None, ctx=None, dtype=mx_real_t):
"""Returns evenly spaced values within a given interval.
Values are generated within the half-open interval [`start`, `stop`). In other
words, the interval includes `start` but excludes `stop`. The function is
similar to the built-in Python function `range` and to `numpy.arange`,
but returns an `NDArray`.
Parameters
----------
start : number, optional
Start of interval. The default start value is 0.
stop : number
End of interval.
step : number, optional
Spacing between values. The default step size is 1.
repeat : int, optional
Number of times to repeat each element. The default repeat count is 1.
infer_range : boolean, optional
When set to True, infer the stop position from the start, step,
repeat, and output tensor size.
ctx : Context, optional
Device context. Default context is the current default context.
dtype : str or numpy.dtype, optional
The data type of the `NDArray`. The default datatype is `np.float32`.
Returns
-------
NDArray
`NDArray` of evenly spaced values in the specified range.
Examples
--------
>>> mx.nd.arange(3).asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> mx.nd.arange(2, 6).asnumpy()
array([ 2., 3., 4., 5.], dtype=float32)
>>> mx.nd.arange(2, 6, step=2).asnumpy()
array([ 2., 4.], dtype=float32)
>>> mx.nd.arange(2, 6, step=1.5, repeat=2).asnumpy()
array([ 2. , 2. , 3.5, 3.5, 5. , 5. ], dtype=float32)
>>> mx.nd.arange(2, 6, step=2, repeat=3, dtype='int32').asnumpy()
array([2, 2, 2, 4, 4, 4], dtype=int32)
"""
if infer_range is not None:
warnings.warn('`infer_range` argument has been deprecated',
DeprecationWarning)
if ctx is None:
ctx = current_context()
return _internal._arange(start=start, stop=stop, step=step, repeat=repeat,
infer_range=False, dtype=dtype, ctx=str(ctx))
|
Returns evenly spaced values within a given interval.
Values are generated within the half-open interval [`start`, `stop`). In other
words, the interval includes `start` but excludes `stop`. The function is
similar to the built-in Python function `range` and to `numpy.arange`,
but returns an `NDArray`.
Parameters
----------
start : number, optional
Start of interval. The default start value is 0.
stop : number
End of interval.
step : number, optional
Spacing between values. The default step size is 1.
repeat : int, optional
Number of times to repeat each element. The default repeat count is 1.
infer_range : boolean, optional
When set to True, infer the stop position from the start, step,
repeat, and output tensor size.
ctx : Context, optional
Device context. Default context is the current default context.
dtype : str or numpy.dtype, optional
The data type of the `NDArray`. The default datatype is `np.float32`.
Returns
-------
NDArray
`NDArray` of evenly spaced values in the specified range.
Examples
--------
>>> mx.nd.arange(3).asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> mx.nd.arange(2, 6).asnumpy()
array([ 2., 3., 4., 5.], dtype=float32)
>>> mx.nd.arange(2, 6, step=2).asnumpy()
array([ 2., 4.], dtype=float32)
>>> mx.nd.arange(2, 6, step=1.5, repeat=2).asnumpy()
array([ 2. , 2. , 3.5, 3.5, 5. , 5. ], dtype=float32)
>>> mx.nd.arange(2, 6, step=2, repeat=3, dtype='int32').asnumpy()
array([2, 2, 2, 4, 4, 4], dtype=int32)
|
def readConfigFromJSON(self, fileName):
"""Read configuration from JSON.
:param fileName: path to the configuration file.
:type fileName: str.
"""
self.__logger.debug("readConfigFromJSON: reading from " + fileName)
with open(fileName) as data_file:
data = load(data_file)
self.readConfig(data)
|
Read configuration from JSON.
:param fileName: path to the configuration file.
:type fileName: str.
|
def _init_options(self, kwargs):
""" Initializes self.options """
self.options = self.task_config.options
if self.options is None:
self.options = {}
if kwargs:
self.options.update(kwargs)
# Handle dynamic lookup of project_config values via $project_config.attr
for option, value in list(self.options.items()):
try:
if value.startswith("$project_config."):
attr = value.replace("$project_config.", "", 1)
self.options[option] = getattr(self.project_config, attr, None)
except AttributeError:
pass
|
Initializes self.options
|
def do_output(self, *args):
"""Pass a command directly to the current output processor
"""
if args:
action, params = args[0], args[1:]
log.debug("Pass %s directly to output with %s", action, params)
function = getattr(self.output, "do_" + action, None)
if function:
function(*params)
|
Pass a command directly to the current output processor
|
def preprocess_images(raw_color_im,
raw_depth_im,
camera_intr,
T_camera_world,
workspace_box,
workspace_im,
image_proc_config):
""" Preprocess a set of color and depth images. """
# read params
inpaint_rescale_factor = image_proc_config['inpaint_rescale_factor']
cluster = image_proc_config['cluster']
cluster_tolerance = image_proc_config['cluster_tolerance']
min_cluster_size = image_proc_config['min_cluster_size']
max_cluster_size = image_proc_config['max_cluster_size']
# deproject into 3D world coordinates
point_cloud_cam = camera_intr.deproject(raw_depth_im)
point_cloud_cam.remove_zero_points()
point_cloud_world = T_camera_world * point_cloud_cam
# compute the segmask for points above the box
seg_point_cloud_world, _ = point_cloud_world.box_mask(workspace_box)
seg_point_cloud_cam = T_camera_world.inverse() * seg_point_cloud_world
depth_im_seg = camera_intr.project_to_image(seg_point_cloud_cam)
# mask out objects in the known workspace
env_pixels = depth_im_seg.pixels_farther_than(workspace_im)
depth_im_seg._data[env_pixels[:,0], env_pixels[:,1]] = 0
# REMOVE NOISE
# clip low points
low_indices = np.where(point_cloud_world.data[2,:] < workspace_box.min_pt[2])[0]
point_cloud_world.data[2,low_indices] = workspace_box.min_pt[2]
# clip high points
high_indices = np.where(point_cloud_world.data[2,:] > workspace_box.max_pt[2])[0]
point_cloud_world.data[2,high_indices] = workspace_box.max_pt[2]
# segment out the region in the workspace (including the table)
workspace_point_cloud_world, valid_indices = point_cloud_world.box_mask(workspace_box)
invalid_indices = np.setdiff1d(np.arange(point_cloud_world.num_points),
valid_indices)
if cluster:
# create new cloud
pcl_cloud = pcl.PointCloud(workspace_point_cloud_world.data.T.astype(np.float32))
tree = pcl_cloud.make_kdtree()
# find large clusters (likely to be real objects instead of noise)
ec = pcl_cloud.make_EuclideanClusterExtraction()
ec.set_ClusterTolerance(cluster_tolerance)
ec.set_MinClusterSize(min_cluster_size)
ec.set_MaxClusterSize(max_cluster_size)
ec.set_SearchMethod(tree)
cluster_indices = ec.Extract()
num_clusters = len(cluster_indices)
# read out all points in large clusters
filtered_points = np.zeros([3,workspace_point_cloud_world.num_points])
cur_i = 0
for j, indices in enumerate(cluster_indices):
num_points = len(indices)
points = np.zeros([3,num_points])
for i, index in enumerate(indices):
points[0,i] = pcl_cloud[index][0]
points[1,i] = pcl_cloud[index][1]
points[2,i] = pcl_cloud[index][2]
filtered_points[:,cur_i:cur_i+num_points] = points.copy()
cur_i = cur_i + num_points
# reconstruct the point cloud
all_points = np.c_[filtered_points[:,:cur_i], point_cloud_world.data[:,invalid_indices]]
else:
all_points = point_cloud_world.data
filtered_point_cloud_world = PointCloud(all_points,
frame='world')
# compute the filtered depth image
filtered_point_cloud_cam = T_camera_world.inverse() * filtered_point_cloud_world
depth_im = camera_intr.project_to_image(filtered_point_cloud_cam)
# form segmask
segmask = depth_im_seg.to_binary()
valid_px_segmask = depth_im.invalid_pixel_mask().inverse()
segmask = segmask.mask_binary(valid_px_segmask)
# inpaint
color_im = raw_color_im.inpaint(rescale_factor=inpaint_rescale_factor)
depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)
return color_im, depth_im, segmask
|
Preprocess a set of color and depth images.
|
def _create_socket(self, socket_family):
"""Create Socket.
:param int socket_family:
:rtype: socket.socket
"""
sock = socket.socket(socket_family, socket.SOCK_STREAM, 0)
sock.settimeout(self._parameters['timeout'] or None)
if self.use_ssl:
if not compatibility.SSL_SUPPORTED:
raise AMQPConnectionError(
'Python not compiled with support for TLSv1 or higher'
)
sock = self._ssl_wrap_socket(sock)
return sock
|
Create Socket.
:param int socket_family:
:rtype: socket.socket
|
def iter_format_block(
self, text=None,
width=60, chars=False, fill=False, newlines=False,
append=None, prepend=None, strip_first=False, strip_last=False,
lstrip=False):
""" Iterate over lines in a formatted block of text.
This iterator allows you to prepend to each line.
For basic blocks see iter_block().
Arguments:
text : String to format.
width : Maximum width for each line. The prepend string
is not included in this calculation.
Default: 60
chars : Whether to wrap on characters instead of spaces.
Default: False
fill : Insert spaces between words so that each line is
the same width. This overrides `chars`.
Default: False
newlines : Whether to preserve newlines in the original
string.
Default: False
append : String to append after each line.
prepend : String to prepend before each line.
strip_first : Whether to omit the prepend string for the first
line.
Default: False
Example (when using prepend='$'):
Without strip_first -> '$this', '$that'
With strip_first -> 'this', '$that'
strip_last : Whether to omit the append string for the last
line (like strip_first does for prepend).
Default: False
lstrip : Whether to remove leading spaces from each line.
This doesn't include any spaces in `prepend`.
Default: False
"""
if fill:
chars = False
iterlines = self.iter_block(
(self.text if text is None else text) or '',
width=width,
chars=chars,
newlines=newlines,
lstrip=lstrip,
)
if not (prepend or append):
# Shortcut some of the logic below when not prepending/appending.
if fill:
yield from (
self.expand_words(l, width=width) for l in iterlines
)
else:
yield from iterlines
else:
# Prepend, append, or both prepend/append to each line.
if prepend:
prependlen = len(prepend)
else:
# No prepend, stripping not necessary and shouldn't be tried.
strip_first = False
prependlen = 0
if append:
# Unfortunately, appending means exhausting the generator.
# I don't know where the last line is if I don't.
lines = list(iterlines)
lasti = len(lines) - 1
iterlines = (l for l in lines)
appendlen = len(append)
else:
# No append, stripping not necessary and shouldn't be tried.
strip_last = False
appendlen = 0
lasti = -1
for i, l in enumerate(self.iter_add_text(
iterlines,
prepend=prepend,
append=append)):
if strip_first and (i == 0):
# Strip the prepend that iter_add_text() added.
l = l[prependlen:]
elif strip_last and (i == lasti):
# Strip the append that iter_add_text() added.
l = l[:-appendlen]
if fill:
yield self.expand_words(l, width=width)
else:
yield l
|
Iterate over lines in a formatted block of text.
This iterator allows you to prepend to each line.
For basic blocks see iter_block().
Arguments:
text : String to format.
width : Maximum width for each line. The prepend string
is not included in this calculation.
Default: 60
chars : Whether to wrap on characters instead of spaces.
Default: False
fill : Insert spaces between words so that each line is
the same width. This overrides `chars`.
Default: False
newlines : Whether to preserve newlines in the original
string.
Default: False
append : String to append after each line.
prepend : String to prepend before each line.
strip_first : Whether to omit the prepend string for the first
line.
Default: False
Example (when using prepend='$'):
Without strip_first -> '$this', '$that'
With strip_first -> 'this', '$that'
strip_last : Whether to omit the append string for the last
line (like strip_first does for prepend).
Default: False
lstrip : Whether to remove leading spaces from each line.
This doesn't include any spaces in `prepend`.
Default: False
|
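To make the prepend/strip_first behaviour concrete, here is a tiny standalone sketch built on textwrap; it only mimics the two options described above and is not the original class.
import textwrap

def iter_prefixed(text, width=20, prepend='$ ', strip_first=False):
    for i, line in enumerate(textwrap.wrap(text, width=width)):
        yield line if (strip_first and i == 0) else prepend + line

text = 'this is a short example of a wrapped block of text'
print('\n'.join(iter_prefixed(text, strip_first=True)))
# this is a short
# $ example of a wrapped
# $ block of text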
def stmt_lambdef_handle(self, original, loc, tokens):
"""Process multi-line lambdef statements."""
if len(tokens) == 2:
params, stmts = tokens
elif len(tokens) == 3:
params, stmts, last = tokens
if "tests" in tokens:
stmts = stmts.asList() + ["return " + last]
else:
stmts = stmts.asList() + [last]
else:
raise CoconutInternalException("invalid statement lambda tokens", tokens)
name = self.stmt_lambda_name()
body = openindent + self.stmt_lambda_proc("\n".join(stmts)) + closeindent
if isinstance(params, str):
self.stmt_lambdas.append(
"def " + name + params + ":\n" + body,
)
else:
params.insert(0, name) # construct match tokens
self.stmt_lambdas.append(
"".join(self.name_match_funcdef_handle(original, loc, params))
+ body,
)
return name
|
Process multi-line lambdef statements.
|
def compute(self, inputs, outputs):
"""
Get the next record from the queue and outputs it.
"""
if len(self.queue) > 0:
# Take the top element of the data queue
data = self.queue.pop()
else:
raise Exception("RawValues: No data: queue is empty ")
# Copy data into output vectors
outputs["resetOut"][0] = data["reset"]
outputs["dataOut"][:] = data["dataOut"]
|
Get the next record from the queue and outputs it.
|
def sim_levenshtein(src, tar, mode='lev', cost=(1, 1, 1, 1)):
"""Return the Levenshtein similarity of two strings.
This is a wrapper of :py:meth:`Levenshtein.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
mode : str
Specifies a mode for computing the Levenshtein distance:
- ``lev`` (default) computes the ordinary Levenshtein distance, in
which edits may include inserts, deletes, and substitutions
- ``osa`` computes the Optimal String Alignment distance, in which
edits may include inserts, deletes, substitutions, and
transpositions but substrings may only be edited once
cost : tuple
A 4-tuple representing the cost of the four possible edits: inserts,
deletes, substitutions, and transpositions, respectively (by default:
(1, 1, 1, 1))
Returns
-------
float
The Levenshtein similarity between src & tar
Examples
--------
>>> round(sim_levenshtein('cat', 'hat'), 12)
0.666666666667
>>> round(sim_levenshtein('Niall', 'Neil'), 12)
0.4
>>> sim_levenshtein('aluminum', 'Catalan')
0.125
>>> sim_levenshtein('ATCG', 'TAGC')
0.25
"""
return Levenshtein().sim(src, tar, mode, cost)
|
Return the Levenshtein similarity of two strings.
This is a wrapper of :py:meth:`Levenshtein.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
mode : str
Specifies a mode for computing the Levenshtein distance:
- ``lev`` (default) computes the ordinary Levenshtein distance, in
which edits may include inserts, deletes, and substitutions
- ``osa`` computes the Optimal String Alignment distance, in which
edits may include inserts, deletes, substitutions, and
transpositions but substrings may only be edited once
cost : tuple
A 4-tuple representing the cost of the four possible edits: inserts,
deletes, substitutions, and transpositions, respectively (by default:
(1, 1, 1, 1))
Returns
-------
float
The Levenshtein similarity between src & tar
Examples
--------
>>> round(sim_levenshtein('cat', 'hat'), 12)
0.666666666667
>>> round(sim_levenshtein('Niall', 'Neil'), 12)
0.4
>>> sim_levenshtein('aluminum', 'Catalan')
0.125
>>> sim_levenshtein('ATCG', 'TAGC')
0.25
|
def extract_zip(zip_file_path):
"""
Returns:
dict: Dict[str, DataFrame]
"""
dfs = {}
with zipfile.ZipFile(zip_file_path, mode='r') as z_file:
names = z_file.namelist()
for name in names:
content = z_file.read(name)
_, tmp_file_path = tempfile.mkstemp()
try:
with open(tmp_file_path, 'wb') as tmp_file:
tmp_file.write(content)
dfs[name] = joblib.load(tmp_file_path)
finally:
shutil.rmtree(tmp_file_path, ignore_errors=True)
return dfs
|
Returns:
dict: Dict[str, DataFrame]
|
def get_comment(self, project, work_item_id, comment_id, include_deleted=None, expand=None):
"""GetComment.
[Preview API] Returns a work item comment.
:param str project: Project ID or project name
:param int work_item_id: Id of a work item to get the comment.
:param int comment_id: Id of the comment to return.
:param bool include_deleted: Specify if the deleted comment should be retrieved.
:param str expand: Specifies the additional data retrieval options for work item comments.
:rtype: :class:`<Comment> <azure.devops.v5_1.work-item-tracking.models.Comment>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if work_item_id is not None:
route_values['workItemId'] = self._serialize.url('work_item_id', work_item_id, 'int')
if comment_id is not None:
route_values['commentId'] = self._serialize.url('comment_id', comment_id, 'int')
query_parameters = {}
if include_deleted is not None:
query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='608aac0a-32e1-4493-a863-b9cf4566d257',
version='5.1-preview.3',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Comment', response)
|
GetComment.
[Preview API] Returns a work item comment.
:param str project: Project ID or project name
:param int work_item_id: Id of a work item to get the comment.
:param int comment_id: Id of the comment to return.
:param bool include_deleted: Specify if the deleted comment should be retrieved.
:param str expand: Specifies the additional data retrieval options for work item comments.
:rtype: :class:`<Comment> <azure.devops.v5_1.work-item-tracking.models.Comment>`
|
def _fold_line(self, line):
"""Write string line as one or more folded lines."""
if len(line) <= self._cols:
self._output_file.write(line)
self._output_file.write(self._line_sep)
else:
pos = self._cols
self._output_file.write(line[0:self._cols])
self._output_file.write(self._line_sep)
while pos < len(line):
self._output_file.write(b' ')
end = min(len(line), pos + self._cols - 1)
self._output_file.write(line[pos:end])
self._output_file.write(self._line_sep)
pos = end
|
Write string line as one or more folded lines.
|
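A standalone version of the folding loop above, writing to an in-memory buffer so the continuation-line behaviour is easy to see; the column width and sample bytes are placeholders.
import io

def fold_line(line, cols=8, line_sep=b'\r\n'):
    out = io.BytesIO()
    if len(line) <= cols:
        out.write(line + line_sep)
    else:
        out.write(line[:cols] + line_sep)
        pos = cols
        while pos < len(line):
            end = min(len(line), pos + cols - 1)
            out.write(b' ' + line[pos:end] + line_sep)   # continuation lines start with a space
            pos = end
    return out.getvalue()

print(fold_line(b'abcdefghijklmnop'))            # b'abcdefgh\r\n ijklmno\r\n p\r\n'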
def hotp(key, counter, digits=6):
"""
These test vectors come from RFC-4226
(https://tools.ietf.org/html/rfc4226#page-32).
>>> key = b'12345678901234567890'
>>> for c in range(10):
... hotp(key, c)
'755224'
'287082'
'359152'
'969429'
'338314'
'254676'
'287922'
'162583'
'399871'
'520489'
"""
msg = struct.pack('>Q', counter)
hs = hmac.new(key, msg, hashlib.sha1).digest()
offset = six.indexbytes(hs, 19) & 0x0f
val = struct.unpack('>L', hs[offset:offset + 4])[0] & 0x7fffffff
return '{val:0{digits}d}'.format(val=val % 10 ** digits, digits=digits)
|
These test vectors come from RFC-4226
(https://tools.ietf.org/html/rfc4226#page-32).
>>> key = b'12345678901234567890'
>>> for c in range(10):
... hotp(key, c)
'755224'
'287082'
'359152'
'969429'
'338314'
'254676'
'287922'
'162583'
'399871'
'520489'
|
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in parse_http_list(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
|
Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
|
def as_dict(self):
"""
Returns the model as a dict
"""
if not self._is_valid:
self.validate()
from .converters import to_dict
return to_dict(self)
|
Returns the model as a dict
|
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
zforce
PURPOSE:
evaluate vertical force K_z (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
K_z (R,z)
HISTORY:
2012-12-27 - Written - Bovy (IAS)
"""
if self._new:
#if R > 6.: return self._kp(R,z)
if nu.fabs(z) < 10.**-6.:
return 0.
kalphamax1= R
ks1= kalphamax1*0.5*(self._glx+1.)
weights1= kalphamax1*self._glw
sqrtp= nu.sqrt(z**2.+(ks1+R)**2.)
sqrtm= nu.sqrt(z**2.+(ks1-R)**2.)
evalInt1= ks1**2.*special.k0(ks1*self._alpha)*(1./sqrtp+1./sqrtm)/nu.sqrt(R**2.+z**2.-ks1**2.+sqrtp*sqrtm)/(sqrtp+sqrtm)
if R < 10.:
kalphamax2= 10.
ks2= (kalphamax2-kalphamax1)*0.5*(self._glx+1.)+kalphamax1
weights2= (kalphamax2-kalphamax1)*self._glw
sqrtp= nu.sqrt(z**2.+(ks2+R)**2.)
sqrtm= nu.sqrt(z**2.+(ks2-R)**2.)
evalInt2= ks2**2.*special.k0(ks2*self._alpha)*(1./sqrtp+1./sqrtm)/nu.sqrt(R**2.+z**2.-ks2**2.+sqrtp*sqrtm)/(sqrtp+sqrtm)
return -z*2.*nu.sqrt(2.)*self._alpha*nu.sum(weights1*evalInt1
+weights2*evalInt2)
else:
return -z*2.*nu.sqrt(2.)*self._alpha*nu.sum(weights1*evalInt1)
raise NotImplementedError("Not new=True not implemented for RazorThinExponentialDiskPotential")
|
NAME:
zforce
PURPOSE:
evaluate vertical force K_z (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
K_z (R,z)
HISTORY:
2012-12-27 - Written - Bovy (IAS)
|
def reprioritize(self, stream_id,
depends_on=None, weight=16, exclusive=False):
"""
Update the priority status of an existing stream.
:param stream_id: The stream ID of the stream being updated.
:param depends_on: (optional) The ID of the stream that the stream now
depends on. If ``None``, will be moved to depend on stream 0.
:param weight: (optional) The new weight to give the stream. Defaults
to 16.
:param exclusive: (optional) Whether this stream should now be an
exclusive dependency of the new parent.
"""
self._priority.reprioritize(stream_id, depends_on, weight, exclusive)
|
Update the priority status of an existing stream.
:param stream_id: The stream ID of the stream being updated.
:param depends_on: (optional) The ID of the stream that the stream now
depends on. If ``None``, will be moved to depend on stream 0.
:param weight: (optional) The new weight to give the stream. Defaults
to 16.
:param exclusive: (optional) Whether this stream should now be an
exclusive dependency of the new parent.
|
def pyside_load_ui(uifile, base_instance=None):
"""Provide PyQt4.uic.loadUi functionality to PySide
Args:
uifile (str): Absolute path to .ui file
base_instance (QWidget): The widget into which UI widgets are loaded
Note:
pysideuic is required for this to work with PySide.
This seems to work correctly in Maya as well as outside of it as
opposed to other implementations which involve overriding QUiLoader.
Returns:
QWidget: the base instance
"""
form_class, base_class = load_ui_type(uifile)
if not base_instance:
typeName = form_class.__name__
finalType = type(typeName,
(form_class, base_class),
{})
base_instance = finalType()
else:
if not isinstance(base_instance, base_class):
raise RuntimeError(
'The base_instance passed to loadUi does not inherit from'
' needed base type (%s)' % type(base_class))
typeName = type(base_instance).__name__
base_instance.__class__ = type(typeName,
(form_class, type(base_instance)),
{})
base_instance.setupUi(base_instance)
return base_instance
|
Provide PyQt4.uic.loadUi functionality to PySide
Args:
uifile (str): Absolute path to .ui file
base_instance (QWidget): The widget into which UI widgets are loaded
Note:
pysideuic is required for this to work with PySide.
This seems to work correctly in Maya as well as outside of it as
opposed to other implementations which involve overriding QUiLoader.
Returns:
QWidget: the base instance
|
def _convert_to_dict(self, setting):
'''
Converts a settings file into a dictionary, ignoring python defaults
@param setting: A loaded setting module
'''
the_dict = {}
set = dir(setting)
for key in set:
if key in self.ignore:
continue
value = getattr(setting, key)
the_dict[key] = value
return the_dict
|
Converts a settings file into a dictionary, ignoring python defaults
@param setting: A loaded setting module
|
def SearchFileNameTable(self, fileName):
"""
Search FileName table.
Find the show id for a given file name.
Parameters
----------
fileName : string
File name to look up in table.
Returns
----------
int or None
If a match is found in the database table the show id for this
entry is returned, otherwise this returns None.
"""
goodlogging.Log.Info("DB", "Looking up filename string '{0}' in database".format(fileName), verbosity=self.logVerbosity)
queryString = "SELECT ShowID FROM FileName WHERE FileName=?"
queryTuple = (fileName, )
result = self._ActionDatabase(queryString, queryTuple, error = False)
if result is None:
goodlogging.Log.Info("DB", "No match found in database for '{0}'".format(fileName), verbosity=self.logVerbosity)
return None
elif len(result) == 0:
return None
elif len(result) == 1:
goodlogging.Log.Info("DB", "Found file name match: {0}".format(result), verbosity=self.logVerbosity)
return result[0][0]
elif len(result) > 1:
goodlogging.Log.Fatal("DB", "Database corrupted - multiple matches found in database table for: {0}".format(result))
|
Search FileName table.
Find the show id for a given file name.
Parameters
----------
fileName : string
File name to look up in table.
Returns
----------
int or None
If a match is found in the database table the show id for this
entry is returned, otherwise this returns None.
|
def get_all_roles(path_prefix=None, region=None, key=None, keyid=None,
profile=None):
'''
Get and return all IAM role details, starting at the optional path.
.. versionadded:: 2016.3.0
CLI Example:
salt-call boto_iam.get_all_roles
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return None
_roles = conn.list_roles(path_prefix=path_prefix)
roles = _roles.list_roles_response.list_roles_result.roles
marker = getattr(
_roles.list_roles_response.list_roles_result, 'marker', None
)
while marker:
_roles = conn.list_roles(path_prefix=path_prefix, marker=marker)
roles = roles + _roles.list_roles_response.list_roles_result.roles
marker = getattr(
_roles.list_roles_response.list_roles_result, 'marker', None
)
return roles
|
Get and return all IAM role details, starting at the optional path.
.. versionadded:: 2016.3.0
CLI Example:
salt-call boto_iam.get_all_roles
|
def watchdog_handler(self):
"""Take care of threads if wachdog expires."""
_LOGGING.debug('%s Watchdog expired. Resetting connection.', self.name)
self.watchdog.stop()
self.reset_thrd.set()
|
Take care of threads if the watchdog expires.
|
def dropbox_factory(request):
""" expects the id of an existing dropbox and returns its instance"""
try:
return request.registry.settings['dropbox_container'].get_dropbox(request.matchdict['drop_id'])
except KeyError:
raise HTTPNotFound('no such dropbox')
|
expects the id of an existing dropbox and returns its instance
|
def version_option(version=None, *param_decls, **attrs):
"""Adds a ``--version`` option which immediately ends the program
printing out the version number. This is implemented as an eager
option that prints the version and exits the program in the callback.
:param version: the version number to show. If not provided Click
attempts an auto discovery via setuptools.
:param prog_name: the name of the program (defaults to autodetection)
:param message: custom message to show instead of the default
(``'%(prog)s, version %(version)s'``)
:param others: everything else is forwarded to :func:`option`.
"""
if version is None:
module = sys._getframe(1).f_globals.get('__name__')
def decorator(f):
prog_name = attrs.pop('prog_name', None)
message = attrs.pop('message', '%(prog)s, version %(version)s')
def callback(ctx, param, value):
if not value or ctx.resilient_parsing:
return
prog = prog_name
if prog is None:
prog = ctx.find_root().info_name
ver = version
if ver is None:
try:
import pkg_resources
except ImportError:
pass
else:
for dist in pkg_resources.working_set:
scripts = dist.get_entry_map().get('console_scripts') or {}
for script_name, entry_point in iteritems(scripts):
if entry_point.module_name == module:
ver = dist.version
break
if ver is None:
raise RuntimeError('Could not determine version')
echo(message % {
'prog': prog,
'version': ver,
}, color=ctx.color)
ctx.exit()
attrs.setdefault('is_flag', True)
attrs.setdefault('expose_value', False)
attrs.setdefault('is_eager', True)
attrs.setdefault('help', 'Show the version and exit.')
attrs['callback'] = callback
return option(*(param_decls or ('--version',)), **attrs)(f)
return decorator
|
Adds a ``--version`` option which immediately ends the program
printing out the version number. This is implemented as an eager
option that prints the version and exits the program in the callback.
:param version: the version number to show. If not provided Click
attempts an auto discovery via setuptools.
:param prog_name: the name of the program (defaults to autodetection)
:param message: custom message to show instead of the default
(``'%(prog)s, version %(version)s'``)
:param others: everything else is forwarded to :func:`option`.
|
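A short usage sketch of the decorator described above in an ordinary click program; the program name and version string are placeholders.
import click

@click.command()
@click.version_option(version='1.2.3', prog_name='mytool',
                      message='%(prog)s, version %(version)s')
def cli():
    """Example command that supports --version."""
    click.echo('running mytool')

if __name__ == '__main__':
    cli()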
def record_command(self, cmd, prg=''):
"""
record the command passed - this is usually the name of the program
being run or task being run
"""
self._log(self.logFileCommand , force_to_string(cmd), prg)
|
record the command passed - this is usually the name of the program
being run or task being run
|
def train(self, x_data, y_data):
"""Trains model on inputs
:param x_data: x matrix
:param y_data: y array
"""
x_train, _, y_train, _ = train_test_split(
x_data,
y_data,
test_size=0.67,
random_state=None
) # cross-split
self.model.fit(x_train, y_train)
|
Trains model on inputs
:param x_data: x matrix
:param y_data: y array
|
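A minimal sketch of the same cross-split-then-fit pattern, assuming a scikit-learn style estimator; the Model wrapper below is an illustrative assumption, not the original class.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

class Model:
    """Toy wrapper mirroring the train() method above."""
    def __init__(self):
        self.model = LinearRegression()

    def train(self, x_data, y_data):
        x_train, _, y_train, _ = train_test_split(
            x_data, y_data, test_size=0.67, random_state=None)
        self.model.fit(x_train, y_train)

x = np.arange(30, dtype=float).reshape(-1, 1)
y = 2 * x.ravel() + 1
m = Model()
m.train(x, y)
print(m.model.coef_, m.model.intercept_)         # approximately [2.0] and 1.0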
def log_det_jacobian(self, inputs):
"""Returns log det | dx / dy | = num_events * sum log | scale |."""
# Number of events is number of all elements excluding the batch and
# channel dimensions.
num_events = tf.reduce_prod(tf.shape(inputs)[1:-1])
log_det_jacobian = num_events * tf.reduce_sum(self.log_scale)
return log_det_jacobian
|
Returns log det | dx / dy | = num_events * sum log | scale |.
|
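A small NumPy check of the formula above, under the assumption of an elementwise per-channel scaling over an input of shape (batch, height, width, channels); the shapes and log scales are placeholders.
import numpy as np

log_scale = np.array([0.3, -0.1, 0.5])           # per-channel log scales
height, width = 4, 4                             # input shape (batch, 4, 4, 3)
num_events = height * width                      # all elements except batch and channel dims

log_det = num_events * log_scale.sum()           # formula from the function above

# Brute force: the Jacobian of an elementwise scaling is diagonal, so
# log|det| is the sum of log scales over every scaled element.
brute = np.sum(np.tile(log_scale, num_events))
print(np.isclose(log_det, brute))                # True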
def ping(host, timeout=False, return_boolean=False):
'''
Performs an ICMP ping to a host
.. versionchanged:: 2015.8.0
Added support for SunOS
CLI Example:
.. code-block:: bash
salt '*' network.ping archlinux.org
.. versionadded:: 2015.5.0
Return a True or False instead of ping output.
.. code-block:: bash
salt '*' network.ping archlinux.org return_boolean=True
Set the time to wait for a response in seconds.
.. code-block:: bash
salt '*' network.ping archlinux.org timeout=3
'''
if timeout:
if __grains__['kernel'] == 'SunOS':
cmd = 'ping -c 4 {1} {0}'.format(timeout, salt.utils.network.sanitize_host(host))
else:
cmd = 'ping -W {0} -c 4 {1}'.format(timeout, salt.utils.network.sanitize_host(host))
else:
cmd = 'ping -c 4 {0}'.format(salt.utils.network.sanitize_host(host))
if return_boolean:
ret = __salt__['cmd.run_all'](cmd)
if ret['retcode'] != 0:
return False
else:
return True
else:
return __salt__['cmd.run'](cmd)
|
Performs an ICMP ping to a host
.. versionchanged:: 2015.8.0
Added support for SunOS
CLI Example:
.. code-block:: bash
salt '*' network.ping archlinux.org
.. versionadded:: 2015.5.0
Return a True or False instead of ping output.
.. code-block:: bash
salt '*' network.ping archlinux.org return_boolean=True
Set the time to wait for a response in seconds.
.. code-block:: bash
salt '*' network.ping archlinux.org timeout=3
|
def set_exception(self, exception, override=False):
"""Set an exception for the TransferFuture
Implies the TransferFuture failed.
:param exception: The exception that caused the transfer to fail.
:param override: If True, override any existing state.
"""
with self._lock:
if not self.done() or override:
self._exception = exception
self._status = 'failed'
|
Set an exception for the TransferFuture
Implies the TransferFuture failed.
:param exception: The exception that caused the transfer to fail.
:param override: If True, override any existing state.
|
def download_uniprot_file(uniprot_id, filetype, outdir='', force_rerun=False):
"""Download a UniProt file for a UniProt ID/ACC
Args:
uniprot_id: Valid UniProt ID
filetype: txt, fasta, xml, rdf, or gff
outdir: Directory to download the file
Returns:
str: Absolute path to file
"""
my_file = '{}.{}'.format(uniprot_id, filetype)
url = 'http://www.uniprot.org/uniprot/{}'.format(my_file)
outfile = op.join(outdir, my_file)
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
urlretrieve(url, outfile)
return outfile
|
Download a UniProt file for a UniProt ID/ACC
Args:
uniprot_id: Valid UniProt ID
filetype: txt, fasta, xml, rdf, or gff
outdir: Directory to download the file
Returns:
str: Absolute path to file
|
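A short usage sketch of the function above; the accession and output directory below are placeholders, and the call performs a network download.
# Hypothetical call: fetch the FASTA record for one accession into /tmp.
path = download_uniprot_file('P00533', 'fasta', outdir='/tmp')
print(path)                                      # /tmp/P00533.fasta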
def _create_glance_db(self, root_db_pass, glance_db_pass):
"""Create the glance database"""
print red(env.host_string + ' | Create glance database')
sudo(
"mysql -uroot -p{0} -e \"CREATE DATABASE glance;\"".format(root_db_pass), shell=False)
sudo("mysql -uroot -p{0} -e \"GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '{1}';\"".format(
root_db_pass, glance_db_pass), shell=False)
sudo("mysql -uroot -p{0} -e \"GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '{1}';\"".format(
root_db_pass, glance_db_pass), shell=False)
|
Create the glance database
|
def server_deployment_mode(command, parser, cluster, cl_args):
'''
Check the server deployment mode for the given cluster;
if it is valid, return the valid set of args.
:param cluster:
:param cl_args:
:return:
'''
# Read the cluster definition, if not found
client_confs = cdefs.read_server_mode_cluster_definition(cluster, cl_args)
if not client_confs[cluster]:
return dict()
# tell the user which definition that we are using
if not cl_args.get('service_url', None):
Log.debug("Using cluster definition from file %s" \
% cliconfig.get_cluster_config_file(cluster))
else:
Log.debug("Using cluster service url %s" % cl_args['service_url'])
# if cluster definition exists, but service_url is not set, it is an error
if not 'service_url' in client_confs[cluster]:
config_file = cliconfig.get_cluster_config_file(cluster)
Log.error('No service url for %s cluster in %s', cluster, config_file)
sys.exit(1)
# get overrides
if 'config_property' in cl_args:
pass
try:
cluster_role_env = (cl_args['cluster'], cl_args['role'], cl_args['environ'])
config.server_mode_cluster_role_env(cluster_role_env, client_confs)
cluster_tuple = config.defaults_cluster_role_env(cluster_role_env)
except Exception as ex:
Log.error("Argument cluster/[role]/[env] is not correct: %s", str(ex))
sys.exit(1)
new_cl_args = dict()
new_cl_args['cluster'] = cluster_tuple[0]
new_cl_args['role'] = cluster_tuple[1]
new_cl_args['environ'] = cluster_tuple[2]
new_cl_args['service_url'] = client_confs[cluster]['service_url'].rstrip('/')
new_cl_args['deploy_mode'] = config.SERVER_MODE
cl_args.update(new_cl_args)
return cl_args
|
Check the server deployment mode for the given cluster;
if it is valid, return the valid set of args.
:param cluster:
:param cl_args:
:return:
|
def _pdb_frame(self):
"""Return current Pdb frame if there is any"""
if self._pdb_obj is not None and self._pdb_obj.curframe is not None:
return self._pdb_obj.curframe
|
Return current Pdb frame if there is any
|
def get_distance_matrix(self):
"""
Compute and return distances between each pair of points in the mesh.
This method requires that the coordinate arrays are one-dimensional.
NB: the depth of the points is ignored
.. warning::
Because of its quadratic space and time complexity this method
is safe to use for meshes of up to several thousand points. For
mesh of 10k points it needs ~800 Mb for just the resulting matrix
and four times that much for intermediate storage.
:returns:
Two-dimensional numpy array, square matrix of distances. The matrix
has zeros on main diagonal and positive distances in kilometers
on all other cells. That is, value in cell (3, 5) is the distance
between mesh's points 3 and 5 in km, and it is equal to value
in cell (5, 3).
Uses :func:`openquake.hazardlib.geo.geodetic.geodetic_distance`.
"""
assert self.lons.ndim == 1
distances = geodetic.geodetic_distance(
self.lons.reshape(self.lons.shape + (1, )),
self.lats.reshape(self.lats.shape + (1, )),
self.lons,
self.lats)
return numpy.matrix(distances, copy=False)
|
Compute and return distances between each pair of points in the mesh.
This method requires that the coordinate arrays are one-dimensional.
NB: the depth of the points is ignored
.. warning::
Because of its quadratic space and time complexity this method
is safe to use for meshes of up to several thousand points. For
a mesh of 10k points it needs ~800 MB for just the resulting matrix
and four times that much for intermediate storage.
:returns:
Two-dimensional numpy array, square matrix of distances. The matrix
has zeros on main diagonal and positive distances in kilometers
on all other cells. That is, value in cell (3, 5) is the distance
between mesh's points 3 and 5 in km, and it is equal to value
in cell (5, 3).
Uses :func:`openquake.hazardlib.geo.geodetic.geodetic_distance`.
|
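To make the broadcasting trick concrete, here is a small standalone sketch that builds the same kind of square distance matrix with a plain haversine formula; it stands in for openquake's geodetic_distance and is not the original implementation.
import numpy as np

def haversine_km(lon1, lat1, lon2, lat2, radius=6371.0):
    lon1, lat1, lon2, lat2 = map(np.radians, (lon1, lat1, lon2, lat2))
    a = (np.sin((lat2 - lat1) / 2.0) ** 2
         + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2.0) ** 2)
    return 2.0 * radius * np.arcsin(np.sqrt(a))

lons = np.array([0.0, 0.0, 1.0])
lats = np.array([0.0, 1.0, 1.0])
# Reshaping to (n, 1) against (n,) broadcasts into an (n, n) matrix,
# exactly as get_distance_matrix() does above.
dist = haversine_km(lons.reshape(-1, 1), lats.reshape(-1, 1), lons, lats)
print(np.round(dist, 1))                         # zero diagonal, symmetric distances in km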
def plot(self, data=None, **kwargs):
"""
Plot the data
Parameters
----------
data : numpy array, pandas dataframe or list of arrays/dfs
The data to plot. If no data is passed, the xform_data from
the DataGeometry object will be returned.
kwargs : keyword arguments
Any keyword arguments supported by `hypertools.plot` are also supported
by this method
Returns
----------
geo : hypertools.DataGeometry
A new data geometry object
"""
# import plot here to avoid circular imports
from .plot.plot import plot as plotter
if data is None:
d = copy.copy(self.data)
transform = copy.copy(self.xform_data)
if any([k in kwargs for k in ['reduce', 'align', 'normalize',
'semantic', 'vectorizer', 'corpus']]):
d = copy.copy(self.data)
transform = None
else:
d = data
transform = None
# get kwargs and update with new kwargs
new_kwargs = copy.copy(self.kwargs)
update_kwargs = dict(transform=transform, reduce=self.reduce,
align=self.align, normalize=self.normalize,
semantic=self.semantic, vectorizer=self.vectorizer,
corpus=self.corpus)
new_kwargs.update(update_kwargs)
for key in kwargs:
new_kwargs.update({key : kwargs[key]})
return plotter(d, **new_kwargs)
|
Plot the data
Parameters
----------
data : numpy array, pandas dataframe or list of arrays/dfs
The data to plot. If no data is passed, the xform_data from
the DataGeometry object will be returned.
kwargs : keyword arguments
Any keyword arguments supported by `hypertools.plot` are also supported
by this method
Returns
----------
geo : hypertools.DataGeometry
A new data geometry object
|
def retrieve_console_log(self, filename=None, dir=None):
"""Retrieves the application console log (standard out and error)
files for this PE and saves them as a plain text file.
An existing file with the same name will be overwritten.
Args:
filename (str): name of the created file. Defaults to `pe_<id>_<timestamp>.stdouterr` where `id` is the PE identifier and `timestamp` is the number of seconds since the Unix epoch, for example ``pe_83_1511995995.trace``.
dir (str): a valid directory in which to save the file. Defaults to the current directory.
Returns:
str: the path to the created file, or None if retrieving a job's logs is not supported in the version of streams to which the job is submitted.
.. versionadded:: 1.9
"""
if hasattr(self, "consoleLog") and self.consoleLog is not None:
logger.debug("Retrieving PE console log: " + self.consoleLog)
if not filename:
filename = _file_name('pe', self.id, '.stdouterr')
return self.rest_client._retrieve_file(self.consoleLog, filename, dir, 'text/plain')
else:
return None
|
Retrieves the application console log (standard out and error)
files for this PE and saves them as a plain text file.
An existing file with the same name will be overwritten.
Args:
filename (str): name of the created file. Defaults to `pe_<id>_<timestamp>.stdouterr` where `id` is the PE identifier and `timestamp` is the number of seconds since the Unix epoch, for example ``pe_83_1511995995.trace``.
dir (str): a valid directory in which to save the file. Defaults to the current directory.
Returns:
str: the path to the created file, or None if retrieving a job's logs is not supported in the version of streams to which the job is submitted.
.. versionadded:: 1.9
|
def kill_tweens(self, obj = None):
"""Stop tweening an object, without completing the motion or firing the
on_complete"""
if obj is not None:
try:
del self.current_tweens[obj]
except:
pass
else:
self.current_tweens = collections.defaultdict(set)
|
Stop tweening an object, without completing the motion or firing the
on_complete
|
def create_gce_image(zone,
project,
instance_name,
name,
description):
"""
Shuts down the instance and creates an image from the disk.
Assumes that the disk name is the same as the instance_name (this is the
default behavior for boot disks on GCE).
"""
disk_name = instance_name
try:
down_gce(instance_name=instance_name, project=project, zone=zone)
except HttpError as e:
if e.resp.status == 404:
log_yellow("the instance {} is already down".format(instance_name))
else:
raise e
body = {
"rawDisk": {},
"name": name,
"sourceDisk": "projects/{}/zones/{}/disks/{}".format(
project, zone, disk_name
),
"description": description
}
compute = _get_gce_compute()
gce_wait_until_done(
compute.images().insert(project=project, body=body).execute()
)
return name
|
Shuts down the instance and creates an image from the disk.
Assumes that the disk name is the same as the instance_name (this is the
default behavior for boot disks on GCE).
|
def _nodes(self):
"""
Returns the list of nodes present in the network
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['A', 'B', 'C'])
>>> sorted(dbn._nodes())
['B', 'A', 'C']
"""
return list(set([node for node, timeslice in
super(DynamicBayesianNetwork, self).nodes()]))
|
Returns the list of nodes present in the network
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['A', 'B', 'C'])
>>> sorted(dbn._nodes())
['B', 'A', 'C']
|
def get_port_profile_status_input_port_profile_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
input = ET.SubElement(get_port_profile_status, "input")
port_profile_status = ET.SubElement(input, "port-profile-status")
port_profile_status.text = kwargs.pop('port_profile_status')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def initialiseDevice(self):
"""
performs initialisation of the device
:param batchSize: the number of samples that each provideData call should yield
:return:
"""
logger.debug("Initialising device")
self.getInterruptStatus()
self.setAccelerometerSensitivity(self._accelerationFactor * 32768.0)
self.setGyroSensitivity(self._gyroFactor * 32768.0)
self.setSampleRate(self.fs)
for loop in self.ZeroRegister:
self.i2c_io.write(self.MPU6050_ADDRESS, loop, 0)
# Sets clock source to gyro reference w/ PLL
self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_PWR_MGMT_1, 0b00000010)
# Controls frequency of wakeups in accel low power mode plus the sensor standby modes
self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_PWR_MGMT_2, 0x00)
# Enables any I2C master interrupt source to generate an interrupt
self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_INT_ENABLE, 0x01)
# enable the FIFO
self.enableFifo()
logger.debug("Initialised device")
|
performs initialisation of the device
:param batchSize: the number of samples that each provideData call should yield
:return:
|
def compute_header_hmac_hash(context):
"""Compute HMAC-SHA256 hash of header.
Used to prevent header tampering."""
return hmac.new(
hashlib.sha512(
b'\xff' * 8 +
hashlib.sha512(
context._.header.value.dynamic_header.master_seed.data +
context.transformed_key +
b'\x01'
).digest()
).digest(),
context._.header.data,
hashlib.sha256
).digest()
|
Compute HMAC-SHA256 hash of header.
Used to prevent header tampering.
|
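The same hash chain run standalone on dummy byte strings, to make the order of operations explicit; the seed, transformed key, and header bytes below are placeholders, not real KDBX data.
import hashlib
import hmac

master_seed = b'\x00' * 32                       # placeholder
transformed_key = b'\x11' * 32                   # placeholder
header_bytes = b'header-bytes'                   # placeholder

hmac_key = hashlib.sha512(
    b'\xff' * 8 +
    hashlib.sha512(master_seed + transformed_key + b'\x01').digest()
).digest()
digest = hmac.new(hmac_key, header_bytes, hashlib.sha256).digest()
print(digest.hex())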
def parse_description():
"""
Parse the description in the README file
pandoc --from=markdown --to=rst --output=README.rst README.md
CommandLine:
python -c "import setup; print(setup.parse_description())"
"""
from os.path import dirname, join, exists
readme_fpath = join(dirname(__file__), 'README.rst')
# This breaks on pip install, so check that it exists.
if exists(readme_fpath):
textlines = []
with open(readme_fpath, 'r') as f:
textlines = f.readlines()
text = ''.join(textlines).strip()
return text
return ''
|
Parse the description in the README file
pandoc --from=markdown --to=rst --output=README.rst README.md
CommandLine:
python -c "import setup; print(setup.parse_description())"
|
def ekbseg(handle, tabnam, cnames, decls):
"""
Start a new segment in an E-kernel.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekbseg_c.html
:param handle: File handle.
:type handle: int
:param tabnam: Table name.
:type tabnam: str
:param cnames: Names of columns.
:type cnames: list of str.
:param decls: Declarations of columns.
:type decls: list of str.
:return: Segment number.
:rtype: int
"""
handle = ctypes.c_int(handle)
tabnam = stypes.stringToCharP(tabnam)
ncols = ctypes.c_int(len(cnames))
cnmlen = ctypes.c_int(len(max(cnames, key=len)) + 1) # needs to be len(longest name) + 1, e.g. 'c1' -> 3, so ekbseg_c does not fail
cnames = stypes.listToCharArrayPtr(cnames)
declen = ctypes.c_int(len(max(decls, key=len)) + 1)
decls = stypes.listToCharArrayPtr(decls)
segno = ctypes.c_int()
libspice.ekbseg_c(handle, tabnam, ncols, cnmlen, cnames, declen, decls, ctypes.byref(segno))
return segno.value
|
Start a new segment in an E-kernel.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekbseg_c.html
:param handle: File handle.
:type handle: int
:param tabnam: Table name.
:type tabnam: str
:param cnames: Names of columns.
:type cnames: list of str.
:param decls: Declarations of columns.
:type decls: list of str.
:return: Segment number.
:rtype: int
|
def _to_ctfile(self):
"""Convert :class:`~ctfile.ctfile.CTfile` into `CTfile` formatted string.
:return: ``CTfile`` formatted string.
:rtype: :py:class:`str`.
"""
output = io.StringIO()
for key in self:
if key == 'HeaderBlock':
for line in self[key].values():
output.write(line)
output.write('\n')
elif key == 'Ctab':
ctab_str = self[key]._to_ctfile()
output.write(ctab_str)
else:
raise KeyError('Molfile object does not supposed to have any other information: "{}".'.format(key))
return output.getvalue()
|
Convert :class:`~ctfile.ctfile.CTfile` into `CTfile` formatted string.
:return: ``CTfile`` formatted string.
:rtype: :py:class:`str`.
|