def build_css_class(localized_fieldname, prefix=''):
"""
Returns a css class based on ``localized_fieldname`` which is easily
splittable and handles regionalized language codes.
Takes an optional ``prefix`` which is prepended to the returned string.
"""
bits = localized_fieldname.split('_')
css_class = ''
if len(bits) == 1:
css_class = str(localized_fieldname)
elif len(bits) == 2:
# Fieldname without underscore and short language code
# Examples:
# 'foo_de' --> 'foo-de',
# 'bar_en' --> 'bar-en'
css_class = '-'.join(bits)
elif len(bits) > 2:
# Try regionalized language code
# Examples:
# 'foo_es_ar' --> 'foo-es_ar',
# 'foo_bar_zh_tw' --> 'foo_bar-zh_tw'
css_class = _join_css_class(bits, 2)
if not css_class:
# Try short language code
# Examples:
# 'foo_bar_de' --> 'foo_bar-de',
# 'foo_bar_baz_de' --> 'foo_bar_baz-de'
css_class = _join_css_class(bits, 1)
return '%s-%s' % (prefix, css_class) if prefix else css_class
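# The function above relies on a `_join_css_class` helper that isn't shown.
# A minimal sketch consistent with the comments, assuming the set of known
# language codes is available (the LANGUAGES tuple below is hypothetical):
LANGUAGES = ('de', 'en', 'es_ar', 'zh_tw')  # hypothetical known language codes

def _join_css_class(bits, offset):
    # Produce 'fieldname-langcode' only when the trailing `offset` bits
    # form a known language code; otherwise signal failure with ''.
    code = '_'.join(bits[-offset:])
    if code in LANGUAGES:
        return '%s-%s' % ('_'.join(bits[:-offset]), code)
    return ''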
def connection_factory(self, endpoint, *args, **kwargs):
"""
Called to create a new connection with proper configuration.
Intended for internal use only.
"""
kwargs = self._make_connection_kwargs(endpoint, kwargs)
return self.connection_class.factory(endpoint, self.connect_timeout, *args, **kwargs)
def symbols():
"""Return a list of symbols."""
symbols = []
for line in symbols_stream():
symbols.append(line.decode('utf-8').strip())
return symbols
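# `symbols_stream` isn't shown; judging from the decode/strip above, it yields
# raw byte lines. A hypothetical stand-in for illustration:
def symbols_stream():
    # e.g. one ticker symbol per line, as bytes (the names are made up)
    for raw in (b"AAPL\n", b"MSFT\n", b"GOOG\n"):
        yield raw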
def languages(self):
"""
A list of ``Structure`` objects (id, name) describing the user's languages.
"""
languages = []
for language in self.cache['languages']:
language = Structure(
id=language['id'],
name=language['name']
)
languages.append(language)
return languages
def fetch(self):
"""
Fetch a CompositionSettingsInstance
:returns: Fetched CompositionSettingsInstance
:rtype: twilio.rest.video.v1.composition_settings.CompositionSettingsInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return CompositionSettingsInstance(self._version, payload)
def strip_transcript_versions(fasta, out_file):
"""
strip transcript versions from a FASTA file. these appear like this:
>ENST00000434970.2 cdna chromosome:GRCh38:14:22439007:22439015:1 etc
"""
if file_exists(out_file):
return out_file
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
with open(fasta) as in_handle:
for line in in_handle:
if line.startswith(">"):
out_handle.write(line.split(" ")[0].split(".")[0] + "\n")
else:
out_handle.write(line)
return out_file
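# The header rewrite keeps only the ID up to the first space and drops the
# trailing ".N" version; for example:
line = ">ENST00000434970.2 cdna chromosome:GRCh38:14:22439007:22439015:1"
print(line.split(" ")[0].split(".")[0])  # -> >ENST00000434970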
def _dihedral_affine(k:partial(uniform_int,0,7)):
"Randomly flip `x` image based on `k`."
x = -1 if k&1 else 1
y = -1 if k&2 else 1
if k&4: return [[0, x, 0.],
[y, 0, 0],
[0, 0, 1.]]
return [[x, 0, 0.],
[0, y, 0],
[0, 0, 1.]]
def check_minions(self,
expr,
tgt_type='glob',
delimiter=DEFAULT_TARGET_DELIM,
greedy=True):
'''
Check the passed regex against the available minions' public keys
stored for authentication. This should return a set of ids which
match the regex, this will then be used to parse the returns to
make sure everyone has checked back in.
'''
try:
if expr is None:
expr = ''
check_func = getattr(self, '_check_{0}_minions'.format(tgt_type), None)
if tgt_type in ('grain',
'grain_pcre',
'pillar',
'pillar_pcre',
'pillar_exact',
'compound',
'compound_pillar_exact'):
_res = check_func(expr, delimiter, greedy)
else:
_res = check_func(expr, greedy)
_res['ssh_minions'] = False
if self.opts.get('enable_ssh_minions', False) is True and isinstance(expr, six.string_types):
roster = salt.roster.Roster(self.opts, self.opts.get('roster', 'flat'))
ssh_minions = roster.targets(expr, tgt_type)
if ssh_minions:
_res['minions'].extend(ssh_minions)
_res['ssh_minions'] = True
except Exception:
log.exception(
'Failed matching available minions with %s pattern: %s',
tgt_type, expr)
_res = {'minions': [], 'missing': []}
return _res
def bilinear(x, W, y, input_size, seq_len, batch_size, num_outputs=1, bias_x=False, bias_y=False):
"""Do xWy
Parameters
----------
x : NDArray
(input_size x seq_len) x batch_size
W : NDArray
(num_outputs x ny) x nx
y : NDArray
(input_size x seq_len) x batch_size
input_size : int
input dimension
seq_len : int
sequence length
batch_size : int
batch size
num_outputs : int
number of outputs
bias_x : bool
whether concat bias vector to input x
bias_y : bool
whether concat bias vector to input y
Returns
-------
output : NDArray
[seq_len_y x seq_len_x if num_outputs == 1 else seq_len_y x num_outputs x seq_len_x] x batch_size
"""
if bias_x:
x = nd.concat(x, nd.ones((1, seq_len, batch_size)), dim=0)
if bias_y:
y = nd.concat(y, nd.ones((1, seq_len, batch_size)), dim=0)
nx, ny = input_size + bias_x, input_size + bias_y
# W: (num_outputs x ny) x nx
lin = nd.dot(W, x)
if num_outputs > 1:
lin = reshape_fortran(lin, (ny, num_outputs * seq_len, batch_size))
y = y.transpose([2, 1, 0]) # May cause performance issues
lin = lin.transpose([2, 1, 0])
blin = nd.batch_dot(lin, y, transpose_b=True)
blin = blin.transpose([2, 1, 0])
if num_outputs > 1:
blin = reshape_fortran(blin, (seq_len, num_outputs, seq_len, batch_size))
return blin
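# `reshape_fortran` isn't defined in this snippet. A plausible sketch: emulate
# a column-major (Fortran-order) reshape with row-major primitives by
# transposing, reshaping to the reversed target shape, and transposing back.
# This identity holds for MXNet NDArrays and numpy arrays alike:
def reshape_fortran(x, shape):
    # x.T reverses the axis order, so a C-order reshape of it to the
    # reversed shape is equivalent to an F-order reshape of x.
    return x.T.reshape(tuple(reversed(shape))).T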
def add_page(self, pattern, classname):
""" Add a new page to the web application. Only available after that the Plugin Manager is loaded """
if not self._loaded:
raise PluginManagerNotLoadedException()
self._app.add_mapping(pattern, classname)
def _request_delete(self, path, params=None, url=BASE_URL):
"""Perform a HTTP DELETE request."""
url = urljoin(url, path)
headers = self._get_request_headers()
response = requests.delete(
url, params=params, headers=headers, timeout=DEFAULT_TIMEOUT)
response.raise_for_status()
if response.status_code == 200:
return response.json()
def copy(self):
"""Return a shallow copy of a RangeSet."""
cpy = self.__class__()
cpy._autostep = self._autostep
cpy.padding = self.padding
cpy.update(self)
return cpy
def remove_objects(self, bucket_name, objects_iter):
"""
Removes multiple objects from a bucket.
:param bucket_name: Bucket from which to remove objects
:param objects_iter: A list, tuple or iterator that provides
object names to delete.
:return: An iterator of MultiDeleteError instances for each
object that had a delete error.
"""
is_valid_bucket_name(bucket_name)
if isinstance(objects_iter, basestring):
raise TypeError(
'objects_iter cannot be `str` or `bytes` instance. It must be '
'a list, tuple or iterator of object names'
)
# turn list like objects into an iterator.
objects_iter = itertools.chain(objects_iter)
obj_batch = []
exit_loop = False
while not exit_loop:
try:
object_name = next(objects_iter)
is_non_empty_string(object_name)
except StopIteration:
exit_loop = True
if not exit_loop:
obj_batch.append(object_name)
# if we have 1000 items in the batch, or we have to exit
# the loop, we have to make a request to delete objects.
if len(obj_batch) == 1000 or (exit_loop and len(obj_batch) > 0):
# send request and parse response
errs_result = self._process_remove_objects_batch(
bucket_name, obj_batch
)
# return the delete errors.
for err_result in errs_result:
yield err_result
# clear batch for next set of items
obj_batch = []
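# Typical usage (bucket and object names are made up). Note that the method
# is a generator, so the batched delete requests are only sent as the
# returned iterator is consumed:
from minio import Minio  # assumed to be the library this snippet comes from

client = Minio('play.min.io', access_key='...', secret_key='...')  # placeholder credentials
for error in client.remove_objects('my-bucket', ['obj-1', 'obj-2', 'obj-3']):
    print('deletion error: {}'.format(error))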
def get_collection(cls):
"""Return a reference to the database collection for the class"""
# By default the collection returned will be the published collection,
# however if the `draft` flag has been set against the global context
# (e.g `g`) then the collection returned will contain draft documents.
if g.get('draft'):
return getattr(
cls.get_db(),
'{collection}_draft'.format(collection=cls._collection)
)
return getattr(cls.get_db(), cls._collection)
def _dep_map(self):
"""
A map of extra to its list of (direct) requirements
for this distribution, including the null extra.
"""
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._filter_extras(self._build_dep_map())
return self.__dep_map
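# The try/except AttributeError above is a lazy-cache idiom: compute the map
# once, store it on the instance, and serve the cached attribute afterwards.
# A generic, self-contained sketch of the same pattern (class and helper
# names are hypothetical):
class LazyDeps(object):
    def _compute_dep_map(self):
        return {None: ['base-requirement']}  # stand-in for the real work

    @property
    def dep_map(self):
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dep_map()
            return self.__dep_map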
def upload(self, f):
"""Upload a file to the Puush account.
Parameters:
* f: The file. Either a path to a file or a file-like object.
"""
if hasattr(f, 'read'):
needs_closing = False
else:
f = open(f, 'rb')
needs_closing = True
# The Puush server can't handle non-ASCII filenames.
# The official Puush desktop app actually substitutes ? for
# non-ISO-8859-1 characters, which helps some Unicode filenames,
# but some are still let through and encounter server errors.
# Try uploading a file named åäö.txt through the desktop app -
# it won't work. It's better for this Python API to do the substitution
# itself, in the way the desktop app probably intended.
filename = os.path.basename(f.name).encode('ascii', 'replace')
filename = filename.decode('ascii') # Requests doesn't like bytes
md5 = md5_file(f)
data = {
'z': 'meaningless',
'c': md5
}
files = {
'f': (filename, f)
}
res = self._api_request('up', data=data, files=files)[0]
if res[0] == '-1':
raise PuushError("File upload failed.")
elif res[0] == '-3':
raise PuushError("File upload failed: hash didn't match with "
"the file the server received.")
if needs_closing:
f.close()
_, url, id, size = res
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
return self._File(id, url, filename, now, 0)
def _distance_matrix_generic(x, centering, exponent=1):
"""Compute a centered distance matrix given a matrix."""
_check_valid_dcov_exponent(exponent)
x = _transform_to_2d(x)
# Calculate distance matrices
a = distances.pairwise_distances(x, exponent=exponent)
# Double centering
a = centering(a, out=a)
return a
def BE64(value, min_value=None, max_value=None, fuzzable=True, name=None, full_range=False):
'''64-bit field, Big endian encoded'''
return UInt64(value, min_value=min_value, max_value=max_value, encoder=ENC_INT_BE, fuzzable=fuzzable, name=name, full_range=full_range)
def save(self, filename, binary=True):
"""
Writes a surface mesh to disk.
Written file may be an ASCII or binary ply, stl, or vtk mesh file.
Parameters
----------
filename : str
Filename of mesh to be written. File type is inferred from
the extension of the filename unless overridden with
ftype. Can be one of the following types (.ply, .stl,
.vtk, .vtp)
binary : bool, optional
Writes the file as binary when True and ASCII when False.
Notes
-----
Binary files write much faster than ASCII and have a smaller
file size.
"""
filename = os.path.abspath(os.path.expanduser(filename))
file_mode = True
# Check filetype
ftype = filename[-3:]
if ftype == 'ply':
writer = vtk.vtkPLYWriter()
elif ftype == 'vtp':
writer = vtk.vtkXMLPolyDataWriter()
file_mode = False
if binary:
writer.SetDataModeToBinary()
else:
writer.SetDataModeToAscii()
elif ftype == 'stl':
writer = vtk.vtkSTLWriter()
elif ftype == 'vtk':
writer = vtk.vtkPolyDataWriter()
else:
raise Exception('Filetype must be either "ply", "vtp", "stl", or "vtk"')
writer.SetFileName(filename)
writer.SetInputData(self)
if binary and file_mode:
writer.SetFileTypeToBinary()
elif file_mode:
writer.SetFileTypeToASCII()
writer.Write()
def temporal_louvain(tnet, resolution=1, intersliceweight=1, n_iter=100, negativeedge='ignore', randomseed=None, consensus_threshold=0.5, temporal_consensus=True, njobs=1):
r"""
Louvain clustering for a temporal network.
Parameters
-----------
tnet : array, dict, TemporalNetwork
Input network
resolution : int
resolution of Louvain clustering ($\gamma$)
intersliceweight : int
interslice weight of multilayer clustering ($\omega$). Must be positive.
n_iter : int
Number of iterations to run louvain for
randomseed : int
Set for reproducibility
negativeedge : str
If there are negative edges, what should be done with them.
Options: 'ignore' (i.e. set to 0). More options to be added.
consensus_threshold : float (default: 0.5)
When creating the consensus matrix averaged over the iterations, keep values where the consensus reaches this fraction.
temporal_consensus : bool
If True, run a temporal consensus step on the resulting communities.
njobs : int
Number of parallel jobs.
Returns
-------
communities : array (node,time)
node,time array of community assignment
Notes
-------
References
----------
"""
tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
# Divide resolution by the number of timepoints
resolution = resolution / tnet.T
supranet = create_supraadjacency_matrix(
tnet, intersliceweight=intersliceweight)
if negativeedge == 'ignore':
supranet = supranet[supranet['weight'] > 0]
nxsupra = tnet_to_nx(supranet)
np.random.seed(randomseed)
while True:
comtmp = []
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(_run_louvain, nxsupra, resolution, tnet.N, tnet.T) for n in range(n_iter)}
for j in as_completed(job):
comtmp.append(j.result())
comtmp = np.stack(comtmp)
comtmp = comtmp.transpose()
comtmp = np.reshape(comtmp, [tnet.N, tnet.T, n_iter], order='F')
if n_iter == 1:
break
nxsupra_old = nxsupra
nxsupra = make_consensus_matrix(comtmp, consensus_threshold)
# If there was no consensus, there are no communities possible, return
if nxsupra is None:
break
if (nx.to_numpy_array(nxsupra, nodelist=np.arange(tnet.N*tnet.T)) == nx.to_numpy_array(nxsupra_old, nodelist=np.arange(tnet.N*tnet.T))).all():
break
communities = comtmp[:, :, 0]
if temporal_consensus:
communities = make_temporal_consensus(communities)
return communities
def get_size(self, value=None):
"""Return the size in bytes.
Args:
value: In structs, the user can assign other value instead of
this class' instance. Here, in such cases, ``self`` is a class
attribute of the struct.
Returns:
int: The size in bytes.
"""
if value is None:
if not self:
# If this is an empty list, then return zero
return 0
elif issubclass(type(self[0]), GenericType):
# If the type of the elements is GenericType, then returns the
# length of the list multiplied by the size of the GenericType.
return len(self) * self[0].get_size()
# Otherwise iter over the list accumulating the sizes.
return sum(item.get_size() for item in self)
return type(self)(value).get_size()
def kruskal_mst(graph):
"""Implements Kruskal's Algorithm for finding minimum spanning trees.
Assumes a non-empty, connected graph.
"""
edges_accepted = 0
ds = DisjointSet()
pq = PriorityQueue()
accepted_edges = []
label_lookup = {}
nodes = graph.get_all_node_ids()
num_vertices = len(nodes)
for n in nodes:
label = ds.add_set()
label_lookup[n] = label
edges = graph.get_all_edge_objects()
for e in edges:
pq.put(e['id'], e['cost'])
while edges_accepted < (num_vertices - 1):
edge_id = pq.get()
edge = graph.get_edge(edge_id)
node_a, node_b = edge['vertices']
label_a = label_lookup[node_a]
label_b = label_lookup[node_b]
a_set = ds.find(label_a)
b_set = ds.find(label_b)
if a_set != b_set:
edges_accepted += 1
accepted_edges.append(edge_id)
ds.union(a_set, b_set)
return accepted_edges
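# `DisjointSet` and `PriorityQueue` aren't shown. A minimal union-find sketch
# consistent with the calls above (add_set, find, union); PriorityQueue is
# assumed to pop the lowest-cost edge id first:
class DisjointSet(object):
    def __init__(self):
        self._parent = {}
        self._next_label = 0

    def add_set(self):
        # Create a new singleton set and return its label.
        label = self._next_label
        self._next_label += 1
        self._parent[label] = label
        return label

    def find(self, label):
        # Walk to the root, compressing the path as we go.
        while self._parent[label] != label:
            self._parent[label] = self._parent[self._parent[label]]
            label = self._parent[label]
        return label

    def union(self, a, b):
        # Merge the sets containing labels a and b.
        self._parent[self.find(a)] = self.find(b)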
def multicategory_scatterplot(output_directory, file_prefix, df,
x_series_index, y_series_index, category_series_index,
series_color, plot_title = '',
x_axis_label = '', y_axis_label = '',
min_predicted_ddg = None, max_predicted_ddg = None, min_experimental_ddg = None, max_experimental_ddg = None):
'''This function was adapted from the covariation benchmark.'''
# todo: Abstract this graph from the current usage (DDG measurements).
# todo: make the capped value for unquantified but classified measurements (e.g. DDG > 7 kcal/mol) parameterizable
# todo: add an option to identify outliers by standard deviations (over the set of errors |x - y|) rather than by fixed value
# todo: add an option to use geom_text_repel to avoid/reduce overlapping text
# todo: allow users to provide colors for the facets / categories
# Changeset
# todo: Change it to take in a pandas dataframe instead of the data_table_headers + data_table parameters.
# todo: Add exception if number of cases > 2 so the general case can be implemented once we have test data.
# todo: use one column as the category e.g. "PDB". assert that there is a maximum number of categories. Test with > 2 categories
# todo: remove all references to SNX27 and NHERF1 below and loop over the set of categories instead
color_map = {}
categories = list(df.iloc[:, category_series_index].unique())
num_categories = len(categories)
category_colors = get_spaced_plot_colors(num_categories)
for x in xrange(num_categories):
color_map[categories[x]] = '#' + category_colors[x]
df['CategorizationColor'] = df.apply(lambda r: color_map[r[category_series_index]], axis = 1)
categorization_color_index = len(df.columns.values) - 1
try: os.mkdir(output_directory)
except OSError: pass
assert(os.path.exists(output_directory))
df['Categorization'] = df.apply(lambda r: _determine_fraction_correct_class(r[x_series_index], r[y_series_index])[0], axis = 1)
categorization_index = len(df.columns.values) - 1
df['CategorizationShape'] = df.apply(lambda r: _determine_fraction_correct_class(r[x_series_index], r[y_series_index])[1], axis = 1)
categorization_shape_index = len(df.columns.values) - 1
# Create the R script
boxplot_r_script = '''
library(ggplot2)
library(gridExtra)
library(scales)
library(qualV)
# PNG generation
png('%(file_prefix)s.png', width=2560, height=2048, bg="white", res=600)
txtalpha <- 0.6
redtxtalpha <- 0.6
%(png_plot_commands)s
'''
xy_table_filename = '{0}.txt'.format(file_prefix)
xy_table_filepath = os.path.join(output_directory, xy_table_filename)
write_file(xy_table_filepath, df.to_csv(index=False))
single_plot_commands = '''
# Set the margins
par(mar=c(5, 5, 1, 1))
xy_data <- read.csv('%(xy_table_filename)s', header=T)
names(xy_data)[%(x_series_index)d + 1] <- "xvalues"
names(xy_data)[%(y_series_index)d + 1] <- "yvalues"
# coefs contains two values: (Intercept) and yvalues
coefs <- coef(lm(xvalues~yvalues, data = xy_data))
fitcoefs = coef(lm(xvalues~0 + yvalues, data = xy_data))
fitlmv_yvalues <- as.numeric(fitcoefs[1])
lmv_intercept <- as.numeric(coefs[1])
lmv_yvalues <- as.numeric(coefs[2])
lm(xy_data$yvalues~xy_data$xvalues)
xlabel <- "%(x_axis_label)s"
ylabel <- "%(y_axis_label)s"
plot_title <- "%(plot_title)s"
rvalue <- cor(xy_data$yvalues, xy_data$xvalues)
rvalue
xy_data
#3QDO = SNX27
#1G9O = NHERF1
valid_xy_data <- xy_data[which(xy_data$xvalues < 6.99),]
rvalue <- cor(valid_xy_data$yvalues, valid_xy_data$xvalues)
rvalue
valid_xy_data
valid_xy_data_NHERF1 <- xy_data[which(xy_data$xvalues < 6.99 & xy_data$PDB == '1G9O'),]
rvalue_NHERF1 <- cor(valid_xy_data_NHERF1$yvalues, valid_xy_data_NHERF1$xvalues)
rvalue_NHERF1
valid_xy_data_NHERF1
coefs_NHERF1 <- coef(lm(xvalues~yvalues, data = valid_xy_data_NHERF1))
lmv_intercept_NHERF1 <- as.numeric(coefs_NHERF1[1])
lmv_yvalues_NHERF1 <- as.numeric(coefs_NHERF1[2])
valid_xy_data_SNX27 <- xy_data[which(xy_data$xvalues < 6.99 & xy_data$PDB == '3QDO'),]
rvalue_SNX27 <- cor(valid_xy_data_SNX27$yvalues, valid_xy_data_SNX27$xvalues)
rvalue_SNX27
valid_xy_data_SNX27
coefs_SNX27 <- coef(lm(xvalues~yvalues, data = valid_xy_data_SNX27))
lmv_intercept_SNX27 <- as.numeric(coefs_SNX27[1])
lmv_yvalues_SNX27 <- as.numeric(coefs_SNX27[2])
lmv_intercept
lmv_yvalues
lmv_intercept_NHERF1
lmv_yvalues_NHERF1
lmv_intercept_SNX27
lmv_yvalues_SNX27
# Set graph limits and the position for the correlation value
minx <- min(0.0, min(xy_data$xvalues) - 0.1)
miny <- min(0.0, min(xy_data$yvalues) - 0.1)
maxx <- max(1.0, max(xy_data$xvalues) + 0.1)
maxy <- max(1.0, max(xy_data$yvalues) + 0.1)
'''
if min_predicted_ddg != None:
single_plot_commands += '''
miny <- min(miny - 0.2, %(min_predicted_ddg)f - 0.2)
'''
if max_predicted_ddg != None:
single_plot_commands += '''
maxy <- max(maxy + 0.5, %(max_predicted_ddg)f + 0.5)
miny <- -6
maxy <- 12.5
'''
if min_experimental_ddg != None:
single_plot_commands += '''
minx <- min(minx, %(min_experimental_ddg)f)
'''
if max_experimental_ddg != None:
single_plot_commands += '''
maxx <- max(maxx, %(max_experimental_ddg)f) + 0.2
'''
single_plot_commands += '''
xpos <- minx + 0.2
ypos <- maxy - 1
ypos_SNX27 <- ypos - 1
ypos_NHERF1 <- ypos_SNX27 - 1
lrt <- expression('R'^tst)
p <- qplot(main="", xvalues, yvalues, data=xy_data, xlab=xlabel, ylab=ylabel, shape = PDB, alpha = I(txtalpha)) +
geom_point(aes(color = PDB), alpha = 0.6) +
scale_colour_manual(name="", values = c("1G9O"="orange", "3QDO"="blue", "3"="red", "value3"="grey", "value2"="black")) +
labs(title = "%(plot_title)s") +
theme(plot.title = element_text(color = "#555555", size=rel(0.75))) +
# Correlation fit lines (global + one per facet
geom_abline(size = 0.125, color="black", intercept = lmv_intercept, slope = lmv_yvalues, alpha=0.2) +
geom_abline(size = 0.125, color="orange", intercept = lmv_intercept_NHERF1, slope = lmv_yvalues_NHERF1, alpha=0.4) +
geom_abline(size = 0.125, color="blue", intercept = lmv_intercept_SNX27, slope = lmv_yvalues_SNX27, alpha=0.4) +
geom_abline(slope=1, intercept=0, linetype=3, size=0.25, alpha=0.4) + # add a diagonal (dotted)
coord_cartesian(xlim = c(minx, maxx), ylim = c(miny, maxy)) + # set the graph limits
geom_text(hjust = 0, size=1.5, color="#000000", alpha=0.6, data=subset(xy_data, abs(yvalues - xvalues) > 2 & xvalues <= 0), aes(xvalues, yvalues+0.35, label=Origin_of_peptide), check_overlap = TRUE) + # label outliers
geom_text(hjust = 1, size=1.5, color="#000000", alpha=0.6, data=subset(xy_data, abs(yvalues - xvalues) > 2 & xvalues > 0), aes(xvalues, yvalues+0.35, label=Origin_of_peptide), check_overlap = TRUE) + # label outliers
geom_text(hjust=0, size=2, colour="black", aes(x = xpos, y = ypos, label = sprintf("R == %%0.2f", round(rvalue, digits = 4))), parse = TRUE) +
geom_text(hjust=0, size=2, colour="darkorange", aes(x = xpos, y = ypos_NHERF1, label = sprintf("R[NHERF] == %%0.2f", round(rvalue_NHERF1, digits = 4))), parse = TRUE) +
geom_text(hjust=0, size=2, colour="blue", aes(x = xpos, y = ypos_SNX27, label = sprintf("R[SNX27] == %%0.2f", round(rvalue_SNX27, digits = 4))), parse = TRUE) +
theme(legend.position = "none")
# geom_text(hjust=0, size=2, colour="black", aes(xpos, ypos, fontface="plain", family = "sans", label=paste(sprintf("R = %%0.2f%%s", round(rvalue, digits = 4), lrt), expression('R'[3]) ))) # add correlation text; hjust=0 sets left-alignment
#geom_text(hjust=0, size=3, colour="black", aes(xpos, ypos, fontface="plain", family = "sans", label=sprintf("R = %%0.2f", round(rvalue, digits = 4)))) # add correlation text; hjust=0 sets left-alignment
# geom_text(hjust=0, size=3, colour="black", aes(xpos, ypos, fontface="plain", family = "sans", label=sprintf("R = %%0.2f", round(rvalue, digits = 4)))) # add correlation text; hjust=0 sets left-alignment
# Plot graph
p
dev.off()
'''
#geom_point(aes(color = C)) +
#color = "%(series_color)s"
# Create the R script
plot_type = 'png'
png_plot_commands = single_plot_commands % locals()
boxplot_r_script = boxplot_r_script % locals()
r_script_filename = '{0}.R'.format(file_prefix)
r_script_filepath = os.path.join(output_directory, r_script_filename)
write_file(r_script_filepath, boxplot_r_script)
# Run the R script
run_r_script(r_script_filename, cwd = output_directory)
def send(self, to, subject, body, reply_to=None, **kwargs):
"""
Send email via AWS SES.
:returns string: message id
***
Composes an email message based on input data, and then immediately
queues the message for sending.
:type to: list of strings or string
:param to: The To: field(s) of the message.
:type subject: string
:param subject: The subject of the message: A short summary of the
content, which will appear in the recipient's inbox.
:type body: string
:param body: The message body.
:sender: email address of the sender. String or tuple(name, email)
:reply_to: email to reply to
**kwargs:
:type cc_addresses: list of strings or string
:param cc_addresses: The CC: field(s) of the message.
:type bcc_addresses: list of strings or string
:param bcc_addresses: The BCC: field(s) of the message.
:type format: string
:param format: The format of the message's body, must be either "text"
or "html".
:type return_path: string
:param return_path: The email address to which bounce notifications are
to be forwarded. If the message cannot be delivered
to the recipient, then an error message will be
returned from the recipient's ISP; this message
will then be forwarded to the email address
specified by the ReturnPath parameter.
:type text_body: string
:param text_body: The text body to send with this email.
:type html_body: string
:param html_body: The html body to send with this email.
"""
if not self.sender:
raise AttributeError("Sender email 'sender' or 'source' is not provided")
kwargs["to_addresses"] = to
kwargs["subject"] = subject
kwargs["body"] = body
kwargs["source"] = self._get_sender(self.sender)[0]
kwargs["reply_addresses"] = self._get_sender(reply_to or self.reply_to)[2]
response = self.ses.send_email(**kwargs)
return response["SendEmailResponse"]["SendEmailResult"]["MessageId"]
def command_clean(string, vargs):
"""
Remove characters that are not IPA valid from the given string,
and print the remaining string.
:param str string: the string to act upon
:param dict vargs: the command line arguments
"""
valid_chars, invalid_chars = remove_invalid_ipa_characters(
unicode_string=string,
return_invalid=True,
single_char_parsing=vargs["single_char_parsing"]
)
print(u"".join(valid_chars))
print_invalid_chars(invalid_chars, vargs)
def ls_command(
endpoint_plus_path,
recursive_depth_limit,
recursive,
long_output,
show_hidden,
filter_val,
):
"""
Executor for `globus ls`
"""
endpoint_id, path = endpoint_plus_path
# do autoactivation before the `ls` call so that recursive invocations
# won't do this repeatedly, and won't have to instantiate new clients
client = get_client()
autoactivate(client, endpoint_id, if_expires_in=60)
# create the query parameters to send to operation_ls
ls_params = {"show_hidden": int(show_hidden)}
if path:
ls_params["path"] = path
if filter_val:
# this char has special meaning in the LS API's filter clause
# can't be part of the pattern (but we don't support globbing across
# dir structures anyway)
if "/" in filter_val:
raise click.UsageError('--filter cannot contain "/"')
# format into a simple filter clause which operates on filenames
ls_params["filter"] = "name:{}".format(filter_val)
# get the `ls` result
if recursive:
# NOTE:
# --recursive and --filter have an interplay that some users may find
# surprising
# if we're asked to change or "improve" the behavior in the future, we
# could do so with "type:dir" or "type:file" filters added in, and
# potentially work out some viable behavior based on what people want
res = client.recursive_operation_ls(
endpoint_id, depth=recursive_depth_limit, **ls_params
)
else:
res = client.operation_ls(endpoint_id, **ls_params)
def cleaned_item_name(item):
return item["name"] + ("/" if item["type"] == "dir" else "")
# and then print it, per formatting rules
formatted_print(
res,
fields=[
("Permissions", "permissions"),
("User", "user"),
("Group", "group"),
("Size", "size"),
("Last Modified", "last_modified"),
("File Type", "type"),
("Filename", cleaned_item_name),
],
simple_text=(
None
if long_output or is_verbose() or not outformat_is_text()
else "\n".join(cleaned_item_name(x) for x in res)
),
json_converter=iterable_response_to_dict,
)
def _get_footer_size(file_obj):
"""Read the footer size in bytes, which is serialized as little endian."""
file_obj.seek(-8, 2)  # the 4-byte footer size starts 8 bytes from the end of the file
tup = struct.unpack(b"<i", file_obj.read(4))
return tup[0]
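# A minimal round-trip showing the layout this expects: a little-endian
# 4-byte footer size stored 8 bytes from the end of the file, followed here
# by a hypothetical 4-byte magic (as in Parquet-style footers):
import io
import struct

buf = io.BytesIO(b"payload" + struct.pack(b"<i", 42) + b"MAGC")
print(_get_footer_size(buf))  # -> 42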
def fit(self, X, y, n_iter=None):
"""w = w + α * δ * X"""
self.n_iter = self.n_iter if n_iter is None else n_iter
X = getattr(X, 'values', X).reshape(len(X), 1)
X_1 = self.homogenize(X)
for _ in range(self.n_iter):  # avoid shadowing the minibatch index below
for i in range(0, len(X), 10): # minibatch learning for numerical stability
batch = slice(i, min(i + 10, len(X)))
Xbatch, ybatch = X[batch, :], y[batch]
X_1_batch = X_1[batch, :]
self.W += (self.alpha / len(X) ** 1.5) * (
self.delta(Xbatch, ybatch).reshape((len(Xbatch), 1)).T.dot(X_1_batch))
return self
def set_line_width(self, width):
"""Sets the current line width within the cairo context.
The line width value specifies the diameter of a pen
that is circular in user space,
(though device-space pen may be an ellipse in general
due to scaling / shear / rotation of the CTM).
.. note::
When the description above refers to user space and CTM
it refers to the user space and CTM in effect
at the time of the stroking operation,
not the user space and CTM in effect
at the time of the call to :meth:`set_line_width`.
The simplest usage makes both of these spaces identical.
That is, if there is no change to the CTM
between a call to :meth:`set_line_width`
and the stroking operation,
then one can just pass user-space values to :meth:`set_line_width`
and ignore this note.
As with the other stroke parameters,
the current line width is examined by
:meth:`stroke`, :meth:`stroke_extents`, and :meth:`stroke_to_path`,
but does not have any effect during path construction.
The default line width value is 2.0.
:type width: float
:param width: The new line width.
"""
cairo.cairo_set_line_width(self._pointer, width)
self._check_status()
def _init_level_set(init_level_set, image_shape):
"""Auxiliary function for initializing level sets with a string.
If `init_level_set` is not a string, it is returned as is.
"""
if isinstance(init_level_set, str):
if init_level_set == 'checkerboard':
res = checkerboard_level_set(image_shape)
elif init_level_set == 'circle':
res = circle_level_set(image_shape)
else:
raise ValueError("`init_level_set` not in "
"['checkerboard', 'circle']")
else:
res = init_level_set
return res
def write_bytes(self, where, data, force=False):
"""
Write a concrete or symbolic (or mixed) buffer to memory
:param int where: address to write to
:param data: data to write
:type data: str or list
:param force: whether to ignore memory permissions
"""
mp = self.memory.map_containing(where)
# TODO (ehennenfent) - fast write can have some yet-unstudied unintended side effects.
# At the very least, using it in non-concrete mode will break the symbolic strcmp/strlen models. The 1024 byte
# minimum is intended to minimize the potential effects of this by ensuring that if there _are_ any other
# issues, they'll only crop up when we're doing very large writes, which are fairly uncommon.
can_write_raw = type(mp) is AnonMap and \
isinstance(data, (str, bytes)) and \
(mp.end - mp.start + 1) >= len(data) >= 1024 and \
not issymbolic(data) and \
self._concrete
if can_write_raw:
logger.debug("Using fast write")
offset = mp._get_offset(where)
if isinstance(data, str):
data = bytes(data.encode('utf-8'))
mp._data[offset:offset + len(data)] = data
self._publish('did_write_memory', where, data, 8 * len(data))
else:
for i in range(len(data)):
self.write_int(where + i, Operators.ORD(data[i]), 8, force)
def t_BIN(self, t):
r'(%[01]+)|([01]+[bB])' # A Binary integer
# Note 00B is a 0 binary, but
# 00Bh is a 12 in hex. So this pattern must come
# after HEXA
if t.value[0] == '%':
t.value = t.value[1:] # Remove initial %
else:
t.value = t.value[:-1] # Remove last 'b'
t.value = int(t.value, 2) # Convert to decimal
t.type = 'INTEGER'
return t
def save(name, data, rc_file='~/.odoorpcrc'):
"""Save the `data` session configuration under the name `name`
in the `rc_file` file.
>>> import odoorpc
>>> odoorpc.session.save(
... 'foo',
... {'type': 'ODOO', 'host': 'localhost', 'protocol': 'jsonrpc',
... 'port': 8069, 'timeout': 120, 'database': 'db_name',
... 'user': 'admin', 'passwd': 'password'}) # doctest: +SKIP
.. doctest::
:hide:
>>> import odoorpc
>>> session = '%s_session' % DB
>>> odoorpc.session.save(
... session,
... {'type': 'ODOO', 'host': HOST, 'protocol': PROTOCOL,
... 'port': PORT, 'timeout': 120, 'database': DB,
... 'user': USER, 'passwd': PWD})
"""
conf = ConfigParser()
conf.read([os.path.expanduser(rc_file)])
if not conf.has_section(name):
conf.add_section(name)
for key in data:
value = data[key]
conf.set(name, key, str(value))
with open(os.path.expanduser(rc_file), 'w') as file_:
os.chmod(os.path.expanduser(rc_file), stat.S_IREAD | stat.S_IWRITE)
conf.write(file_)
def add_vertex(self, vertex, **attr):
"""
Add vertex and update vertex attributes
"""
self.vertices[vertex] = []
if attr:
self.nodes[vertex] = attr
def add_chart(self, component):
"""Add a chart to the layout."""
if getattr(component, "name") != "Chart":
raise Exception("Component is not an instance of Chart")
self.charts.append(component)
def build_module(name, doc=None):
"""create and initialize an astroid Module node"""
node = nodes.Module(name, doc, pure_python=False)
node.package = False
node.parent = None
return node
def preorder(self, skip_seed=False):
"""
Return a generator that yields the nodes of the tree in preorder.
If skip_seed=True then the root node is not included.
"""
for node in self._tree.preorder_node_iter():
if skip_seed and node is self._tree.seed_node:
continue
yield node
def delete_volume(target, stop=True):
'''
Deletes a gluster volume
target
Volume to delete
stop : True
If ``True``, stop volume before delete
CLI Example:
.. code-block:: bash
salt '*' glusterfs.delete_volume <volume>
'''
volinfo = info()
if target not in volinfo:
log.error('Cannot delete non-existing volume %s', target)
return False
# Stop volume if requested to and it is running
running = (volinfo[target]['status'] == '1')
if not stop and running:
# Fail if volume is running if stop is not requested
log.error('Volume %s must be stopped before deletion', target)
return False
if running:
if not stop_volume(target, force=True):
return False
cmd = 'volume delete {0}'.format(target)
return _gluster(cmd)
def set_setting(name, value):
'''
Set the configuration for the named audit setting
Args:
name (str):
The name of the setting to configure
value (str):
The configuration for the named value. Valid options are:
- No Auditing
- Success
- Failure
- Success and Failure
Returns:
bool: True if successful
Raises:
KeyError: On invalid ``name`` or ``value``
CommandExecutionError: If an error is encountered modifying the setting
Usage:
.. code-block:: python
import salt.utils.win_lgpo_auditpol
# Set the state of the "Credential Validation" setting to Success and
# Failure
salt.utils.win_lgpo_auditpol.set_setting(name='Credential Validation',
value='Success and Failure')
# Set the state of the "Credential Validation" setting to No Auditing
salt.utils.win_lgpo_auditpol.set_setting(name='Credential Validation',
value='No Auditing')
'''
# Input validation
if name.lower() not in _get_valid_names():
raise KeyError('Invalid name: {0}'.format(name))
for setting in settings:
if value.lower() == setting.lower():
cmd = '/set /subcategory:"{0}" {1}'.format(name, settings[setting])
break
else:
raise KeyError('Invalid setting value: {0}'.format(value))
_auditpol_cmd(cmd)
return True
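# The module-level `settings` map isn't shown. Judging from the valid values
# listed in the docstring, it presumably maps each option to auditpol
# switches, along these (assumed) lines:
settings = {
    'No Auditing': '/success:disable /failure:disable',
    'Success': '/success:enable /failure:disable',
    'Failure': '/success:disable /failure:enable',
    'Success and Failure': '/success:enable /failure:enable',
}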
def _make_value(self, value):
"""
Constructs a _child_spec value from a native Python data type, or
an appropriate Asn1Value object
:param value:
A native Python value, or some child of Asn1Value
:return:
An object of type _child_spec
"""
if isinstance(value, self._child_spec):
new_value = value
elif issubclass(self._child_spec, Any):
if isinstance(value, Asn1Value):
new_value = value
else:
raise ValueError(unwrap(
'''
Can not set a native python value to %s where the
_child_spec is Any - value must be an instance of Asn1Value
''',
type_name(self)
))
elif issubclass(self._child_spec, Choice):
if not isinstance(value, Asn1Value):
raise ValueError(unwrap(
'''
Can not set a native python value to %s where the
_child_spec is the choice type %s - value must be an
instance of Asn1Value
''',
type_name(self),
self._child_spec.__name__
))
if not isinstance(value, self._child_spec):
wrapper = self._child_spec()
wrapper.validate(value.class_, value.tag, value.contents)
wrapper._parsed = value
value = wrapper
new_value = value
else:
return self._child_spec(value=value)
params = {}
if self._child_spec.explicit:
params['explicit'] = self._child_spec.explicit
if self._child_spec.implicit:
params['implicit'] = (self._child_spec.class_, self._child_spec.tag)
return _fix_tagging(new_value, params)
def create_application(self):
"""Create and return a new instance of tinman.application.Application"""
return application.Application(self.settings,
self.namespace.routes,
self.port)
def update_value_from_mapping(source_resource_attr_id, target_resource_attr_id, source_scenario_id, target_scenario_id, **kwargs):
"""
Using a resource attribute mapping, take the value from the source and apply
it to the target. Both source and target scenarios must be specified (and therefore
must exist).
"""
user_id = int(kwargs.get('user_id'))
rm = aliased(ResourceAttrMap, name='rm')
#Check the mapping exists.
mapping = db.DBSession.query(rm).filter(
or_(
and_(
rm.resource_attr_id_a == source_resource_attr_id,
rm.resource_attr_id_b == target_resource_attr_id
),
and_(
rm.resource_attr_id_a == target_resource_attr_id,
rm.resource_attr_id_b == source_resource_attr_id
)
)
).first()
if mapping is None:
raise ResourceNotFoundError("Mapping between %s and %s not found"%
(source_resource_attr_id,
target_resource_attr_id))
#check scenarios exist
s1 = _get_scenario(source_scenario_id, user_id)
s2 = _get_scenario(target_scenario_id, user_id)
rs = aliased(ResourceScenario, name='rs')
rs1 = db.DBSession.query(rs).filter(rs.resource_attr_id == source_resource_attr_id,
rs.scenario_id == source_scenario_id).first()
rs2 = db.DBSession.query(rs).filter(rs.resource_attr_id == target_resource_attr_id,
rs.scenario_id == target_scenario_id).first()
#3 possibilities worth considering:
#1: Both RS exist, so update the target RS
#2: Target RS does not exist, so create it with the dataset from RS1
#3: Source RS does not exist, so it must be removed from the target scenario if it exists
return_value = None  # Either return null or return a new or updated resource scenario
if rs1 is not None:
if rs2 is not None:
log.info("Destination Resource Scenario exists. Updating dataset ID")
rs2.dataset_id = rs1.dataset_id
else:
log.info("Destination has no data, so making a new Resource Scenario")
rs2 = ResourceScenario(resource_attr_id=target_resource_attr_id, scenario_id=target_scenario_id, dataset_id=rs1.dataset_id)
db.DBSession.add(rs2)
db.DBSession.flush()
return_value = rs2
else:
log.info("Source Resource Scenario does not exist. Deleting destination Resource Scenario")
if rs2 is not None:
db.DBSession.delete(rs2)
db.DBSession.flush()
return return_value
def _initialise(self, feed_type="linear"):
"""
Initialise the object by generating appropriate filenames,
opening associated file handles and inspecting the FITS axes
of these files.
"""
self._filenames = filenames = _create_filenames(self._filename_schema,
feed_type)
self._files = files = _open_fits_files(filenames)
self._axes = axes = _create_axes(filenames, files)
self._dim_indices = dim_indices = l_ax, m_ax, f_ax = tuple(
axes.iaxis(d) for d in self._fits_dims)
# Complain if we can't find required axes
for i, ax in zip(dim_indices, self._fits_dims):
if i == -1:
raise ValueError("'%s' axis not found!" % ax)
self._cube_extents = _cube_extents(axes, l_ax, m_ax, f_ax,
self._l_sign, self._m_sign)
self._shape = tuple(axes.naxis[d] for d in dim_indices) + (4,)
self._beam_freq_map = axes.grid[f_ax]
# Now describe our dimension sizes
self._dim_updates = [(n, axes.naxis[i]) for n, i
in zip(self._beam_dims, dim_indices)]
self._initialised = True
def uniqid(iface='wlan0', is_hex=True):
"""
Use the physical (MAC) address of a network interface, ``wlan0`` by default, as an identifier.
- Set ``is_hex`` to choose between hexadecimal and decimal output.
:param iface: network interface (default ``wlan0``)
:type iface: str
:param is_hex: ``True`` returns hexadecimal, ``False`` returns decimal
:type is_hex: bool
:return: MAC address, or empty if unavailable
:rtype: str
"""
# return str(appid.getnode()) if not is_hex else str(hex(appid.getnode()))[2:-1]
m_ = get_addr(iface)
m_ = ''.join(m_.split(':')) if m_ else m_
if m_ and not is_hex:
m_ = str(int(m_.upper(), 16))
return m_
def add_reader(self, fd, callback):
" Start watching the file descriptor for read availability. "
h = msvcrt.get_osfhandle(fd)
self._read_fds[h] = callback
def get_shape_str(tensors):
"""
Internally used by layer registry, to print shapes of inputs/outputs of layers.
Args:
tensors (list or tf.Tensor): a tensor or a list of tensors
Returns:
str: a string to describe the shape
"""
if isinstance(tensors, (list, tuple)):
for v in tensors:
assert isinstance(v, (tf.Tensor, tf.Variable)), "Not a tensor: {}".format(type(v))
shape_str = ",".join(
map(lambda x: str(x.get_shape().as_list()), tensors))
else:
assert isinstance(tensors, (tf.Tensor, tf.Variable)), "Not a tensor: {}".format(type(tensors))
shape_str = str(tensors.get_shape().as_list())
return shape_str
def get_parameter(self):
"""Obtain the parameter object from the current widget state.
:returns: A BooleanParameter from the current state of widget
"""
self._parameter.value = self._input.value()
return self._parameter
def refreshUi( self ):
"""
Refreshes the interface based on the current settings.
"""
widget = self.uiContentsTAB.currentWidget()
is_content = isinstance(widget, QWebView)
if is_content:
self._currentContentsIndex = self.uiContentsTAB.currentIndex()
history = widget.page().history()
else:
history = None
self.uiBackACT.setEnabled(is_content and history.canGoBack())
self.uiForwardACT.setEnabled(is_content and history.canGoForward())
self.uiHomeACT.setEnabled(is_content)
self.uiNewTabACT.setEnabled(is_content)
self.uiCopyTextACT.setEnabled(is_content)
self.uiCloseTabACT.setEnabled(is_content and
self.uiContentsTAB.count() > 2)
for i in range(1, self.uiContentsTAB.count()):
widget = self.uiContentsTAB.widget(i)
self.uiContentsTAB.setTabText(i, widget.title())
def serialize_to_file(obj, file_name, append=False):
"""Pickle obj to file_name."""
logging.info("Serializing to file %s.", file_name)
with tf.gfile.Open(file_name, "a+" if append else "wb") as output_file:
pickle.dump(obj, output_file)
logging.info("Done serializing to file %s.", file_name)
def NormalizePath(path, sep="/"):
"""A sane implementation of os.path.normpath.
The standard implementation treats leading / and // as different leading to
incorrect normal forms.
NOTE: It's ok to use a relative path here (without leading /) but any /../ will
still be removed anchoring the path at the top level (e.g. foo/../../../../bar
=> bar).
Args:
path: The path to normalize.
sep: Separator used.
Returns:
A normalized path. In this context normalized means that all input paths
that would result in the system opening the same physical file will produce
the same normalized path.
"""
if not path:
return sep
path = SmartUnicode(path)
path_list = path.split(sep)
# This is a relative path and the first element is . or ..
if path_list[0] in [".", "..", ""]:
path_list.pop(0)
# Deliberately begin at index 1 to preserve a single leading /
i = 0
while True:
list_len = len(path_list)
# We begin at the last known good position so we never iterate over path
# elements which are already examined
for i in range(i, len(path_list)):
# Remove /./ form
if path_list[i] == "." or not path_list[i]:
path_list.pop(i)
break
# Remove /../ form
elif path_list[i] == "..":
path_list.pop(i)
# Anchor at the top level
if (i == 1 and path_list[0]) or i > 1:
i -= 1
path_list.pop(i)
break
# If we didn't alter the path so far we can quit
if len(path_list) == list_len:
return sep + sep.join(path_list)
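# A few example inputs and their normal forms (SmartUnicode is stubbed here
# as plain str for illustration):
SmartUnicode = str

print(NormalizePath("foo/../../../../bar"))  # -> /bar
print(NormalizePath("/a/./b//c"))            # -> /a/b/c
print(NormalizePath("//a/b"))                # -> /a/b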
def __track_vars(self, command_result):
""" Check if there are any tracked variable inside the result. And keep them for future use.
:param command_result: command result tot check
:return:
"""
command_env = command_result.environment()
for var_name in self.tracked_vars():
if var_name in command_env.keys():
self.__vars[var_name] = command_env[var_name]
def unicode_wrapper(self, property, default=ugettext('Untitled')):
"""
Wrapper to allow for easy unicode representation of an object by
the specified property. If this wrapper is not able to find the
right translation of the specified property, it will return the
default value instead.
Example::
def __unicode__(self):
return unicode_wrapper('name', default='Unnamed')
"""
# TODO: Test coverage!
try:
value = getattr(self, property)
except ValueError:
logger.warn(
u'ValueError rendering unicode for %s object.',
self._meta.object_name
)
value = None
if not value:
value = default
return value
def print_markdown(data, title=None):
"""Print data in GitHub-flavoured Markdown format for issues etc.
data (dict or list of tuples): Label/value pairs.
title (unicode or None): Title, will be rendered as headline 2.
"""
def excl_value(value):
# contains path, i.e. personal info
return isinstance(value, basestring_) and Path(value).exists()
if isinstance(data, dict):
data = list(data.items())
markdown = ["* **{}:** {}".format(l, unicode_(v))
for l, v in data if not excl_value(v)]
if title:
print("\n## {}".format(title))
print('\n{}\n'.format('\n'.join(markdown)))
def suggest(self, index=None, body=None, params=None):
"""
The suggest feature suggests similar looking terms based on a provided
text by using a suggester.
`<http://elasticsearch.org/guide/reference/api/search/suggest/>`_
:arg index: A comma-separated list of index names to restrict the
operation; use `_all` or empty string to perform the operation on
all indices
:arg body: The request definition
:arg ignore_indices: When performed on multiple indices, allows to
ignore `missing` ones (default: none)
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg routing: Specific routing value
:arg source: The URL-encoded request definition (instead of using
request body)
"""
_, data = yield self.transport.perform_request('POST',
_make_path(index,
'_suggest'),
params=params, body=body)
raise gen.Return(data)
|
The suggest feature suggests similar looking terms based on a provided
text by using a suggester.
`<http://elasticsearch.org/guide/reference/api/search/suggest/>`_
:arg index: A comma-separated list of index names to restrict the
operation; use `_all` or empty string to perform the operation on
all indices
:arg body: The request definition
:arg ignore_indices: When performed on multiple indices, allows to
ignore `missing` ones (default: none)
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg routing: Specific routing value
:arg source: The URL-encoded request definition (instead of using
request body)
|
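A hedged usage sketch inside a Tornado coroutine; `es` (the async client instance) and the index name "my-index" are placeholders, not part of the original:
body = {"my-suggestion": {"text": "tset", "term": {"field": "title"}}}
result = yield es.suggest(index="my-index", body=body)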
def convert_value(v):
"""
By default, JSON is used to convert the data: any value that starts with '[' or '{' is loaded as JSON.
:param v:
:return:
"""
if v and isinstance(v, six.string_types):
v = v.strip()
if (v.startswith('{') and v.endswith('}')) or (v.startswith('[') and v.endswith(']')):
try:
return json.loads(v)
except Exception as e:
logger.error(e)
return v
|
By default, JSON is used to convert the data: any value that starts with '[' or '{' is loaded as JSON.
:param v:
:return:
|
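A short behavioural sketch of convert_value (hypothetical inputs):
convert_value('{"a": 1}')    # -> {'a': 1} (parsed as JSON)
convert_value('[1, 2, 3]')   # -> [1, 2, 3]
convert_value('plain text')  # -> 'plain text' (returned unchanged)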
def course_feature(catalog, soup):
"""Parses all the courses (AKA, the most important part).
"""
courses = {}
course_crns = {}
for course in soup.findAll('course'):
c = Course.from_soup_tag(course)
courses[str(c)] = c
catalog.courses = courses
logger.info('Catalog has %d courses' % len(courses))
|
Parses all the courses (AKA, the most important part).
|
def subscribe(self, channel_name):
"""Subscribe to a channel
:param channel_name: The name of the channel to subscribe to.
:type channel_name: str
:rtype : Channel
"""
data = {'channel': channel_name}
if channel_name.startswith('presence-'):
data['auth'] = self._generate_presence_key(
self.connection.socket_id,
self.key,
channel_name,
self.secret,
self.user_data
)
data['channel_data'] = json.dumps(self.user_data)
elif channel_name.startswith('private-'):
data['auth'] = self._generate_private_key(
self.connection.socket_id,
self.key,
channel_name,
self.secret
)
self.connection.send_event('pusher:subscribe', data)
self.channels[channel_name] = Channel(channel_name, self.connection)
return self.channels[channel_name]
|
Subscribe to a channel
:param channel_name: The name of the channel to subscribe to.
:type channel_name: str
:rtype : Channel
|
def _write(self, command, future):
"""Write a command to the socket
:param Command command: the Command data structure
"""
def on_written():
self._on_written(command, future)
try:
self._stream.write(command.command, callback=on_written)
except iostream.StreamClosedError as error:
future.set_exception(exceptions.ConnectionError(error))
except Exception as error:
LOGGER.exception('unhandled write failure - %r', error)
future.set_exception(exceptions.ConnectionError(error))
|
Write a command to the socket
:param Command command: the Command data structure
|
def _get_key_from_raw_synset(raw_synset):
"""Derives synset key in the form of `lemma.pos.sense_no` from the provided eurown.py Synset class,
Notes
-----
Internal function. Do not call directly.
Parameters
----------
raw_synset : eurown.Synset
Synset representation from which lemma, part-of-speech and sense is derived.
Returns
-------
string
Key of the synset in the form of `lemma.pos.sense_no`.
"""
pos = raw_synset.pos
literal = raw_synset.variants[0].literal
sense = "%02d"%raw_synset.variants[0].sense
return '.'.join([literal,pos,sense])
|
Derives synset key in the form of `lemma.pos.sense_no` from the provided eurown.py Synset class.
Notes
-----
Internal function. Do not call directly.
Parameters
----------
raw_synset : eurown.Synset
Synset representation from which lemma, part-of-speech and sense is derived.
Returns
-------
string
Key of the synset in the form of `lemma.pos.sense_no`.
|
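A minimal sketch of the key derivation, using SimpleNamespace stand-ins instead of a real eurown.Synset (the literal and sense values are made up):
from types import SimpleNamespace

# Stand-ins mimicking the eurown.Synset attributes used above
raw = SimpleNamespace(pos='n', variants=[SimpleNamespace(literal='koer', sense=1)])
_get_key_from_raw_synset(raw)  # -> 'koer.n.01'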
def API_GET(self, courseid, taskid, submissionid): # pylint: disable=arguments-differ
"""
List all the submissions that the connected user made. Returns list of the form
::
[
{
"id": "submission_id1",
"submitted_on": "date",
"status" : "done", #can be "done", "waiting", "error" (execution status of the task).
"grade": 0.0,
"input": {}, #the input data. File are base64 encoded.
"result" : "success" #only if status=done. Result of the execution.
"feedback": "" #only if status=done. the HTML global feedback for the task
"problems_feedback": #only if status=done. HTML feedback per problem. Some pid may be absent.
{
"pid1": "feedback1",
#...
}
}
#...
]
If you use the endpoint /api/v0/courses/the_course_id/tasks/the_task_id/submissions/submissionid,
this dict will contain one entry or the page will return 404 Not Found.
"""
with_input = "input" in web.input()
return _get_submissions(self.course_factory, self.submission_manager, self.user_manager, self.app._translations, courseid, taskid, with_input, submissionid)
|
List all the submissions that the connected user made. Returns list of the form
::
[
{
"id": "submission_id1",
"submitted_on": "date",
"status" : "done", #can be "done", "waiting", "error" (execution status of the task).
"grade": 0.0,
"input": {}, #the input data. File are base64 encoded.
"result" : "success" #only if status=done. Result of the execution.
"feedback": "" #only if status=done. the HTML global feedback for the task
"problems_feedback": #only if status=done. HTML feedback per problem. Some pid may be absent.
{
"pid1": "feedback1",
#...
}
}
#...
]
If you use the endpoint /api/v0/courses/the_course_id/tasks/the_task_id/submissions/submissionid,
this dict will contain one entry or the page will return 404 Not Found.
|
def get_possible_combos_for_transition(trans, model, self_model, is_external=False):
""" The function provides combos for a transition and its respective
:param trans:
:param model:
:param self_model:
:param is_external:
:return:
"""
from_state_combo = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_STRING)
from_outcome_combo = Gtk.ListStore(GObject.TYPE_STRING)
to_state_combo = Gtk.ListStore(GObject.TYPE_STRING)
to_outcome_combo = Gtk.ListStore(GObject.TYPE_STRING)
trans_dict = model.state.transitions
# get from state
if trans is None:
from_state = None
elif trans.from_state is not None:
from_state = model.state.states[trans.from_state]
else:
from_state = model.state if is_external else self_model.state
# collect all free from-outcome-combo and from_state which are still valid -> filter all outcome already in use
free_from_outcomes_dict = {}
for state in model.state.states.values():
from_o_combo = state.outcomes.values()
# print([o.outcome_id for o in from_o_combo], state_model.state.state_id)
for transition in trans_dict.values():
# print(transition, [[o.outcome_id == transition.from_outcome, transition.from_state == state_model.state.state_id] for o in from_o_combo])
from_o_combo = [o for o in from_o_combo if not (o.outcome_id == transition.from_outcome and
transition.from_state == state.state_id)]
# print([o.outcome_id for o in from_o_combo])
if len(from_o_combo) > 0:
free_from_outcomes_dict[state.state_id] = from_o_combo
# check if parent has start_state
if model.state.start_state_id is None:
free_from_outcomes_dict[model.state.state_id] = [None]
# for from-state-combo use all states with free outcomes and from_state
combined_states = [model.state] if is_external else [self_model.state]
combined_states.extend(model.state.states.values())
free_from_states = [state for state in combined_states if state.state_id in free_from_outcomes_dict]
if trans is None:
return None, None, None, None, free_from_states, free_from_outcomes_dict
def append_from_state_combo(possible_state):
if possible_state.state_id == self_model.state.state_id:
from_state_combo.append(['self.' + possible_state.state_id, possible_state.state_id])
elif is_external and from_state.state_id == model.state.state_id:
from_state_combo.append(['parent.' + possible_state.state_id, possible_state.state_id])
else:
from_state_combo.append([possible_state.name + '.' + possible_state.state_id, possible_state.state_id])
append_from_state_combo(from_state)
for state in free_from_states:
if from_state is not state:
append_from_state_combo(state)
# for from-outcome-combo collect all combos for actual transition
# -> actual outcome + free outcomes of actual from_state.state_id
if trans is not None:
if trans.from_outcome is None:
from_outcome_combo.append(["None"])
else:
outcome = from_state.outcomes[trans.from_outcome]
from_outcome_combo.append([outcome.name + "." + str(outcome.outcome_id)])
for outcome in free_from_outcomes_dict.get(from_state.state_id, []):
if outcome is None:
from_outcome_combo.append(["None"])
else:
from_outcome_combo.append([outcome.name + "." + str(outcome.outcome_id)])
# get to state
if trans.to_state == model.state.state_id:
to_state = model.state if is_external else self_model.state
else:
to_state = model.state.states[trans.to_state]
# for to-state-combo filter from_state -> first actual to_state + other optional states
def generate_to_state_combo(possible_state):
if possible_state.state_id == self_model.state.state_id:
to_state_combo.append(["self." + possible_state.state_id])
elif is_external and possible_state.state_id == model.state.state_id:
to_state_combo.append(['parent.' + possible_state.state_id])
else:
to_state_combo.append([possible_state.name + '.' + possible_state.state_id])
to_states = [model.state] if is_external else [self_model.state]
to_states.extend(model.state.states.values())
generate_to_state_combo(to_state)
for state in to_states:
if not to_state.state_id == state.state_id:
generate_to_state_combo(state)
# for to-outcome-combo use parent combos -> first actual outcome + other outcome
def append_to_outcome_combo(possible_outcome):
if is_external:
to_outcome_combo.append(['parent.' + possible_outcome.name + "." + str(possible_outcome.outcome_id)])
else:
to_outcome_combo.append(['self.' + possible_outcome.name + "." + str(possible_outcome.outcome_id)])
if trans.to_outcome is not None:
append_to_outcome_combo(model.state.outcomes[trans.to_outcome])
for outcome in model.state.outcomes.values():
if not (trans.to_outcome == outcome.outcome_id and trans.to_state == model.state.state_id):
append_to_outcome_combo(outcome)
return from_state_combo, from_outcome_combo, to_state_combo, to_outcome_combo, free_from_states, free_from_outcomes_dict
|
The function provides combos for a transition and its respective from-state, from-outcome, to-state and to-outcome combo boxes.
:param trans:
:param model:
:param self_model:
:param is_external:
:return:
|
def do_videoplaceholder(parser, token):
"""
Method that parse the imageplaceholder template tag.
"""
name, params = parse_placeholder(parser, token)
return VideoPlaceholderNode(name, **params)
|
Method that parse the imageplaceholder template tag.
|
def build_dependencies(self) -> "Dependencies":
"""
Return `Dependencies` instance containing the build dependencies available on this Package.
The ``Package`` class should provide access to the full dependency tree.
.. code:: python
>>> owned_package.build_dependencies['zeppelin']
<ZeppelinPackage>
"""
validate_build_dependencies_are_present(self.manifest)
dependencies = self.manifest["build_dependencies"]
dependency_packages = {}
for name, uri in dependencies.items():
try:
validate_build_dependency(name, uri)
dependency_package = Package.from_uri(uri, self.w3)
except PyEthPMError as e:
raise FailureToFetchIPFSAssetsError(
f"Failed to retrieve build dependency: {name} from URI: {uri}.\n"
f"Got error: {e}."
)
else:
dependency_packages[name] = dependency_package
return Dependencies(dependency_packages)
|
Return `Dependencies` instance containing the build dependencies available on this Package.
The ``Package`` class should provide access to the full dependency tree.
.. code:: python
>>> owned_package.build_dependencies['zeppelin']
<ZeppelinPackage>
|
def capability(cap, *wrap_exceptions):
""" Return a decorator, that registers function as capability. Also, all specified exceptions are
caught and instead of them the :class:`.WClientCapabilityError` exception is raised
:param cap: target function capability (may be a str or :class:`.WNetworkClientCapabilities` class )
:param wrap_exceptions: exceptions to caught
:return: decorator
"""
if isinstance(cap, WNetworkClientCapabilities) is True:
cap = cap.value
elif isinstance(cap, str) is False:
raise TypeError('Invalid capability type')
def first_level_decorator(decorated_function):
def second_level_decorator(original_function, *args, **kwargs):
if len(wrap_exceptions) == 0:
return original_function(*args, **kwargs)
try:
return original_function(*args, **kwargs)
except wrap_exceptions as e:
raise WClientCapabilityError(
'Error during "%s" capability execution' % cap
) from e
result_fn = decorator(second_level_decorator)(decorated_function)
result_fn.__capability_name__ = cap
return result_fn
return first_level_decorator
|
Return a decorator that registers a function as a capability. Also, all specified exceptions are
caught and the :class:`.WClientCapabilityError` exception is raised instead of them
:param cap: target function capability (may be a str or :class:`.WNetworkClientCapabilities` class )
:param wrap_exceptions: exceptions to catch
:return: decorator
|
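A hedged usage sketch of the decorator; the capability name 'list_dir' and the DemoClient class are hypothetical:
class DemoClient:

    # OSError raised inside is re-raised as WClientCapabilityError
    # with the original exception chained
    @capability('list_dir', OSError)
    def list_directory(self, path):
        import os
        return os.listdir(path)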
def on_success(self, retval, task_id, args, kwargs):
"""on_success
http://docs.celeryproject.org/en/latest/reference/celery.app.task.html
:param retval: return value
:param task_id: celery task id
:param args: arguments passed into task
:param kwargs: keyword arguments passed into task
"""
log.info(("{} SUCCESS - retval={} task_id={} "
"args={} kwargs={}")
.format(
self.log_label,
retval,
task_id,
args,
kwargs))
|
on_success
http://docs.celeryproject.org/en/latest/reference/celery.app.task.html
:param retval: return value
:param task_id: celery task id
:param args: arguments passed into task
:param kwargs: keyword arguments passed into task
|
def complete_set_acls(self, cmd_param_text, full_cmd, *rest):
""" FIXME: complete inside a quoted param is broken """
possible_acl = [
"digest:",
"username_password:",
"world:anyone:c",
"world:anyone:cd",
"world:anyone:cdr",
"world:anyone:cdrw",
"world:anyone:cdrwa",
]
complete_acl = partial(complete_values, possible_acl)
completers = [self._complete_path, complete_acl, complete_labeled_boolean("recursive")]
return complete(completers, cmd_param_text, full_cmd, *rest)
|
FIXME: complete inside a quoted param is broken
|
def get_config_node(self):
'''get_config_node
High-level api: get_config_node returns an Element node in the config
tree, which is corresponding to the URL in the Restconf GET reply.
Returns
-------
Element
A config node.
'''
default_ns = ''
config_node = etree.Element(config_tag, nsmap={'nc': nc_url})
for index, url_piece in enumerate(self._url_pieces):
if index == len(self._url_pieces)-1:
config_node_parent = self.copy(config_node)
node_name, values = self.parse_url_piece(url_piece)
default_ns, tag = self.convert_tag(default_ns, node_name,
src=Tag.JSON_NAME,
dst=Tag.LXML_ETREE)
config_node = self.subelement(config_node, tag, None)
schema_node = self.device.get_schema_node(config_node)
if schema_node.get('type') == 'leaf-list' and len(values) > 0:
model_name, text_value = self.get_name(values[0])
if model_name:
prefix = self._name_to_prefix[model_name]
config_node.text = '{}:{}'.format(prefix, text_value)
else:
config_node.text = text_value
elif schema_node.get('type') == 'list' and len(values) > 0:
key_tags = BaseCalculator._get_list_keys(schema_node)
for key_tag, value in zip(key_tags, values):
key = self.subelement(config_node, key_tag, value)
return config_node_parent, config_node
|
get_config_node
High-level api: get_config_node returns an Element node in the config
tree, which corresponds to the URL in the Restconf GET reply.
Returns
-------
Element
A config node.
|
def _get_chat(self) -> Dict:
"""
As Telegram changes where the chat object is located in the response,
this method tries to be smart about finding it in the right place.
"""
if 'callback_query' in self._update:
query = self._update['callback_query']
if 'message' in query:
return query['message']['chat']
else:
return {'id': query['chat_instance']}
elif 'inline_query' in self._update:
return patch_dict(
self._update['inline_query']['from'],
is_inline_query=True,
)
elif 'message' in self._update:
return self._update['message']['chat']
|
As Telegram changes where the chat object is located in the response,
this method tries to be smart about finding it in the right place.
|
def _serialiseServices(self, jobStore, jobGraph, rootJobGraph):
"""
Serialises the services for a job.
"""
def processService(serviceJob, depth):
# Extend the depth of the services if necessary
if depth == len(jobGraph.services):
jobGraph.services.append([])
# Recursively call to process child services
for childServiceJob in serviceJob.service._childServices:
processService(childServiceJob, depth+1)
# Make a job wrapper
serviceJobGraph = serviceJob._createEmptyJobGraphForJob(jobStore, predecessorNumber=1)
# Create the start and terminate flags
serviceJobGraph.startJobStoreID = jobStore.getEmptyFileStoreID()
serviceJobGraph.terminateJobStoreID = jobStore.getEmptyFileStoreID()
serviceJobGraph.errorJobStoreID = jobStore.getEmptyFileStoreID()
assert jobStore.fileExists(serviceJobGraph.startJobStoreID)
assert jobStore.fileExists(serviceJobGraph.terminateJobStoreID)
assert jobStore.fileExists(serviceJobGraph.errorJobStoreID)
# Create the service job tuple
j = ServiceJobNode(jobStoreID=serviceJobGraph.jobStoreID,
memory=serviceJobGraph.memory, cores=serviceJobGraph.cores,
disk=serviceJobGraph.disk, preemptable=serviceJobGraph.preemptable,
startJobStoreID=serviceJobGraph.startJobStoreID,
terminateJobStoreID=serviceJobGraph.terminateJobStoreID,
errorJobStoreID=serviceJobGraph.errorJobStoreID,
jobName=serviceJobGraph.jobName, unitName=serviceJobGraph.unitName,
command=serviceJobGraph.command,
predecessorNumber=serviceJobGraph.predecessorNumber)
# Add the service job tuple to the list of services to run
jobGraph.services[depth].append(j)
# Break the links between the services to stop them being serialised together
#childServices = serviceJob.service._childServices
serviceJob.service._childServices = None
assert serviceJob._services == []
#service = serviceJob.service
# Pickle the job
serviceJob.pickledService = pickle.dumps(serviceJob.service, protocol=pickle.HIGHEST_PROTOCOL)
serviceJob.service = None
# Serialise the service job and job wrapper
serviceJob._serialiseJob(jobStore, { serviceJob:serviceJobGraph }, rootJobGraph)
# Restore values
#serviceJob.service = service
#serviceJob.service._childServices = childServices
for serviceJob in self._services:
processService(serviceJob, 0)
self._services = []
|
Serialises the services for a job.
|
def regressfile(filename):
"""
Run all stories in filename 'filename' in python 2 and 3.
Rewrite stories if appropriate.
"""
_storybook().in_filename(filename).with_params(
**{"python version": "2.7.14"}
).ordered_by_name().play()
_storybook().with_params(**{"python version": "3.7.0"}).in_filename(
filename
).ordered_by_name().play()
|
Run all stories in filename 'filename' in python 2 and 3.
Rewrite stories if appropriate.
|
def is_temple_project():
"""Raises `InvalidTempleProjectError` if repository is not a temple project"""
if not os.path.exists(temple.constants.TEMPLE_CONFIG_FILE):
msg = 'No {} file found in repository.'.format(temple.constants.TEMPLE_CONFIG_FILE)
raise temple.exceptions.InvalidTempleProjectError(msg)
|
Raises `InvalidTempleProjectError` if repository is not a temple project
|
def copy_module(target_path, my_directory_full_path, my_module):
'''
Helper function for copy_module_to_local(). Provides the actual copy
functionality, with highly cautious safeguards against copying over
important things.
Parameters
----------
target_path : string
String, file path to target location
my_directory_full_path: string
String, full pathname to this file's directory
my_module : string
String, name of the module to copy
Returns
-------
None
'''
if target_path == 'q' or target_path == 'Q':
print("Goodbye!")
return
elif target_path == os.path.expanduser("~") or os.path.normpath(target_path) == os.path.expanduser("~"):
print("You have indicated that the target location is "+target_path+" -- that is, you want to wipe out your home directory with the contents of "+my_module+". My programming does not allow me to do that.\n\nGoodbye!")
return
elif os.path.exists(target_path):
print("There is already a file or directory at the location "+target_path+". For safety reasons this code does not overwrite existing files.\nPlease remove the file at "+target_path+" and try again.")
return
else:
user_input = input("""You have indicated you want to copy module:\n """+ my_module
+ """\nto:\n """+ target_path +"""\nIs that correct? Please indicate: y / [n]\n\n""")
if user_input == 'y' or user_input == 'Y':
#print("copy_tree(",my_directory_full_path,",", target_path,")")
copy_tree(my_directory_full_path, target_path)
else:
print("Goodbye!")
return
|
Helper function for copy_module_to_local(). Provides the actual copy
functionality, with highly cautious safeguards against copying over
important things.
Parameters
----------
target_path : string
String, file path to target location
my_directory_full_path: string
String, full pathname to this file's directory
my_module : string
String, name of the module to copy
Returns
-------
None
|
def _fn_with_diet_vars(fn, args, params):
"""Call function with args; use diet variables according to params."""
vs_ctr = []
def grad_fn(inputs, variables, outputs, output_grads):
"""Custom gradient function."""
del outputs # recomputing below
with common_layers.fn_device_dependency("diet_grad",
output_grads[0].device) as out_dep:
with tf.variable_scope(vs_ctr[0], reuse=True):
outputs = fn(*inputs)
variables = [common_layers.underlying_variable_ref(v) for v in variables]
dequantized_variables = [
params.dequantized[v.name][-1] for v in variables
]
grads = tf.gradients(outputs, inputs + dequantized_variables,
output_grads)
grad_inputs = grads[:len(inputs)]
grad_variables = grads[len(inputs):]
opt = _create_diet_optimizer(params)
# Apply grad_variables here
var_updates = []
for v, dv in zip(variables, grad_variables):
with tf.variable_scope(vs_ctr[0].name):
opt.create_slots(v)
update_op = opt.update_variable(v, dv)
var_updates.append(update_op)
with tf.control_dependencies(var_updates):
grad_inputs = [tf.identity(dx) for dx in grad_inputs]
out_dep.append(grad_inputs)
return grad_inputs, [None] * len(variables)
@common_layers.fn_with_custom_grad(grad_fn, use_global_vars=True)
def forward(*inputs):
with tf.variable_scope(
None, default_name="diet",
custom_getter=make_diet_var_getter(params)) as vs:
vs_ctr.append(vs)
outputs = fn(*inputs)
return outputs
with common_layers.fn_device_dependency("diet_forward",
args[0].device) as out_dep:
outputs = forward(*args)
out_dep.append(outputs)
return outputs
|
Call function with args; use diet variables according to params.
|
def create_chunked_body_end(trailers=None):
"""Create the ending that terminates a chunked body."""
chunk = []
chunk.append('0\r\n')
if trailers:
for name, value in trailers:
chunk.append(name)
chunk.append(': ')
chunk.append(value)
chunk.append('\r\n')
chunk.append('\r\n')
return s2b(''.join(chunk))
|
Create the ending that terminates a chunked body.
|
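A short sketch of the bytes produced (the trailer name/value are hypothetical; s2b encodes str to bytes):
create_chunked_body_end()
# -> b'0\r\n\r\n' (the bare terminating chunk)
create_chunked_body_end(trailers=[('Content-MD5', 'deadbeef')])
# -> b'0\r\nContent-MD5: deadbeef\r\n\r\n'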
def sg_rnn_layer_func(func):
r"""Decorates function as sg_rnn_layer functions.
Args:
func: function to decorate
"""
@wraps(func)
def wrapper(tensor, **kwargs):
r"""Manages arguments of `tf.sg_opt`.
Args:
tensor: automatically passed by decorator
kwargs:
in_dim: An integer. The size of input dimension, which is set to the last one by default.
dim: An integer. The size of output dimension. Has the same value as in_dim by default.
ln: Boolean. If True, layer normalization is applied.
bias: Boolean. If True, biases are added. As a default, it is set to True
name: A name for the layer. As a default, the function name is assigned.
reuse: `True` or `None`; if `True`, we go into reuse mode for this `layer` scope
as well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
summary: If True, summaries are added. The default is True.
"""
# kwargs parsing
opt = tf.sg_opt(kwargs) + sg_get_context()
# set default argument
try:
shape = tensor.get_shape().as_list()
# dropout off
opt += tf.sg_opt(shape=shape, in_dim=shape[-1], dim=shape[-1], dout=0, summary=True)
# disable bias when normalization on
opt += tf.sg_opt(bias=not opt.ln)
finally:
pass
# automatic layer naming
if opt.name is None:
# layer function name will be used as layer name
opt.name = func.__name__.replace('sg_', '')
# find existing layer names
exist_layers = []
for t in tf.global_variables():
scope_name = tf.get_variable_scope().name
prefix = scope_name + '/' if len(scope_name) > 0 else ''
i = t.name.rfind(prefix + opt.name)
if i >= 0:
exist_layers.append(t.name[i:].split('/')[-2])
exist_layers = list(set(exist_layers))
# layer name numbering
if len(exist_layers) == 0:
opt.name += '_1'
else:
opt.name += '_%d' % (max([int(n.split('_')[-1]) for n in exist_layers]) + 1)
with tf.variable_scope(opt.name, reuse=opt.reuse) as scope:
# call layer function
out = func(tensor, opt)
# apply dropout
if opt.dout:
out = tf.cond(_phase,
lambda: tf.nn.dropout(out, 1 - opt.dout),
lambda: out)
# rename tensor
out = tf.identity(out, 'out')
# add final output summary
if opt.summary:
tf.sg_summary_activation(out)
# save node info for reuse
out._sugar = tf.sg_opt(func=func, arg=tf.sg_opt(kwargs) + sg_get_context(),
prev=tensor, is_layer=True, name=opt.name)
# inject reuse function
out.sg_reuse = types.MethodType(sg_reuse, out)
return out
return wrapper
|
r"""Decorates function as sg_rnn_layer functions.
Args:
func: function to decorate
|
def parse(self, text, html=True):
'''Parse the text and return a ParseResult instance.'''
self._urls = []
self._users = []
self._lists = []
self._tags = []
reply = REPLY_REGEX.match(text)
reply = reply.groups(0)[0] if reply is not None else None
parsed_html = self._html(text) if html else self._text(text)
return ParseResult(self._urls, self._users, reply,
self._lists, self._tags, parsed_html)
|
Parse the text and return a ParseResult instance.
|
def add(self, node):
"""Register a top level node (and its children) for syncing up to the server. There's no need to call this for nodes created by
:py:meth:`createNote` or :py:meth:`createList` as they are automatically added.
LoginException: If :py:meth:`login` has not been called.
Args:
node (gkeepapi.node.Node): The node to sync.
Raises:
Invalid: If the parent node is not found.
"""
if node.parent_id != _node.Root.ID:
raise exception.InvalidException('Not a top level node')
self._nodes[node.id] = node
self._nodes[node.parent_id].append(node, False)
|
Register a top level node (and its children) for syncing up to the server. There's no need to call this for nodes created by
:py:meth:`createNote` or :py:meth:`createList` as they are automatically added.
Args:
node (gkeepapi.node.Node): The node to sync.
Raises:
LoginException: If :py:meth:`login` has not been called.
Invalid: If the parent node is not found.
|
def read_config(self):
""" Read credentials from the config file """
with open(self.config_file) as cfg:
try:
self.config.read_file(cfg)
except AttributeError:
self.config.readfp(cfg)
self.client_id = self.config.get('exist', 'client_id')
self.client_secret = self.config.get('exist', 'client_secret')
self.access_token = self.config.get('exist', 'access_token')
|
Read credentials from the config file
|
def call_openssl(cmd, message, silent=False):
"""
call openssl
:param cmd: a string of the command sent to openssl
:param message: a string to print out if not silent
:param silent: a boolean for whether to suppress output from openssl
"""
if silent:
with open(os.devnull, 'w') as devnull:
return subprocess.check_call(cmd, shell=True, stdout=devnull,
stderr=subprocess.STDOUT)
else:
print(message)
return subprocess.check_call(cmd, shell=True)
|
call openssl
:param cmd: a string of the command sent to openssl
:param message: a string to print out if not silent
:param silent: a boolean for whether to suppress output from openssl
|
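A minimal usage sketch (assuming the openssl binary is on PATH; the message text is hypothetical):
# Prints the message, then runs the command and echoes its output
call_openssl('openssl version', 'checking OpenSSL version')
# Same command, with all openssl output discarded
call_openssl('openssl version', 'checking OpenSSL version', silent=True)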
def custom_size(self, minimum: int = 40, maximum: int = 62) -> int:
"""Generate clothing size using custom format.
:param minimum: Minimum value.
:param maximum: Maximum value.
:return: Clothing size.
"""
return self.random.randint(minimum, maximum)
|
Generate clothing size using custom format.
:param minimum: Minimum value.
:param maximum: Maximum value.
:return: Clothing size.
|
def guard_handler(instance, transition_id):
"""Generic workflow guard handler that returns true if the transition_id
passed in can be performed to the instance passed in.
This function is called automatically by a Script (Python) located at
bika/lims/skins/guard_handler.py, which in turn is fired by Zope when an
expression like "python:here.guard_handler('<transition_id>')" is set to
any given guard (used by default in all bika's DC Workflow guards).
Walks through bika.lims.workflow.<obj_type>.guards and looks for a function
that matches with 'guard_<transition_id>'. If found, calls the function and
returns its value (true or false). If not found, returns True by default.
:param instance: the object for which the transition_id has to be evaluated
:param transition_id: the id of the transition
:type instance: ATContentType
:type transition_id: string
:return: true if the transition can be performed to the passed in instance
:rtype: bool
"""
if not instance:
return True
clazz_name = instance.portal_type
# Inspect if bika.lims.workflow.<clazzname>.<guards> module exists
wf_module = _load_wf_module('{0}.guards'.format(clazz_name.lower()))
if not wf_module:
return True
# Inspect if guard_<transition_id> function exists in the above module
key = 'guard_{0}'.format(transition_id)
guard = getattr(wf_module, key, False)
if not guard:
return True
#logger.info('{0}.guards.{1}'.format(clazz_name.lower(), key))
return guard(instance)
|
Generic workflow guard handler that returns true if the transition_id
passed in can be performed to the instance passed in.
This function is called automatically by a Script (Python) located at
bika/lims/skins/guard_handler.py, which in turn is fired by Zope when an
expression like "python:here.guard_handler('<transition_id>')" is set to
any given guard (used by default in all bika's DC Workflow guards).
Walks through bika.lims.workflow.<obj_type>.guards and looks for a function
that matches with 'guard_<transition_id>'. If found, calls the function and
returns its value (true or false). If not found, returns True by default.
:param instance: the object for which the transition_id has to be evaluated
:param transition_id: the id of the transition
:type instance: ATContentType
:type transition_id: string
:return: true if the transition can be performed to the passed in instance
:rtype: bool
|
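A minimal sketch of the lookup convention the handler above relies on; the module path, function, and attribute below are hypothetical illustrations of the guard_<transition_id> pattern, not bika's actual guards:
# Hypothetical bika/lims/workflow/<portal_type>/guards.py module; the handler
# above resolves a function named guard_<transition_id> from it.
def guard_publish(instance):
    # 'is_verified' is a made-up attribute used purely for illustration
    return getattr(instance, 'is_verified', False)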
def _maintain_parent(self, request, response):
"""
Maintain the parent ID in the querystring for response_add and
response_change.
"""
location = response._headers.get("location")
parent = request.GET.get("parent")
if parent and location and "?" not in location[1]:
url = "%s?parent=%s" % (location[1], parent)
return HttpResponseRedirect(url)
return response
|
Maintain the parent ID in the querystring for response_add and
response_change.
|
def unlock(self):
"""
Releases the lock.
"""
return self._encode_invoke(lock_unlock_codec, thread_id=thread_id(),
reference_id=self.reference_id_generator.get_and_increment())
|
Releases the lock.
|
def delete_data(self, url, *args, **kwargs):
"""Deletes data under provided url
Returns status as boolean.
Args:
**url**: address of file to be deleted
.. versionadded:: 0.3.2
**additional_headers**: (optional) Additional headers
to be used with request
Returns:
Boolean. True if request was successful. False if not.
"""
res = self._conn.delete(url, headers=self._prepare_headers(**kwargs))
if res.status_code == 200 or res.status_code == 202:
return True
else:
return False
|
Deletes data under the provided url.
Returns status as boolean.
Args:
**url**: address of file to be deleted
.. versionadded:: 0.3.2
**additional_headers**: (optional) Additional headers
to be used with request
Returns:
Boolean. True if request was successful. False if not.
|
def send(self, stanza, *, timeout=None, cb=None):
"""
Send a stanza.
:param stanza: Stanza to send
:type stanza: :class:`~.IQ`, :class:`~.Presence` or :class:`~.Message`
:param timeout: Maximum time in seconds to wait for an IQ response, or
:data:`None` to disable the timeout.
:type timeout: :class:`~numbers.Real` or :data:`None`
:param cb: Optional callback which is called synchronously when the
reply is received (IQ requests only!)
:raise OSError: if the underlying XML stream fails and stream
management is not disabled.
:raise aioxmpp.stream.DestructionRequested:
if the stream is closed while sending the stanza or waiting for a
response.
:raise aioxmpp.errors.XMPPError: if an error IQ response is received
:raise aioxmpp.errors.ErroneousStanza: if the IQ response could not be
parsed
:raise ValueError: if `cb` is given and `stanza` is not an IQ request.
:return: IQ response :attr:`~.IQ.payload` or :data:`None`
Send the stanza and wait for it to be sent. If the stanza is an IQ
request, the response is awaited and the :attr:`~.IQ.payload` of the
response is returned.
If the stream is currently not ready, this method blocks until the
stream is ready to send payload stanzas. Note that this may be before
initial presence has been sent. To synchronise with that type of
events, use the appropriate signals.
The `timeout` as well as any of the exception cases referring to a
"response" do not apply for IQ response stanzas, message stanzas or
presence stanzas sent with this method, as this method only waits for
a reply if an IQ *request* stanza is being sent.
If `stanza` is an IQ request and the response is not received within
`timeout` seconds, :class:`TimeoutError` (not
:class:`asyncio.TimeoutError`!) is raised.
If `cb` is given, `stanza` must be an IQ request (otherwise,
:class:`ValueError` is raised before the stanza is sent). It must be a
callable returning an awaitable. It receives the response stanza as
first and only argument. The returned awaitable is awaited by
:meth:`send` and the result is returned instead of the original
payload. `cb` is called synchronously from the stream handling loop
when the response is received, so it can benefit from the strong
ordering guarantees given by XMPP XML Streams.
The `cb` may also return :data:`None`, in which case :meth:`send` will
simply return the IQ payload as if `cb` was not given. Since the return
value of coroutine functions is awaitable, it is valid and supported to
pass a coroutine function as `cb`.
.. warning::
Remember that it is an implementation detail of the event loop when
a coroutine is scheduled after it awaited an awaitable; this
implies that if the caller of :meth:`send` is merely awaiting the
:meth:`send` coroutine, the strong ordering guarantees of XMPP XML
Streams are lost.
To regain those, use the `cb` argument.
.. note::
For the sake of readability, unless you really need the strong
ordering guarantees, avoid the use of the `cb` argument. Avoid
using a coroutine function unless you really need to.
.. versionchanged:: 0.10
* This method now waits until the stream is ready to send stanza
payloads.
* This method was moved from
:meth:`aioxmpp.stream.StanzaStream.send`.
.. versionchanged:: 0.9
The `cb` argument was added.
.. versionadded:: 0.8
"""
if not self.running:
raise ConnectionError("client is not running")
if not self.established:
self.logger.debug("send(%s): stream not established, waiting",
stanza)
# wait for the stream to be established
stopped_fut = self.on_stopped.future()
failure_fut = self.on_failure.future()
established_fut = asyncio.ensure_future(
self.established_event.wait()
)
done, pending = yield from asyncio.wait(
[
established_fut,
failure_fut,
stopped_fut,
],
return_when=asyncio.FIRST_COMPLETED,
)
if not established_fut.done():
established_fut.cancel()
if failure_fut.done():
if not stopped_fut.done():
stopped_fut.cancel()
failure_fut.exception()
raise ConnectionError("client failed to connect")
if stopped_fut.done():
raise ConnectionError("client shut down by user request")
self.logger.debug("send(%s): stream established, sending")
return (yield from self.stream._send_immediately(stanza,
timeout=timeout,
cb=cb))
|
Send a stanza.
:param stanza: Stanza to send
:type stanza: :class:`~.IQ`, :class:`~.Presence` or :class:`~.Message`
:param timeout: Maximum time in seconds to wait for an IQ response, or
:data:`None` to disable the timeout.
:type timeout: :class:`~numbers.Real` or :data:`None`
:param cb: Optional callback which is called synchronously when the
reply is received (IQ requests only!)
:raise OSError: if the underlying XML stream fails and stream
management is not disabled.
:raise aioxmpp.stream.DestructionRequested:
if the stream is closed while sending the stanza or waiting for a
response.
:raise aioxmpp.errors.XMPPError: if an error IQ response is received
:raise aioxmpp.errors.ErroneousStanza: if the IQ response could not be
parsed
:raise ValueError: if `cb` is given and `stanza` is not an IQ request.
:return: IQ response :attr:`~.IQ.payload` or :data:`None`
Send the stanza and wait for it to be sent. If the stanza is an IQ
request, the response is awaited and the :attr:`~.IQ.payload` of the
response is returned.
If the stream is currently not ready, this method blocks until the
stream is ready to send payload stanzas. Note that this may be before
initial presence has been sent. To synchronise with that type of
events, use the appropriate signals.
The `timeout` as well as any of the exception cases referring to a
"response" do not apply for IQ response stanzas, message stanzas or
presence stanzas sent with this method, as this method only waits for
a reply if an IQ *request* stanza is being sent.
If `stanza` is an IQ request and the response is not received within
`timeout` seconds, :class:`TimeoutError` (not
:class:`asyncio.TimeoutError`!) is raised.
If `cb` is given, `stanza` must be an IQ request (otherwise,
:class:`ValueError` is raised before the stanza is sent). It must be a
callable returning an awaitable. It receives the response stanza as
first and only argument. The returned awaitable is awaited by
:meth:`send` and the result is returned instead of the original
payload. `cb` is called synchronously from the stream handling loop
when the response is received, so it can benefit from the strong
ordering guarantees given by XMPP XML Streams.
The `cb` may also return :data:`None`, in which case :meth:`send` will
simply return the IQ payload as if `cb` was not given. Since the return
value of coroutine functions is awaitable, it is valid and supported to
pass a coroutine function as `cb`.
.. warning::
Remember that it is an implementation detail of the event loop when
a coroutine is scheduled after it awaited an awaitable; this
implies that if the caller of :meth:`send` is merely awaiting the
:meth:`send` coroutine, the strong ordering guarantees of XMPP XML
Streams are lost.
To regain those, use the `cb` argument.
.. note::
For the sake of readability, unless you really need the strong
ordering guarantees, avoid the use of the `cb` argument. Avoid
using a coroutine function unless you really need to.
.. versionchanged:: 0.10
* This method now waits until the stream is ready to send stanza
payloads.
* This method was moved from
:meth:`aioxmpp.stream.StanzaStream.send`.
.. versionchanged:: 0.9
The `cb` argument was added.
.. versionadded:: 0.8
|
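A hedged usage sketch inside a generator-based coroutine; `client` is a started aioxmpp client, and `peer_jid` and `query` are placeholders:
import aioxmpp

# peer_jid and query are hypothetical placeholders
iq = aioxmpp.IQ(type_=aioxmpp.IQType.GET, to=peer_jid, payload=query)
reply_payload = yield from client.send(iq, timeout=10)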
def generate_VJ_junction_transfer_matrices(self):
"""Compute the transfer matrices for the VJ junction.
Sets the attributes Tvj, Svj, Dvj, lTvj, and lDvj.
"""
nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
#Compute Tvj
Tvj = {}
for aa in self.codons_dict.keys():
current_Tvj = np.zeros((4, 4))
for init_nt in 'ACGT':
for codon in self.codons_dict[aa]:
current_Tvj[nt2num[codon[2]], nt2num[init_nt]] += self.Rvj[nt2num[codon[2]],nt2num[codon[1]]]*self.Rvj[nt2num[codon[1]],nt2num[codon[0]]] * self.Rvj[nt2num[codon[0]],nt2num[init_nt]]
Tvj[aa] = current_Tvj
#Compute Svj
Svj = {}
for aa in self.codons_dict.keys():
current_Svj = np.zeros((4, 4))
for ins_nt in 'ACGT':
if any([codon.startswith(ins_nt) for codon in self.codons_dict[aa]]):
current_Svj[nt2num[ins_nt], :] = self.Rvj[nt2num[ins_nt], :]
Svj[aa] = current_Svj
#Compute Dvj
Dvj = {}
for aa in self.codons_dict.keys():
current_Dvj = np.zeros((4, 4))
for init_nt in 'ACGT':
for codon in self.codons_dict[aa]:
current_Dvj[nt2num[codon[2]], nt2num[init_nt]] += self.Rvj[nt2num[codon[1]],nt2num[codon[0]]] * self.Rvj[nt2num[codon[0]],nt2num[init_nt]]
Dvj[aa] = current_Dvj
#Compute lTvj
lTvj = {}
for aa in self.codons_dict.keys():
current_lTvj = np.zeros((4, 4))
for codon in self.codons_dict[aa]:
current_lTvj[nt2num[codon[2]], nt2num[codon[0]]] += self.Rvj[nt2num[codon[2]],nt2num[codon[1]]]*self.first_nt_bias_insVJ[nt2num[codon[1]]]
lTvj[aa] = current_lTvj
#Compute lDvj
lDvj = {}
for aa in self.codons_dict.keys():
current_lDvj = np.zeros((4, 4))
for codon in self.codons_dict[aa]:
current_lDvj[nt2num[codon[2]], nt2num[codon[0]]] += self.first_nt_bias_insVJ[nt2num[codon[1]]]
lDvj[aa] = current_lDvj
#Set the attributes
self.Tvj = Tvj
self.Svj = Svj
self.Dvj = Dvj
self.lTvj = lTvj
self.lDvj = lDvj
|
Compute the transfer matrices for the VJ junction.
Sets the attributes Tvj, Svj, Dvj, lTvj, and lDvj.
|
def create_graph(grid):
"""
This function creates a graph of vertices and edges from segments returned by SLIC.
:param array grid: A grid of segments as returned by the slic function defined in skimage library
:return: A graph as [vertices, edges]
"""
try:
import numpy as np
except ImportError:
print(
"NumPy is not installed. segraph needs NumPy to function. Please use 'pip install numpy' to install it.")
exit(0)
print("Creating a graph using segmented grid..")
# get an array of unique labels
try:
vertices = np.unique(grid)
# get number of vertices
num_vertices = len(vertices)
# map these unique labels to [1,...,N], where N is the number of labels (vertices)
mapping = dict(zip(vertices, np.arange(num_vertices)))
mapped_grid = np.array([mapping[x] for x in grid.flat]).reshape(grid.shape)
# create edges, going left to right and top to bottom
l2r = np.c_[mapped_grid[:, :-1].ravel(), mapped_grid[:, 1:].ravel()]
t2b = np.c_[mapped_grid[:-1, :].ravel(), mapped_grid[1:, :].ravel()]
# stack for entire graph
edges = np.vstack([l2r, t2b])
edges = edges[edges[:, 0] != edges[:, 1], :]
edges = np.sort(edges, axis=1)
# create a edge map, a hashmap
edge_map = edges[:, 0] + num_vertices * edges[:, 1]
# filter unique connections as edges
edges = np.unique(edge_map)
# reverse map and form edges as pairs
edges = [[vertices[edge % num_vertices],
vertices[edge // num_vertices]] for edge in edges]
except Exception:
print("Invalid argument supplied!")
return None
return vertices, edges
|
This function creates a graph of vertices and edges from segments returned by SLIC.
:param array grid: A grid of segments as returned by the slic function defined in skimage library
:return: A graph as [vertices, edges]
|
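A tiny worked example, using a hand-made label grid in place of a real SLIC output:
import numpy as np

# Hand-made 2x3 label grid standing in for a SLIC segmentation
grid = np.array([[0, 0, 1],
                 [2, 2, 1]])
vertices, edges = create_graph(grid)
# vertices -> array([0, 1, 2])
# edges    -> [[0, 1], [0, 2], [1, 2]]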
def decorate_event_js(js_code):
"""setup a method as an event, adding also javascript code to generate
Args:
js_code (str): javascript code to generate the event client-side.
js_code is added to the widget html as
widget.attributes['onclick'] = js_code%{'emitter_identifier':widget.identifier, 'event_name':'onclick'}
"""
def add_annotation(method):
setattr(method, "__is_event", True )
setattr(method, "_js_code", js_code )
return method
return add_annotation
|
Set up a method as an event, also adding the javascript code that generates it client-side.
Args:
js_code (str): javascript code to generate the event client-side.
js_code is added to the widget html as
widget.attributes['onclick'] = js_code%{'emitter_identifier':widget.identifier, 'event_name':'onclick'}
|
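A hedged usage sketch; the Widget base class is assumed to come from the same GUI library, and 'notifyServer' is a placeholder JS function name:
class MyWidget(Widget):

    # The JS snippet runs client-side when the event fires
    @decorate_event_js("notifyServer('%(emitter_identifier)s','%(event_name)s');")
    def onrotate(self, *args):
        return (self, args)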
def getWorkersName(data):
"""Returns the list of the names of the workers sorted alphabetically"""
names = [fichier for fichier in data.keys()]
names.sort()
try:
names.remove("broker")
except ValueError:
pass
return names
|
Returns the list of the names of the workers sorted alphabetically
|
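A short behavioural sketch (hypothetical stats dict; the 'broker' entry is dropped):
data = {"worker2": {}, "broker": {}, "worker1": {}}
getWorkersName(data)  # -> ['worker1', 'worker2']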
def validate(schema, data, owner=None):
"""Validate input data with input schema.
:param Schema schema: schema able to validate input data.
:param data: data to validate.
:param Schema owner: input schema parent schema.
:raises: Exception if the data is not validated.
"""
schema._validate(data=data, owner=owner)
|
Validate input data with input schema.
:param Schema schema: schema able to validate input data.
:param data: data to validate.
:param Schema owner: input schema parent schema.
:raises: Exception if the data is not validated.
|
def solve_resolve(expr, vars):
"""Use IStructured.resolve to get member (rhs) from the object (lhs).
This operation supports both scalars and repeated values on the LHS -
resolving from a repeated value implies a map-like operation and returns a
new repeated value.
"""
objs, _ = __solve_for_repeated(expr.lhs, vars)
member = solve(expr.rhs, vars).value
try:
results = [structured.resolve(o, member)
for o in repeated.getvalues(objs)]
except (KeyError, AttributeError):
# Raise a better exception for the non-existent member.
raise errors.EfilterKeyError(root=expr.rhs, key=member,
query=expr.source)
except (TypeError, ValueError):
# Is this a null object error?
if vars.locals is None:
raise errors.EfilterNoneError(
root=expr, query=expr.source,
message="Cannot resolve member %r from a null." % member)
else:
raise
except NotImplementedError:
raise errors.EfilterError(
root=expr, query=expr.source,
message="Cannot resolve members from a non-structured value.")
return Result(repeated.meld(*results), ())
|
Use IStructured.resolve to get member (rhs) from the object (lhs).
This operation supports both scalars and repeated values on the LHS -
resolving from a repeated value implies a map-like operation and returns a
new repeated value.
|
def set_title(self, title, subtitle=''):
"""Set the title and the subtitle of the suite."""
self.title = title
self.subtitle = subtitle
|
Set the title and the subtitle of the suite.
|
def pole_error(ax, fit, **kwargs):
"""
Plot the error to the pole to a plane on a `mplstereonet`
axis object.
"""
ell = normal_errors(fit.axes, fit.covariance_matrix)
lonlat = -N.array(ell)
n = len(lonlat)
codes = [Path.MOVETO]
codes += [Path.LINETO]*(n-1)
vertices = list(lonlat)
plot_patch(ax, vertices, codes, **kwargs)
|
Plot the error to the pole to a plane on a `mplstereonet`
axis object.
|
def stop(self):
"""Stop the name server.
"""
self.listener.setsockopt(LINGER, 1)
self.loop = False
with nslock:
self.listener.close()
|
Stop the name server.
|
def pyeapi_nxos_api_args(**prev_kwargs):
'''
.. versionadded:: 2019.2.0
Return the key-value arguments used for the authentication arguments for the
:mod:`pyeapi execution module <salt.module.arista_pyeapi>`.
CLI Example:
.. code-block:: bash
salt '*' napalm.pyeapi_nxos_api_args
'''
kwargs = {}
napalm_opts = salt.utils.napalm.get_device_opts(__opts__, salt_obj=__salt__)
optional_args = napalm_opts['OPTIONAL_ARGS']
kwargs['host'] = napalm_opts['HOSTNAME']
kwargs['username'] = napalm_opts['USERNAME']
kwargs['password'] = napalm_opts['PASSWORD']
kwargs['timeout'] = napalm_opts['TIMEOUT']
kwargs['transport'] = optional_args.get('transport')
kwargs['port'] = optional_args.get('port')
kwargs['verify'] = optional_args.get('verify')
prev_kwargs.update(kwargs)
return prev_kwargs
|
.. versionadded:: 2019.2.0
Return the key-value arguments used for the authentication arguments for the
:mod:`pyeapi execution module <salt.module.arista_pyeapi>`.
CLI Example:
.. code-block:: bash
salt '*' napalm.pyeapi_nxos_api_args
|
def constcase(text, acronyms=None):
"""Return text in CONST_CASE style (aka SCREAMING_SNAKE_CASE).
Args:
text: input string to convert case
acronyms: a list of acronyms to detect
>>> constcase("hello world")
'HELLO_WORLD'
>>> constcase("helloHTMLWorld", True, ["HTML"])
'HELLO_HTML_WORLD'
"""
words, _case, _sep = case_parse.parse_case(text, acronyms)
return '_'.join([w.upper() for w in words])
|
Return text in CONST_CASE style (aka SCREAMING_SNAKE_CASE).
Args:
text: input string to convert case
acronyms: a list of acronyms to detect
>>> constcase("hello world")
'HELLO_WORLD'
>>> constcase("helloHTMLWorld", True, ["HTML"])
'HELLO_HTML_WORLD'
|
def to_string_with_default(value, default_value):
"""
Converts value into string or returns default when value is None.
:param value: the value to convert.
:param default_value: the default value.
:return: string value or default when value is null.
"""
result = StringConverter.to_nullable_string(value)
return result if result is not None else default_value
|
Converts value into string or returns default when value is None.
:param value: the value to convert.
:param default_value: the default value.
:return: string value or default when value is null.
|
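A short behavioural sketch (assuming StringConverter.to_nullable_string stringifies non-null values and returns None for None):
to_string_with_default(123, "n/a")   # -> '123'
to_string_with_default(None, "n/a")  # -> 'n/a'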
def best(cls):
"""
Select the best ScriptWriter suitable for Windows
"""
writer_lookup = dict(
executable=WindowsExecutableLauncherWriter,
natural=cls,
)
# for compatibility, use the executable launcher by default
launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
return writer_lookup[launcher]
|
Select the best ScriptWriter suitable for Windows
|
def _inject_specs(self, specs):
"""Injects targets into the graph for the given `Specs` object.
Yields the resulting addresses.
"""
if not specs:
return
logger.debug('Injecting specs to %s: %s', self, specs)
with self._resolve_context():
thts, = self._scheduler.product_request(TransitiveHydratedTargets,
[specs])
self._index(thts.closure)
for hydrated_target in thts.roots:
yield hydrated_target.address
|
Injects targets into the graph for the given `Specs` object.
Yields the resulting addresses.
|
def multivariate_normality(X, alpha=.05):
"""Henze-Zirkler multivariate normality test.
Parameters
----------
X : np.array
Data matrix of shape (n_samples, n_features).
alpha : float
Significance level.
Returns
-------
normal : boolean
True if X comes from a multivariate normal distribution.
p : float
P-value.
See Also
--------
normality : Test the univariate normality of one or more variables.
homoscedasticity : Test equality of variance.
sphericity : Mauchly's test for sphericity.
Notes
-----
The Henze-Zirkler test has a good overall power against alternatives
to normality and is feasible for any dimension and any sample size.
Adapted to Python from a Matlab code by Antonio Trujillo-Ortiz and
tested against the R package MVN.
Rows with missing values are automatically removed using the
:py:func:`remove_na` function.
References
----------
.. [1] Henze, N., & Zirkler, B. (1990). A class of invariant consistent
tests for multivariate normality. Communications in Statistics-Theory
and Methods, 19(10), 3595-3617.
.. [2] Trujillo-Ortiz, A., R. Hernandez-Walls, K. Barba-Rojo and L.
Cupul-Magana. (2007). HZmvntest: Henze-Zirkler's Multivariate
Normality Test. A MATLAB file.
Examples
--------
>>> import pingouin as pg
>>> data = pg.read_dataset('multivariate')
>>> X = data[['Fever', 'Pressure', 'Aches']]
>>> normal, p = pg.multivariate_normality(X, alpha=.05)
>>> print(normal, round(p, 3))
True 0.717
"""
from scipy.stats import lognorm
# Check input and remove missing values
X = np.asarray(X)
assert X.ndim == 2, 'X must be of shape (n_samples, n_features).'
X = X[~np.isnan(X).any(axis=1)]
n, p = X.shape
assert n >= 3, 'X must have at least 3 rows.'
assert p >= 2, 'X must have at least two columns.'
# Covariance matrix
S = np.cov(X, rowvar=False, bias=True)
S_inv = np.linalg.pinv(S)
difT = X - X.mean(0)
# Squared-Mahalanobis distances
Dj = np.diag(np.linalg.multi_dot([difT, S_inv, difT.T]))
Y = np.linalg.multi_dot([X, S_inv, X.T])
Djk = -2 * Y.T + np.repeat(np.diag(Y.T), n).reshape(n, -1) + \
np.tile(np.diag(Y.T), (n, 1))
# Smoothing parameter
b = 1 / (np.sqrt(2)) * ((2 * p + 1) / 4)**(1 / (p + 4)) * \
(n**(1 / (p + 4)))
if np.linalg.matrix_rank(S) == p:
hz = n * (1 / (n**2) * np.sum(np.sum(np.exp(-(b**2) / 2 * Djk))) - 2
* ((1 + (b**2))**(-p / 2)) * (1 / n)
* (np.sum(np.exp(-((b**2) / (2 * (1 + (b**2)))) * Dj)))
+ ((1 + (2 * (b**2)))**(-p / 2)))
else:
hz = n * 4
wb = (1 + b**2) * (1 + 3 * b**2)
a = 1 + 2 * b**2
# Mean and variance
mu = 1 - a**(-p / 2) * (1 + p * b**2 / a + (p * (p + 2)
* (b**4)) / (2 * a**2))
si2 = 2 * (1 + 4 * b**2)**(-p / 2) + 2 * a**(-p) * \
(1 + (2 * p * b**4) / a**2 + (3 * p * (p + 2) * b**8) / (4 * a**4)) \
- 4 * wb**(-p / 2) * (1 + (3 * p * b**4) / (2 * wb)
+ (p * (p + 2) * b**8) / (2 * wb**2))
# Lognormal mean and variance
pmu = np.log(np.sqrt(mu**4 / (si2 + mu**2)))
psi = np.sqrt(np.log((si2 + mu**2) / mu**2))
# P-value
pval = lognorm.sf(hz, psi, scale=np.exp(pmu))
normal = True if pval > alpha else False
return normal, pval
|
Henze-Zirkler multivariate normality test.
Parameters
----------
X : np.array
Data matrix of shape (n_samples, n_features).
alpha : float
Significance level.
Returns
-------
normal : boolean
True if X comes from a multivariate normal distribution.
p : float
P-value.
See Also
--------
normality : Test the univariate normality of one or more variables.
homoscedasticity : Test equality of variance.
sphericity : Mauchly's test for sphericity.
Notes
-----
The Henze-Zirkler test has a good overall power against alternatives
to normality and is feasible for any dimension and any sample size.
Adapted to Python from a Matlab code by Antonio Trujillo-Ortiz and
tested against the R package MVN.
Rows with missing values are automatically removed using the
:py:func:`remove_na` function.
References
----------
.. [1] Henze, N., & Zirkler, B. (1990). A class of invariant consistent
tests for multivariate normality. Communications in Statistics-Theory
and Methods, 19(10), 3595-3617.
.. [2] Trujillo-Ortiz, A., R. Hernandez-Walls, K. Barba-Rojo and L.
Cupul-Magana. (2007). HZmvntest: Henze-Zirkler's Multivariate
Normality Test. A MATLAB file.
Examples
--------
>>> import pingouin as pg
>>> data = pg.read_dataset('multivariate')
>>> X = data[['Fever', 'Pressure', 'Aches']]
>>> normal, p = pg.multivariate_normality(X, alpha=.05)
>>> print(normal, round(p, 3))
True 0.717
|