code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def safe_print(ustring, errors='replace', **kwargs):
    """Print a unicode string without raising encoding errors.

    On Python 3 the string is handed to ``print`` directly; on Python 2 it
    is first encoded with stdout's encoding (falling back to UTF-8),
    handling unencodable characters according to *errors*.
    """
    encoding = sys.stdout.encoding or 'utf-8'
    if sys.version_info[0] != 3:
        print(ustring.encode(encoding, errors=errors), **kwargs)
    else:
        print(ustring, **kwargs)
|
Safely print a unicode string
|
def summarize(self, **kwargs):
    """Return a pandas DataFrame with the most important results stored in the timers.

    One row per timer with columns fname, wall_time, cpu_time, mpi_nprocs,
    omp_nthreads and mpi_rank, plus the derived columns ``tot_ncpus``
    (MPI procs * OMP threads) and ``peff`` (parallel efficiency, normalized
    against the run with the fewest total CPUs).
    """
    import pandas as pd
    colnames = ["fname", "wall_time", "cpu_time", "mpi_nprocs", "omp_nthreads", "mpi_rank"]
    # Build all rows first: row-by-row DataFrame.append was removed in pandas 2.0.
    rows = [{k: getattr(timer, k) for k in colnames} for timer in self.timers()]
    frame = pd.DataFrame(rows, columns=colnames)
    frame["tot_ncpus"] = frame["mpi_nprocs"] * frame["omp_nthreads"]
    # Compute parallel efficiency (use the run with min number of cpus to normalize).
    # .ix was removed from pandas; use positional .iloc instead.
    i = frame["tot_ncpus"].values.argmin()
    ref_wtime = frame.iloc[i]["wall_time"]
    ref_ncpus = frame.iloc[i]["tot_ncpus"]
    frame["peff"] = (ref_ncpus * ref_wtime) / (frame["wall_time"] * frame["tot_ncpus"])
    return frame
|
Return pandas DataFrame with the most important results stored in the timers.
|
def cholesky(A, sparse=True, verbose=True):
    """
    Choose the best possible cholesky factorizor.

    If possible, use the Scikit-Sparse sparse Cholesky method.
    Permutes the output L to ensure A = L.H . L;
    otherwise defaults to numpy's non-sparse version.

    Parameters
    ----------
    A : array-like
        array to decompose
    sparse : boolean, default: True
        whether to return a sparse array
    verbose : bool, default: True
        whether to print warnings

    Returns
    -------
    Upper-triangular Cholesky factor of ``A``; sparse iff ``sparse=True``.

    Raises
    ------
    NotPositiveDefiniteError
        If ``A`` is not positive definite.
    """
    if SKSPIMPORT:
        A = sp.sparse.csc_matrix(A)
        try:
            F = spcholesky(A)

            # CHOLMOD factors P*A*P.T = L*L.H with a fill-reducing
            # permutation P; undo it so the returned factor satisfies
            # A = L.H . L
            P = sp.sparse.lil_matrix(A.shape)
            p = F.P()
            P[np.arange(len(p)), p] = 1

            L = F.L()
            L = P.T.dot(L)
        except CholmodNotPositiveDefiniteError as e:
            # Chain the original CHOLMOD error for easier debugging.
            raise NotPositiveDefiniteError('Matrix is not positive definite') from e

        if sparse:
            return L.T  # upper triangular factorization
        return L.T.toarray()  # upper triangular factorization (.A is deprecated)

    else:
        msg = 'Could not import Scikit-Sparse or Suite-Sparse.\n'\
              'This will slow down optimization for models with '\
              'monotonicity/convexity penalties and many splines.\n'\
              'See installation instructions for installing '\
              'Scikit-Sparse and Suite-Sparse via Conda.'
        if verbose:
            warnings.warn(msg)

        if sp.sparse.issparse(A):
            A = A.toarray()  # .A is deprecated for scipy sparse containers

        try:
            L = sp.linalg.cholesky(A, lower=False)
        except LinAlgError as e:
            raise NotPositiveDefiniteError('Matrix is not positive definite') from e

        if sparse:
            return sp.sparse.csc_matrix(L)
        return L
|
Choose the best possible cholesky factorizor.
if possible, import the Scikit-Sparse sparse Cholesky method.
Permutes the output L to ensure A = L.H . L
otherwise defaults to numpy's non-sparse version
Parameters
----------
A : array-like
array to decompose
sparse : boolean, default: True
whether to return a sparse array
verbose : bool, default: True
whether to print warnings
|
def invalidate_cache(self, obj=None, queryset=None,
                     extra=None, force_all=False):
    """
    Method that should be called by all triggers to invalidate the
    cache for an item(s).

    Should be overridden by inheriting classes to customize behavior.

    :param obj: single model instance whose cache entry is stale.
    :param queryset: queryset of stale objects; when given, the whole
        cache for the model is flushed (``force_all`` is forced on).
    :param extra: extra data forwarded to the cache manager.
    :param force_all: invalidate everything cached for this model.
    """
    if self.cache_manager:
        # Identity test instead of `!= None`: a queryset may customize
        # equality operators, so `is not None` is the safe check.
        if queryset is not None:
            force_all = True
        self.cache_manager.invalidate_cache(self.model, instance=obj,
                                            extra=extra,
                                            force_all=force_all)
|
Method that should be called by all triggers to invalidate the
cache for an item(s).
Should be overridden by inheriting classes to customize behavior.
|
def solveConsRepAgent(solution_next,DiscFac,CRRA,IncomeDstn,CapShare,DeprFac,PermGroFac,aXtraGrid):
    '''
    Solve one period of the simple representative agent consumption-saving model.

    Parameters
    ----------
    solution_next : ConsumerSolution
        Solution to the next period's problem (i.e. previous iteration).
    DiscFac : float
        Intertemporal discount factor for future utility.
    CRRA : float
        Coefficient of relative risk aversion.
    IncomeDstn : [np.array]
        A list containing three arrays of floats, representing a discrete
        approximation to the income process between the period being solved
        and the one immediately following (in solution_next). Order: event
        probabilities, permanent shocks, transitory shocks.
    CapShare : float
        Capital's share of income in Cobb-Douglas production function.
    DeprFac : float
        Depreciation rate of capital.
    PermGroFac : float
        Expected permanent income growth factor at the end of this period.
    aXtraGrid : np.array
        Array of "extra" end-of-period asset values-- assets above the
        absolute minimum acceptable level. In this model, the minimum acceptable
        level is always zero.

    Returns
    -------
    solution_now : ConsumerSolution
        Solution to this period's problem (new iteration).
    '''
    # Unpack next period's solution and the income distribution
    vPfuncNext = solution_next.vPfunc
    ShkPrbsNext = IncomeDstn[0]
    PermShkValsNext = IncomeDstn[1]
    TranShkValsNext = IncomeDstn[2]

    # Make tiled versions of end-of-period assets, shocks, and probabilities
    aNrmNow = aNrmNow = aXtraGrid if False else aXtraGrid
    aNrmNow = aXtraGrid
    aNrmCount = aNrmNow.size
    ShkCount = ShkPrbsNext.size
    # Rows index asset gridpoints, columns index shock realizations.
    aNrm_tiled = np.tile(np.reshape(aNrmNow,(aNrmCount,1)),(1,ShkCount))

    # Tile arrays of the income shocks and put them into useful shapes
    PermShkVals_tiled = np.tile(np.reshape(PermShkValsNext,(1,ShkCount)),(aNrmCount,1))
    TranShkVals_tiled = np.tile(np.reshape(TranShkValsNext,(1,ShkCount)),(aNrmCount,1))
    ShkPrbs_tiled = np.tile(np.reshape(ShkPrbsNext,(1,ShkCount)),(aNrmCount,1))

    # Calculate next period's capital-to-permanent-labor ratio under each combination
    # of end-of-period assets and shock realization
    kNrmNext = aNrm_tiled/(PermGroFac*PermShkVals_tiled)

    # Calculate next period's market resources: gross capital return plus labor income
    KtoLnext = kNrmNext/TranShkVals_tiled
    RfreeNext = 1. - DeprFac + CapShare*KtoLnext**(CapShare-1.)  # gross interest factor
    wRteNext = (1.-CapShare)*KtoLnext**CapShare  # wage rate per effective labor unit
    mNrmNext = RfreeNext*kNrmNext + wRteNext*TranShkVals_tiled

    # Calculate end-of-period marginal value of assets for the RA
    # (expectation over shocks, discounted and growth-adjusted for CRRA utility)
    vPnext = vPfuncNext(mNrmNext)
    EndOfPrdvP = DiscFac*np.sum(RfreeNext*(PermGroFac*PermShkVals_tiled)**(-CRRA)*vPnext*ShkPrbs_tiled,axis=1)

    # Invert the first order condition to get consumption, then find endogenous gridpoints
    cNrmNow = EndOfPrdvP**(-1./CRRA)
    mNrmNow = aNrmNow + cNrmNow

    # Construct the consumption function and the marginal value function
    # (prepend the origin: zero consumption at zero market resources)
    cFuncNow = LinearInterp(np.insert(mNrmNow,0,0.0),np.insert(cNrmNow,0,0.0))
    vPfuncNow = MargValueFunc(cFuncNow,CRRA)

    # Construct and return the solution for this period
    solution_now = ConsumerSolution(cFunc=cFuncNow,vPfunc=vPfuncNow)
    return solution_now
|
Solve one period of the simple representative agent consumption-saving model.
Parameters
----------
solution_next : ConsumerSolution
Solution to the next period's problem (i.e. previous iteration).
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
CapShare : float
Capital's share of income in Cobb-Douglas production function.
DeprFac : float
Depreciation rate of capital.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
aXtraGrid : np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level. In this model, the minimum acceptable
level is always zero.
Returns
-------
solution_now : ConsumerSolution
Solution to this period's problem (new iteration).
|
def get_by(self, field, value):
    """
    Gets the list of firmware baseline resources managed by the appliance. Optional parameters can be used to
    filter the list of resources returned.

    The search is case-insensitive for string values.

    Args:
        field: Field name to filter.
        value: Value to filter.

    Returns:
        list: List of firmware baseline resources.
    """
    def _matches(item_value):
        # The documented contract is a case-insensitive search; fold case
        # when both sides are strings, otherwise use plain equality.
        if isinstance(item_value, str) and isinstance(value, str):
            return item_value.casefold() == value.casefold()
        return item_value == value

    return [item for item in self.get_all() if _matches(item.get(field))]
|
Gets the list of firmware baseline resources managed by the appliance. Optional parameters can be used to
filter the list of resources returned.
The search is case-insensitive.
Args:
field: Field name to filter.
value: Value to filter.
Returns:
list: List of firmware baseline resources.
|
def _buildTraitCovar(self, trait_covar_type='freeform', rank=1, fixed_trait_covar=None, jitter=1e-4):
    """
    Internal function that builds the trait covariance matrix using the LIMIX framework.

    Args:
        trait_covar_type: type of covariance to use. Default 'freeform'. Possible values are
            'freeform', 'diag', 'lowrank', 'lowrank_id', 'lowrank_diag',
            'block', 'block_id', 'block_diag', 'fixed'.
        rank: rank of a possible lowrank component (default 1)
        fixed_trait_covar: PxP matrix for the (predefined) trait-to-trait covariance matrix if fixed type is used
        jitter: diagonal contribution added to freeform covariance matrices for regularization

    Returns:
        LIMIX::Covariance for Trait covariance matrix
    """
    assert trait_covar_type in ['freeform', 'diag', 'lowrank', 'lowrank_id', 'lowrank_diag', 'block', 'block_id', 'block_diag', 'fixed'], 'VarianceDecomposition:: trait_covar_type not valid'
    if trait_covar_type=='freeform':
        cov = FreeFormCov(self.P, jitter=jitter)
    elif trait_covar_type=='fixed':
        assert fixed_trait_covar is not None, 'VarianceDecomposition:: set fixed_trait_covar'
        assert fixed_trait_covar.shape[0]==self.P, 'VarianceDecomposition:: Incompatible shape for fixed_trait_covar'
        assert fixed_trait_covar.shape[1]==self.P, 'VarianceDecomposition:: Incompatible shape for fixed_trait_covar'
        cov = FixedCov(fixed_trait_covar)
    elif trait_covar_type=='diag':
        cov = DiagonalCov(self.P)
    elif trait_covar_type=='lowrank':
        cov = LowRankCov(self.P, rank=rank)
    elif trait_covar_type=='lowrank_id':
        cov = SumCov(LowRankCov(self.P, rank=rank), FixedCov(sp.eye(self.P)))
    elif trait_covar_type=='lowrank_diag':
        cov = SumCov(LowRankCov(self.P, rank=rank), DiagonalCov(self.P))
    elif trait_covar_type=='block':
        cov = FixedCov(sp.ones([self.P, self.P]))
    elif trait_covar_type=='block_id':
        cov1 = FixedCov(sp.ones([self.P, self.P]))
        cov2 = FixedCov(sp.eye(self.P))
        cov = SumCov(cov1, cov2)
    elif trait_covar_type=='block_diag':
        cov1 = FixedCov(sp.ones([self.P, self.P]))
        # BUGFIX: this branch used to duplicate 'block_id' (fixed identity);
        # a block + *diagonal* covariance needs a free diagonal component,
        # mirroring the lowrank_id / lowrank_diag distinction above.
        cov2 = DiagonalCov(self.P)
        cov = SumCov(cov1, cov2)
    return cov
|
Internal functions that builds the trait covariance matrix using the LIMIX framework
Args:
trait_covar_type: type of covariance to use. Default 'freeform'. possible values are
rank: rank of a possible lowrank component (default 1)
fixed_trait_covar: PxP matrix for the (predefined) trait-to-trait covariance matrix if fixed type is used
jitter: diagonal contribution added to freeform covariance matrices for regularization
Returns:
LIMIX::Covariance for Trait covariance matrix
|
def normalize_val(val):
    """Normalize a JSON/YAML-derived value for Vault resource comparisons.

    Digit strings become ints, lists become comma-joined strings, and
    ``None`` becomes the empty string; everything else passes through.
    """
    if is_unicode(val) and val.isdigit():
        return int(val)
    if isinstance(val, list):
        return ','.join(val)
    if val is None:
        return ''
    return val
|
Normalize JSON/YAML derived values as they pertain
to Vault resources and comparison operations
|
def message_user(self, username, domain, subject, message):
    """Send *message* to ``username@domain``.

    Older servers only support ``send_message_chat``, where the subject is
    discarded because headline messages are not stored by mod_offline;
    newer servers get ``send_message`` with subject and type set.
    """
    kwargs = {
        'body': message,
        'from': domain,
        'to': '%s@%s' % (username, domain),
    }
    use_legacy = self.api_version <= (14, 7)
    if use_legacy:
        # TODO: it's unclear when send_message was introduced
        command = 'send_message_chat'
    else:
        command = 'send_message'
        kwargs['subject'] = subject
        kwargs['type'] = 'normal'
    result = self.rpc(command, **kwargs)
    if result['res'] != 0:
        raise BackendError(result.get('text', 'Unknown Error'))
|
Currently use send_message_chat and discard subject, because headline messages are not
stored by mod_offline.
|
def text_list_to_colors(names):
    '''
    Map a list of names (strings) to colors; similar strings are assigned
    similar colors.
    '''
    # STEP A: pairwise normalized Levenshtein similarity between all strings
    n = len(names)
    Dnames = np.zeros((n, n))
    for i, first in enumerate(names):
        for j, second in enumerate(names):
            Dnames[i, j] = 1 - 2.0 * levenshtein(first, second) / float(len(first + second))
    # STEP B: PCA reduction of the distance space to a single dimension
    pca = sklearn.decomposition.PCA(n_components=1)
    pca.fit(Dnames)
    one_d = pca.transform(Dnames)
    # STEP C: stretch to [0, 255] and look up each value in a jet-colormap
    one_d = 255 * (one_d - one_d.min()) / (one_d.max() - one_d.min())
    palette = generateColorMap()
    return [palette[int(v)] for v in one_d]
|
Generates a list of colors based on a list of names (strings). Similar strings correspond to similar colors.
|
def remove_task_db(self, fs_id):
    '''Delete the task identified by *fs_id* from the database.'''
    self.cursor.execute('DELETE FROM tasks WHERE fsid=?', [fs_id, ])
    self.check_commit()
|
Remove the task from the database
|
def eqy(ql, qs, ns=None,):
    """
    *New in pywbem 0.12*

    Wrapper for :meth:`~pywbem.WBEMConnection.ExecQuery`: execute a query
    in a namespace.

    Parameters:

      ql (:term:`string`):
        Query language of `qs`, e.g. "DMTF:CQL" for CIM Query Language or
        "WQL" for WBEM Query Language. "DMTF:FQL" is not valid here
        because this is not a filter query.

      qs (:term:`string`):
        Query string written in the language named by `ql`.

      ns (:term:`string`):
        CIM namespace to use (case independent); `None` means the default
        namespace of the connection.

    Returns:

      A list of :class:`~pywbem.CIMInstance` objects representing the
      query result. Their `path` attribute identifies creation class and
      target namespace, but they are not addressable instances.
    """  # noqa: E501
    query_args = dict(QueryLanguage=ql, Query=qs, namespace=ns)
    return CONN.ExecQuery(**query_args)
|
*New in pywbem 0.12*
This function is a wrapper for :meth:`~pywbem.WBEMConnection.ExecQuery`.
Execute a query in a namespace.
Parameters:
ql (:term:`string`):
Name of the query language used in the `qs` parameter, e.g.
"DMTF:CQL" for CIM Query Language, and "WQL" for WBEM Query
Language. Because this is not a filter query, "DMTF:FQL" is not a
valid query language for this request.
qs (:term:`string`):
Query string in the query language specified in the `ql` parameter.
ns (:term:`string`):
Name of the CIM namespace to be used (case independent).
If `None`, defaults to the default namespace of the connection.
Returns:
A list of :class:`~pywbem.CIMInstance` objects that represents
the query result.
These instances have their `path` attribute set to identify
their creation class and the target namespace of the query, but
they are not addressable instances.
|
def dpsi2_dtheta(self, dL_dpsi2, Z, mu, S, target):
    """Accumulate into *target* the gradient of the psi2 statistic w.r.t.
    the kernel hyperparameters.

    Shape N,num_inducing,num_inducing,Ntheta

    target[0] receives the variance gradient; target[1] (or target[1:]
    when ARD) receives the lengthscale gradient(s).
    """
    # Refresh the cached psi statistics for these inputs.
    self._psi_computations(Z, mu, S)
    d_var = 2.*self._psi2 / self.variance
    # d_length = 2.*self._psi2[:, :, :, None] * (self._psi2_Zdist_sq * self._psi2_denom + self._psi2_mudist_sq + S[:, None, None, :] / self.lengthscale2) / (self.lengthscale * self._psi2_denom)
    d_length = -2.*self._psi2[:, :, :, None] * (self._psi2_Zdist_sq * self._psi2_denom + self._psi2_mudist_sq + S[:, None, None, :] * self.inv_lengthscale2) / (self.inv_lengthscale * self._psi2_denom)
    target[0] += np.sum(dL_dpsi2 * d_var)
    dpsi2_dlength = d_length * dL_dpsi2[:, :, :, None]
    if not self.ARD:
        # Single shared lengthscale: collapse all axes into one scalar.
        target[1] += dpsi2_dlength.sum() # *(-self.lengthscale2)
    else:
        # One lengthscale per input dimension: reduce over N and both inducing axes.
        target[1:] += dpsi2_dlength.sum(0).sum(0).sum(0)
|
Shape N,num_inducing,num_inducing,Ntheta
|
def set_pixel_spacing(hdr, spacing):
    r"""Deprecated synonym of `~medpy.io.header.set_voxel_spacing`."""
    # BUGFIX: the warning used to name get_pixel_spacing(); this function
    # is set_pixel_spacing, so name it correctly in the message.
    warnings.warn('set_pixel_spacing() is deprecated, use set_voxel_spacing() instead', category=DeprecationWarning)
    set_voxel_spacing(hdr, spacing)
|
r"""Depreciated synonym of `~medpy.io.header.set_voxel_spacing`.
|
def function(self):
    """
    The callable handed to the `fit_function` specified in
    `scipy_data_fitting.Fit.options`, and used by
    `scipy_data_fitting.Fit.pointspace` to generate plots, etc.

    Its argument count and order follow items 1, 2 and 3 as listed in
    `scipy_data_fitting.Fit.all_variables`. All parameter values are
    multiplied by their corresponding prefix before being passed in.
    By default this is `scipy_data_fitting.Fit.expression` made callable
    via `scipy_data_fitting.Model.lambdify`
    (see also `scipy_data_fitting.Fit.lambdify_options`).
    """
    if hasattr(self, '_function'):
        return self._function
    base = self.model.lambdify(self.expression, self.all_variables, **self.lambdify_options)
    # Append the fixed values (looked up at call time) to the free arguments.
    self._function = lambda *args: base(*(args + self.fixed_values))
    return self._function
|
The function passed to the `fit_function` specified in `scipy_data_fitting.Fit.options`,
and used by `scipy_data_fitting.Fit.pointspace` to generate plots, etc.
Its number of arguments and their order is determined by items 1, 2, and 3
as listed in `scipy_data_fitting.Fit.all_variables`.
All parameter values will be multiplied by their corresponding prefix before being passed to this function.
By default, it is a functional form of `scipy_data_fitting.Fit.expression` converted
using `scipy_data_fitting.Model.lambdify`.
See also `scipy_data_fitting.Fit.lambdify_options`.
|
def _remove_hlink(self):
"""
Remove the a:hlinkClick or a:hlinkHover element, including dropping
any relationship it might have.
"""
hlink = self._hlink
if hlink is None:
return
rId = hlink.rId
if rId:
self.part.drop_rel(rId)
self._element.remove(hlink)
|
Remove the a:hlinkClick or a:hlinkHover element, including dropping
any relationship it might have.
|
def is_none(entity, prop, name):
    """bool: True if the value of a property is None."""
    has_value = is_not_empty(entity, prop, name)
    return has_value and getattr(entity, name) is None
|
bool: True if the value of a property is None.
|
def compute_column_width_and_height(self):
    '''
    Set the width of every column and the height of every row in the table.
    '''
    if not self.rows:
        return  # nothing to measure for an empty table
    # Row heights: every cell becomes as tall as the tallest cell in its row.
    for row in self.rows:
        if row.columns:
            row_height = max(len(cell.get_cell_lines()) for cell in row.columns)
        else:
            row_height = 1
        for cell in row.columns:
            cell.height = row_height
    # Column widths: every column becomes as wide as its widest line in any row.
    column_count = max(len(row.columns) for row in self.rows)
    for idx in range(column_count):
        lines_per_row = (row.get_cell_lines(idx) for row in self.rows)
        column_width = max(len(line) for line in chain.from_iterable(lines_per_row))
        for row in self.rows:
            if idx < len(row.columns):
                row.columns[idx].width = column_width
|
compute and set the column width for all colls in the table
|
def justify(clr, argd):
    """ Justify str/Colr based on user args. """
    for flag, method in (
            ('--ljust', clr.ljust),
            ('--rjust', clr.rjust),
            ('--center', clr.center)):
        if not argd[flag]:
            continue
        arg = argd[flag]
        if arg in ('0', '-'):
            # Bare 0 / - means "use the full terminal width".
            width = get_terminal_size(default=(80, 35))[0]
        else:
            width = try_int(arg, minimum=None)
            if width < 0:
                # Negative value, subtract from terminal width.
                width = get_terminal_size(default=(80, 35))[0] + width
        return method(width)
    # No justify args given.
    return clr
|
Justify str/Colr based on user args.
|
def build_html():
    """Build the html, to be served by IndexHandler"""
    data_source = AjaxDataSource(data_url='./data',
                                 polling_interval=INTERVAL,
                                 method='GET')

    # OHLC plot
    ohlc_fig = figure(plot_height=400,
                      title='OHLC',
                      sizing_mode='scale_width',
                      tools="xpan,xwheel_zoom,xbox_zoom,reset",
                      x_axis_type=None,
                      y_axis_location="right",
                      y_axis_label="Price ($)")
    ohlc_fig.x_range.follow = "end"
    ohlc_fig.x_range.follow_interval = 100
    ohlc_fig.x_range.range_padding = 0
    ohlc_fig.line(x='time', y='average', alpha=0.25, line_width=3, color='black',
                  source=data_source)
    ohlc_fig.line(x='time', y='ma', alpha=0.8, line_width=2, color='steelblue',
                  source=data_source)
    ohlc_fig.segment(x0='time', y0='low', x1='time', y1='high', line_width=2,
                     color='black', source=data_source)
    ohlc_fig.segment(x0='time', y0='open', x1='time', y1='close', line_width=8,
                     color='color', source=data_source, alpha=0.8)

    # MACD plot, sharing its x range with the OHLC plot above
    macd_fig = figure(plot_height=200,
                      title='MACD',
                      sizing_mode='scale_width',
                      x_range=ohlc_fig.x_range,
                      x_axis_label='Time (s)',
                      tools="xpan,xwheel_zoom,xbox_zoom,reset",
                      y_axis_location="right")
    macd_fig.line(x='time', y='macd', color='darkred', line_width=2, source=data_source)
    macd_fig.line(x='time', y='macd9', color='navy', line_width=2, source=data_source)
    macd_fig.segment(x0='time', y0=0, x1='time', y1='macdh', line_width=6, color='steelblue',
                     alpha=0.5, source=data_source)

    # Stack the two figures and render them into the page template
    grid = gridplot([[ohlc_fig], [macd_fig]], toolbar_location="left", plot_width=1000)
    script, div = components(grid, theme=theme)
    return template.render(resources=CDN.render(), script=script, div=div)
|
Build the html, to be served by IndexHandler
|
def main(self, function):
    """
    Decorator that marks *function* as the main function of the experiment.

    The main function is the default command run when no command is
    specified or when calling the run() method. Usually it is more
    convenient to use ``automain`` instead.
    """
    wrapped = self.command(function)
    self.default_command = wrapped.__name__
    return wrapped
|
Decorator to define the main function of the experiment.
The main function of an experiment is the default command that is being
run when no command is specified, or when calling the run() method.
Usually it is more convenient to use ``automain`` instead.
|
def retrieve_import_alias_mapping(names_list):
    """Create a dictionary mapping each alias to its real imported name.

    Used as import_alias_names in module_definitions.py and visit_Call;
    entries without an ``asname`` are skipped.
    """
    return {alias.asname: alias.name
            for alias in names_list if alias.asname}
|
Creates a dictionary mapping aliases to their respective name.
import_alias_names is used in module_definitions.py and visit_Call
|
def primary_keys_full(cls):
    """Get primary key properties for a SQLAlchemy cls.

    Taken from marshmallow_sqlalchemy.
    """
    mapper = cls.__mapper__
    return list(map(mapper.get_property_by_column, mapper.primary_key))
|
Get primary key properties for a SQLAlchemy cls.
Taken from marshmallow_sqlalchemy
|
def extract_all(zipfile, dest_folder):
    """
    Extract every member of the zip archive into *dest_folder*.

    :param zipfile: path or file-like object of the archive.
    :param dest_folder: directory that receives the extracted files.

    NOTE(review): the old docstring promised recursive extraction of
    nested archives; the implementation never did that, so it is not
    claimed here.
    """
    # BUGFIX: ZipFile.extract() takes a *member* name, so passing the
    # destination folder extracted nothing useful; extractall() unpacks
    # every member to the given path. Context manager closes the handle.
    with ZipFile(zipfile) as archive:
        archive.extractall(dest_folder)
|
reads the zip file, determines compression
and unzips recursively until source files
are extracted
|
def rename_tier(self, id_from, id_to):
    """Rename a tier, re-parenting every child tier that referenced it.

    :param str id_from: Original name of the tier.
    :param str id_to: Target name of the tier.
    :throws KeyError: If the tier doesn't exist.
    """
    children = self.get_child_tiers_for(id_from)
    # Move the entry under its new key and update its own TIER_ID...
    self.tiers[id_to] = self.tiers.pop(id_from)
    self.tiers[id_to][2]['TIER_ID'] = id_to
    # ...then point every child tier at the new parent name.
    for child_tier in children:
        self.tiers[child_tier][2]['PARENT_REF'] = id_to
|
Rename a tier. Note that this renames also the child tiers that have
the tier as a parent.
:param str id_from: Original name of the tier.
:param str id_to: Target name of the tier.
:throws KeyError: If the tier doesnt' exist.
|
def update_iscsi_settings(self, iscsi_data):
    """Update the iSCSI settings by issuing an HTTP PATCH on this resource.

    :param iscsi_data: iSCSI configuration data sent as the PATCH body.
    """
    self._conn.patch(self.path, data=iscsi_data)
|
Update iscsi data
:param data: default iscsi config data
|
def groups_kick(self, room_id, user_id, **kwargs):
    """Remove a user from the private group."""
    payload = dict(roomId=room_id, userId=user_id, kwargs=kwargs)
    return self.__call_api_post('groups.kick', **payload)
|
Removes a user from the private group.
|
def mac_address_table_aging_time_conversational_time_out(self, **kwargs):
    """Build the config element for
    mac-address-table/aging-time/conversational-time-out and hand it to
    the callback (auto-generated code).
    """
    config = ET.Element("config")
    mac_table = ET.SubElement(config, "mac-address-table", xmlns="urn:brocade.com:mgmt:brocade-mac-address-table")
    aging = ET.SubElement(mac_table, "aging-time")
    timeout_el = ET.SubElement(aging, "conversational-time-out")
    timeout_el.text = kwargs.pop('conversational_time_out')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
Auto Generated Code
|
def get(self, request, *args, **kwargs):
    """Render the workflow view in response to an HTTP GET request.

    NOTE(review): as in the original, a NotAvailable error assumes
    ``exceptions.handle`` re-raises; otherwise the context variable would
    be unbound below — confirm with the exceptions module's behavior.
    """
    try:
        ctx = self.get_context_data(**kwargs)
    except exceptions.NotAvailable:
        exceptions.handle(request)
    self.set_workflow_step_errors(ctx)
    return self.render_to_response(ctx)
|
Handler for HTTP GET requests.
|
def instruction_list_to_easm(instruction_list: list) -> str:
    """Convert a list of instructions into an easm op code string.

    :param instruction_list: dicts with "address", "opcode" and an
        optional "argument" key.
    :return: one "address opcode [argument]" line per instruction.
    """
    lines = []
    for instruction in instruction_list:
        text = "{} {}".format(instruction["address"], instruction["opcode"])
        if "argument" in instruction:
            text += " " + instruction["argument"]
        lines.append(text)
    return "".join(line + "\n" for line in lines)
|
Convert a list of instructions into an easm op code string.
:param instruction_list:
:return:
|
def schedule_to_array(schedule, events, slots):
    """Convert a schedule from schedule to array form.

    Parameters
    ----------
    schedule : list or tuple
        of instances of :py:class:`resources.ScheduledItem`
    events : list or tuple
        of :py:class:`resources.Event` instances
    slots : list or tuple
        of :py:class:`resources.Slot` instances

    Returns
    -------
    np.array
        An E by S 0/1 matrix (E events, S slots) whose entry (i, j) is 1
        iff event i is scheduled in slot j.
    """
    shape = (len(events), len(slots))
    array = np.zeros(shape, dtype=np.int8)
    for scheduled in schedule:
        row, col = events.index(scheduled.event), slots.index(scheduled.slot)
        array[row, col] = 1
    return array
|
Convert a schedule from schedule to array form
Parameters
----------
schedule : list or tuple
of instances of :py:class:`resources.ScheduledItem`
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
Returns
-------
np.array
An E by S array (X) where E is the number of events and S the
number of slots. Xij is 1 if event i is scheduled in slot j and
zero otherwise
|
def calculate_integral(self, T1, T2):
    r'''Method to compute the enthalpy integral of heat capacity from
    `T1` to `T2`. Analytically integrates across the piecewise spline
    as necessary.

    Parameters
    ----------
    T1 : float
        Initial temperature, [K]
    T2 : float
        Final temperature, [K]

    Returns
    -------
    dH : float
        Enthalpy difference between `T1` and `T2`, [J/mol]
    '''
    # Simplify the problem so we can assume T2 >= T1
    if T2 < T1:
        flipped = True
        T1, T2 = T2, T1
    else:
        flipped = False
    # Fastest case - only one coefficient set, occurs surprisingly often
    if self.n == 1:
        dH = (Zabransky_cubic_integral(T2, *self.coeff_sets[0])
              - Zabransky_cubic_integral(T1, *self.coeff_sets[0]))
    else:
        # Locate which coefficient interval each temperature falls in.
        ind_T1, ind_T2 = self._coeff_ind_from_T(T1), self._coeff_ind_from_T(T2)
        # Second fastest case - both are in the same coefficient set
        if ind_T1 == ind_T2:
            dH = (Zabransky_cubic_integral(T2, *self.coeff_sets[ind_T2])
                  - Zabransky_cubic_integral(T1, *self.coeff_sets[ind_T1]))
        # Go through the loop if we need to - inevitably slow
        else:
            # Partial interval from T1 up to the first interval boundary.
            dH = (Zabransky_cubic_integral(self.Ts[ind_T1], *self.coeff_sets[ind_T1])
                  - Zabransky_cubic_integral(T1, *self.coeff_sets[ind_T1]))
            # Whole intervals between the two endpoints.
            for i in range(ind_T1, ind_T2):
                diff =(Zabransky_cubic_integral(self.Ts[i+1], *self.coeff_sets[i])
                       - Zabransky_cubic_integral(self.Ts[i], *self.coeff_sets[i]))
                dH += diff
            # Partial interval from the last boundary up to T2.
            end = (Zabransky_cubic_integral(T2, *self.coeff_sets[ind_T2])
                   - Zabransky_cubic_integral(self.Ts[ind_T2], *self.coeff_sets[ind_T2]))
            dH += end
    # Restore the sign if the limits were swapped at the top.
    return -dH if flipped else dH
|
r'''Method to compute the enthalpy integral of heat capacity from
`T1` to `T2`. Analytically integrates across the piecewise spline
as necessary.
Parameters
----------
T1 : float
Initial temperature, [K]
T2 : float
Final temperature, [K]
Returns
-------
dH : float
Enthalpy difference between `T1` and `T2`, [J/mol]
|
def file_data(self):
    """Return Group file (only supported for Document and Report)."""
    group = self._group_data
    return {
        'fileContent': self._file_content,
        'fileName': group.get('fileName'),
        'type': group.get('type'),
    }
|
Return Group file (only supported for Document and Report).
|
def bool(cls, must=None, should=None, must_not=None, minimum_number_should_match=None, boost=None):
    '''
    http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html

    Build a bool query matching boolean combinations of other queries
    (maps to a Lucene BooleanQuery). Each clause has a typed occurrence:

    'must' - the clause (query) must appear in matching documents.
    'should' - the clause (query) should appear in the matching document;
        with no 'must' clauses, one or more 'should' clauses must match a
        document (tunable via 'minimum_number_should_match').
    'must_not' - the clause (query) must not appear in the matching
        documents; a query cannot consist only of 'must_not' clauses.
    'minimum_number_should_match' - minimum number of 'should' clauses to match.
    'boost' - boost value.

    > term = ElasticQuery()
    > term.term(user='kimchy')
    > query = ElasticQuery()
    > query.bool(should=term)
    > query.query()
    { 'bool' : { 'should' : { 'term' : {'user':'kimchy'}}}}
    '''
    instance = cls(bool={})
    clauses = (('must', must),
               ('should', should),
               ('must_not', must_not),
               ('minimum_number_should_match', minimum_number_should_match),
               ('boost', boost))
    for key, value in clauses:
        if value is not None:
            instance['bool'][key] = value
    return instance
|
http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html
A query that matches documents matching boolean combinations of other queris. The bool query maps to Lucene BooleanQuery. It is built using one of more boolean clauses, each clause with a typed occurrence. The occurrence types are:
'must' - The clause(query) must appear in matching documents.
'should' - The clause(query) should appear in the matching document. A boolean query with no 'must' clauses, one or more 'should' clauses must match a document. The minimum number of 'should' clauses to match can be set using 'minimum_number_should_match' parameter.
'must_not' - The clause(query) must not appear in the matching documents. Note that it is not possible to search on documents that only consists of a 'must_not' clause(s).
'minimum_number_should_match' - Minimum number of documents that should match
'boost' - boost value
> term = ElasticQuery()
> term.term(user='kimchy')
> query = ElasticQuery()
> query.bool(should=term)
> query.query()
{ 'bool' : { 'should' : { 'term' : {'user':'kimchy'}}}}
|
def configure_profile(msg_type, profile_name, data, auth):
    """
    Create the profile entry in the messages configuration file.

    Args:
        :msg_type: (str) message type to create config entry.
        :profile_name: (str) name of the profile entry
        :data: (dict) dict values for the 'settings'
        :auth: (dict) auth parameters
    """
    with jsonconfig.Config("messages", indent=4) as cfg:
        write_data(msg_type, profile_name, data, cfg)
        write_auth(msg_type, profile_name, auth, cfg)
        created_msg = "[+] Configuration entry for <" + profile_name + "> created."
        print(created_msg)
        print("[+] Configuration file location: " + cfg.filename)
|
Create the profile entry.
Args:
:msg_type: (str) message type to create config entry.
:profile_name: (str) name of the profile entry
:data: (dict) dict values for the 'settings'
:auth: (dict) auth parameters
|
def base64url_decode(input):
    """Helper method to base64url_decode a string.

    Args:
        input (str or bytes): A base64url_encoded value to decode.

    Returns:
        bytes: the decoded payload.
    """
    # BUGFIX: the docstring accepts str, but `input += b'='` would raise
    # TypeError on str input; normalize to bytes first.
    if isinstance(input, str):
        input = input.encode('ascii')
    rem = len(input) % 4
    if rem > 0:
        # Restore the padding that base64url encoding strips.
        input += b'=' * (4 - rem)
    return base64.urlsafe_b64decode(input)
|
Helper method to base64url_decode a string.
Args:
input (str): A base64url_encoded string to decode.
|
def delete(self, uid):
    """Example DELETE method.

    Removes the record stored under ``uid`` and returns it in an OK
    response, or a not-found response if no such record exists.
    """
    try:
        record = resource_db[uid].copy()
    except KeyError:
        # Bug fix: the original passed the raw '{}' template string without
        # interpolating the uid into the error message.
        return self.response_factory.not_found(
            errors=['Resource with UID {} does not exist!'.format(uid)])
    del resource_db[uid]
    return self.response_factory.ok(data=record)
|
Example DELETE method.
|
def load(controller=None, filename="", name=None, rsrc=None):
    "Create the GUI objects defined in the resource (filename or python struct)"
    # if no filename is given, search for the rsrc.py with the same module name:
    if not filename and not rsrc:
        # NOTE: Python 2 code — types.ClassType (old-style classes) and
        # basestring below do not exist on Python 3.
        if isinstance(controller, types.ClassType):
            # use the controller class module (to get __file__ for rsrc.py)
            mod_dict = util.get_class_module_dict(controller)
        elif isinstance(controller, types.ModuleType):
            # use the module provided as controller
            mod_dict = controller.__dict__
        elif isinstance(controller, Controller):
            # use the instance provided as controller
            mod_dict = util.get_class_module_dict(controller)
        else:
            # use the caller module (no controller explicitelly provided)
            mod_dict = util.get_caller_module_dict()
        # do not use as controller if it was explicitly False or empty
        if controller is None:
            controller = mod_dict
        if util.main_is_frozen():
            # running standalone (frozen executable): rsrc lives in app dir
            if '__file__' in mod_dict:
                filename = os.path.split(mod_dict['__file__'])[1]
            else:
                # __main__ has not __file__ under py2exe!
                filename = os.path.split(sys.argv[0])[-1]
            filename = os.path.join(util.get_app_dir(), filename)
        else:
            # figure out the .rsrc.py filename based on the module name
            filename = mod_dict['__file__']
            # chop the .pyc or .pyo from the end
            base, ext = os.path.splitext(filename)
            filename = base + ".rsrc.py"
    # when rsrc is a file name, open, read and eval it:
    if isinstance(filename, basestring):
        rsrc = parse(filename)
    ret = []
    # search over the resource to create the requested object (or all)
    for win in rsrc:
        if not name or win['name'] == name:
            ret.append(build_window(win))
    # associate event handlers
    if ret and controller:
        connect(ret[0], controller)
        # return the first instance created (if any):
        return ret[0]
    else:
        # return all the instances created -for the designer- (if any):
        return ret
|
Create the GUI objects defined in the resource (filename or python struct)
|
def _save_namepaths_bids_derivatives(self, f, tag, save_directory, suffix=None):
    """
    Creates output directory and output name

    Parameters
    ----------
    f : str
        input file name; includes the file bids_suffix
    tag : str
        what should be added to f in the output file.
    save_directory : str
        additional directory that the output file should go in
    suffix : str
        add new suffix to data

    Returns
    -------
    save_name : str
        previous filename with new tag
    save_dir : str
        directory where it will be saved
    base_dir : str
        subjective base directory (i.e. derivatives/teneto/func[/anythingelse/])
    """
    file_name = f.split('/')[-1].split('.')[0]
    if tag != '':
        tag = '_' + tag
    if suffix:
        file_name, _ = drop_bids_suffix(file_name)
        save_name = file_name + tag
        save_name += '_' + suffix
    else:
        save_name = file_name + tag
    paths_post_pipeline = f.split(self.pipeline)
    if self.pipeline_subdir:
        paths_post_pipeline = paths_post_pipeline[1].split(self.pipeline_subdir)[0]
    else:
        paths_post_pipeline = paths_post_pipeline[1].split(file_name)[0]
    base_dir = self.BIDS_dir + '/derivatives/' + 'teneto_' + \
        teneto.__version__ + '/' + paths_post_pipeline + '/'
    save_dir = base_dir + '/' + save_directory + '/'
    if not os.path.exists(save_dir):
        # This can run in parallel: another worker may create the directory
        # between the exists() check and makedirs(), raising OSError.
        # (Bug fix: was a bare "except:", which swallowed every error.)
        try:
            os.makedirs(save_dir)
        except OSError:
            # Wait 2 seconds so nothing tries to save into the directory
            # before the competing worker has finished creating it.
            time.sleep(2)
    description_path = (self.BIDS_dir + '/derivatives/' + 'teneto_'
                        + teneto.__version__ + '/dataset_description.json')
    if not os.path.exists(description_path):
        try:
            with open(description_path, 'w') as fs:
                json.dump(self.tenetoinfo, fs)
        except OSError:
            # Same as above, in case parallel workers duplicate the write.
            time.sleep(2)
    return save_name, save_dir, base_dir
|
Creates output directory and output name
Parameters
---------
f : str
input files, includes the file bids_suffix
tag : str
what should be added to f in the output file.
save_directory : str
additional directory that the output file should go in
suffix : str
add new suffix to data
Returns
-------
save_name : str
previous filename with new tag
save_dir : str
directory where it will be saved
base_dir : str
subjective base directory (i.e. derivatives/teneto/func[/anythingelse/])
|
def _get_config_type(cla55: type) -> Optional[str]:
    """
    Find the name (if any) that a subclass was registered under.
    We do this simply by iterating through the registry until we
    find it.
    """
    # Pytorch RNN variants are special-cased rather than registered.
    special_rnns = {torch.nn.RNN: "rnn", torch.nn.LSTM: "lstm", torch.nn.GRU: "gru"}
    if cla55 in special_rnns:
        return special_rnns[cla55]

    for registered in Registrable._registry.values():
        for registered_name, registered_class in registered.items():
            if registered_class == cla55:
                return registered_name
            # Special handling for initializer functions, which are
            # registered via a wrapper class.
            if hasattr(registered_class, '_initializer_wrapper'):
                init_function = registered_class()._init_function
                if init_function == cla55:
                    return init_function.__name__.rstrip("_")
    return None
|
Find the name (if any) that a subclass was registered under.
We do this simply by iterating through the registry until we
find it.
|
def _set_policy(self, v, load=False):
    """
    Setter method for policy, mapped from YANG variable /rbridge_id/maps/policy (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_policy is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_policy() directly.
    """
    # Auto-generated (pyangbind-style) setter: coerce the value into the
    # generated YANG list type and store it, raising a descriptive
    # ValueError when the value is not compatible.
    if hasattr(v, "_utype"):
        # Unwrap a typed value back to its native representation first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGListType("policyname",policy.policy, yang_name="policy", rest_name="policy", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='policyname', extensions={u'tailf-common': {u'info': u'Configure Policy', u'callpoint': u'MapsPolicy'}}), is_container='list', yang_name="policy", rest_name="policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Policy', u'callpoint': u'MapsPolicy'}}, namespace='urn:brocade.com:mgmt:brocade-maps', defining_module='brocade-maps', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """policy must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("policyname",policy.policy, yang_name="policy", rest_name="policy", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='policyname', extensions={u'tailf-common': {u'info': u'Configure Policy', u'callpoint': u'MapsPolicy'}}), is_container='list', yang_name="policy", rest_name="policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Policy', u'callpoint': u'MapsPolicy'}}, namespace='urn:brocade.com:mgmt:brocade-maps', defining_module='brocade-maps', yang_type='list', is_config=True)""",
        })
    self.__policy = t
    # Notify the parent object (if wired up) that configuration changed.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for policy, mapped from YANG variable /rbridge_id/maps/policy (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_policy() directly.
|
def _get_benchmark_handler(self, last_trade, freq='minutely'):
    '''
    Setup a custom benchmark handler or let zipline manage it
    (returns None so zipline falls back to its own handling).
    '''
    if utils.is_live(last_trade):
        benchmark = LiveBenchmark(last_trade, frequency=freq)
        return benchmark.surcharge_market_data
    return None
|
Setup a custom benchmark handler or let zipline manage it
|
def browseprofile(profilelog):
    '''
    Browse interactively a profile log in console

    :param profilelog: path to a pstats profile dump to load.
    '''
    print('Starting the pstats profile browser...\n')
    try:
        browser = ProfileBrowser(profilelog)
        # Bug fix: "print >> stream, ..." is Python-2-only syntax; writing
        # to the stream directly is portable across Python 2 and 3.
        browser.stream.write("Welcome to the profile statistics browser. Type help to get started.\n")
        browser.cmdloop()
        browser.stream.write("Goodbye.\n")
    except KeyboardInterrupt:
        # Allow Ctrl-C to exit the browser cleanly.
        pass
|
Browse interactively a profile log in console
|
def _init_metadata(self):
    """stub

    Initialize the OSID metadata maps used by this form: the list of
    answer choices, the per-choice display name, and the multi-answer flag.
    """
    # Metadata for the array of choice objects (required, may be empty str).
    self._choices_metadata = {
        'element_id': Id(self.my_osid_object_form._authority,
                         self.my_osid_object_form._namespace,
                         'choices'),
        'element_label': 'Choices',
        'instructions': 'Enter as many choices as you wish',
        'required': True,
        'read_only': False,
        'linked': False,
        'array': True,
        'default_object_values': [''],
        'syntax': 'OBJECT',
        'object_set': []
    }
    # Metadata for the optional short label shown for a single choice.
    self._choice_name_metadata = {
        'element_id': Id(self.my_osid_object_form._authority,
                         self.my_osid_object_form._namespace,
                         'question_string'),
        'element_label': 'choice name',
        'instructions': 'enter a short label for this choice',
        'required': False,
        'read_only': False,
        'linked': False,
        'array': False,
        'default_string_values': [''],
        'syntax': 'STRING',
        'minimum_string_length': 0,
        'maximum_string_length': 1024,
        'string_set': []
    }
    # Metadata for whether multiple answers may be selected at once.
    self._multi_answer_metadata = {
        'element_id': Id(self.my_osid_object_form._authority,
                         self.my_osid_object_form._namespace,
                         'multi_answer'),
        'element_label': 'Is Multi-Answer',
        'instructions': 'accepts a boolean (True/False) value',
        'required': True,
        'read_only': False,
        'linked': True,
        'array': False,
        'default_boolean_values': ['False'],
        'syntax': 'BOOLEAN',
        'id_set': []
    }
|
stub
|
def fmt_text(text, bg = None, fg = None, attr = None, plain = False):
    """
    Apply given console formatting around given text.

    Escape codes are prepended in the order attr, bg, fg and a reset code
    is appended whenever any formatting was applied. With ``plain`` set,
    the text is returned untouched.
    """
    if plain:
        return text
    prefix = ''
    if attr is not None:
        prefix += TEXT_FORMATING['attr'][attr]
    if bg is not None:
        prefix += TEXT_FORMATING['bg'][bg]
    if fg is not None:
        prefix += TEXT_FORMATING['fg'][fg]
    if prefix:
        return prefix + text + TEXT_FORMATING['rst']
    return text
|
Apply given console formating around given text.
|
def find_root(self):
    """Return the outermost context by walking the parent chain."""
    current = self
    while True:
        ancestor = current.parent
        if ancestor is None:
            return current
        current = ancestor
|
Finds the outermost context.
|
async def get(self, request):
    """Gets the user_id for the request.

    Gets the ticket for the request using the get_ticket() function, and
    authenticates the ticket.

    Args:
        request: aiohttp Request object.

    Returns:
        The userid for the request, or None if the ticket is not
        authenticated.
    """
    ticket = await self.get_ticket(request)
    if ticket is None:
        # No ticket present at all: unauthenticated.
        return None
    try:
        # validate() returns a tuple of (user_id, token, userdata,
        # validuntil) and raises TicketError on an invalid/expired ticket.
        now = time.time()
        fields = self._ticket.validate(ticket, self._get_ip(request), now)

        # Check if we need to reissue a ticket: refresh it once it is
        # within _reissue_time of expiring.
        if (self._reissue_time is not None and
                now >= (fields.valid_until - self._reissue_time)):
            # Reissue our ticket, and save it in our request.
            request[_REISSUE_KEY] = self._new_ticket(request, fields.user_id)

        return fields.user_id
    except TicketError as e:
        # Invalid ticket is treated as unauthenticated, not an error.
        return None
|
Gets the user_id for the request.
Gets the ticket for the request using the get_ticket() function, and
authenticates the ticket.
Args:
request: aiohttp Request object.
Returns:
The userid for the request, or None if the ticket is not
authenticated.
|
def spare_disk(self, disk_xml=None):
    """ Number of spare disk per type.
        For example: storage.ontap.filer201.disk.SATA

    Pushes one metric per disk type seen in the XML, counting disks whose
    raid-state is 'spare' (zero when a type has no spares).
    """
    spare_counts = {}
    seen_types = set()
    for disk in disk_xml:
        disk_type = disk.find('effective-disk-type').text
        seen_types.add(disk_type)
        if disk.find('raid-state').text == 'spare':
            spare_counts[disk_type] = spare_counts.get(disk_type, 0) + 1
    for disk_type in seen_types:
        self.push('spare_' + disk_type, 'disk', spare_counts.get(disk_type, 0))
|
Number of spare disk per type.
For example: storage.ontap.filer201.disk.SATA
|
def get_resource(self, path):
    """Fetch the resource at `path` from the API and return its JSON body.

    Raises a ServiceException when the response is not valid JSON.
    """
    response = self._http_request(path)
    try:
        payload = response.json()
    except ValueError:
        raise exception.ServiceException("Invalid service response.")
    return payload
|
Getting the required information from the API.
|
def scan_full(self, regex, return_string=True, advance_pointer=True):
    """
    Match from the current position.

    If `return_string` is false and a match is found, returns the number of
    characters matched.

    >>> s = Scanner("test string")
    >>> s.scan_full(r' ')
    >>> s.scan_full(r'test ')
    'test '
    >>> s.pos
    5
    >>> s.scan_full(r'stri', advance_pointer=False)
    'stri'
    >>> s.pos
    5
    >>> s.scan_full(r'stri', return_string=False, advance_pointer=False)
    4
    >>> s.pos
    5
    """
    pattern = get_regex(regex)
    self.match = pattern.match(self.string, self.pos)
    if self.match is None:
        # No match: position and return value are both untouched (None).
        return
    matched_text = self.match.group(0)
    if advance_pointer:
        self.pos = self.match.end()
    if return_string:
        return matched_text
    return len(matched_text)
|
Match from the current position.
If `return_string` is false and a match is found, returns the number of
characters matched.
>>> s = Scanner("test string")
>>> s.scan_full(r' ')
>>> s.scan_full(r'test ')
'test '
>>> s.pos
5
>>> s.scan_full(r'stri', advance_pointer=False)
'stri'
>>> s.pos
5
>>> s.scan_full(r'stri', return_string=False, advance_pointer=False)
4
>>> s.pos
5
|
def _lambert_ticks(ax, ticks, tick_location, line_constructor, tick_extractor):
    """Get the tick locations and labels for an axis of a Lambert Conformal projection.

    Parameters
    ----------
    ax : GeoAxes
        Axes drawn with a Lambert Conformal projection.
    ticks : list
        Candidate tick values in PlateCarree coordinates.
    tick_location : str
        Which side of the map outline to place ticks on (passed to find_side).
    line_constructor : callable
        Called as ``line_constructor(t, n_steps, extent)``; builds the
        (lon, lat) polyline for a given tick value.
    tick_extractor : callable
        Picks the tick coordinate out of an intersection's ``xy`` arrays.

    Returns
    -------
    (list, list)
        Projected tick positions and the corresponding visible tick labels.
    """
    # Map outline in projected coordinates; ticks fall where constructed
    # gridlines intersect the requested side of this outline.
    outline_patch = sgeom.LineString(ax.outline_patch.get_path().vertices.tolist())
    axis = find_side(outline_patch, tick_location)
    n_steps = 30
    extent = ax.get_extent(ccrs.PlateCarree())
    _ticks = []
    for t in ticks:
        # Sample the gridline for this tick value and project it.
        xy = line_constructor(t, n_steps, extent)
        proj_xyz = ax.projection.transform_points(ccrs.Geodetic(), xy[:, 0], xy[:, 1])
        xyt = proj_xyz[..., :2]
        ls = sgeom.LineString(xyt.tolist())
        locs = axis.intersection(ls)
        if not locs:
            # Gridline does not cross the outline: tick is not visible.
            tick = [None]
        else:
            tick = tick_extractor(locs.xy)
        _ticks.append(tick[0])
    # Remove ticks that aren't visible:
    ticklabels = copy(ticks)
    while True:
        try:
            index = _ticks.index(None)
        except ValueError:
            break
        _ticks.pop(index)
        ticklabels.pop(index)
    return _ticks, ticklabels
|
Get the tick locations and labels for an axis of a Lambert Conformal projection.
|
def rpc_reply(id: Union[str, int], result: Optional[object],
              warnings: Optional[List[Warning]] = None) -> rpcq.messages.RPCReply:
    """
    Create RPC reply

    :param str|int id: Request ID
    :param result: Result
    :param warnings: List of warnings to attach to the message
    :return: JSON RPC formatted dict
    """
    converted_warnings = [rpc_warning(w) for w in (warnings or [])]
    return rpcq.messages.RPCReply(
        jsonrpc='2.0',
        id=id,
        result=result,
        warnings=converted_warnings,
    )
|
Create RPC reply
:param str|int id: Request ID
:param result: Result
:param warnings: List of warnings to attach to the message
:return: JSON RPC formatted dict
|
def _combine_qc_samples(samples):
    """Combine split QC analyses into single samples based on BAM files.

    Samples sharing the same alignment BAM and batch are merged: their QC
    summaries, summary metrics and algorithm QC lists are collapsed onto
    the first sample of each group.
    """
    # Group samples by (BAM file, batch); each group is one original sample
    # whose QC was split across multiple runs.
    by_bam = collections.defaultdict(list)
    for data in [utils.to_single_data(x) for x in samples]:
        batch = dd.get_batch(data) or dd.get_sample_name(data)
        # Normalize batch to a hashable tuple so it can be a dict key.
        if not isinstance(batch, (list, tuple)):
            batch = [batch]
        batch = tuple(batch)
        by_bam[(dd.get_align_bam(data) or dd.get_work_bam(data), batch)].append(data)
    out = []
    for data_group in by_bam.values():
        data = data_group[0]
        alg_qc = []
        qc = {}
        metrics = {}
        # Merge QC dictionaries and metrics from every split analysis.
        for d in data_group:
            qc.update(dd.get_summary_qc(d))
            metrics.update(dd.get_summary_metrics(d))
            alg_qc.extend(dd.get_algorithm_qc(d))
        data["config"]["algorithm"]["qc"] = alg_qc
        data["summary"]["qc"] = qc
        data["summary"]["metrics"] = metrics
        out.append([data])
    return out
|
Combine split QC analyses into single samples based on BAM files.
|
def fetch_from(self, year: int, month: int):
    """Fetch data from year, month up to the current year and month."""
    self.raw_data = []
    self.data = []
    today = datetime.datetime.today()
    for fetch_year, fetch_month in self._month_year_iter(
            month, year, today.month, today.year):
        monthly = self.fetcher.fetch(fetch_year, fetch_month, self.sid)
        self.raw_data.append(monthly)
        self.data.extend(monthly['data'])
    return self.data
|
Fetch data from year, month to current year month data
|
def read_until_eof(self) -> bool:
    """Consume all the stream. Same as EOF in BNF."""
    # Already at end of stream: trivially true, no context to manage.
    if self.read_eof():
        return True
    # TODO: read ALL
    stream = self._stream
    stream.save_context()
    while not self.read_eof():
        stream.incpos()
    return stream.validate_context()
|
Consume all the stream. Same as EOF in BNF.
|
def _get_9q_square_qvm(name: str, noisy: bool,
                       connection: ForestConnection = None,
                       qvm_type: str = 'qvm') -> QuantumComputer:
    """
    A nine-qubit 3x3 square lattice.

    This uses a "generic" lattice not tied to any specific device. 9 qubits is large enough
    to do vaguely interesting algorithms and small enough to simulate quickly.

    :param name: The name of this QVM
    :param connection: The connection to use to talk to external services
    :param noisy: Whether to construct a noisy quantum computer
    :param qvm_type: The type of QVM. Either 'qvm' or 'pyqvm'.
    :return: A pre-configured QuantumComputer
    """
    grid = nx.grid_2d_graph(3, 3)
    # Relabel (row, col) nodes to plain integer qubit indices.
    topology = nx.convert_node_labels_to_integers(grid)
    return _get_qvm_with_topology(name=name,
                                  connection=connection,
                                  topology=topology,
                                  noisy=noisy,
                                  requires_executable=True,
                                  qvm_type=qvm_type)
|
A nine-qubit 3x3 square lattice.
This uses a "generic" lattice not tied to any specific device. 9 qubits is large enough
to do vaguely interesting algorithms and small enough to simulate quickly.
:param name: The name of this QVM
:param connection: The connection to use to talk to external services
:param noisy: Whether to construct a noisy quantum computer
:param qvm_type: The type of QVM. Either 'qvm' or 'pyqvm'.
:return: A pre-configured QuantumComputer
|
def add_atoms_linearly(self, start_atom, end_atom, new_atoms, jitterbug = 0.2):
    '''A low-level function which adds new_atoms between start_atom and end_atom. This function does not validate the
       input i.e. the calling functions are responsible for ensuring that the insertion makes sense.

       Returns the PDB file content with the new atoms added. These atoms are given fresh serial numbers, starting
       from the first serial number larger than the current serial numbers i.e. the ATOM serial numbers do not now
       necessarily increase in document order.

       The jitter adds some X, Y, Z variability to the new atoms. This is important in the Rosetta software suite when
       placing backbone atoms as colinearly placed atoms will break the dihedral angle calculations (the dihedral angle
       over 4 colinear atoms is undefined).
    '''
    # PDB ATOM-name field formatting for the backbone atoms we can place.
    atom_name_map = {
        'CA' : ' CA ',
        'C' : ' C ',
        'N' : ' N ',
        'O' : ' O ',
    }
    # Insertions must stay within one chain.
    assert(start_atom.residue.chain == end_atom.residue.chain)
    chain_id = start_atom.residue.chain

    # Initialize steps: evenly space the new atoms along the start->end segment.
    num_new_atoms = float(len(new_atoms))
    X, Y, Z = start_atom.x, start_atom.y, start_atom.z
    x_step = (end_atom.x - X) / (num_new_atoms + 1.0)
    y_step = (end_atom.y - Y) / (num_new_atoms + 1.0)
    z_step = (end_atom.z - Z) / (num_new_atoms + 1.0)
    D = math.sqrt(x_step * x_step + y_step * y_step + z_step * z_step)
    # Jitter magnitude scales with step size to break exact colinearity.
    jitter = 0
    if jitterbug:
        jitter = (((x_step + y_step + z_step) / 3.0) * jitterbug) / D

    new_lines = []
    # Fresh serial numbers start above the current maximum.
    next_serial_number = max(sorted(self.atoms.keys())) + 1
    round = 0
    for new_atom in new_atoms:
        X, Y, Z = X + x_step, Y + y_step, Z + z_step
        if jitter:
            # Rotate which coordinate pair is nudged so consecutive atoms
            # are perturbed in different directions.
            if round % 3 == 0:
                X, Y = X + jitter, Y - jitter
            elif round % 3 == 1:
                Y, Z = Y + jitter, Z - jitter
            elif round % 3 == 2:
                Z, X = Z + jitter, X - jitter
            round += 1
        residue_id, residue_type, atom_name = new_atom
        assert(len(residue_type) == 3)
        assert(len(residue_id) == 6)
        new_lines.append('ATOM {0} {1} {2} {3} {4:>8.3f}{5:>8.3f}{6:>8.3f} 1.00 0.00 '.format(str(next_serial_number).rjust(5), atom_name_map[atom_name], residue_type, residue_id, X, Y, Z))
        next_serial_number += 1

    # Splice the new ATOM lines in immediately after start_atom's line.
    new_pdb = []
    in_start_residue = False
    for l in self.indexed_lines:
        if l[0] and l[3].serial_number == start_atom.serial_number:
            in_start_residue = True
        if in_start_residue and l[3].serial_number != start_atom.serial_number:
            new_pdb.extend(new_lines)
            #colortext.warning('\n'.join(new_lines))
            in_start_residue = False
        if l[0]:
            #print(l[2])
            new_pdb.append(l[2])
        else:
            #print(l[1])
            new_pdb.append(l[1])
    return '\n'.join(new_pdb)
|
A low-level function which adds new_atoms between start_atom and end_atom. This function does not validate the
input i.e. the calling functions are responsible for ensuring that the insertion makes sense.
Returns the PDB file content with the new atoms added. These atoms are given fresh serial numbers, starting
from the first serial number larger than the current serial numbers i.e. the ATOM serial numbers do not now
necessarily increase in document order.
The jitter adds some X, Y, Z variability to the new atoms. This is important in the Rosetta software suite when
placing backbone atoms as colinearly placed atoms will break the dihedral angle calculations (the dihedral angle
over 4 colinear atoms is undefined).
|
def _inplace_subset_var(self, index):
    """Inplace subsetting along variables dimension.

    Same as ``adata = adata[:, index]``, but inplace.
    """
    # Materialize the subset first, then re-initialize self from it so all
    # internal attributes stay consistent with each other.
    adata_subset = self[:, index].copy()
    self._init_as_actual(adata_subset, dtype=self._X.dtype)
|
Inplace subsetting along variables dimension.
Same as ``adata = adata[:, index]``, but inplace.
|
def add_result(self, result):
    """
    Record the result of a completed job and decrement the active job
    count, firing the completion callback when the count reaches zero.
    Results arriving after the job set is complete are discarded.
    """
    if not self._active_jobs:
        # Job set already finished; ignore stragglers.
        return
    self._results.add(result)
    self._active_jobs -= 1
    if not self._active_jobs:
        self._done()
|
Adds the result of a completed job to the result list, then decrements
the active job count. If the job set is already complete, the result is
simply discarded instead.
|
def get_gene_modification_language(identifier_qualified: ParserElement) -> ParserElement:
    """Build a gene modification parser.

    :param identifier_qualified: parser for namespace-qualified identifiers;
        tried before the default gmod namespace.
    """
    # Accept either a qualified identifier or a default-namespace gmod name.
    gmod_identifier = MatchFirst([
        identifier_qualified,
        gmod_default_ns,
    ])
    # gmod(<identifier>), with the identifier grouped under IDENTIFIER.
    return gmod_tag + nest(
        Group(gmod_identifier)(IDENTIFIER)
    )
|
Build a gene modification parser.
|
def add_ordered_combo_item(
        combo, text, data=None, count_selected_features=None, icon=None):
    """Add a combo item ensuring that all items are listed alphabetically.

    Although QComboBox allows you to set an InsertAlphabetically enum
    this only has effect when a user interactively adds combo items to
    an editable combo. This we have this little function to ensure that
    combos are always sorted alphabetically.

    :param combo: Combo box receiving the new item.
    :type combo: QComboBox

    :param text: Display text for the combo.
    :type text: str

    :param data: Optional UserRole data to be associated with the item.
    :type data: QVariant, str

    :param count_selected_features: A count to display if the layer has some
        selected features. Default to None, nothing will be displayed.
    :type count_selected_features: None, int

    :param icon: Icon to display in the combobox.
    :type icon: QIcon
    """
    if count_selected_features is not None:
        text += ' (' + tr('{count} selected features').format(
            count=count_selected_features) + ')'
    size = combo.count()
    for combo_index in range(0, size):
        item_text = combo.itemText(combo_index)
        # see if text alphabetically precedes item_text
        # (bug fix: cmp() is Python-2-only; direct comparison is equivalent
        # for strings and works on Python 3)
        if text.lower() < item_text.lower():
            if icon:
                combo.insertItem(combo_index, icon, text, data)
            else:
                combo.insertItem(combo_index, text, data)
            return
    # otherwise just add it to the end
    if icon:
        combo.insertItem(size, icon, text, data)
    else:
        combo.insertItem(size, text, data)
|
Add a combo item ensuring that all items are listed alphabetically.
Although QComboBox allows you to set an InsertAlphabetically enum
this only has effect when a user interactively adds combo items to
an editable combo. This we have this little function to ensure that
combos are always sorted alphabetically.
:param combo: Combo box receiving the new item.
:type combo: QComboBox
:param text: Display text for the combo.
:type text: str
:param data: Optional UserRole data to be associated with the item.
:type data: QVariant, str
:param count_selected_features: A count to display if the layer has some
selected features. Default to None, nothing will be displayed.
:type count_selected_features: None, int
:param icon: Icon to display in the combobox.
:type icon: QIcon
|
def parse_eprocess(self, eprocess_data):
    """Parse the EProcess object we get from some rekall output"""
    cybox = eprocess_data['_EPROCESS']['Cybox']
    return {
        'Name': cybox['Name'],
        'PID': cybox['PID'],
        'PPID': cybox['Parent_PID'],
    }
|
Parse the EProcess object we get from some rekall output
|
def multilingual(request):
    """
    Returns context variables containing information about available languages.
    """
    codes = sorted(get_language_code_list())
    codes_and_names = [(code, LANG_DICT.get(code, code)) for code in codes]
    return {
        'LANGUAGE_CODES': codes,
        'LANGUAGE_CODES_AND_NAMES': codes_and_names,
        'DEFAULT_LANGUAGE_CODE': get_default_language_code(),
        'ADMIN_MEDIA_URL': settings.ADMIN_MEDIA_PREFIX,
    }
|
Returns context variables containing information about available languages.
|
def config(data_folder=settings.data_folder,
           logs_folder=settings.logs_folder,
           imgs_folder=settings.imgs_folder,
           cache_folder=settings.cache_folder,
           use_cache=settings.use_cache,
           log_file=settings.log_file,
           log_console=settings.log_console,
           log_level=settings.log_level,
           log_name=settings.log_name,
           log_filename=settings.log_filename,
           useful_tags_node=settings.useful_tags_node,
           useful_tags_path=settings.useful_tags_path,
           osm_xml_node_attrs=settings.osm_xml_node_attrs,
           osm_xml_node_tags=settings.osm_xml_node_tags,
           osm_xml_way_attrs=settings.osm_xml_way_attrs,
           osm_xml_way_tags=settings.osm_xml_way_tags,
           default_access=settings.default_access,
           default_crs=settings.default_crs,
           default_user_agent=settings.default_user_agent,
           default_referer=settings.default_referer,
           default_accept_language=settings.default_accept_language):
    """
    Configure osmnx by setting the default global vars to desired values.

    Parameters
    ---------
    data_folder : string
        where to save and load data files
    logs_folder : string
        where to write the log files
    imgs_folder : string
        where to save figures
    cache_folder : string
        where to save the http response cache
    use_cache : bool
        if True, use a local cache to save/retrieve http responses instead of
        calling API repetitively for the same request URL
    log_file : bool
        if true, save log output to a log file in logs_folder
    log_console : bool
        if true, print log output to the console
    log_level : int
        one of the logger.level constants
    log_name : string
        name of the logger
    log_filename : string
        name of the log file
    useful_tags_node : list
        a list of useful OSM tags to attempt to save from node elements
    useful_tags_path : list
        a list of useful OSM tags to attempt to save from path elements
    osm_xml_node_attrs : list
        node attributes for saving .osm XML files
    osm_xml_node_tags : list
        node tags for saving .osm XML files
    osm_xml_way_attrs : list
        way attributes for saving .osm XML files
    osm_xml_way_tags : list
        way tags for saving .osm XML files
    default_access : string
        default filter for OSM "access" key
    default_crs : string
        default CRS to set when creating graphs
    default_user_agent : string
        HTTP header user-agent
    default_referer : string
        HTTP header referer
    default_accept_language : string
        HTTP header accept-language

    Returns
    -------
    None
    """
    # set each global variable to the passed-in parameter value
    settings.use_cache = use_cache
    settings.cache_folder = cache_folder
    settings.data_folder = data_folder
    settings.imgs_folder = imgs_folder
    settings.logs_folder = logs_folder
    settings.log_console = log_console
    settings.log_file = log_file
    settings.log_level = log_level
    settings.log_name = log_name
    settings.log_filename = log_filename
    # The useful-tags lists always include the .osm XML attrs/tags so saved
    # graphs can round-trip through XML. (The previous version first assigned
    # the raw lists and immediately overwrote them; those redundant dead
    # assignments have been removed.)
    settings.useful_tags_node = list(set(
        useful_tags_node + osm_xml_node_attrs + osm_xml_node_tags))
    settings.useful_tags_path = list(set(
        useful_tags_path + osm_xml_way_attrs + osm_xml_way_tags))
    settings.osm_xml_node_attrs = osm_xml_node_attrs
    settings.osm_xml_node_tags = osm_xml_node_tags
    settings.osm_xml_way_attrs = osm_xml_way_attrs
    settings.osm_xml_way_tags = osm_xml_way_tags
    settings.default_access = default_access
    settings.default_crs = default_crs
    settings.default_user_agent = default_user_agent
    settings.default_referer = default_referer
    settings.default_accept_language = default_accept_language
    # if logging is turned on, log that we are configured
    if settings.log_file or settings.log_console:
        log('Configured osmnx')
|
Configure osmnx by setting the default global vars to desired values.
Parameters
---------
data_folder : string
where to save and load data files
logs_folder : string
where to write the log files
imgs_folder : string
where to save figures
cache_folder : string
where to save the http response cache
use_cache : bool
if True, use a local cache to save/retrieve http responses instead of
calling API repetitively for the same request URL
log_file : bool
if true, save log output to a log file in logs_folder
log_console : bool
if true, print log output to the console
log_level : int
one of the logger.level constants
log_name : string
name of the logger
useful_tags_node : list
a list of useful OSM tags to attempt to save from node elements
useful_tags_path : list
a list of useful OSM tags to attempt to save from path elements
default_access : string
default filter for OSM "access" key
default_crs : string
default CRS to set when creating graphs
default_user_agent : string
HTTP header user-agent
default_referer : string
HTTP header referer
default_accept_language : string
HTTP header accept-language
Returns
-------
None
|
def _create_user(
self, username, email, short_name, full_name,
institute, password, is_admin, **extra_fields):
"""Creates a new active person. """
# Create Person
person = self.model(
username=username, email=email,
short_name=short_name, full_name=full_name,
is_admin=is_admin,
institute=institute,
**extra_fields
)
person.set_password(password)
person.save()
return person
|
Creates a new active person.
|
def _iterparse(xmlfile):
"""
Avoid bug in python 3.{2,3}. See http://bugs.python.org/issue9257.
:param xmlfile: XML file or file-like object
"""
try:
return ET.iterparse(xmlfile, events=("start-ns", ))
except TypeError:
return ET.iterparse(xmlfile, events=(b"start-ns", ))
|
Avoid bug in python 3.{2,3}. See http://bugs.python.org/issue9257.
:param xmlfile: XML file or file-like object
|
def graph_to_laplacian(G, normalized=True):
    """
    Converts a graph from popular Python packages to Laplacian representation.

    Currently support NetworkX, graph_tool and igraph.

    Parameters
    ----------
    G : obj
        Input graph
    normalized : bool
        Whether to use normalized Laplacian.
        Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.

    Returns
    -------
    scipy.sparse
        Laplacian matrix of the input graph, or None when the input is not
        a recognized graph type (or no supported graph package is installed).

    Examples
    --------
    >>> graph_to_laplacian(nx.complete_graph(3), 'unnormalized').todense()
    [[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]
    >>> graph_to_laplacian('test')
    None
    """
    try:
        import networkx as nx
        if isinstance(G, nx.Graph):
            if normalized:
                return nx.normalized_laplacian_matrix(G)
            else:
                return nx.laplacian_matrix(G)
    except ImportError:
        pass
    try:
        import graph_tool.all as gt
        if isinstance(G, gt.Graph):
            if normalized:
                # Bug fix: gt.laplacian_type does not exist; graph_tool
                # exposes the normalized Laplacian via gt.laplacian's
                # `normalized` keyword.
                return gt.laplacian(G, normalized=True)
            else:
                return gt.laplacian(G)
    except ImportError:
        pass
    try:
        import igraph as ig
        if isinstance(G, ig.Graph):
            if normalized:
                return np.array(G.laplacian(normalized=True))
            else:
                return np.array(G.laplacian())
    except ImportError:
        pass
    # Unrecognized input type: make the documented None return explicit.
    return None
|
Converts a graph from popular Python packages to Laplacian representation.
Currently support NetworkX, graph_tool and igraph.
Parameters
----------
G : obj
Input graph
normalized : bool
Whether to use normalized Laplacian.
Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.
Returns
-------
scipy.sparse
Laplacian matrix of the input graph
Examples
--------
>>> graph_to_laplacian(nx.complete_graph(3), 'unnormalized').todense()
[[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]
>>> graph_to_laplacian('test')
None
|
def _state_invalid(self):
"""
If the state is invalid for the transition, return details on what didn't match
:return: Tuple of (state manager, current state, label for current state)
"""
for statemanager, conditions in self.statetransition.transitions.items():
current_state = getattr(self.obj, statemanager.propname)
if conditions['from'] is None:
state_valid = True
else:
mstate = conditions['from'].get(current_state)
state_valid = mstate and mstate(self.obj)
if state_valid and conditions['if']:
state_valid = all(v(self.obj) for v in conditions['if'])
if not state_valid:
return statemanager, current_state, statemanager.lenum.get(current_state)
|
If the state is invalid for the transition, return details on what didn't match
:return: Tuple of (state manager, current state, label for current state)
|
def _find_scc(self):
"""
Set ``self._num_scc`` and ``self._scc_proj``
by calling ``scipy.sparse.csgraph.connected_components``:
* docs.scipy.org/doc/scipy/reference/sparse.csgraph.html
* github.com/scipy/scipy/blob/master/scipy/sparse/csgraph/_traversal.pyx
``self._scc_proj`` is a list of length `n` that assigns to each node
the label of the strongly connected component to which it belongs.
"""
# Find the strongly connected components
self._num_scc, self._scc_proj = \
csgraph.connected_components(self.csgraph, connection='strong')
|
Set ``self._num_scc`` and ``self._scc_proj``
by calling ``scipy.sparse.csgraph.connected_components``:
* docs.scipy.org/doc/scipy/reference/sparse.csgraph.html
* github.com/scipy/scipy/blob/master/scipy/sparse/csgraph/_traversal.pyx
``self._scc_proj`` is a list of length `n` that assigns to each node
the label of the strongly connected component to which it belongs.
|
def reset(cls):
    """Resets the static state. Should only be called by tests."""
    cls.stats = StatContainer()
    cls.parentMap = {}
    cls.containerMap = {}
    cls.subId = 0
    # Clear the aggregators on every live Stat instance the GC still tracks.
    for tracked in gc.get_objects():
        if isinstance(tracked, Stat):
            tracked._aggregators = {}
|
Resets the static state. Should only be called by tests.
|
def off_datastream(self, datastream):
    """
    To turn off datastream

    :param datastream: string identifier of the datastream
    :return: HTTP response from the service
    """
    endpoint = '/datastream/{0}/off'.format(datastream)
    return self.http.post(endpoint, "")
|
To turn off datastream
:param datastream: string
|
def drop_indexes(self):
    """Drops all indexes on this collection.

    Can be used on non-existent collections or collections with no indexes.
    Raises OperationFailure on an error.

    .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
       this collection is automatically applied to this operation when using
       MongoDB >= 3.4.

    .. versionchanged:: 3.4
       Apply this collection's write concern automatically to this operation
       when connected to MongoDB >= 3.4.
    """
    database = self.__database
    database.client._purge_index(database.name, self.__name)
    # "*" asks the server to drop every index on the collection.
    self.drop_index("*")
|
Drops all indexes on this collection.
Can be used on non-existent collections or collections with no indexes.
Raises OperationFailure on an error.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
|
def load(text, match=None):
    """This function reads a string that contains the XML of an Atom Feed, then
    returns the data in a native Python structure (a ``dict`` or ``list``).
    If you also provide a tag name or path to match, only the matching
    sub-elements are loaded.

    :param text: The XML text to load.
    :type text: ``string``
    :param match: A tag name or path to match (optional).
    :type match: ``string``
    """
    if text is None:
        return None
    text = text.strip()
    if not text:
        return None
    nametable = {'namespaces': [], 'names': {}}

    # On Python 2 the XML parser wants byte strings, so encode unicode input.
    if sys.version_info < (3, 0, 0) and isinstance(text, unicode):
        text = text.encode('utf-8')

    root = XML(text)
    matched = [root] if match is None else root.findall(match)
    if not matched:
        return None
    if len(matched) == 1:
        return load_root(matched[0], nametable)
    return [load_root(node, nametable) for node in matched]
|
This function reads a string that contains the XML of an Atom Feed, then
returns the
data in a native Python structure (a ``dict`` or ``list``). If you also
provide a tag name or path to match, only the matching sub-elements are
loaded.
:param text: The XML text to load.
:type text: ``string``
:param match: A tag name or path to match (optional).
:type match: ``string``
|
def add_attribute(self, ont_id: str, ctrl_acct: Account, attributes: Attribute, payer: Account, gas_limit: int,
                  gas_price: int) -> str:
    """
    This interface is used to send a Transaction object which is used to add attribute.

    :param ont_id: OntId.
    :param ctrl_acct: an Account object which indicates who will sign for the transaction.
    :param attributes: a list of attributes we want to add.
    :param payer: an Account object which indicates who will pay for the transaction.
    :param gas_limit: an int value that indicates the gas limit.
    :param gas_price: an int value that indicates the gas price.
    :return: a hexadecimal transaction hash value.
    """
    if not (isinstance(ctrl_acct, Account) and isinstance(payer, Account)):
        raise SDKException(ErrorCode.require_acct_params)
    tx = self.new_add_attribute_transaction(
        ont_id, ctrl_acct.get_public_key_bytes(), attributes,
        payer.get_address_base58(), gas_limit, gas_price)
    # Controller signs first, then the payer co-signs.
    tx.sign_transaction(ctrl_acct)
    tx.add_sign_transaction(payer)
    return self.__sdk.get_network().send_raw_transaction(tx)
|
This interface is used to send a Transaction object which is used to add attribute.
:param ont_id: OntId.
:param ctrl_acct: an Account object which indicate who will sign for the transaction.
:param attributes: a list of attributes we want to add.
:param payer: an Account object which indicate who will pay for the transaction.
:param gas_limit: an int value that indicate the gas limit.
:param gas_price: an int value that indicate the gas price.
:return: a hexadecimal transaction hash value.
|
def simplify_basic(drawing, process=False, **kwargs):
    """
    Merge colinear segments and fit circles.

    Drawings containing any non-Line entity are returned unchanged.

    Parameters
    -----------
    drawing : Path2D
      Object which will not be modified.
    process : bool
      Passed through to the constructor of the returned Path2D.

    Returns
    -----------
    simplified : Path2D
      Path2D with closed circular paths replaced by Arc entities.
    """
    if any(entity.__class__.__name__ != 'Line'
           for entity in drawing.entities):
        log.debug('Path contains non- linear entities, skipping')
        return drawing

    # we are going to do a bookkeeping to avoid having
    # to recompute literally everything when simplification is ran
    cache = copy.deepcopy(drawing._cache)

    # store new values
    vertices_new = collections.deque()
    entities_new = collections.deque()

    # avoid thrashing cache in loop
    scale = drawing.scale

    # loop through (n, 2) closed paths
    for discrete in drawing.discrete:
        # check to see if the closed entity is a circle
        circle = is_circle(discrete, scale=scale)
        if circle is not None:
            # the points are circular enough for our high standards
            # so replace them with a closed Arc entity
            entities_new.append(entities.Arc(points=np.arange(3) +
                                             len(vertices_new),
                                             closed=True))
            vertices_new.extend(circle)
        else:
            # not a circle, so clean up colinear segments
            # then save it as a single line entity
            points = merge_colinear(discrete, scale=scale)
            # references for new vertices
            indexes = np.arange(len(points)) + len(vertices_new)
            # discrete curves are always closed
            indexes[-1] = indexes[0]
            # append new vertices and entity
            entities_new.append(entities.Line(points=indexes))
            vertices_new.extend(points)

    # create the new drawing object
    simplified = type(drawing)(
        entities=entities_new,
        vertices=vertices_new,
        metadata=copy.deepcopy(drawing.metadata),
        process=process)

    # we have changed every path to a single closed entity
    # either a closed arc, or a closed line
    # so all closed paths are now represented by a single entity
    # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the equivalent dtype.
    cache.cache.update({
        'paths': np.arange(len(entities_new)).reshape((-1, 1)),
        'path_valid': np.ones(len(entities_new), dtype=bool),
        'dangling': np.array([])})

    # force recompute of exact bounds
    cache.cache.pop('bounds', None)

    simplified._cache = cache
    # set the cache ID so it won't dump when a value is requested
    simplified._cache.id_set()

    return simplified
|
Merge colinear segments and fit circles.
Parameters
-----------
drawing: Path2D object, will not be modified.
Returns
-----------
simplified: Path2D with circles.
|
def genotypesPhenotypesGenerator(self, request):
    """
    Returns a generator over the (phenotypes, nextPageToken) pairs
    defined by the (JSON string) request
    """
    # TODO make paging work using SPARQL?
    compound_id = datamodel.PhenotypeAssociationSetCompoundId.parse(
        request.phenotype_association_set_id)
    repo_dataset = self.getDataRepository().getDataset(compound_id.dataset_id)
    association_set = repo_dataset.getPhenotypeAssociationSet(
        compound_id.phenotypeAssociationSetId)
    feature_sets = repo_dataset.getFeatureSets()
    associations = association_set.getAssociations(request, feature_sets)
    return self._protocolListGenerator(request, associations)
|
Returns a generator over the (phenotypes, nextPageToken) pairs
defined by the (JSON string) request
|
def compare(s1, s2, **kwargs):
    """Compares two strings and returns their similarity.

    :param s1: first string
    :param s2: second string
    :param kwargs: additional keyword arguments passed to __init__.
    :return: similarity between 0.0 and 1.0.

    >>> from ngram import NGram
    >>> NGram.compare('spa', 'spam')
    0.375
    >>> NGram.compare('ham', 'bam')
    0.25
    >>> NGram.compare('spam', 'pam') #N=2
    0.375
    >>> NGram.compare('ham', 'ams', N=1)
    0.5
    """
    if s1 is None or s2 is None:
        # Two missing values count as identical; one missing as disjoint.
        return 1.0 if s1 == s2 else 0.0
    try:
        return NGram([s1], **kwargs).search(s2)[0][1]
    except IndexError:
        # search() found no match at all.
        return 0.0
|
Compares two strings and returns their similarity.
:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.
>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5
|
def main( gpu:Param("GPU to run on", str)=None ):
    """Distributed training of CIFAR-10.

    Fastest speed is if you run as follows:
        python -m fastai.launch train_cifar.py"""
    # NOTE(review): setup_distrib presumably maps the CLI arg to a local
    # device id (None when not launched under fastai.launch) — confirm.
    gpu = setup_distrib(gpu)
    n_gpus = num_distrib()
    path = url2path(URLs.CIFAR)
    # Train-set transforms only (pad+crop and horizontal flip); validation
    # gets the empty transform list.
    ds_tfms = ([*rand_pad(4, 32), flip_lr(p=0.5)], [])
    # Split dataloader workers across processes, capped at 16 per process.
    workers = min(16, num_cpus()//n_gpus)
    # Batch size is divided across processes so the global batch stays 512.
    data = ImageDataBunch.from_folder(path, valid='test', ds_tfms=ds_tfms, bs=512//n_gpus,
                                      num_workers=workers).normalize(cifar_stats)
    learn = Learner(data, wrn_22(), metrics=accuracy)
    # No specific GPU: single-process nn.DataParallel; otherwise distributed.
    if gpu is None: learn.model = nn.DataParallel(learn.model)
    else: learn.to_distributed(gpu)
    learn.to_fp16()
    learn.fit_one_cycle(35, 3e-3, wd=0.4)
|
Distributed training of CIFAR-10.
Fastest speed is if you run as follows:
python -m fastai.launch train_cifar.py
|
def __getListMetaInfo(self, inferenceElement):
    """ Get field metadata information for inferences that are of list type

    TODO: Right now we assume list inferences are associated with the input field
    metadata
    """
    metadata = []
    label = InferenceElement.getLabel(inferenceElement)
    for fieldMeta in self.__inputFieldsMeta:
        if not InferenceElement.getInputElement(inferenceElement):
            continue
        # One column echoing the actual input value...
        metadata.append(FieldMetaInfo(
            name=fieldMeta.name + ".actual",
            type=fieldMeta.type,
            special=fieldMeta.special))
        # ...and one column for the predicted value.
        metadata.append(FieldMetaInfo(
            name=fieldMeta.name + "." + label,
            type=fieldMeta.type,
            special=fieldMeta.special))
    return metadata
|
Get field metadata information for inferences that are of list type
TODO: Right now we assume list inferences are associated with the input field
metadata
|
def refetch_fields(self, missing_fields):
    """ Refetches a list of fields from the DB """
    projection = {name: 1 for name in missing_fields}
    fetched = self.mongokat_collection.find_one({"_id": self["_id"]}, fields=projection)
    # Record the fields as fetched even when the document is gone.
    self._fetched_fields += tuple(missing_fields)
    if not fetched:
        return
    for name, value in fetched.items():
        self[name] = value
|
Refetches a list of fields from the DB
|
def loads(s, cls=BinaryQuadraticModel, vartype=None):
    """Load a COOrdinate formatted binary quadratic model from a string."""
    lines = s.split('\n')
    return load(lines, cls=cls, vartype=vartype)
|
Load a COOrdinate formatted binary quadratic model from a string.
|
def underscores_to_camelcase(argument):
    ''' Converts an underscore-separated param like the_new_attribute to the
    equivalent camelcase version like theNewAttribute. Note that the first
    letter is NOT capitalized by this function '''
    pieces = argument.split('_')
    # Upper-case only the first character of each piece after the first;
    # empty pieces (from leading or doubled underscores) contribute nothing.
    return pieces[0] + ''.join(p[:1].upper() + p[1:] for p in pieces[1:])
|
Converts a camelcase param like the_new_attribute to the equivalent
camelcase version like theNewAttribute. Note that the first letter is
NOT capitalized by this function
|
def get_fn(elev, name=None):
    """
    Determines the standard filename for a given GeoTIFF Layer.

    Parameters
    -----------
    elev : GdalReader.raster_layer
        A raster layer from the GdalReader object.
    name : str (optional)
        An optional suffix to the filename.

    Returns
    -------
    fn : str
        The standard <filename>_<name>.tif with suffix (if supplied)
    """
    grid = elev.grid_coordinates
    # Order is lower-left lat/lon followed by upper-right lat/lon.
    corner_coords = [grid.LLC.lat, grid.LLC.lon, grid.URC.lat, grid.URC.lon]
    return get_fn_from_coords(corner_coords, name)
|
Determines the standard filename for a given GeoTIFF Layer.
Parameters
-----------
elev : GdalReader.raster_layer
A raster layer from the GdalReader object.
name : str (optional)
An optional suffix to the filename.
Returns
-------
fn : str
The standard <filename>_<name>.tif with suffix (if supplied)
|
async def _verkey_for(self, target: str) -> str:
    """
    Given a DID, retrieve its verification key, looking in wallet, then pool.
    Given a verification key or None, return input.

    Raise WalletState if the wallet is closed. Given a recipient DID not in the wallet,
    raise AbsentPool if the instance has no pool or ClosedPool if its pool is closed.
    If no such verification key is on the ledger, raise AbsentNym.

    :param target: verification key, or DID to resolve to such
    :return: verification key
    """
    LOGGER.debug('BaseAnchor._verkey_for >>> target: %s', target)

    rv = target
    if rv is None or not ok_did(rv):  # it's None or already a verification key
        LOGGER.debug('BaseAnchor._verkey_for <<< %s', rv)
        return rv

    if self.wallet.handle:
        try:
            rv = await did.key_for_local_did(self.wallet.handle, target)
            LOGGER.info('Anchor %s got verkey for DID %s from wallet', self.name, target)
            LOGGER.debug('BaseAnchor._verkey_for <<< %s', rv)
            return rv
        except IndyError as x_indy:
            if x_indy.error_code != ErrorCode.WalletItemNotFound:  # on not found, try the pool
                LOGGER.debug(
                    'BaseAnchor._verkey_for <!< key lookup for local DID %s raised indy error code %s',
                    target,
                    x_indy.error_code)
                raise

    # BUG FIX: the original fetched and parsed the nym twice from the ledger;
    # reuse the single parsed response for both the presence check and the verkey.
    nym = json.loads(await self.get_nym(target))
    if not nym:
        LOGGER.debug(
            'BaseAnchor._verkey_for <!< Wallet %s closed and ledger has no cryptonym for DID %s',
            self.name,
            target)
        raise AbsentNym('Wallet {} closed, and ledger has no cryptonym for DID {}'.format(self.name, target))
    rv = nym['verkey']

    LOGGER.info('Anchor %s got verkey for DID %s from pool %s', self.name, target, self.pool.name)
    LOGGER.debug('BaseAnchor._verkey_for <<< %s', rv)
    return rv
|
Given a DID, retrieve its verification key, looking in wallet, then pool.
Given a verification key or None, return input.
Raise WalletState if the wallet is closed. Given a recipient DID not in the wallet,
raise AbsentPool if the instance has no pool or ClosedPool if its pool is closed.
If no such verification key is on the ledger, raise AbsentNym.
:param target: verification key, or DID to resolve to such
:return: verification key
|
def parse(content, *args, **kwargs):
    ''' Use mecab-python3 by default to parse JP text. Fall back to mecab binary app if needed '''
    global MECAB_PYTHON3
    # Use the Python binding only when no explicit binary location was
    # requested and the MeCab module was successfully imported.
    use_binding = ('mecab_loc' not in kwargs
                   and MECAB_PYTHON3
                   and 'MeCab' in globals())
    if use_binding:
        return MeCab.Tagger(*args).parse(content)
    return run_mecab_process(content, *args, **kwargs)
|
Use mecab-python3 by default to parse JP text. Fall back to mecab binary app if needed
|
def get_posix(self, i):
    """Get POSIX.

    Parse a POSIX character class such as ``[:alnum:]`` (or the negated
    ``[:^alnum:]``) from the character iterator ``i``, whose leading ``[``
    has already been consumed.

    :param i: character iterator exposing ``index`` and ``rewind`` —
        presumably a project-local stream type; TODO confirm its contract.
    :return: the consumed property text (including brackets), or ``None``
        on failure, in which case the iterator is rewound to where it was.
    """
    index = i.index
    value = ['[']
    try:
        c = next(i)
        if c != ':':
            raise ValueError('Not a valid property!')
        else:
            value.append(c)
            c = next(i)
            if c == '^':
                # Negated property, e.g. [:^alpha:]
                value.append(c)
                c = next(i)
            while c != ':':
                if c not in _PROPERTY:
                    raise ValueError('Not a valid property!')
                if c not in _PROPERTY_STRIP:
                    value.append(c)
                c = next(i)
            value.append(c)
            c = next(i)
            if c != ']' or not value:
                raise ValueError('Unmatched ]')
            value.append(c)
    except Exception:
        # Any failure (bad name, unexpected end of input) restores the
        # iterator so the caller can re-parse the text another way.
        i.rewind(i.index - index)
        value = []
    return ''.join(value) if value else None
|
Get POSIX.
|
def repeat(self, count=2):
    """ Repeat the last control code a number of times.

    Returns a new Control with this one's data and the repeated code.
    """
    # `count - 1` because the existing data already contains the code once:
    #     Control().move_up().repeat(3) ==
    #     Control().move_up().move_up().move_up()
    try:
        tail = self.last_code() * (count - 1)
        return self.__class__(str(self) + tail)
    except TypeError as ex:
        raise TypeError(
            '`count` must be an integer. Got: {!r}'.format(count)
        ) from ex
|
Repeat the last control code a number of times.
Returns a new Control with this one's data and the repeated code.
|
def shell():
    "Open a shell"
    # Imported lazily so the GUI debug tools only load when requested.
    from gui.tools.debug import Shell
    window = Shell()
    window.show()
    return window
|
Open a shell
|
def filter(self, table, cg_snapshots, filter_string):
    """Naive case-insensitive search."""
    needle = filter_string.lower()
    matches = []
    for snapshot in cg_snapshots:
        if needle in snapshot.name.lower():
            matches.append(snapshot)
    return matches
|
Naive case-insensitive search.
|
def put(self, ch):
    '''This puts a characters at the current cursor position.
    '''
    character = ch
    if isinstance(character, bytes):
        # Bytes input is decoded before being written to the screen buffer.
        character = self._decode(character)
    self.put_abs(self.cur_r, self.cur_c, character)
|
This puts a characters at the current cursor position.
|
def print_summary(graph, tails, node_id_map):
    """Print out summary and per-node comparison data.

    :param graph: block graph whose ``root`` is taken as the common ancestor.
    :param tails: per-node chain tails used to derive heads and heights.
    :param node_id_map: maps a node's positional index to its display id.
    """
    # Get comparison data
    heads = get_heads(tails)
    heights = get_heights(tails)
    max_height = max(heights)
    common_height, block_ids_at_common_height = get_common_height(tails)
    lags = get_lags(heights, max_height)
    common_ancestor = graph.root
    divergences = get_divergences(heights, graph.root)

    # Print summary info (fixed 8-character columns)
    col_1 = 8
    col_n = 8
    format_str = '{:<' + str(col_1) + '} ' + ('{:<' + str(col_n) + '} ') * 2
    header = format_str.format("COMMON", "HEIGHT", "BLOCKS")
    print(header)
    print("-" * len(header))
    print(format_str.format(
        "ANCESTOR", common_ancestor.num, common_ancestor.ident[:col_n]))
    print(format_str.format(
        "HEIGHT", common_height, str(block_ids_at_common_height)))
    print()

    # Print per-node data; each column is sized to the largest value it holds
    node_col_width = get_col_width_for_num(len(tails), len("NODE"))
    num_col_width = get_col_width_for_num(max_height, len("HEIGHT"))
    lag_col_width = get_col_width_for_num(max(lags), len("LAG"))
    diverg_col_width = get_col_width_for_num(max(divergences), len("DIVERG"))
    format_str = (
        '{:<' + str(node_col_width) + '} '
        '{:<8} '
        '{:<' + str(num_col_width) + '} '
        '{:<' + str(lag_col_width) + '} '
        '{:<' + str(diverg_col_width) + '}'
    )
    header = format_str.format("NODE", "HEAD", "HEIGHT", "LAG", "DIVERG")
    print(header)
    print('-' * len(header))
    for i, _ in enumerate(tails):
        print(format_str.format(
            node_id_map[i],
            heads[i],
            heights[i],
            lags[i],
            divergences[i],
        ))
    print()
|
Print out summary and per-node comparison data.
|
def sendSMS_multi(self, CorpNum, Sender, Contents, Messages, reserveDT, adsYN=False, UserID=None, RequestNum=None):
    """ Send multiple SMS (short) text messages in one request.

    args
        CorpNum : Popbill member's business registration number
        Sender : sender number (for bulk sending)
        Contents : message body (for bulk sending)
        Messages : list of per-recipient message info
        reserveDT : reserved send time (format: yyyyMMddHHmmss)
        adsYN : whether the message is an advertisement
        UserID : Popbill member user id
        RequestNum : request identifier for this send
    return
        receipt number (receiptNum)
    raise
        PopbillException
    """
    receipt = self.sendMessage(
        "SMS", CorpNum, Sender, '', '', Contents,
        Messages, reserveDT, adsYN, UserID, RequestNum)
    return receipt
|
๋จ๋ฌธ ๋ฌธ์๋ฉ์์ง ๋ค๋์ ์ก
args
CorpNum : ํ๋นํ์ ์ฌ์
์๋ฒํธ
Sender : ๋ฐ์ ์๋ฒํธ (๋๋ณด์ ์ก์ฉ)
Contents : ๋ฌธ์ ๋ด์ฉ (๋๋ณด์ ์ก์ฉ)
Messages : ๊ฐ๋ณ์ ์ก์ ๋ณด ๋ฐฐ์ด
reserveDT : ์์ฝ์ ์ก์๊ฐ (ํ์. yyyyMMddHHmmss)
UserID : ํ๋นํ์ ์์ด๋
RequestNum : ์ ์ก์์ฒญ๋ฒํธ
return
์ ์๋ฒํธ (receiptNum)
raise
PopbillException
|
def get_bounce_dump(bounce_id, api_key=None, secure=None, test=None,
                    **request_args):
    '''Get the raw email dump for a single bounce.

    :param bounce_id: The bounce's id. Get the id with :func:`get_bounces`.
    :param api_key: Your Postmark API key. Required, if `test` is not `True`.
    :param secure: Use the https scheme for the Postmark API.
        Defaults to `True`
    :param test: Use the Postmark Test API. Defaults to `False`.
    :param \*\*request_args: Keyword arguments to pass to
        :func:`requests.request`.
    :rtype: :class:`BounceDumpResponse`
    '''
    # Delegate to the module-level default BounceDump endpoint object.
    return _default_bounce_dump.get(
        bounce_id,
        api_key=api_key,
        secure=secure,
        test=test,
        **request_args)
|
Get the raw email dump for a single bounce.
:param bounce_id: The bounce's id. Get the id with :func:`get_bounces`.
:param api_key: Your Postmark API key. Required, if `test` is not `True`.
:param secure: Use the https scheme for the Postmark API.
Defaults to `True`
:param test: Use the Postmark Test API. Defaults to `False`.
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`BounceDumpResponse`
|
def enable_events(self):
    """enable slow wave and spindle detection if both
    annotations and channels are active.
    """
    ready = self.annot is not None and bool(self.parent.channels.groups)
    for name in ('spindle', 'slow_wave', 'analyze'):
        self.action[name].setEnabled(ready)
|
enable slow wave and spindle detection if both
annotations and channels are active.
|
def tie_weights(self):
    """ Run this to be sure output and input (adaptive) softmax weights are tied """
    # sampled softmax
    if self.sample_softmax > 0:
        if self.config.tie_weight:
            # Share the single output projection with the input embedding.
            self.out_layer.weight = self.transformer.word_emb.weight
    # adaptive softmax (including standard softmax)
    else:
        if self.config.tie_weight:
            # Tie each adaptive-softmax cluster's output weights to the
            # corresponding embedding layer.
            for i in range(len(self.crit.out_layers)):
                self.crit.out_layers[i].weight = self.transformer.word_emb.emb_layers[i].weight
        if self.config.tie_projs:
            for i, tie_proj in enumerate(self.config.tie_projs):
                # div_val == 1 means one shared embedding projection (index 0),
                # used only when d_model and d_embed differ; otherwise each
                # cluster ties to its own embedding projection.
                if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
                    self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
                elif tie_proj and self.config.div_val != 1:
                    self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
|
Run this to be sure output and input (adaptive) softmax weights are tied
|
def get_status(self):
    """ Query the device status. Returns JSON of the device internal state """
    status_url = '{0}/status'.format(self.base_url)
    try:
        return requests.get(status_url, timeout=10).json()
    except RequestException as err:
        # Wrap transport-level failures in the client's own error type.
        raise Client.ClientError(err)
|
Query the device status. Returns JSON of the device internal state
|
def decode_body(cls, header, f):
    """Generates a `MqttUnsuback` packet given a
    `MqttFixedHeader`. This method asserts that header.packet_type
    is `unsuback`.

    Parameters
    ----------
    header: MqttFixedHeader
    f: file
        Object with a read method.

    Raises
    ------
    DecodeError
        When there are extra bytes at the end of the packet.

    Returns
    -------
    int
        Number of bytes consumed from ``f``.
    MqttUnsuback
        Object extracted from ``f``.
    """
    assert header.packet_type == MqttControlPacketType.unsuback

    # Cap reads at the remaining length declared by the fixed header.
    reader = mqtt_io.FileDecoder(mqtt_io.LimitReader(f, header.remaining_len))
    (packet_id,) = reader.unpack(mqtt_io.FIELD_PACKET_ID)

    num_consumed = reader.num_bytes_consumed
    if num_consumed != header.remaining_len:
        raise DecodeError('Extra bytes at end of packet.')
    return num_consumed, MqttUnsuback(packet_id)
|
Generates a `MqttUnsuback` packet given a
`MqttFixedHeader`. This method asserts that header.packet_type
is `unsuback`.
Parameters
----------
header: MqttFixedHeader
f: file
Object with a read method.
Raises
------
DecodeError
When there are extra bytes at the end of the packet.
Returns
-------
int
Number of bytes consumed from ``f``.
MqttUnsuback
Object extracted from ``f``.
|
def get_bucket_lifecycle(self, bucket):
    """
    Get the lifecycle configuration of a bucket.

    @param bucket: The name of the bucket.
    @return: A C{Deferred} that will fire with the bucket's lifecycle
        configuration.
    """
    query_details = self._details(
        method=b"GET",
        url_context=self._url_context(bucket=bucket, object_name="?lifecycle"),
    )
    deferred = self._submit(self._query_factory(query_details))
    # Parse the raw XML response into the lifecycle configuration object.
    deferred.addCallback(self._parse_lifecycle_config)
    return deferred
|
Get the lifecycle configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's lifecycle
configuration.
|
def open(self, path, mode='r'):
    """Open stream, returning ``Stream`` object"""
    entry = self.find(path)
    if entry is None:
        # A missing entry may only be created when writing.
        if mode == 'r':
            raise ValueError("stream does not exists: %s" % path)
        entry = self.create_dir_entry(path, 'stream', None)
    else:
        if not entry.isfile():
            raise ValueError("can only open stream type DirEntry's")
        if mode == 'w':
            logging.debug("stream: %s exists, overwriting" % path)
            # Release the old data chain and reset the entry to empty.
            self.free_fat_chain(entry.sector_id, entry.byte_size < self.min_stream_max_size)
            entry.sector_id = None
            entry.byte_size = 0
            entry.class_id = None
        elif mode == 'rw':
            pass
    return Stream(self, entry, mode)
|
Open stream, returning ``Stream`` object
|
def value_to_string(self, obj):
    """Convert a field value to a string.

    Returns the state name.
    """
    raw = self.value_from_object(obj)
    return self.to_python(raw).state.name
|
Convert a field value to a string.
Returns the state name.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.