| code (string, 75 to 104k characters) | docstring (string, 1 to 46.9k characters) |
|---|---|
def write_config(config, config_path=CONFIG_PATH):
"""Write the config to the output path.
Creates the necessary directories if they aren't there.
Args:
config (configparser.ConfigParser): A ConfigParser.
"""
config_dir = os.path.dirname(config_path)
if config_dir and not os.path.exists(config_dir):
    os.makedirs(config_dir)
with open(config_path, 'w', encoding='utf-8') as f:
config.write(f)
|
Write the config to the output path.
Creates the necessary directories if they aren't there.
Args:
config (configparser.ConfigParser): A ConfigParser.
|
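A minimal usage sketch for write_config, assuming the configparser and os imports are in place; the target path below is purely illustrative.

import configparser

config = configparser.ConfigParser()
config['server'] = {'host': 'localhost', 'port': '8080'}

# Persist the settings; write_config creates the parent directory if needed.
write_config(config, config_path='/tmp/myapp/config.ini')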
def IsKeyPressed(key: int) -> bool:
"""
key: int, a value in class `Keys`.
Return bool.
"""
state = ctypes.windll.user32.GetAsyncKeyState(key)
return bool(state & 0x8000)
|
key: int, a value in class `Keys`.
Return bool.
|
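A small polling sketch built on IsKeyPressed. It is Windows-only (GetAsyncKeyState lives in user32), and the virtual-key code 0x1B (Escape) is used here only for illustration.

import time

VK_ESCAPE = 0x1B  # virtual-key code for the Escape key

# Poll until Escape is pressed, checking roughly 20 times per second.
while not IsKeyPressed(VK_ESCAPE):
    time.sleep(0.05)
print("Escape pressed")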
def load_all_modules_in_packages(package_or_set_of_packages):
"""
Recursively loads all modules from a package object, or set of package objects
:param package_or_set_of_packages: package object, or iterable of package objects
:return: list of all unique modules discovered by the function
"""
if isinstance(package_or_set_of_packages, types.ModuleType):
packages = [package_or_set_of_packages]
elif isinstance(package_or_set_of_packages, Iterable) and not isinstance(package_or_set_of_packages, (dict, str)):
packages = package_or_set_of_packages
else:
raise Exception("This function only accepts a module reference, or an iterable of said objects")
imported = list(packages)  # use list() so any iterable is accepted and .append() below works
for package in packages:
if not hasattr(package, '__path__'):
raise Exception(
'Package object passed in has no __path__ attribute. '
'Make sure to pass in imported references to the packages in question.'
)
for module_finder, name, ispkg in pkgutil.walk_packages(package.__path__):
module_name = '{}.{}'.format(package.__name__, name)
current_module = importlib.import_module(module_name)
imported.append(current_module)
if ispkg:
imported += load_all_modules_in_packages(current_module)
for module in imported:
# This is to cover cases where simply importing a module doesn't execute all the code/definitions within
# I don't totally understand the reasons for this, but I do know enumerating a module's context (like with dir)
# seems to solve things
dir(module)
return list(
{
module.__name__: module
for module in imported
}.values()
)
|
Recursively loads all modules from a package object, or set of package objects
:param package_or_set_of_packages: package object, or iterable of package objects
:return: list of all unique modules discovered by the function
|
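A usage sketch, assuming a package named mypackage is importable on the current path; the package name is hypothetical and stands in for whatever package you want to walk.

import mypackage  # hypothetical package; pass the imported module object, not a string

modules = load_all_modules_in_packages(mypackage)
for module in modules:
    print(module.__name__)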
def predict(self, X, raw_score=False, num_iteration=None,
pred_leaf=False, pred_contrib=False, **kwargs):
"""Return the predicted value for each sample.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Input features matrix.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
num_iteration : int or None, optional (default=None)
Limit number of iterations in the prediction.
If None, if the best iteration exists, it is used; otherwise, all trees are used.
If <= 0, all trees are used (no limits).
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
Note
----
If you want to get more explanations for your model's predictions using SHAP values,
like SHAP interaction values,
you can install the shap package (https://github.com/slundberg/shap).
Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
column, where the last column is the expected value.
**kwargs
Other parameters for the prediction.
Returns
-------
predicted_result : array-like of shape = [n_samples] or shape = [n_samples, n_classes]
The predicted values.
X_leaves : array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]
If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
X_SHAP_values : array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes]
If ``pred_contrib=True``, the feature contributions for each sample.
"""
if self._n_features is None:
raise LGBMNotFittedError("Estimator not fitted, call `fit` before exploiting the model.")
if not isinstance(X, (DataFrame, DataTable)):
X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
n_features = X.shape[1]
if self._n_features != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features_ is %s and "
"input n_features is %s "
% (self._n_features, n_features))
return self.booster_.predict(X, raw_score=raw_score, num_iteration=num_iteration,
pred_leaf=pred_leaf, pred_contrib=pred_contrib, **kwargs)
|
Return the predicted value for each sample.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Input features matrix.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
num_iteration : int or None, optional (default=None)
Limit number of iterations in the prediction.
If None, if the best iteration exists, it is used; otherwise, all trees are used.
If <= 0, all trees are used (no limits).
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
Note
----
If you want to get more explanations for your model's predictions using SHAP values,
like SHAP interaction values,
you can install the shap package (https://github.com/slundberg/shap).
Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
column, where the last column is the expected value.
**kwargs
Other parameters for the prediction.
Returns
-------
predicted_result : array-like of shape = [n_samples] or shape = [n_samples, n_classes]
The predicted values.
X_leaves : array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]
If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
X_SHAP_values : array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes]
If ``pred_contrib=True``, the feature contributions for each sample.
|
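A usage sketch for this scikit-learn style predict wrapper, assuming the lightgbm package is installed; with pred_contrib=True the returned matrix carries one extra column holding the expected value, as noted above.

import numpy as np
import lightgbm as lgb

X = np.random.rand(100, 5)
y = np.random.randint(0, 2, size=100)

model = lgb.LGBMClassifier(n_estimators=10).fit(X, y)
preds = model.predict(X)                        # class labels
contribs = model.predict(X, pred_contrib=True)  # shape (100, 5 + 1) for binary classification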
def present(name,
clients=None,
hosts=None,
options=None,
exports='/etc/exports'):
'''
Ensure that the named export is present with the given options
name
The export path to configure
clients
A list of hosts and the options applied to them.
This option may not be used in combination with
the 'hosts' or 'options' shortcuts.
.. code-block:: yaml
- clients:
# First export
- hosts: '10.0.2.0/24'
options:
- 'rw'
# Second export
- hosts: '*.example.com'
options:
- 'ro'
- 'subtree_check'
hosts
A string matching a number of hosts, for example:
.. code-block:: yaml
hosts: '10.0.2.123'
hosts: '10.0.2.0/24'
hosts: 'minion1.example.com'
hosts: '*.example.com'
hosts: '*'
options
A list of NFS options, for example:
.. code-block:: yaml
options:
- 'rw'
- 'subtree_check'
'''
path = name
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if not clients:
if not hosts:
ret['result'] = False
ret['comment'] = 'Either \'clients\' or \'hosts\' must be defined'
return ret
# options being None is handled by add_export()
clients = [{'hosts': hosts, 'options': options}]
old = __salt__['nfs3.list_exports'](exports)
if path in old:
if old[path] == clients:
ret['result'] = True
ret['comment'] = 'Export {0} already configured'.format(path)
return ret
ret['changes']['new'] = clients
ret['changes']['old'] = old[path]
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Export {0} would be changed'.format(path)
return ret
__salt__['nfs3.del_export'](exports, path)
else:
ret['changes']['old'] = None
ret['changes']['new'] = clients
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Export {0} would be added'.format(path)
return ret
add_export = __salt__['nfs3.add_export']
for exp in clients:
add_export(exports, path, exp['hosts'], exp['options'])
ret['changes']['new'] = clients
try_reload = __salt__['nfs3.reload_exports']()
ret['comment'] = try_reload['stderr']
ret['result'] = try_reload['result']
return ret
|
Ensure that the named export is present with the given options
name
The export path to configure
clients
A list of hosts and the options applied to them.
This option may not be used in combination with
the 'hosts' or 'options' shortcuts.
.. code-block:: yaml
- clients:
# First export
- hosts: '10.0.2.0/24'
options:
- 'rw'
# Second export
- hosts: '*.example.com'
options:
- 'ro'
- 'subtree_check'
hosts
A string matching a number of hosts, for example:
.. code-block:: yaml
hosts: '10.0.2.123'
hosts: '10.0.2.0/24'
hosts: 'minion1.example.com'
hosts: '*.example.com'
hosts: '*'
options
A list of NFS options, for example:
.. code-block:: yaml
options:
- 'rw'
- 'subtree_check'
|
def create_poi_gdf(polygon=None, amenities=None, north=None, south=None, east=None, west=None):
"""
Parse GeoDataFrames from POI json that was returned by Overpass API.
Parameters
----------
polygon : shapely Polygon or MultiPolygon
geographic shape to fetch the POIs within
amenities: list
List of amenities that will be used for finding the POIs from the selected area.
See available amenities from: http://wiki.openstreetmap.org/wiki/Key:amenity
north : float
northern latitude of bounding box
south : float
southern latitude of bounding box
east : float
eastern longitude of bounding box
west : float
western longitude of bounding box
Returns
-------
Geopandas GeoDataFrame with POIs and the associated attributes.
"""
responses = osm_poi_download(polygon=polygon, amenities=amenities, north=north, south=south, east=east, west=west)
# Parse coordinates from all the nodes in the response
coords = parse_nodes_coords(responses)
# POI nodes
poi_nodes = {}
# POI ways
poi_ways = {}
# A list of POI relations
relations = []
for result in responses['elements']:
if result['type'] == 'node' and 'tags' in result:
poi = parse_osm_node(response=result)
# Add element_type
poi['element_type'] = 'node'
# Add to 'pois'
poi_nodes[result['id']] = poi
elif result['type'] == 'way':
# Parse POI area Polygon
poi_area = parse_polygonal_poi(coords=coords, response=result)
if poi_area:
# Add element_type
poi_area['element_type'] = 'way'
# Add to 'poi_ways'
poi_ways[result['id']] = poi_area
elif result['type'] == 'relation':
# Add relation to a relation list (needs to be parsed after all nodes and ways have been parsed)
relations.append(result)
# Create GeoDataFrames
gdf_nodes = gpd.GeoDataFrame(poi_nodes).T
gdf_nodes.crs = settings.default_crs
gdf_ways = gpd.GeoDataFrame(poi_ways).T
gdf_ways.crs = settings.default_crs
# Parse relations (MultiPolygons) from 'ways'
gdf_ways = parse_osm_relations(relations=relations, osm_way_df=gdf_ways)
# Combine GeoDataFrames
gdf = gdf_nodes.append(gdf_ways, sort=False)
return gdf
|
Parse GeoDataFrames from POI json that was returned by Overpass API.
Parameters
----------
polygon : shapely Polygon or MultiPolygon
geographic shape to fetch the POIs within
amenities: list
List of amenities that will be used for finding the POIs from the selected area.
See available amenities from: http://wiki.openstreetmap.org/wiki/Key:amenity
north : float
northern latitude of bounding box
south : float
southern latitude of bounding box
east : float
eastern longitude of bounding box
west : float
western longitude of bounding box
Returns
-------
Geopandas GeoDataFrame with POIs and the associated attributes.
|
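A usage sketch, calling the function with a bounding box and a couple of amenity tags; the coordinates are arbitrary illustrative values and the call performs a live Overpass API request.

gdf = create_poi_gdf(amenities=['cafe', 'restaurant'],
                     north=60.18, south=60.16, east=24.96, west=24.93)
print(gdf['element_type'].value_counts())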
def get_all_synDelays(self):
"""
Create and load arrays of connection delays per connection on this rank.
Draws random, normally distributed synaptic delays and returns a dict of
nested lists of the same shape as SpCells.
Delays are rounded to dt.
This function takes no kwargs.
Parameters
----------
None
Returns
-------
dict
``output[cellindex][populationname][layerindex]``, np.array of
delays per connection.
See also
--------
numpy.random.normal
"""
tic = time()
#ok then, we will draw random numbers across ranks, which have to
#be unique per cell. Now, we simply record the random state,
#change the seed per cell, and put the original state back below.
randomstate = np.random.get_state()
#container
delays = {}
for cellindex in self.RANK_CELLINDICES:
#set the random seed on for each cellindex
np.random.seed(self.POPULATIONSEED + cellindex + 2*self.POPULATION_SIZE)
delays[cellindex] = {}
for j, X in enumerate(self.X):
delays[cellindex][X] = []
for i in self.k_yXL[:, j]:
loc = self.synDelayLoc[j]
loc /= self.dt
scale = self.synDelayScale[j]
if scale is not None:
scale /= self.dt
delay = np.random.normal(loc, scale, i).astype(int)
while np.any(delay < 1):
inds = delay < 1
delay[inds] = np.random.normal(loc, scale,
inds.sum()).astype(int)
delay = delay.astype(float)
delay *= self.dt
else:
delay = np.zeros(i) + self.synDelayLoc[j]
delays[cellindex][X].append(delay)
#reset the random number generator
np.random.set_state(randomstate)
if RANK == 0:
print('found delays in %.2f seconds' % (time()-tic))
return delays
|
Create and load arrays of connection delays per connection on this rank.
Draws random, normally distributed synaptic delays and returns a dict of
nested lists of the same shape as SpCells.
Delays are rounded to dt.
This function takes no kwargs.
Parameters
----------
None
Returns
-------
dict
``output[cellindex][populationname][layerindex]``, np.array of
delays per connection.
See also
--------
numpy.random.normal
|
def remove(name=None, pkgs=None, **kwargs):
'''
Removes packages with ``brew uninstall``.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
'''
try:
pkg_params = __salt__['pkg_resource.parse_targets'](
name, pkgs, **kwargs
)[0]
except MinionError as exc:
raise CommandExecutionError(exc)
old = list_pkgs()
targets = [x for x in pkg_params if x in old]
if not targets:
return {}
cmd = 'uninstall {0}'.format(' '.join(targets))
out = _call_brew(cmd)
if out['retcode'] != 0 and out['stderr']:
errors = [out['stderr']]
else:
errors = []
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
'Problem encountered removing package(s)',
info={'errors': errors, 'changes': ret}
)
return ret
|
Removes packages with ``brew uninstall``.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
|
def _EntriesGenerator(self):
"""Retrieves directory entries.
Since a directory can contain a vast number of entries, using
a generator is more memory efficient.
Yields:
TARPathSpec: TAR path specification.
"""
location = getattr(self.path_spec, 'location', None)
if location and location.startswith(self._file_system.PATH_SEPARATOR):
# The TAR info name does not have the leading path separator as
# the location string does.
tar_path = location[1:]
# Set of top level sub directories that have been yielded.
processed_directories = set()
tar_file = self._file_system.GetTARFile()
for tar_info in iter(tar_file.getmembers()):
path = tar_info.name
# Determine if the start of the TAR info name is similar to
# the location string. If not the file TAR info refers to is not in
# the same directory.
if not path or not path.startswith(tar_path):
continue
# Ignore the directory itself.
if path == tar_path:
continue
path_segment, suffix = self._file_system.GetPathSegmentAndSuffix(
tar_path, path)
if not path_segment:
continue
# Sometimes the TAR file lacks directories, therefore we will
# provide virtual ones.
if suffix:
path_spec_location = self._file_system.JoinPath([
location, path_segment])
is_directory = True
else:
path_spec_location = self._file_system.JoinPath([path])
is_directory = tar_info.isdir()
if is_directory:
if path_spec_location in processed_directories:
continue
processed_directories.add(path_spec_location)
yield tar_path_spec.TARPathSpec(
location=path_spec_location, parent=self.path_spec.parent)
|
Retrieves directory entries.
Since a directory can contain a vast number of entries, using
a generator is more memory efficient.
Yields:
TARPathSpec: TAR path specification.
|
def get_connection(self, alias='default'):
"""
Retrieve a connection, construct it if necessary (only configuration
was passed to us). If a non-string alias has been passed through we
assume it's already a client instance and will just return it as-is.
Raises ``KeyError`` if no client (or its definition) is registered
under the alias.
"""
# do not check isinstance(Elasticsearch) so that people can wrap their
# clients
if not isinstance(alias, string_types):
return alias
# connection already established
try:
return self._conns[alias]
except KeyError:
pass
# if not, try to create it
try:
return self.create_connection(alias, **self._kwargs[alias])
except KeyError:
# no connection and no kwargs to set one up
raise KeyError('There is no connection with alias %r.' % alias)
|
Retrieve a connection, construct it if necessary (only configuration
was passed to us). If a non-string alias has been passed through we
assume it's already a client instance and will just return it as-is.
Raises ``KeyError`` if no client (or its definition) is registered
under the alias.
|
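The same lazy-registry pattern in a minimal standalone sketch (the names here are illustrative, not the library's API): configuration is stored per alias, and a client is only constructed the first time it is requested.

class Registry:
    def __init__(self):
        self._conns = {}    # alias -> constructed client
        self._kwargs = {}   # alias -> configuration for lazy construction

    def configure(self, alias, **kwargs):
        self._kwargs[alias] = kwargs

    def create_connection(self, alias, **kwargs):
        conn = dict(kwargs)  # stand-in for a real client object
        self._conns[alias] = conn
        return conn

    def get_connection(self, alias='default'):
        if not isinstance(alias, str):
            return alias                      # already a client instance
        try:
            return self._conns[alias]         # connection already established
        except KeyError:
            pass
        try:
            return self.create_connection(alias, **self._kwargs[alias])
        except KeyError:
            raise KeyError('There is no connection with alias %r.' % alias)

registry = Registry()
registry.configure('default', hosts=['localhost:9200'])
client = registry.get_connection()  # constructed on first access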
def invalid_request_content(message):
"""
Creates a Lambda Service InvalidRequestContent Response
Parameters
----------
message str
Message to be added to the body of the response
Returns
-------
Flask.Response
A response object representing the InvalidRequestContent Error
"""
exception_tuple = LambdaErrorResponses.InvalidRequestContentException
return BaseLocalService.service_response(
LambdaErrorResponses._construct_error_response_body(LambdaErrorResponses.USER_ERROR, message),
LambdaErrorResponses._construct_headers(exception_tuple[0]),
exception_tuple[1]
)
|
Creates a Lambda Service InvalidRequestContent Response
Parameters
----------
message str
Message to be added to the body of the response
Returns
-------
Flask.Response
A response object representing the InvalidRequestContent Error
|
def memoize(method):
"""
A decorator for functions of sources, which memoizes the results of the last _CACHE_SIZE calls.
:param method: method to be memoized
:return: the decorated method
"""
cache = method.cache = collections.OrderedDict()
# Put these two methods in the local space (faster)
_get = cache.get
_popitem = cache.popitem
@functools.wraps(method)
def memoizer(instance, x, *args, **kwargs):
if not _WITH_MEMOIZATION or isinstance(x, u.Quantity):
# Memoization is not active or using units, do not use memoization
return method(instance, x, *args, **kwargs)
# Create a tuple because a tuple is hashable
unique_id = tuple(float(yy.value) for yy in instance.parameters.values()) + (x.size, x.min(), x.max())
# Create a unique identifier for this combination of inputs
key = hash(unique_id)
# Let's do it this way so we only look into the dictionary once
result = _get(key)
if result is not None:
return result
else:
result = method(instance, x, *args, **kwargs)
cache[key] = result
if len(cache) > _CACHE_SIZE:
# Remove half of the element (but at least 1, even if _CACHE_SIZE=1, which would be pretty idiotic ;-) )
[_popitem(False) for i in range(max(_CACHE_SIZE // 2, 1))]
return result
# Add the function as a "attribute" so we can access it
memoizer.input_object = method
return memoizer
|
A decorator for functions of sources, which memoizes the results of the last _CACHE_SIZE calls.
:param method: method to be memoized
:return: the decorated method
|
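A stripped-down sketch of the same technique, a bounded OrderedDict cache keyed on a hash of the inputs, without the source-specific parameter handling; the names and cache size here are illustrative.

import collections
import functools

_CACHE_SIZE = 128

def memoize_simple(func):
    cache = collections.OrderedDict()

    @functools.wraps(func)
    def wrapper(*args):
        key = hash(args)
        if key in cache:
            return cache[key]
        result = func(*args)
        cache[key] = result
        if len(cache) > _CACHE_SIZE:
            # Evict roughly half of the oldest entries, but at least one.
            for _ in range(max(_CACHE_SIZE // 2, 1)):
                cache.popitem(last=False)
        return result
    return wrapper

@memoize_simple
def slow_square(x):
    return x * x

print(slow_square(4), slow_square(4))  # the second call hits the cache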
def fromdescriptor(cls, desc):
"""
Create a :class:`~manticore.core.workspace.Store` instance depending on the descriptor.
Valid descriptors:
* fs:<path>
* redis:<hostname>:<port>
* mem:
:param str desc: Store descriptor
:return: Store instance
"""
type_, uri = ('fs', None) if desc is None else desc.split(':', 1)
for subclass in cls.__subclasses__():
if subclass.store_type == type_:
return subclass(uri)
raise NotImplementedError(f"Storage type '{type_}' not supported.")
|
Create a :class:`~manticore.core.workspace.Store` instance depending on the descriptor.
Valid descriptors:
* fs:<path>
* redis:<hostname>:<port>
* mem:
:param str desc: Store descriptor
:return: Store instance
|
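A minimal sketch of the same subclass-dispatch idea, resolving a 'type:uri' descriptor against each subclass's store_type attribute; the classes here are illustrative stand-ins, not Manticore's.

class Store:
    store_type = None

    def __init__(self, uri):
        self.uri = uri

    @classmethod
    def fromdescriptor(cls, desc):
        type_, uri = ('fs', None) if desc is None else desc.split(':', 1)
        for subclass in cls.__subclasses__():
            if subclass.store_type == type_:
                return subclass(uri)
        raise NotImplementedError(f"Storage type '{type_}' not supported.")

class FilesystemStore(Store):
    store_type = 'fs'

class MemoryStore(Store):
    store_type = 'mem'

store = Store.fromdescriptor('fs:/tmp/workspace')
assert isinstance(store, FilesystemStore)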
def create(cls, **kwargs):
"""
Create an instance of this model in the database.
Takes the model column values as keyword arguments. Setting a value to
`None` is equivalent to running a CQL `DELETE` on that column.
Returns the instance.
"""
extra_columns = set(kwargs.keys()) - set(cls._columns.keys())
if extra_columns:
raise ValidationError("Incorrect columns passed: {0}".format(extra_columns))
return cls.objects.create(**kwargs)
|
Create an instance of this model in the database.
Takes the model column values as keyword arguments. Setting a value to
`None` is equivalent to running a CQL `DELETE` on that column.
Returns the instance.
|
def a(text, mode='exec', indent=' ', file=None):
"""
Interactive convenience for displaying the AST of a code string.
Writes a pretty-formatted AST-tree to `file`.
Parameters
----------
text : str
Text of Python code to render as AST.
mode : {'exec', 'eval'}, optional
Mode for `ast.parse`. Default is 'exec'.
indent : str, optional
String to use for indenting nested expressions. Default is two spaces.
file : None or file-like object, optional
File to use to print output. If the default of `None` is passed, we
use sys.stdout.
"""
pprint_ast(parse(text, mode=mode), indent=indent, file=file)
|
Interactive convenience for displaying the AST of a code string.
Writes a pretty-formatted AST-tree to `file`.
Parameters
----------
text : str
Text of Python code to render as AST.
mode : {'exec', 'eval'}, optional
Mode for `ast.parse`. Default is 'exec'.
indent : str, optional
String to use for indenting nested expressions. Default is two spaces.
file : None or file-like object, optional
File to use to print output. If the default of `None` is passed, we
use sys.stdout.
|
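For a quick stand-in without the project's pprint_ast helper, the standard library can render a similar tree; ast.dump grew an indent parameter in Python 3.9, so this sketch assumes that version or later.

import ast

def show_ast(text, mode='exec', indent=2, file=None):
    # Parse the source and print an indented dump of the AST.
    print(ast.dump(ast.parse(text, mode=mode), indent=indent), file=file)

show_ast("x = [i * i for i in range(3)]")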
def disable_jt_ha(self, active_name):
"""
Disable high availability for a MR JobTracker active-standby pair.
@param active_name: name of the JobTracker that will be active after
the disable operation. The other JobTracker and
Failover Controllers will be removed.
@return: Reference to the submitted command.
"""
args = dict(
activeName = active_name,
)
return self._cmd('disableJtHa', data=args)
|
Disable high availability for a MR JobTracker active-standby pair.
@param active_name: name of the JobTracker that will be active after
the disable operation. The other JobTracker and
Failover Controllers will be removed.
@return: Reference to the submitted command.
|
def add_regression_events(obj, events, targets, weights=None, test=False):
"""Add regression events to a TMVA::Factory or TMVA::DataLoader from NumPy arrays.
Parameters
----------
obj : TMVA::Factory or TMVA::DataLoader
A TMVA::Factory or TMVA::DataLoader (TMVA's interface as of ROOT
6.07/04) instance with variables already
booked in exactly the same order as the columns in ``events``.
events : numpy array of shape [n_events, n_variables]
A two-dimensional NumPy array containing the rows of events and columns
of variables. The order of the columns must match the order in which
you called ``AddVariable()`` for each variable.
targets : numpy array of shape [n_events] or [n_events, n_targets]
The target value(s) for each event in ``events``. For multiple target
values, ``targets`` must be a two-dimensional array with a column for
each target in the same order in which you called ``AddTarget()``.
weights : numpy array of shape [n_events], optional
Event weights.
test : bool, optional (default=False)
If True, then the events will be added as test events, otherwise they
are added as training events by default.
Notes
-----
A TMVA::Factory or TMVA::DataLoader requires you to add both training and
test events even if you don't intend to call ``TestAllMethods()``.
"""
if NEW_TMVA_API: # pragma: no cover
if not isinstance(obj, TMVA.DataLoader):
raise TypeError(
"obj must be a TMVA.DataLoader "
"instance for ROOT >= 6.07/04")
else: # pragma: no cover
if not isinstance(obj, TMVA.Factory):
raise TypeError(
"obj must be a TMVA.Factory instance")
events = np.ascontiguousarray(events, dtype=np.float64)
if events.ndim == 1:
# convert to 2D
events = events[:, np.newaxis]
elif events.ndim != 2:
raise ValueError(
"events must be a two-dimensional array "
"with one event per row")
targets = np.asarray(targets, dtype=np.float64)
if targets.shape[0] != events.shape[0]:
raise ValueError("the lengths of events and targets do not match")
if targets.ndim == 1:
# convert to 2D
targets = targets[:, np.newaxis]
elif targets.ndim > 2:
raise ValueError("targets can not have more than two dimensions")
if weights is not None:
weights = np.asarray(weights, dtype=np.float64)
if weights.shape[0] != events.shape[0]:
raise ValueError("numbers of events and weights do not match")
if weights.ndim != 1:
raise ValueError("weights must be one-dimensional")
_libtmvanumpy.add_events_regression(
ROOT.AsCObject(obj), events, targets, weights, test)
|
Add regression events to a TMVA::Factory or TMVA::DataLoader from NumPy arrays.
Parameters
----------
obj : TMVA::Factory or TMVA::DataLoader
A TMVA::Factory or TMVA::DataLoader (TMVA's interface as of ROOT
6.07/04) instance with variables already
booked in exactly the same order as the columns in ``events``.
events : numpy array of shape [n_events, n_variables]
A two-dimensional NumPy array containing the rows of events and columns
of variables. The order of the columns must match the order in which
you called ``AddVariable()`` for each variable.
targets : numpy array of shape [n_events] or [n_events, n_targets]
The target value(s) for each event in ``events``. For multiple target
values, ``targets`` must be a two-dimensional array with a column for
each target in the same order in which you called ``AddTarget()``.
weights : numpy array of shape [n_events], optional
Event weights.
test : bool, optional (default=False)
If True, then the events will be added as test events, otherwise they
are added as training events by default.
Notes
-----
A TMVA::Factory or TMVA::DataLoader requires you to add both training and
test events even if you don't intend to call ``TestAllMethods()``.
|
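The array normalization the function applies can be previewed on its own with plain NumPy, which helps when preparing inputs; this is a standalone sketch, not root_numpy's API.

import numpy as np

def normalize_regression_arrays(events, targets, weights=None):
    events = np.ascontiguousarray(events, dtype=np.float64)
    if events.ndim == 1:
        events = events[:, np.newaxis]           # single variable -> one column
    targets = np.asarray(targets, dtype=np.float64)
    if targets.ndim == 1:
        targets = targets[:, np.newaxis]         # single target -> one column
    if targets.shape[0] != events.shape[0]:
        raise ValueError("the lengths of events and targets do not match")
    if weights is not None:
        weights = np.asarray(weights, dtype=np.float64)
        if weights.shape != (events.shape[0],):
            raise ValueError("weights must be one-dimensional and match events")
    return events, targets, weights

ev, tg, wt = normalize_regression_arrays([1.0, 2.0, 3.0], [0.5, 1.5, 2.5])
print(ev.shape, tg.shape)  # (3, 1) (3, 1)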
def run_diff_umap(self,use_rep='X_pca', metric='euclidean', n_comps=15,
method='gauss', **kwargs):
"""
Experimental -- running UMAP on the diffusion components
"""
import scanpy.api as sc
sc.pp.neighbors(self.adata,use_rep=use_rep,n_neighbors=self.k,
metric=self.distance,method=method)
sc.tl.diffmap(self.adata, n_comps=n_comps)
sc.pp.neighbors(self.adata,use_rep='X_diffmap',n_neighbors=self.k,
metric='euclidean',method=method)
if 'X_umap' in self.adata.obsm.keys():
self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']
sc.tl.umap(self.adata,min_dist=0.1,copy=False)
|
Experimental -- running UMAP on the diffusion components
|
def _group_kwargs_to_options(cls, obj, kwargs):
"Format option group kwargs into canonical options format"
groups = Options._option_groups
if set(kwargs.keys()) - set(groups):
bad_keys = set(kwargs.keys()) - set(groups)
raise Exception("Keyword options %s must be one of %s" % (
    ','.join(repr(k) for k in sorted(bad_keys)),
    ','.join(repr(g) for g in groups)))
elif not all(isinstance(v, dict) for v in kwargs.values()):
raise Exception("The %s options must be specified using dictionary groups" %
','.join(repr(k) for k in kwargs.keys()))
# Check whether the user is specifying targets (such as 'Image.Foo')
targets = [grp and all(k[0].isupper() for k in grp) for grp in kwargs.values()]
if any(targets) and not all(targets):
raise Exception("Cannot mix target specification keys such as 'Image' with non-target keywords.")
elif not any(targets):
# Not targets specified - add current object as target
sanitized_group = util.group_sanitizer(obj.group)
if obj.label:
identifier = ('%s.%s.%s' % (
obj.__class__.__name__, sanitized_group,
util.label_sanitizer(obj.label)))
elif sanitized_group != obj.__class__.__name__:
identifier = '%s.%s' % (obj.__class__.__name__, sanitized_group)
else:
identifier = obj.__class__.__name__
options = {identifier:{grp:kws for (grp,kws) in kwargs.items()}}
else:
dfltdict = defaultdict(dict)
for grp, entries in kwargs.items():
for identifier, kws in entries.items():
dfltdict[identifier][grp] = kws
options = dict(dfltdict)
return options
|
Format option group kwargs into canonical options format
|
def generate_contains(self):
"""
Means that array must contain at least one defined item.
.. code-block:: python
{
'contains': {
'type': 'number',
},
}
Valid array is any with at least one number.
"""
self.create_variable_is_list()
with self.l('if {variable}_is_list:'):
contains_definition = self._definition['contains']
if contains_definition is False:
self.l('raise JsonSchemaException("{name} is always invalid")')
elif contains_definition is True:
with self.l('if not {variable}:'):
self.l('raise JsonSchemaException("{name} must not be empty")')
else:
self.l('{variable}_contains = False')
with self.l('for {variable}_key in {variable}:'):
with self.l('try:'):
self.generate_func_code_block(
contains_definition,
'{}_key'.format(self._variable),
self._variable_name,
clear_variables=True,
)
self.l('{variable}_contains = True')
self.l('break')
self.l('except JsonSchemaException: pass')
with self.l('if not {variable}_contains:'):
self.l('raise JsonSchemaException("{name} must contain one of contains definition")')
|
Means that array must contain at least one defined item.
.. code-block:: python
{
'contains': {
'type': 'number',
},
}
Valid array is any with at least one number.
|
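A plain-Python equivalent of what the generated code checks for the 'contains' keyword, using a validate callback in place of the compiled sub-schema; this is a sketch for illustration, not fastjsonschema's output.

def check_contains(value, item_is_valid):
    """Return True if `value` is a list with at least one item accepted by `item_is_valid`."""
    if not isinstance(value, list):
        return True                 # the keyword only constrains arrays
    return any(item_is_valid(item) for item in value)

# Valid array is any with at least one number.
is_number = lambda item: isinstance(item, (int, float)) and not isinstance(item, bool)
assert check_contains([1, 'a'], is_number)
assert not check_contains(['a', 'b'], is_number)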
def _set_sfp(self, v, load=False):
"""
Setter method for sfp, mapped from YANG variable /rbridge_id/system_monitor/sfp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_sfp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sfp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=sfp.sfp, is_container='container', presence=False, yang_name="sfp", rest_name="sfp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure alert for component:SFP', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sfp must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=sfp.sfp, is_container='container', presence=False, yang_name="sfp", rest_name="sfp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure alert for component:SFP', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)""",
})
self.__sfp = t
if hasattr(self, '_set'):
self._set()
|
Setter method for sfp, mapped from YANG variable /rbridge_id/system_monitor/sfp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_sfp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sfp() directly.
|
def resetTimeout(self):
"""Reset the timeout count down"""
if self.__timeoutCall is not None and self.timeOut is not None:
self.__timeoutCall.reset(self.timeOut)
|
Reset the timeout count down
|
def delete_merged_branches(self, **kwargs):
"""Delete merged branches.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server failed to perform the request
"""
path = '/projects/%s/repository/merged_branches' % self.get_id()
self.manager.gitlab.http_delete(path, **kwargs)
|
Delete merged branches.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server failed to perform the request
|
def sample_from_prior(self):
""" Sample elements of the stochastic transition matrix from the prior
distribution, based on gradable adjectives. """
# simple_path_dict caches the results of the graph traversal that finds
# simple paths between pairs of nodes, so that it doesn't have to be
# executed for every sampled transition matrix.
node_pairs = list(permutations(self.nodes(), 2))
simple_path_dict = {
node_pair: [
list(pairwise(path))
for path in nx.all_simple_paths(self, *node_pair)
]
for node_pair in node_pairs
}
self.transition_matrix_collection = []
elements = self.get_latent_state_components()
for i in range(self.res):
A = pd.DataFrame(
np.identity(2 * len(self)), index=elements, columns=elements
)
for node in self.nodes:
A[f"∂({node})/∂t"][node] = self.Δt
for node_pair in node_pairs:
A[f"∂({node_pair[0]})/∂t"][node_pair[1]] = sum(
np.prod(
[
self.edges[edge[0], edge[1]]["βs"][i]
for edge in simple_path_edge_list
]
)
* self.Δt
for simple_path_edge_list in simple_path_dict[node_pair]
)
self.transition_matrix_collection.append(A)
|
Sample elements of the stochastic transition matrix from the prior
distribution, based on gradable adjectives.
|
def _parse_fc(self, f, natom, dim):
"""Parse force constants part
Physical unit of force constants in the file is Ry/au^2.
"""
ndim = np.prod(dim)
fc = np.zeros((natom, natom * ndim, 3, 3), dtype='double', order='C')
for k, l, i, j in np.ndindex((3, 3, natom, natom)):
line = f.readline()
for i_dim in range(ndim):
line = f.readline()
# fc[i, j * ndim + i_dim, k, l] = float(line.split()[3])
fc[j, i * ndim + i_dim, l, k] = float(line.split()[3])
return fc
|
Parse force constants part
Physical unit of force constants in the file is Ry/au^2.
|
def make_valid_pyclipper(shape):
"""
Use the pyclipper library to "union" a polygon on its own. This operation
uses the even-odd rule to determine which points are in the interior of
the polygon, and can reconstruct the orientation of the polygon from that.
The pyclipper library is robust, and uses integer coordinates, so should
not produce any additional degeneracies.
Before cleaning the polygon, we remove all degenerate inners. This is
useful to remove inners which have collapsed to points or lines, which can
interfere with the cleaning process.
"""
# drop all degenerate inners
clean_shape = _drop_degenerate_inners(shape)
pc = pyclipper.Pyclipper()
try:
pc.AddPaths(_coords(clean_shape), pyclipper.PT_SUBJECT, True)
# note: Execute2 returns the polygon tree, not the list of paths
result = pc.Execute2(pyclipper.CT_UNION, pyclipper.PFT_EVENODD)
except pyclipper.ClipperException:
return MultiPolygon([])
return _polytree_to_shapely(result)
|
Use the pyclipper library to "union" a polygon on its own. This operation
uses the even-odd rule to determine which points are in the interior of
the polygon, and can reconstruct the orientation of the polygon from that.
The pyclipper library is robust, and uses integer coordinates, so should
not produce any additional degeneracies.
Before cleaning the polygon, we remove all degenerate inners. This is
useful to remove inners which have collapsed to points or lines, which can
interfere with the cleaning process.
|
def resp_set_infrared(self, resp, infrared_brightness=None):
"""Default callback for set_infrared/get_infrared
"""
if infrared_brightness is not None:
self.infrared_brightness = infrared_brightness
elif resp:
self.infrared_brightness = resp.infrared_brightness
|
Default callback for set_infrared/get_infrared
|
def get_root_list(class_path=None, cursor=None, count=50):
"""Gets a list of root Pipelines.
Args:
class_path: Optional. If supplied, only return root Pipelines with the
given class_path. By default all root pipelines are returned.
cursor: Optional. When supplied, the cursor returned from the last call to
get_root_list which indicates where to pick up.
count: How many pipeline records to return.
Returns:
Dictionary with the keys:
pipelines: The list of Pipeline records in the same format as
returned by get_status_tree, but with only the roots listed.
cursor: Cursor to pass back to this function to resume the query. Will
only be present if there is another page of results.
Raises:
PipelineStatusError if any input is bad.
"""
query = _PipelineRecord.all(cursor=cursor)
if class_path:
query.filter('class_path =', class_path)
query.filter('is_root_pipeline =', True)
query.order('-start_time')
root_list = query.fetch(count)
fetch_list = []
for pipeline_record in root_list:
fetch_list.append(db.Key(pipeline_record.params['output_slots']['default']))
fetch_list.append(db.Key.from_path(
_BarrierRecord.kind(), _BarrierRecord.FINALIZE,
parent=pipeline_record.key()))
fetch_list.append(db.Key.from_path(
_StatusRecord.kind(), pipeline_record.key().name()))
pipeline_dict = dict((stage.key(), stage) for stage in root_list)
slot_dict = {}
barrier_dict = {}
status_dict = {}
for entity in db.get(fetch_list):
if isinstance(entity, _BarrierRecord):
barrier_dict[entity.key()] = entity
elif isinstance(entity, _SlotRecord):
slot_dict[entity.key()] = entity
elif isinstance(entity, _StatusRecord):
status_dict[entity.key()] = entity
results = []
for pipeline_record in root_list:
try:
output = _get_internal_status(
pipeline_record.key(),
pipeline_dict=pipeline_dict,
slot_dict=slot_dict,
barrier_dict=barrier_dict,
status_dict=status_dict)
output['pipelineId'] = pipeline_record.key().name()
results.append(output)
except PipelineStatusError as e:
output = {'status': e.message}
output['classPath'] = ''
output['pipelineId'] = pipeline_record.key().name()
results.append(output)
result_dict = {}
cursor = query.cursor()
query.with_cursor(cursor)
if query.get(keys_only=True):
result_dict.update(cursor=cursor)
result_dict.update(pipelines=results)
return result_dict
|
Gets a list of root Pipelines.
Args:
class_path: Optional. If supplied, only return root Pipelines with the
given class_path. By default all root pipelines are returned.
cursor: Optional. When supplied, the cursor returned from the last call to
get_root_list which indicates where to pick up.
count: How many pipeline records to return.
Returns:
Dictionary with the keys:
pipelines: The list of Pipeline records in the same format as
returned by get_status_tree, but with only the roots listed.
cursor: Cursor to pass back to this function to resume the query. Will
only be present if there is another page of results.
Raises:
PipelineStatusError if any input is bad.
|
def _HandleLegacy(self, args, token=None):
"""Retrieves the clients for a hunt."""
hunt_urn = args.hunt_id.ToURN()
hunt_obj = aff4.FACTORY.Open(
hunt_urn, aff4_type=implementation.GRRHunt, token=token)
clients_by_status = hunt_obj.GetClientsByStatus()
hunt_clients = clients_by_status[args.client_status.name]
total_count = len(hunt_clients)
if args.count:
hunt_clients = sorted(hunt_clients)[args.offset:args.offset + args.count]
else:
hunt_clients = sorted(hunt_clients)[args.offset:]
flow_id = "%s:hunt" % hunt_urn.Basename()
results = [
ApiHuntClient(client_id=c.Basename(), flow_id=flow_id)
for c in hunt_clients
]
return ApiListHuntClientsResult(items=results, total_count=total_count)
|
Retrieves the clients for a hunt.
|
def copyto(self,
new_abspath=None,
new_dirpath=None,
new_dirname=None,
new_basename=None,
new_fname=None,
new_ext=None,
overwrite=False,
makedirs=False):
"""
Copy this file to another location.
"""
self.assert_exists()
p = self.change(
new_abspath=new_abspath,
new_dirpath=new_dirpath,
new_dirname=new_dirname,
new_basename=new_basename,
new_fname=new_fname,
new_ext=new_ext,
)
if p.is_not_exist_or_allow_overwrite(overwrite=overwrite):
# Only perform the copy if the two paths differ
if self.abspath != p.abspath:
try:
shutil.copy(self.abspath, p.abspath)
except IOError as e:
if makedirs:
os.makedirs(p.parent.abspath)
shutil.copy(self.abspath, p.abspath)
else:
raise e
return p
|
Copy this file to another location.
|
def dst_addr(self):
"""
The packet destination address.
"""
try:
return socket.inet_ntop(self._af, self.raw[self._dst_addr].tobytes())
except (ValueError, socket.error):
pass
|
The packet destination address.
|
def exclude_functions(self, *funcs):
"""
Excludes the contributions from the following functions.
"""
for f in funcs:
f.exclude = True
run_time_s = sum(0 if s.exclude else s.own_time_s for s in self.stats)
cProfileFuncStat.run_time_s = run_time_s
|
Excludes the contributions of the given functions.
|
def clear_jobs(self, recursive=True):
"""Clear the self.jobs dictionary that contains information
about jobs associated with this `ScatterGather`
If recursive is True this will include jobs from all internal `Link`
"""
if recursive:
self._scatter_link.clear_jobs(recursive)
self.jobs.clear()
|
Clear the self.jobs dictionary that contains information
about jobs associated with this `ScatterGather`.
If recursive is True this will include jobs from all internal `Link` objects.
|
def get_content_html(request):
"""Retrieve content as HTML using the ident-hash (uuid@version)."""
result = _get_content_json()
media_type = result['mediaType']
if media_type == COLLECTION_MIMETYPE:
content = tree_to_html(result['tree'])
else:
content = result['content']
resp = request.response
resp.body = content
resp.status = "200 OK"
resp.content_type = 'application/xhtml+xml'
return result, resp
|
Retrieve content as HTML using the ident-hash (uuid@version).
|
def parent_subfolders(self, ident, ann_id=None):
'''An unordered generator of parent subfolders for ``ident``.
``ident`` can either be a ``content_id`` or a tuple of
``(content_id, subtopic_id)``.
Parent subfolders are limited to the annotator id given.
:param ident: identifier
:type ident: ``str`` or ``(str, str)``
:param str ann_id: Username
:rtype: generator of ``(folder_id, subfolder_id)``
'''
ann_id = self._annotator(ann_id)
cid, _ = normalize_ident(ident)
for lab in self.label_store.directly_connected(ident):
folder_cid = lab.other(cid)
subfolder_sid = lab.subtopic_for(folder_cid)
if not folder_cid.startswith('topic|'):
continue
folder = self.unwrap_folder_content_id(folder_cid)
subfolder = self.unwrap_subfolder_subtopic_id(subfolder_sid)
if folder['annotator_id'] != ann_id:
continue
yield (folder['folder_id'], subfolder)
|
An unordered generator of parent subfolders for ``ident``.
``ident`` can either be a ``content_id`` or a tuple of
``(content_id, subtopic_id)``.
Parent subfolders are limited to the annotator id given.
:param ident: identifier
:type ident: ``str`` or ``(str, str)``
:param str ann_id: Username
:rtype: generator of ``(folder_id, subfolder_id)``
|
def zoom_leftdown(self, event=None):
"""leftdown event handler for zoom mode"""
self.x_lastmove, self.y_lastmove = None, None
self.zoom_ini = (event.x, event.y, event.xdata, event.ydata)
self.report_leftdown(event=event)
|
leftdown event handler for zoom mode
|
def make_ranges(cls, lines):
"""Convert list of lines into list of line range tuples.
Will only be called if there are one or more entries in the list. Single
lines will be converted into a tuple with the same start and end line.
"""
start_line = last_line = lines.pop(0)
ranges = []
for line in lines:
if line == (last_line + 1):
last_line = line
else:
ranges.append((start_line, last_line))
start_line = line
last_line = line
ranges.append((start_line, last_line))
return ranges
|
Convert list of lines into list of line range tuples.
Will only be called if there are one or more entries in the list. Single
lines will be converted into a tuple with the same start and end line.
|
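Expected behaviour on a small sorted list of line numbers; make_ranges takes cls as its first argument (presumably a classmethod whose decorator is not shown), and it mutates its input via pop, so a throwaway list and a placeholder cls are used here.

lines = [1, 2, 3, 7, 8, 10]
# cls is unused by the body, so a placeholder works when calling the bare function.
print(make_ranges(None, lines))   # [(1, 3), (7, 8), (10, 10)]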
def get_access_token(tenant_id, application_id, application_secret):
'''get an Azure access token using the adal library.
Args:
tenant_id (str): Tenant id of the user's account.
application_id (str): Application id of a Service Principal account.
application_secret (str): Application secret (password) of the Service Principal account.
Returns:
An Azure authentication token string.
'''
context = adal.AuthenticationContext(
get_auth_endpoint() + tenant_id, api_version=None)
token_response = context.acquire_token_with_client_credentials(
get_resource_endpoint(), application_id, application_secret)
return token_response.get('accessToken')
|
get an Azure access token using the adal library.
Args:
tenant_id (str): Tenant id of the user's account.
application_id (str): Application id of a Service Principal account.
application_secret (str): Application secret (password) of the Service Principal account.
Returns:
An Azure authentication token string.
|
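A usage sketch with placeholder credentials; the ids and secret below are obviously not real, so the call only succeeds with genuine Service Principal credentials and network access.

token = get_access_token(
    tenant_id='00000000-0000-0000-0000-000000000000',
    application_id='11111111-1111-1111-1111-111111111111',
    application_secret='example-secret',
)
headers = {'Authorization': 'Bearer ' + token}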
def EnumKey(key, index):
"""This calls the Windows RegEnumKeyEx function in a Unicode safe way."""
regenumkeyex = advapi32["RegEnumKeyExW"]
regenumkeyex.restype = ctypes.c_long
regenumkeyex.argtypes = [
ctypes.c_void_p, ctypes.wintypes.DWORD, ctypes.c_wchar_p, LPDWORD,
LPDWORD, ctypes.c_wchar_p, LPDWORD,
ctypes.POINTER(FileTime)
]
buf = ctypes.create_unicode_buffer(257)
length = ctypes.wintypes.DWORD(257)
rc = regenumkeyex(key.handle, index, ctypes.cast(buf, ctypes.c_wchar_p),
ctypes.byref(length), LPDWORD(), ctypes.c_wchar_p(),
LPDWORD(),
ctypes.POINTER(FileTime)())
if rc != 0:
raise ctypes.WinError(2)
return ctypes.wstring_at(buf, length.value).rstrip(u"\x00")
|
This calls the Windows RegEnumKeyEx function in a Unicode safe way.
|
def do_pyscript(self, args: argparse.Namespace) -> bool:
"""Run a Python script file inside the console"""
script_path = os.path.expanduser(args.script_path)
py_return = False
# Save current command line arguments
orig_args = sys.argv
try:
# Overwrite sys.argv to allow the script to take command line arguments
sys.argv = [script_path] + args.script_arguments
# Run the script - use repr formatting to escape things which
# need to be escaped to prevent issues on Windows
py_return = self.do_py("run({!r})".format(script_path))
except KeyboardInterrupt:
pass
finally:
# Restore command line arguments to original state
sys.argv = orig_args
return py_return
|
Run a Python script file inside the console
|
def log(self, msg):
""" Log Normal Messages """
self._execActions('log', msg)
msg = self._execFilters('log', msg)
self._processMsg('log', msg)
self._sendMsg('log', msg)
|
Log Normal Messages
|
def human_uuid():
"""Returns a UUID that is suitable for use as a human-readable string."""
return base64.b32encode(
hashlib.sha1(uuid.uuid4().bytes).digest()).lower().strip('=')
|
Returns a UUID that is suitable for use as a human-readable string.
|
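On Python 3, base64.b32encode returns bytes, so calling .strip('=') with a str argument would fail; a hedged Python 3 variant of the same idea:

import base64
import hashlib
import uuid

def human_uuid_py3():
    # 20-byte SHA-1 digest -> exactly 32 base32 characters, so no '=' padding remains.
    digest = hashlib.sha1(uuid.uuid4().bytes).digest()
    return base64.b32encode(digest).decode('ascii').lower().rstrip('=')

print(human_uuid_py3())  # e.g. a 32-character lowercase base32 string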
def RECEIVE_MESSAGE(op):
'''
This is sample for implement BOT in LINE group
Invite your BOT to group, then BOT will auto accept your invitation
Commands available:
> hi
> /author
'''
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
try:
# Check content only text message
if msg.contentType == 0:
# Check only group chat
if msg.toType == 2:
# Chat checked request
line.sendChatChecked(receiver, msg_id)
# Get sender contact
contact = line.getContact(sender)
# Command list
if text.lower() == 'hi':
line.log('[%s] %s' % (contact.displayName, text))
line.sendMessage(receiver, 'Hi too! How are you?')
elif text.lower() == '/author':
line.log('[%s] %s' % (contact.displayName, text))
line.sendMessage(receiver, 'My author is linepy')
except Exception as e:
line.log("[RECEIVE_MESSAGE] ERROR : " + str(e))
|
This is sample for implement BOT in LINE group
Invite your BOT to group, then BOT will auto accept your invitation
Commands available:
> hi
> /author
|
def clip(self, lower=None, upper=None, axis=None, inplace=False,
*args, **kwargs):
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values. Thresholds
can be singular values or array like, and in the latter case
the clipping is performed element-wise in the specified axis.
Parameters
----------
lower : float or array_like, default None
Minimum threshold value. All values below this
threshold will be set to it.
upper : float or array_like, default None
Maximum threshold value. All values above this
threshold will be set to it.
axis : int or str axis name, optional
Align object with lower and upper along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with numpy.
Returns
-------
Series or DataFrame
Same type as calling object with the values outside the
clip boundaries replaced.
Examples
--------
>>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
>>> df = pd.DataFrame(data)
>>> df
col_0 col_1
0 9 -2
1 -3 -7
2 0 6
3 -1 8
4 5 -5
Clips per column using lower and upper thresholds:
>>> df.clip(-4, 6)
col_0 col_1
0 6 -2
1 -3 -4
2 0 6
3 -1 6
4 5 -4
Clips using specific lower and upper thresholds per column element:
>>> t = pd.Series([2, -4, -1, 6, 3])
>>> t
0 2
1 -4
2 -1
3 6
4 3
dtype: int64
>>> df.clip(t, t + 4, axis=0)
col_0 col_1
0 6 2
1 -3 -4
2 0 3
3 6 8
4 5 3
"""
if isinstance(self, ABCPanel):
raise NotImplementedError("clip is not supported yet for panels")
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = nv.validate_clip_with_axis(axis, args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
# GH 17276
# numpy doesn't like NaN as a clip value
# so ignore
# GH 19992
# numpy doesn't drop a list-like bound containing NaN
if not is_list_like(lower) and np.any(pd.isnull(lower)):
lower = None
if not is_list_like(upper) and np.any(pd.isnull(upper)):
upper = None
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
if is_scalar(lower) and is_scalar(upper):
lower, upper = min(lower, upper), max(lower, upper)
# fast-path for scalars
if ((lower is None or (is_scalar(lower) and is_number(lower))) and
(upper is None or (is_scalar(upper) and is_number(upper)))):
return self._clip_with_scalar(lower, upper, inplace=inplace)
result = self
if lower is not None:
result = result._clip_with_one_bound(lower, method=self.ge,
axis=axis, inplace=inplace)
if upper is not None:
if inplace:
result = self
result = result._clip_with_one_bound(upper, method=self.le,
axis=axis, inplace=inplace)
return result
|
Trim values at input threshold(s).
Assigns values outside boundary to boundary values. Thresholds
can be singular values or array like, and in the latter case
the clipping is performed element-wise in the specified axis.
Parameters
----------
lower : float or array_like, default None
Minimum threshold value. All values below this
threshold will be set to it.
upper : float or array_like, default None
Maximum threshold value. All values above this
threshold will be set to it.
axis : int or str axis name, optional
Align object with lower and upper along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with numpy.
Returns
-------
Series or DataFrame
Same type as calling object with the values outside the
clip boundaries replaced.
Examples
--------
>>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
>>> df = pd.DataFrame(data)
>>> df
col_0 col_1
0 9 -2
1 -3 -7
2 0 6
3 -1 8
4 5 -5
Clips per column using lower and upper thresholds:
>>> df.clip(-4, 6)
col_0 col_1
0 6 -2
1 -3 -4
2 0 6
3 -1 6
4 5 -4
Clips using specific lower and upper thresholds per column element:
>>> t = pd.Series([2, -4, -1, 6, 3])
>>> t
0 2
1 -4
2 -1
3 6
4 3
dtype: int64
>>> df.clip(t, t + 4, axis=0)
col_0 col_1
0 6 2
1 -3 -4
2 0 3
3 6 8
4 5 3
|
def has_table(self, name):
"""Return ``True`` if the table *name* exists in the database."""
return len(self.sql("SELECT name FROM sqlite_master WHERE type='table' AND name=?",
parameters=(name,), asrecarray=False, cache=False)) > 0
|
Return ``True`` if the table *name* exists in the database.
|
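The same existence check written directly against the sqlite3 standard library, as a standalone sketch independent of the class's sql helper.

import sqlite3

def sqlite_has_table(conn, name):
    # sqlite_master lists every table, index, view and trigger in the database.
    row = conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name=?", (name,)
    ).fetchone()
    return row is not None

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE demo (id INTEGER)')
print(sqlite_has_table(conn, 'demo'))      # True
print(sqlite_has_table(conn, 'missing'))   # False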
def get_assessment(self, assessment):
"""
Get an Assessment by id.
"""
response = self.http.get('/Assessment/' + str(assessment))
assessment = Schemas.Assessment(assessment=response)
return assessment
|
Get an Assessment by id.
|
def generateSetupFile(self, outpath='.', egg=False):
"""
Generates the setup file for this builder.
"""
outpath = os.path.abspath(outpath)
outfile = os.path.join(outpath, 'setup.py')
opts = {
'name': self.name(),
'distname': self.distributionName(),
'version': self.version(),
'author': self.author(),
'author_email': self.authorEmail(),
'keywords': self.keywords(),
'license': self.license(),
'brief': self.brief(),
'description': self.description(),
'url': self.companyUrl()
}
wrap_dict = lambda x: map(lambda k: "r'{0}': [{1}]".format(k[0],
',\n'.join(wrap_str(k[1]))),
x.items())
opts['dependencies'] = ',\n'.join(wrap_str(self.dependencies()))
opts['classifiers'] = ',\n'.join(wrap_str(self.classifiers()))
if os.path.isfile(self.sourcePath()):
basepath = os.path.normpath(os.path.dirname(self.sourcePath()))
else:
basepath = os.path.normpath(self.sourcePath())
self.generatePlugins(basepath)
exts = set()
for root, folders, files in os.walk(basepath):
for file_ in files:
_, ext = os.path.splitext(file_)
if ext not in ('.py', '.pyc', '.pyo'):
exts.add('*' + ext)
exts = list(exts)
text = templ.SETUPFILE.format(**opts)
# generate the file
if not os.path.exists(outfile):
f = open(outfile, 'w')
f.write(text)
f.close()
# generate the manifest file
manfile = os.path.join(outpath, 'MANIFEST.in')
if not os.path.exists(manfile):
f = open(manfile, 'w')
f.write('include *.md *.txt *.ini *.cfg *.rst\n')
f.write('recursive-include {0} {1}\n'.format(self.name(), ' '.join(exts)))
f.close()
# generate the egg
if egg:
cmd = 'cd {0} && $PYTHON setup.py bdist_egg'.format(outpath)
cmd = os.path.expandvars(cmd)
cmdexec(cmd)
|
Generates the setup file for this builder.
|
def validate_transaction_schema(tx):
"""Validate a transaction dict.
TX_SCHEMA_COMMON contains properties that are common to all types of
transaction. TX_SCHEMA_[TRANSFER|CREATE] add additional constraints on top.
"""
_validate_schema(TX_SCHEMA_COMMON, tx)
if tx['operation'] == 'TRANSFER':
_validate_schema(TX_SCHEMA_TRANSFER, tx)
else:
_validate_schema(TX_SCHEMA_CREATE, tx)
|
Validate a transaction dict.
TX_SCHEMA_COMMON contains properties that are common to all types of
transaction. TX_SCHEMA_[TRANSFER|CREATE] add additional constraints on top.
|
def recover_all(lbn, profile='default'):
'''
Set all the workers in lbn to recover, and activate them if they are not active.
CLI Examples:
.. code-block:: bash
salt '*' modjk.recover_all loadbalancer1
salt '*' modjk.recover_all loadbalancer1 other-profile
'''
ret = {}
config = get_running(profile)
try:
workers_ = config['worker.{0}.balance_workers'.format(lbn)].split(',')
except KeyError:
return ret
for worker in workers_:
curr_state = worker_status(worker, profile)
if curr_state['activation'] != 'ACT':
worker_activate(worker, lbn, profile)
if not curr_state['state'].startswith('OK'):
worker_recover(worker, lbn, profile)
ret[worker] = worker_status(worker, profile)
return ret
|
Set all the workers in lbn to recover, and activate them if they are not active.
CLI Examples:
.. code-block:: bash
salt '*' modjk.recover_all loadbalancer1
salt '*' modjk.recover_all loadbalancer1 other-profile
|
def admin(self, server=None):
"""
Get the admin information.
Optional arguments:
* server=None - Get admin information for server
instead of the current server.
"""
with self.lock:
if not server:
self.send('ADMIN')
else:
self.send('ADMIN %s' % server)
rvalue = []
while self.readable():
admin_ncodes = '257', '258', '259'
msg = self._recv(expected_replies=('256',) + admin_ncodes)
if msg[0] == '256':
pass
elif msg[0] in admin_ncodes:
rvalue.append(' '.join(msg[2:])[1:])
return rvalue
|
Get the admin information.
Optional arguments:
* server=None - Get admin information for server
instead of the current server.
|
def _empty_value(self, formattype):
'''
Returns the default empty value for the given format type.
:param formattype: the FormatType for which to return an empty value
'''
if formattype.value.idx <= FormatType.BIN_32.value.idx: # @UndefinedVariable
return b''
elif formattype.value.idx <= FormatType.FIXSTR.value.idx: # @UndefinedVariable
return ''
elif formattype.value.idx <= FormatType.INT_64.value.idx: # @UndefinedVariable
return 0
elif formattype.value.idx <= FormatType.UINT_64.value.idx: # @UndefinedVariable
return 0
elif(formattype is FormatType.FLOAT_32):
return float(0)
elif(formattype is FormatType.FLOAT_64):
return float(0)
|
returns default empty value
:param formattype:
:param buff:
:param start:
:param end:
|
def load(filename, **kwargs):
"""
Loads the given t7 file using default settings; kwargs are forwarded
to `T7Reader`.
"""
with open(filename, 'rb') as f:
reader = T7Reader(f, **kwargs)
return reader.read_obj()
|
Loads the given t7 file using default settings; kwargs are forwarded
to `T7Reader`.
|
def public_ip_address_create_or_update(name, resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
Create or update a public IP address within a specified resource group.
:param name: The name of the public IP address to create.
:param resource_group: The resource group name assigned to the
public IP address.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.public_ip_address_create_or_update test-ip-0 testgroup
'''
if 'location' not in kwargs:
rg_props = __salt__['azurearm_resource.resource_group_get'](
resource_group, **kwargs
)
if 'error' in rg_props:
log.error(
'Unable to determine location from resource group specified.'
)
return False
kwargs['location'] = rg_props['location']
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
pub_ip_model = __utils__['azurearm.create_object_model']('network', 'PublicIPAddress', **kwargs)
except TypeError as exc:
result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
return result
try:
ip = netconn.public_ip_addresses.create_or_update(
resource_group_name=resource_group,
public_ip_address_name=name,
parameters=pub_ip_model
)
ip.wait()
ip_result = ip.result()
result = ip_result.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
except SerializationError as exc:
result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
return result
|
.. versionadded:: 2019.2.0
Create or update a public IP address within a specified resource group.
:param name: The name of the public IP address to create.
:param resource_group: The resource group name assigned to the
public IP address.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.public_ip_address_create_or_update test-ip-0 testgroup
|
def backward_sampling(self, M, linear_cost=False, return_ar=False):
"""Generate trajectories using the FFBS (forward filtering backward
sampling) algorithm.
Arguments
---------
M: int
number of trajectories we want to generate
linear_cost: bool
if set to True, the O(N) version is used, see below.
return_ar: bool (default=False)
if set to True, change the output, see below.
Returns
-------
paths: a list of ndarrays
paths[t][m] is component t of trajectory m.
ar: float
the overall acceptance rate of the rejection procedure
Notes
-----
1. if ``linear_cost=False``, complexity is O(TMN); i.e. O(TN^2) for M=N;
if ``linear_cost=True``, complexity is O(T(M+N)), i.e. O(TN) for M=N.
This requires that model has method `upper_bound_trans`, which
provides the log of a constant C_t such that
:math:`p_t(x_t|x_{t-1}) \leq C_t`.
2. main output is ``paths``, a list of T arrays such that
``paths[t][m]`` is component t of trajectory m.
3. if ``linear_cost=True`` and ``return_ar=True``, output is tuple
``(paths, ar)``, where ``paths`` is as above, and ``ar`` is the overall
acceptance rate (of the rejection steps that choose the ancestors);
otherwise output is simply ``paths``.
"""
idx = np.empty((self.T, M), dtype=int)
idx[-1, :] = rs.multinomial(self.wgt[-1].W, M=M)
if linear_cost:
ar = self._backward_sampling_ON(M, idx)
else:
self._backward_sampling_ON2(M, idx)
# When M=1, we want a list of states, not a list of arrays containing
# one state
if M == 1:
idx = idx.squeeze(axis=1)
paths = [self.X[t][idx[t]] for t in range(self.T)]
if linear_cost and return_ar:
return (paths, ar)
else:
return paths
|
Generate trajectories using the FFBS (forward filtering backward
sampling) algorithm.
Arguments
---------
M: int
number of trajectories we want to generate
linear_cost: bool
if set to True, the O(N) version is used, see below.
return_ar: bool (default=False)
if set to True, change the output, see below.
Returns
-------
paths: a list of ndarrays
paths[t][m] is component t of trajectory m.
ar: float
the overall acceptance rate of the rejection procedure
Notes
-----
1. if ``linear_cost=False``, complexity is O(TMN); i.e. O(TN^2) for M=N;
if ``linear_cost=True``, complexity is O(T(M+N)), i.e. O(TN) for M=N.
This requires that model has method `upper_bound_trans`, which
provides the log of a constant C_t such that
:math:`p_t(x_t|x_{t-1}) \leq C_t`.
2. main output is ``paths``, a list of T arrays such that
``paths[t][m]`` is component t of trajectory m.
3. if ``linear_cost=True`` and ``return_ar=True``, output is tuple
``(paths, ar)``, where ``paths`` is as above, and ``ar`` is the overall
acceptance rate (of the rejection steps that choose the ancestors);
otherwise output is simply ``paths``.
|
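As a hedged illustration of the rejection idea behind ``linear_cost=True``: the sketch below draws a single backward trajectory for a toy Gaussian random-walk model, accepting a proposed ancestor j ~ W_t with probability p(x_{t+1}|x_t^j)/C_{t+1}. It is a schematic example, not the package's implementation.
import numpy as np

rng = np.random.default_rng(0)
sigma = 1.0
T, N = 5, 200

# toy "filtering" output: particles and normalised weights at each time step
X = [rng.normal(size=N) for _ in range(T)]
W = [np.full(N, 1.0 / N) for _ in range(T)]

def trans_pdf(x_next, x):
    return np.exp(-0.5 * ((x_next - x) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))

C = 1.0 / (sigma * np.sqrt(2 * np.pi))    # upper bound C_t on the transition density

idx = [0] * T
idx[-1] = rng.choice(N, p=W[-1])          # sample the final index from the final weights
for t in range(T - 2, -1, -1):
    while True:
        j = rng.choice(N, p=W[t])         # propose an ancestor from the filtering weights
        if rng.uniform() < trans_pdf(X[t + 1][idx[t + 1]], X[t][j]) / C:
            idx[t] = j                    # accept: j is a draw from the backward kernel
            break
path = [X[t][idx[t]] for t in range(T)]
print(path)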
def compare(molecules, ensemble_lookup, options):
"""
Compare two ensembles: compute AUC and enrichment-factor differences between them and optionally write ROC data and plots.
:param molecules:
:param ensemble_lookup:
:param options:
:return:
"""
print(" Analyzing differences ... ")
print('')
sort_order = classification.get_sort_order(molecules)
ensemble1 = sorted(ensemble_lookup.keys())[0]
ensemble2 = sorted(ensemble_lookup.keys())[1]
stats = {}
stats['header'] = [' ']
name = os.path.basename(ensemble1).replace('.csv', '')
stats['header'].append(name)
name = os.path.basename(ensemble2).replace('.csv', '')
stats['header'].append(name)
stats['header'].append('Difference')
stats['header'].append('95% CI')
stats['header'].append('p-value')
molecules1 = copy.deepcopy(molecules)
molecules2 = copy.deepcopy(molecules)
score_structure1 = classification.make_score_structure(molecules1, ensemble_lookup[ensemble1])
score_structure2 = classification.make_score_structure(molecules2, ensemble_lookup[ensemble2])
auc_structure_1 = classification.make_auc_structure(score_structure1)
auc_structure_2 = classification.make_auc_structure(score_structure2)
# calculate auc value differences
auc_diff = classification.calculate_auc_diff(auc_structure_1, auc_structure_2, sort_order)
stats['AUC'] = auc_diff
# calculate enrichment factor differences
fpfList = make_fpfList(options)
for fpf in fpfList:
fpf = float(fpf)
ef_structure1 = classification.make_ef_structure(score_structure1, fpf, sort_order)
ef_structure2 = classification.make_ef_structure(score_structure2, fpf, sort_order)
if ef_structure1 and ef_structure2:
ef_diff = classification.calculate_ef_diff(ef_structure1, ef_structure2, fpf)
title = 'E%s' % fpf
stats[title] = ef_diff
# write results summary
output.write_diff_summary(stats, options)
# write roc curves
if options.write_roc:
print(" Writing ROC data ... ")
print('')
output.write_roc(auc_structure_1, ensemble1, options)
output.write_roc(auc_structure_2, ensemble2, options)
# plot
if options.plot:
print(" Making plots ... ")
print('')
plotter(molecules, ensemble_lookup, options)
|
Compare two ensembles: compute AUC and enrichment-factor differences between them and optionally write ROC data and plots.
:param molecules:
:param ensemble_lookup:
:param options:
:return:
|
def execute_job(job, app=Injected, task_router=Injected):
# type: (Job, Zsl, TaskRouter) -> dict
"""Execute a job.
:param job: job to execute
:type job: Job
:param app: service application instance, injected
:type app: ServiceApplication
:param task_router: task router instance, injected
:type task_router: TaskRouter
:return: task result
:rtype: dict
"""
app.logger.info("Job fetched, preparing the task '{0}'.".format(job.path))
task, task_callable = task_router.route(job.path)
jc = JobContext(job, task, task_callable)
app.logger.info("Executing task.")
result = jc.task_callable(jc.task_data)
app.logger.info("Task {0} executed successfully.".format(job.path))
return {'task_name': job.path, 'data': result}
|
Execute a job.
:param job: job to execute
:type job: Job
:param app: service application instance, injected
:type app: ServiceApplication
:param task_router: task router instance, injected
:type task_router: TaskRouter
:return: task result
:rtype: dict
|
def load_categories(self, max_pages=30):
"""
Load all WordPress categories from the given site.
:param max_pages: kill counter to avoid infinite looping
:return: None
"""
logger.info("loading categories")
# clear them all out so we don't get dupes if requested
if self.purge_first:
Category.objects.filter(site_id=self.site_id).delete()
path = "sites/{}/categories".format(self.site_id)
params = {"number": 100}
page = 1
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
while response.ok and response.text and page < max_pages:
logger.info(" - page: %d", page)
api_categories = response.json().get("categories")
if not api_categories:
# we're done here
break
categories = []
for api_category in api_categories:
# if it exists locally, update local version if anything has changed
existing_category = Category.objects.filter(site_id=self.site_id, wp_id=api_category["ID"]).first()
if existing_category:
self.update_existing_category(existing_category, api_category)
else:
categories.append(self.get_new_category(api_category))
if categories:
Category.objects.bulk_create(categories)
elif not self.full:
# we're done here
break
# get next page
page += 1
params["page"] = page
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
return
|
Load all WordPress categories from the given site.
:param max_pages: kill counter to avoid infinite looping
:return: None
|
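The paging pattern above (fetch a page, stop on an empty batch, bump the page parameter) can be sketched independently of the WordPress client; the snippet below assumes a hypothetical JSON endpoint that returns a 'categories' list per page.
import requests

def fetch_all_categories(url, max_pages=30):
    items, page = [], 1
    params = {'number': 100, 'page': page}
    while page < max_pages:
        resp = requests.get(url, params=params)
        if not resp.ok or not resp.text:
            break                          # stop on HTTP errors or empty bodies
        batch = resp.json().get('categories')
        if not batch:
            break                          # an empty page means we reached the end
        items.extend(batch)
        page += 1
        params['page'] = page
    return items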
def p_additional_catches(p):
'''additional_catches : additional_catches CATCH LPAREN fully_qualified_class_name VARIABLE RPAREN LBRACE inner_statement_list RBRACE
| empty'''
if len(p) == 10:
p[0] = p[1] + [ast.Catch(p[4], ast.Variable(p[5], lineno=p.lineno(5)),
p[8], lineno=p.lineno(2))]
else:
p[0] = []
|
additional_catches : additional_catches CATCH LPAREN fully_qualified_class_name VARIABLE RPAREN LBRACE inner_statement_list RBRACE
| empty
|
def warning(self, text):
""" Ajout d'un message de log de type WARN """
self.logger.warning("{}{}".format(self.message_prefix, text))
|
Add a WARN-level log message
|
def _packed_data(self):
'''
Returns the bit-packed data extracted from the data file. This is not so useful to analyze.
Use the complex_data method instead.
'''
header = self.header()
packed_data = np.frombuffer(self.data, dtype=np.int8)\
.reshape((header['number_of_half_frames'], header['half_frame_bytes'])) # create array of half frames
packed_data = packed_data[::-1, constants.header_offset:] # slice out header and flip half frame order to reverse time ordering
packed_data = packed_data.reshape((header['number_of_half_frames']*(header['half_frame_bytes']- constants.header_offset))) # compact into vector
return packed_data
|
Returns the bit-packed data extracted from the data file. This is not so useful to analyze.
Use the complex_data method instead.
|
def check_base_required_attributes(self, dataset):
'''
Check the global required and highly recommended attributes for 1.1 templates. These checks go a step beyond
simply verifying that the attributes exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:Conventions = "CF-1.6" ; //......................................... REQUIRED - Always try to use latest value. (CF)
:Metadata_Conventions = "Unidata Dataset Discovery v1.0" ; //........ REQUIRED - Do not change. (ACDD)
:featureType = "timeSeries" ; //..................................... REQUIRED - CF attribute for identifying the featureType.
:cdm_data_type = "Station" ; //...................................... REQUIRED (ACDD)
:nodc_template_version = "NODC_NetCDF_TimeSeries_Orthogonal_Template_v1.1" ; //....... REQUIRED (NODC)
:standard_name_vocabulary = "NetCDF Climate and Forecast (CF) Metadata Convention Standard Name Table "X"" ; //........ REQUIRED - If using CF standard name attribute for variables. "X" denotes the table number (ACDD)
'''
test_ctx = TestCtx(BaseCheck.HIGH, 'Required global attributes')
conventions = getattr(dataset, 'Conventions', '')
metadata_conventions = getattr(dataset, 'Metadata_Conventions', '')
feature_type = getattr(dataset, 'featureType', '')
cdm_data_type = getattr(dataset, 'cdm_data_type', '')
standard_name_vocab = getattr(dataset, 'standard_name_vocabulary', '')
accepted_conventions = 'CF-1.6'
test_ctx.assert_true(conventions == accepted_conventions,
'Conventions attribute is missing or is not equal to CF-1.6: {}'.format(conventions))
test_ctx.assert_true(metadata_conventions == 'Unidata Dataset Discovery v1.0',
"Metadata_Conventions attribute is required to be 'Unidata Dataset Discovery v1.0': {}".format(metadata_conventions))
test_ctx.assert_true(feature_type in ['point', 'timeSeries', 'trajectory', 'profile', 'timeSeriesProfile', 'trajectoryProfile'],
'Feature type must be one of point, timeSeries, trajectory, profile, timeSeriesProfile, trajectoryProfile: {}'.format(feature_type))
test_ctx.assert_true(cdm_data_type.lower() in ['grid', 'image', 'point', 'radial', 'station', 'swath', 'trajectory'],
'cdm_data_type must be one of Grid, Image, Point, Radial, Station, Swath, Trajectory: {}'.format(cdm_data_type))
regex = re.compile(r'[sS]tandard [nN]ame [tT]able')
test_ctx.assert_true(regex.search(standard_name_vocab),
"standard_name_vocabulary doesn't contain 'Standard Name Table': {}".format(standard_name_vocab))
return test_ctx.to_result()
|
Check the global required and highly recommended attributes for 1.1 templates. These checks go a step beyond
simply verifying that the attributes exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:Conventions = "CF-1.6" ; //......................................... REQUIRED - Always try to use latest value. (CF)
:Metadata_Conventions = "Unidata Dataset Discovery v1.0" ; //........ REQUIRED - Do not change. (ACDD)
:featureType = "timeSeries" ; //..................................... REQUIRED - CF attribute for identifying the featureType.
:cdm_data_type = "Station" ; //...................................... REQUIRED (ACDD)
:nodc_template_version = "NODC_NetCDF_TimeSeries_Orthogonal_Template_v1.1" ; //....... REQUIRED (NODC)
:standard_name_vocabulary = "NetCDF Climate and Forecast (CF) Metadata Convention Standard Name Table "X"" ; //........ REQUIRED - If using CF standard name attribute for variables. "X" denotes the table number (ACDD)
|
def extract(filename_url_or_filelike):
"""A more precise algorithm over the original eatiht algorithm
"""
pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike)
#[iterable, cardinality, ttl across iterable, avg across iterable.])
calc_across_paths_textnodes(pars_tnodes)
avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes)
filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes
if parpath_tnodes[1][2] > avg]
paths = [path for path, tnode in filtered]
hist = get_xpath_frequencydistribution(paths)
try:
target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par]
target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes])
return target_text
except IndexError:
return ""
|
A more precise algorithm over the original eatiht algorithm
|
def list_devices(names=None, continue_from=None, **kwargs):
"""List devices in settings file and print versions"""
if not names:
names = [device for device, _type in settings.GOLDEN_DEVICES if _type == 'OpenThread']
if continue_from:
continue_from = names.index(continue_from)
else:
continue_from = 0
for port in names[continue_from:]:
try:
with OpenThreadController(port) as otc:
print('%s: %s' % (port, otc.version))
except:
logger.exception('failed to get version of %s' % port)
|
List devices in settings file and print versions
|
def QA_indicator_OSC(DataFrame, N=20, M=6):
"""变动速率线
震荡量指标OSC,也叫变动速率线。属于超买超卖类指标,是从移动平均线原理派生出来的一种分析指标。
它反应当日收盘价与一段时间内平均收盘价的差离值,从而测出股价的震荡幅度。
按照移动平均线原理,根据OSC的值可推断价格的趋势,如果远离平均线,就很可能向平均线回归。
"""
C = DataFrame['close']
OS = (C - MA(C, N)) * 100
MAOSC = EMA(OS, M)
DICT = {'OSC': OS, 'MAOSC': MAOSC}
return pd.DataFrame(DICT)
|
Rate-of-change oscillator (OSC)
The oscillator indicator OSC, also called the rate-of-change line, is an overbought/oversold indicator derived from the moving-average principle.
It measures the deviation of the current closing price from the average closing price over a period, and thus the amplitude of price oscillation.
Following the moving-average principle, the OSC value can be used to infer the price trend: when the price is far from the moving average, it is likely to revert toward it.
|
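A small worked example of the OSC formula on toy data; MA and EMA are approximated here with pandas rolling/ewm means rather than the library's own MA/EMA helpers.
import pandas as pd

close = pd.Series([10.0, 10.2, 10.1, 10.4, 10.6, 10.5, 10.8, 11.0, 10.9, 11.2])
N, M = 5, 3
osc = (close - close.rolling(N).mean()) * 100        # deviation from the N-period mean, scaled
maosc = osc.ewm(span=M, adjust=False).mean()          # smoothed oscillator
print(pd.DataFrame({'OSC': osc, 'MAOSC': maosc}))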
def setDateTimeEnd(self, dtime):
"""
Sets the ending date time for this gantt chart.
:param dtime | <QDateTime>
"""
self._dateEnd = dtime.date()
self._timeEnd = dtime.time()
self._allDay = False
|
Sets the ending date time for this gantt chart.
:param dtime | <QDateTime>
|
def list_objects_access(f):
"""Access to listObjects() controlled by settings.PUBLIC_OBJECT_LIST."""
@functools.wraps(f)
def wrapper(request, *args, **kwargs):
if not django.conf.settings.PUBLIC_OBJECT_LIST:
trusted(request)
return f(request, *args, **kwargs)
return wrapper
|
Access to listObjects() controlled by settings.PUBLIC_OBJECT_LIST.
|
def vertical_scroll(self, image, padding=True):
"""Returns a list of images which appear to scroll from top to bottom
down the input image when displayed on the LED matrix in order.
The input image is not limited to being 8x16. If the input image is
largerthan this, then all rows will be scrolled through but only the
left-most 8 columns of pixels will be displayed.
Keyword arguments:
image -- The image to scroll down.
padding -- If True, the animation will begin with a blank screen and the
input image will scroll into the blank screen one pixel row at a
time. Similarly, after scrolling down the whole input image, the end
of the image will scroll out of a blank screen one row at a time.
If this is not True, then only the input image will be scrolled down
without beginning or ending with "whitespace." (Default = True)
"""
image_list = list()
height = image.size[1]
# Scroll into the blank image.
if padding:
for y in range(16):
section = image.crop((0, 0, 8, y))
display_section = self.create_blank_image()
display_section.paste(section, (0, 8 - y, 8, 16))
image_list.append(display_section)
#Scroll across the input image.
for y in range(16, height + 1):
section = image.crop((0, y - 16, 8, y))
display_section = self.create_blank_image()
display_section.paste(section, (0, 0, 8, 16))
image_list.append(display_section)
#Scroll out, leaving the blank image.
if padding:
for y in range(height - 15, height + 1):
section = image.crop((0, y, 8, height))
display_section = self.create_blank_image()
display_section.paste(section, (0, 0, 8, 7 - (y - (height - 15))))
image_list.append(display_section)
#Return the list of images created
return image_list
|
Returns a list of images which appear to scroll from top to bottom
down the input image when displayed on the LED matrix in order.
The input image is not limited to being 8x16. If the input image is
largerthan this, then all rows will be scrolled through but only the
left-most 8 columns of pixels will be displayed.
Keyword arguments:
image -- The image to scroll down.
padding -- If True, the animation will begin with a blank screen and the
input image will scroll into the blank screen one pixel row at a
time. Similarly, after scrolling down the whole input image, the end
of the image will scroll out of a blank screen one row at a time.
If this is not True, then only the input image will be scrolled down
without beginning or ending with "whitespace." (Default = True)
|
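A standalone sketch of the middle ("scroll across the input image") phase using only Pillow, independent of the class above; the 8x32 all-white source image is a toy stand-in.
from PIL import Image

tall = Image.new('1', (8, 32), 1)                     # toy 8x32 source image, all pixels on
frames = []
height = tall.size[1]
for y in range(16, height + 1):
    frame = Image.new('1', (8, 16), 0)                # blank 8x16 display frame
    frame.paste(tall.crop((0, y - 16, 8, y)), (0, 0)) # copy a 16-row window of the source
    frames.append(frame)
print(len(frames))                                    # 17 frames for a 32-pixel-tall image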
def list_replace(iterable, src, dst):
"""
Thanks to "EyDu":
http://www.python-forum.de/viewtopic.php?f=1&t=34539 (de)
>>> list_replace([1,2,3], (1,2), "X")
['X', 3]
>>> list_replace([1,2,3,4], (2,3), 9)
[1, 9, 4]
>>> list_replace([1,2,3], (2,), [9,8])
[1, 9, 8, 3]
>>> list_replace([1,2,3,4,5], (2,3,4), "X")
[1, 'X', 5]
>>> list_replace([1,2,3,4,5], (4,5), "X")
[1, 2, 3, 'X']
>>> list_replace([1,2,3,4,5], (1,2), "X")
['X', 3, 4, 5]
>>> list_replace([1,2,3,3,3,4,5], (3,3), "X")
[1, 2, 'X', 3, 4, 5]
>>> list_replace([1,2,3,3,3,4,5], (3,3), ("A","B","C"))
[1, 2, 'A', 'B', 'C', 3, 4, 5]
>>> list_replace((58, 131, 73, 70), (58, 131), 131)
[131, 73, 70]
"""
result=[]
iterable=list(iterable)
try:
dst=list(dst)
except TypeError: # e.g.: int
dst=[dst]
src=list(src)
src_len=len(src)
index = 0
while index < len(iterable):
element = iterable[index:index+src_len]
# print element, src
if element == src:
result += dst
index += src_len
else:
result.append(iterable[index])
index += 1
return result
|
Thanks to "EyDu":
http://www.python-forum.de/viewtopic.php?f=1&t=34539 (de)
>>> list_replace([1,2,3], (1,2), "X")
['X', 3]
>>> list_replace([1,2,3,4], (2,3), 9)
[1, 9, 4]
>>> list_replace([1,2,3], (2,), [9,8])
[1, 9, 8, 3]
>>> list_replace([1,2,3,4,5], (2,3,4), "X")
[1, 'X', 5]
>>> list_replace([1,2,3,4,5], (4,5), "X")
[1, 2, 3, 'X']
>>> list_replace([1,2,3,4,5], (1,2), "X")
['X', 3, 4, 5]
>>> list_replace([1,2,3,3,3,4,5], (3,3), "X")
[1, 2, 'X', 3, 4, 5]
>>> list_replace([1,2,3,3,3,4,5], (3,3), ("A","B","C"))
[1, 2, 'A', 'B', 'C', 3, 4, 5]
>>> list_replace((58, 131, 73, 70), (58, 131), 131)
[131, 73, 70]
|
def liveReceivers(receivers):
"""Filter sequence of receivers to get resolved, live receivers
This is a generator which will iterate over
the passed sequence, checking for weak references
and resolving them, then returning all live
receivers.
"""
for receiver in receivers:
if isinstance( receiver, WEAKREF_TYPES):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
yield receiver
else:
yield receiver
|
Filter sequence of receivers to get resolved, live receivers
This is a generator which will iterate over
the passed sequence, checking for weak references
and resolving them, then returning all live
receivers.
|
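A small self-contained demonstration of the weak-reference dereferencing that the generator performs (WEAKREF_TYPES in the original presumably covers weakref.ref; the Handler class here is illustrative).
import weakref

class Handler:
    def __call__(self, sender):
        print('got signal from', sender)

strong = Handler()                          # stays referenced, so its weakref resolves
dead = Handler()
receivers = [weakref.ref(strong), weakref.ref(dead), lambda sender: None]
del dead                                    # on CPython the object is collected at once

live = []
for r in receivers:
    if isinstance(r, weakref.ref):
        r = r()                             # dereference; None means the target was collected
        if r is None:
            continue
    live.append(r)
print(len(live))                            # 2: the surviving Handler and the plain lambda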
def set_setpoint(self, setpointvalue):
"""Set the setpoint.
Args:
setpointvalue (float): Setpoint [most often in degrees]
"""
_checkSetpointValue( setpointvalue, self.setpoint_max )
self.write_register( 4097, setpointvalue, 1)
|
Set the setpoint.
Args:
setpointvalue (float): Setpoint [most often in degrees]
|
def fmt(self, po_file, mo_file):
"""将 po 文件转换成 mo 文件。
:param string po_file: 待转换的 po 文件路径。
:param string mo_file: 目标 mo 文件的路径。
"""
if not os.path.exists(po_file):
slog.error('The PO file [%s] is non-existent!'%po_file)
return
txt = subprocess.check_output([self._msgfmt,
'--check', "--strict", '--verbose',
"--output-file", mo_file, po_file],
stderr=subprocess.STDOUT,
universal_newlines=True)
slog.info(txt)
|
Convert a po file into a mo file.
:param string po_file: path of the po file to convert.
:param string mo_file: path of the target mo file.
|
def start(host, port, profiler_stats, dont_start_browser, debug_mode):
"""Starts HTTP server with specified parameters.
Args:
host: Server host name.
port: Server port.
profiler_stats: A dict with collected program stats.
dont_start_browser: If True, do not open the browser after profiling.
debug_mode: If False, stderr is redirected to /dev/null.
"""
stats_handler = functools.partial(StatsHandler, profiler_stats)
if not debug_mode:
sys.stderr = open(os.devnull, 'w')
print('Starting HTTP server...')
if not dont_start_browser:
webbrowser.open('http://{}:{}/'.format(host, port))
try:
StatsServer((host, port), stats_handler).serve_forever()
except KeyboardInterrupt:
print('Stopping...')
sys.exit(0)
|
Starts HTTP server with specified parameters.
Args:
host: Server host name.
port: Server port.
profiler_stats: A dict with collected program stats.
dont_start_browser: If True, do not open the browser after profiling.
debug_mode: If False, stderr is redirected to /dev/null.
|
def get_assessments_taken_by_banks(self, bank_ids):
"""Gets the list of ``AssessmentTaken`` objects corresponding to a list of ``Banks``.
arg: bank_ids (osid.id.IdList): list of bank ``Ids``
return: (osid.assessment.AssessmentTakenList) - list of
assessments taken
raise: NullArgument - ``bank_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resources_by_bins
assessment_taken_list = []
for bank_id in bank_ids:
assessment_taken_list += list(
self.get_assessments_taken_by_bank(bank_id))
return objects.AssessmentTakenList(assessment_taken_list)
|
Gets the list of ``AssessmentTaken`` objects corresponding to a list of ``Banks``.
arg: bank_ids (osid.id.IdList): list of bank ``Ids``
return: (osid.assessment.AssessmentTakenList) - list of
assessments taken
raise: NullArgument - ``bank_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
|
def get_driver(self, desired_capabilities=None):
"""
Creates a Selenium driver on the basis of the configuration file
upon which this object was created.
:param desired_capabilities: Capabilities that the caller
desires to override. These have priority over those
capabilities that are set by the configuration file passed
to the builder.
:type desired_capabilities: class:`dict`
:returns: A driver.
:raises ValueError: When it can't figure out how to create a
browser as specified by the BROWSER
configuration variable.
"""
override_caps = desired_capabilities or {}
desired_capabilities = \
self.config.make_selenium_desired_capabilities()
desired_capabilities.update(override_caps)
browser_string = self.config.browser
chromedriver_version = None
if self.remote:
driver = self.remote_service.build_driver(desired_capabilities)
# There is no equivalent for BrowserStack.
if browser_string == "CHROME" and \
self.remote_service.name == "saucelabs":
chromedriver_version = \
desired_capabilities.get("chromedriver-version", None)
if chromedriver_version is None:
raise ValueError(
"when using Chrome, you must set a "
"``chromedriver-version`` capability so that Selenic "
"can detect which version of Chromedriver will "
"be used.")
else:
if browser_string == "CHROME":
chromedriver_path = self.local_conf["CHROMEDRIVER_PATH"]
driver = webdriver.Chrome(
chromedriver_path,
chrome_options=self.local_conf.get("CHROME_OPTIONS"),
desired_capabilities=desired_capabilities,
service_log_path=self.local_conf["SERVICE_LOG_PATH"],
service_args=self.local_conf.get("SERVICE_ARGS"))
version_line = subprocess.check_output(
[chromedriver_path, "--version"])
version_str = re.match(ur"^ChromeDriver (\d+\.\d+)",
version_line).group(1)
chromedriver_version = StrictVersion(version_str)
elif browser_string == "FIREFOX":
profile = self.local_conf.get("FIREFOX_PROFILE") or \
FirefoxProfile()
binary = self.local_conf.get("FIREFOX_BINARY") or \
FirefoxBinary()
driver = webdriver.Firefox(profile, binary,
capabilities=desired_capabilities)
elif browser_string == "INTERNETEXPLORER":
driver = webdriver.Ie()
elif browser_string == "OPERA":
driver = webdriver.Opera()
else:
# SAFARI
# HTMLUNIT
# HTMLUNITWITHJS
# IPHONE
# IPAD
# ANDROID
# PHANTOMJS
raise ValueError("can't start a local " + browser_string)
# Check that what we get is what the config wanted...
driver_caps = NormalizedCapabilities(driver.desired_capabilities)
browser_version = \
re.sub(r"\..*$", "", driver_caps["browserVersion"])
if driver_caps["platformName"].upper() != self.config.platform:
raise ValueError("the platform you want is not the one "
"you are running selenic on")
if browser_version != self.config.version:
raise ValueError("the version installed is not the one "
"you wanted")
# On BrowserStack we cannot set the version of chromedriver or
# query it. So we make the reasonable assumption that the
# version of chromedriver is greater than 2.13. (There have
# been at least 7 releases after 2.13 at the time of writing.)
if (self.remote_service and
self.remote_service.name == "browserstack") or \
(chromedriver_version is not None and
chromedriver_version > StrictVersion("2.13")):
# We patch ActionChains.
chromedriver_element_center_patch()
# We need to mark the driver as needing the patch.
setattr(driver, CHROMEDRIVER_ELEMENT_CENTER_PATCH_FLAG, True)
driver = self.patch(driver)
return driver
|
Creates a Selenium driver on the basis of the configuration file
upon which this object was created.
:param desired_capabilities: Capabilities that the caller
desires to override. These have priority over those
capabilities that are set by the configuration file passed
to the builder.
:type desired_capabilities: class:`dict`
:returns: A driver.
:raises ValueError: When it can't figure out how to create a
browser as specified by the BROWSER
configuration variable.
|
def update_snapshots(self):
"""Update list of EBS Snapshots for the account / region
Returns:
`None`
"""
self.log.debug('Updating EBSSnapshots for {}/{}'.format(self.account.account_name, self.region))
ec2 = self.session.resource('ec2', region_name=self.region)
try:
existing_snapshots = EBSSnapshot.get_all(self.account, self.region)
snapshots = {x.id: x for x in ec2.snapshots.filter(OwnerIds=[self.account.account_number])}
for data in list(snapshots.values()):
if data.id in existing_snapshots:
snapshot = existing_snapshots[data.id]
if snapshot.update(data):
self.log.debug('Change detected for EBSSnapshot {}/{}/{}'.format(
self.account.account_name,
self.region,
snapshot.resource.resource_id
))
else:
properties = {
'create_time': data.start_time,
'encrypted': data.encrypted,
'kms_key_id': data.kms_key_id,
'state': data.state,
'state_message': data.state_message,
'volume_id': data.volume_id,
'volume_size': data.volume_size,
}
tags = {t['Key']: t['Value'] for t in data.tags or {}}
snapshot = EBSSnapshot.create(
data.id,
account_id=self.account.account_id,
location=self.region,
properties=properties,
tags=tags
)
self.log.debug('Added new EBSSnapshot {}/{}/{}'.format(
self.account.account_name,
self.region,
snapshot.resource.resource_id
))
db.session.commit()
vk = set(list(snapshots.keys()))
evk = set(list(existing_snapshots.keys()))
try:
for snapshotID in evk - vk:
db.session.delete(existing_snapshots[snapshotID].resource)
self.log.debug('Deleted EBSSnapshot {}/{}/{}'.format(
self.account.account_name,
self.region,
snapshotID
))
db.session.commit()
except:
self.log.exception('Failed removing deleted snapshots')
db.session.rollback()
finally:
del ec2
|
Update list of EBS Snapshots for the account / region
Returns:
`None`
|
def humanize_speed(c_per_sec):
"""convert a speed in counts per second to counts per [s, min, h, d], choosing the smallest value greater zero.
"""
scales = [60, 60, 24]
units = ['c/s', 'c/min', 'c/h', 'c/d']
speed = c_per_sec
i = 0
if speed > 0:
while (speed < 1) and (i < len(scales)):
speed *= scales[i]
i += 1
return "{:.1f}{}".format(speed, units[i])
|
convert a speed in counts per second to counts per [s, min, h, d], choosing the first unit for which the value is at least one.
|
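A few example calls (assuming the humanize_speed function above is in scope); expected outputs are shown as comments.
print(humanize_speed(2.5))        # '2.5c/s'
print(humanize_speed(0.5))        # '30.0c/min'  (0.5 counts/s * 60)
print(humanize_speed(1 / 3600))   # '1.0c/h'
print(humanize_speed(1 / 86400))  # '1.0c/d'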
def get_response(self, statement=None, **kwargs):
"""
Return the bot's response based on the input.
:param statement: A statement object or string.
:returns: A response to the input.
:rtype: Statement
:param additional_response_selection_parameters: Parameters to pass to the
chat bot's logic adapters to control response selection.
:type additional_response_selection_parameters: dict
:param persist_values_to_response: Values that should be saved to the response
that the chat bot generates.
:type persist_values_to_response: dict
"""
Statement = self.storage.get_object('statement')
additional_response_selection_parameters = kwargs.pop('additional_response_selection_parameters', {})
persist_values_to_response = kwargs.pop('persist_values_to_response', {})
if isinstance(statement, str):
kwargs['text'] = statement
if isinstance(statement, dict):
kwargs.update(statement)
if statement is None and 'text' not in kwargs:
raise self.ChatBotException(
'Either a statement object or a "text" keyword '
'argument is required. Neither was provided.'
)
if hasattr(statement, 'serialize'):
kwargs.update(**statement.serialize())
tags = kwargs.pop('tags', [])
text = kwargs.pop('text')
input_statement = Statement(text=text, **kwargs)
input_statement.add_tags(*tags)
# Preprocess the input statement
for preprocessor in self.preprocessors:
input_statement = preprocessor(input_statement)
# Make sure the input statement has its search text saved
if not input_statement.search_text:
input_statement.search_text = self.storage.tagger.get_bigram_pair_string(input_statement.text)
if not input_statement.search_in_response_to and input_statement.in_response_to:
input_statement.search_in_response_to = self.storage.tagger.get_bigram_pair_string(input_statement.in_response_to)
response = self.generate_response(input_statement, additional_response_selection_parameters)
# Update any response data that needs to be changed
if persist_values_to_response:
for response_key in persist_values_to_response:
response_value = persist_values_to_response[response_key]
if response_key == 'tags':
input_statement.add_tags(*response_value)
response.add_tags(*response_value)
else:
setattr(input_statement, response_key, response_value)
setattr(response, response_key, response_value)
if not self.read_only:
self.learn_response(input_statement)
# Save the response generated for the input
self.storage.create(**response.serialize())
return response
|
Return the bot's response based on the input.
:param statement: A statement object or string.
:returns: A response to the input.
:rtype: Statement
:param additional_response_selection_parameters: Parameters to pass to the
chat bot's logic adapters to control response selection.
:type additional_response_selection_parameters: dict
:param persist_values_to_response: Values that should be saved to the response
that the chat bot generates.
:type persist_values_to_response: dict
|
def publish_scene_add(self, scene_id, animation_id, name, color, velocity, config):
"""publish added scene"""
self.sequence_number += 1
self.publisher.send_multipart(msgs.MessageBuilder.scene_add(self.sequence_number, scene_id, animation_id, name, color, velocity, config))
return self.sequence_number
|
publish added scene
|
def replace_each(text, items, count=None, strip=False):
'''
Like ``replace``, where each occurrence in ``items`` is a 2-tuple of
``(old, new)`` pair.
'''
for a,b in items:
text = replace(text, a, b, count=count, strip=strip)
return text
|
Like ``replace``, where each occurrence in ``items`` is a 2-tuple of
``(old, new)`` pair.
|
def set_colors_in_grid(self, some_colors_in_grid):
"""Same as :meth:`set_color_in_grid` but with a collection of
colors in grid.
:param iterable some_colors_in_grid: a collection of colors in grid for
:meth:`set_color_in_grid`
"""
for color_in_grid in some_colors_in_grid:
self._set_pixel_and_convert_color(
color_in_grid.x, color_in_grid.y, color_in_grid.color)
|
Same as :meth:`set_color_in_grid` but with a collection of
colors in grid.
:param iterable some_colors_in_grid: a collection of colors in grid for
:meth:`set_color_in_grid`
|
def parse_geonames_data(lines_iterator):
"""
Parses countries table data from geonames.org, updating or adding records as needed.
currency_symbol is not part of the countries table and is supplemented using the data
obtained from the link provided in the countries table.
:type lines_iterator: collections.iterable
:return: num_updated: int, num_created: int
:raise GeonamesParseError:
"""
data_headers = []
num_created = 0
num_updated = 0
for line in lines_iterator:
line = line.decode()
if line[0] == "#":
if line[0:4] == "#ISO":
data_headers = line.strip('# ').split('\t')
if data_headers != DATA_HEADERS_ORDERED:
raise GeonamesParseError(
"The table headers do not match the expected headers.")
continue
if not data_headers:
raise GeonamesParseError("No table headers found.")
bits = line.split('\t')
data = {DATA_HEADERS_MAP[DATA_HEADERS_ORDERED[x]]: bits[x] for x in range(0, len(bits))}
if 'currency_code' in data and data['currency_code']:
data['currency_symbol'] = CURRENCY_SYMBOLS.get(data['currency_code'])
# Remove empty items
clean_data = {x: y for x, y in data.items() if y}
# Puerto Rico and the Dominican Republic have two phone prefixes in the format "123 and
# 456"
if 'phone' in clean_data:
if 'and' in clean_data['phone']:
clean_data['phone'] = ",".join(re.split(r'\s*and\s*', clean_data['phone']))
# Avoiding update_or_create to maintain compatibility with Django 1.5
try:
country = Country.objects.get(iso=clean_data['iso'])
created = False
except Country.DoesNotExist:
try:
country = Country.objects.create(**clean_data)
except ValidationError as e:
raise GeonamesParseError("Unexpected field length: %s" % e.message_dict)
created = True
for k, v in six.iteritems(clean_data):
setattr(country, k, v)
try:
country.save()
except ValidationError as e:
raise GeonamesParseError("Unexpected field length: %s" % e.message_dict)
if created:
num_created += 1
else:
num_updated += 1
return num_updated, num_created
|
Parses countries table data from geonames.org, updating or adding records as needed.
currency_symbol is not part of the countries table and is supplemented using the data
obtained from the link provided in the countries table.
:type lines_iterator: collections.iterable
:return: num_updated: int, num_created: int
:raise GeonamesParseError:
|
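For illustration, how a single tab-separated data line maps to the data/clean_data dicts built above; the header names are a toy subset of the real geonames columns.
headers = ['iso', 'iso3', 'name', 'currency_code', 'phone']    # toy subset of the real columns
line = 'US\tUSA\tUnited States\tUSD\t1'
bits = line.split('\t')
data = {headers[i]: bits[i] for i in range(len(bits))}
clean_data = {k: v for k, v in data.items() if v}              # drop empty fields
print(clean_data)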
def get_provider(name, creds):
"""
Generates and memoizes a :class:`~bang.providers.provider.Provider` object
for the given name.
:param str name: The provider name, as given in the config stanza. This
token is used to find the
appropriate :class:`~bang.providers.provider.Provider`.
:param dict creds: The credentials dictionary that is appropriate for the
desired provider. Typically, a sub-dict from the main stack config.
:rtype: :class:`~bang.providers.provider.Provider`
"""
p = _PROVIDERS.get(name)
if not p:
provider = PROVIDER_MAP.get(name)
if not provider:
if name == 'hpcloud':
print "## Warning - 'hpcloud' is not currently supported as" \
"a provider; use hpcloud_v12 or hpcloud_v13. See " \
"release notes."
raise Exception("No provider matches %s; check imports" % name)
p = provider(creds)
_PROVIDERS[name] = p
return p
|
Generates and memoizes a :class:`~bang.providers.provider.Provider` object
for the given name.
:param str name: The provider name, as given in the config stanza. This
token is used to find the
appropriate :class:`~bang.providers.provider.Provider`.
:param dict creds: The credentials dictionary that is appropriate for the
desired provider. Typically, a sub-dict from the main stack config.
:rtype: :class:`~bang.providers.provider.Provider`
|
def get_params_from_func(func: Callable, signature: Signature=None) -> Params:
"""Gets all parameters from a function signature.
:param func: The function to inspect.
:param signature: An inspect.Signature instance.
:returns: A named tuple containing information about all, optional,
required and logic function parameters.
"""
if signature is None:
# Check if the function already parsed the signature
signature = getattr(func, '_doctor_signature', None)
# Otherwise parse the signature
if signature is None:
signature = inspect.signature(func)
# Check if a `req_obj_type` was provided for the function. If so we should
# derive the parameters from that defined type instead of the signature.
if getattr(func, '_doctor_req_obj_type', None):
annotation = func._doctor_req_obj_type
all_params = list(annotation.properties.keys())
required = annotation.required
optional = list(set(all_params) - set(required))
else:
# Required is a positional argument with no default value and its
# annotation must sub class SuperType. This is so we don't try to
# require parameters passed to a logic function by a decorator that are
# not part of a request.
required = [key for key, p in signature.parameters.items()
if p.default == p.empty and
issubclass(p.annotation, SuperType)]
optional = [key for key, p in signature.parameters.items()
if p.default != p.empty]
all_params = [key for key in signature.parameters.keys()]
# Logic params are all parameters that are part of the logic signature.
logic_params = copy(all_params)
return Params(all_params, required, optional, logic_params)
|
Gets all parameters from a function signature.
:param func: The function to inspect.
:param signature: An inspect.Signature instance.
:returns: A named tuple containing information about all, optional,
required and logic function parameters.
|
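The required/optional split rests on inspect.signature; a minimal standalone example (it omits the SuperType annotation check that the real function applies to required parameters):
import inspect

def handler(user_id: int, limit: int = 10, verbose: bool = False):
    return user_id, limit, verbose

sig = inspect.signature(handler)
required = [name for name, p in sig.parameters.items() if p.default is p.empty]
optional = [name for name, p in sig.parameters.items() if p.default is not p.empty]
print(required)   # ['user_id']
print(optional)   # ['limit', 'verbose']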
def _over_resizer(self, x, y):
"Returns True if mouse is over a resizer"
over_resizer = False
c = self.canvas
ids = c.find_overlapping(x, y, x, y)
if ids:
o = ids[0]
tags = c.gettags(o)
if 'resizer' in tags:
over_resizer = True
return over_resizer
|
Returns True if mouse is over a resizer
|
def query(self, query):
'''Returns a sequence of objects matching criteria expressed in `query`'''
cursor = Cursor(query, self.shard_query_generator(query))
cursor.apply_order() # ordering sharded queries is expensive (no generator)
return cursor
|
Returns a sequence of objects matching criteria expressed in `query`
|
def graph_data_on_the_same_graph(list_of_plots, output_directory, resource_path, output_filename):
"""
graph_data_on_the_same_graph: put a list of plots on the same graph; currently only CDF plots are supported
"""
maximum_yvalue = -float('inf')
minimum_yvalue = float('inf')
plots = curate_plot_list(list_of_plots)
plot_count = len(plots)
if plot_count == 0:
return False, None
graph_height, graph_width, graph_title = get_graph_metadata(plots)
current_plot_count = 0
fig, axis = plt.subplots()
fig.set_size_inches(graph_width, graph_height)
if plot_count < 2:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET, right=CONSTANTS.SUBPLOT_RIGHT_OFFSET)
else:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET,
right=CONSTANTS.SUBPLOT_RIGHT_OFFSET - CONSTANTS.Y_AXIS_OFFSET * (plot_count - 2))
# Generate each plot on the graph
for plot in plots:
current_plot_count += 1
logger.info('Processing: ' + plot.input_csv + ' [ ' + output_filename + ' ]')
xval, yval = numpy.loadtxt(plot.input_csv, unpack=True, delimiter=',')
axis.plot(xval, yval, linestyle='-', marker=None, color=get_current_color(current_plot_count), label=plot.plot_label)
axis.legend()
maximum_yvalue = max(maximum_yvalue, numpy.amax(yval) * (1.0 + CONSTANTS.ZOOM_FACTOR * current_plot_count))
minimum_yvalue = min(minimum_yvalue, numpy.amin(yval) * (1.0 - CONSTANTS.ZOOM_FACTOR * current_plot_count))
# Set properties of the plots
axis.yaxis.set_ticks_position('left')
axis.set_xlabel(plots[0].x_label)
axis.set_ylabel(plots[0].y_label, fontsize=CONSTANTS.Y_LABEL_FONTSIZE)
axis.set_ylim([minimum_yvalue, maximum_yvalue])
axis.yaxis.grid(True)
axis.xaxis.grid(True)
axis.set_title(graph_title)
plot_file_name = os.path.join(output_directory, output_filename + ".png")
fig.savefig(plot_file_name)
plt.close()
# Create html fragment to be used for creation of the report
with open(os.path.join(output_directory, output_filename + '.div'), 'w') as div_file:
div_file.write('<a name="' + os.path.basename(plot_file_name).replace(".png", "").replace(".diff", "") + '"></a><div class="col-md-12"><img src="' +
resource_path + '/' + os.path.basename(plot_file_name) + '" id="' + os.path.basename(plot_file_name) +
'" width="100%" height="auto"/></div><div class="col-md-12"><p align=center>' + os.path.basename(plot_file_name) + '<br/></p></div>')
return True, os.path.join(output_directory, output_filename + '.div')
|
graph_data_on_the_same_graph: put a list of plots on the same graph; currently only CDF plots are supported
|
def match_variables(self, pattern, return_type='name'):
''' Return columns whose names match the provided regex pattern.
Args:
pattern (str): A regex pattern to match all variable names against.
return_type (str): What to return. Must be one of:
'name': Returns a list of names of matching variables.
'variable': Returns a list of Variable objects whose names
match.
'''
pattern = re.compile(pattern)
vars_ = [v for v in self.variables.values() if pattern.search(v.name)]
return vars_ if return_type.startswith('var') \
else [v.name for v in vars_]
|
Return columns whose names match the provided regex pattern.
Args:
pattern (str): A regex pattern to match all variable names against.
return_type (str): What to return. Must be one of:
'name': Returns a list of names of matching variables.
'variable': Returns a list of Variable objects whose names
match.
|
def m_seg(p1, p2, rad, dist):
""" move segment by distance
Args:
p1, p2: point(x, y)
rad: relative direction angle(radian)
dist: distance
Return:
translated segment(p1, p2)
"""
v = vector(p1, p2)
m = unit(rotate(v, rad), dist)
return translate(p1, m), translate(p2, m)
|
move segment by distance
Args:
p1, p2: point(x, y)
rad: relative direction angle(radian)
dist: distance
Return:
translated segment(p1, p2)
|
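A self-contained version of the same geometry with the helper functions (vector, rotate, unit, translate) inlined, for illustration only:
import math

def move_segment(p1, p2, rad, dist):
    vx, vy = p2[0] - p1[0], p2[1] - p1[1]          # segment direction vector
    c, s = math.cos(rad), math.sin(rad)
    rx, ry = vx * c - vy * s, vx * s + vy * c      # rotate the direction by rad
    norm = math.hypot(rx, ry)
    mx, my = rx / norm * dist, ry / norm * dist    # unit direction scaled to dist
    return (p1[0] + mx, p1[1] + my), (p2[0] + mx, p2[1] + my)

print(move_segment((0, 0), (1, 0), math.pi / 2, 2))   # segment shifted (approximately) 2 units along +y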
def _resolved_type(self):
"""Return the type for the columns, and a flag to indicate that the
column has codes."""
import datetime
self.type_ratios = {test: (float(self.type_counts[test]) / float(self.count)) if self.count else None
for test, testf in tests + [(None, None)]}
# If it is more than 5% str, it's a str
try:
if self.type_ratios.get(text_type,0) + self.type_ratios.get(binary_type,0) > .05:
if self.type_counts[text_type] > 0:
return text_type, False
elif self.type_counts[binary_type] > 0:
return binary_type, False
except TypeError as e:
# This is probably the result of the type being unknown
pass
if self.type_counts[datetime.datetime] > 0:
num_type = datetime.datetime
elif self.type_counts[datetime.date] > 0:
num_type = datetime.date
elif self.type_counts[datetime.time] > 0:
num_type = datetime.time
elif self.type_counts[float] > 0:
num_type = float
elif self.type_counts[int] > 0:
num_type = int
elif self.type_counts[text_type] > 0:
num_type = text_type
elif self.type_counts[binary_type] > 0:
num_type = binary_type
else:
num_type = unknown
if self.type_counts[binary_type] > 0 and num_type != binary_type:
has_codes = True
else:
has_codes = False
return num_type, has_codes
|
Return the type for the columns, and a flag to indicate that the
column has codes.
|
def _secondary_min(self):
"""Getter for the minimum series value"""
return (
self.secondary_range[0]
if (self.secondary_range
and self.secondary_range[0] is not None) else
(min(self._secondary_values) if self._secondary_values else None)
)
|
Getter for the minimum series value
|
def swap_deployment(self, service_name, production, source_deployment):
'''
Initiates a virtual IP swap between the staging and production
deployment environments for a service. If the service is currently
running in the staging environment, it will be swapped to the
production environment. If it is running in the production
environment, it will be swapped to staging.
service_name:
Name of the hosted service.
production:
The name of the production deployment.
source_deployment:
The name of the source deployment.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('production', production)
_validate_not_none('source_deployment', source_deployment)
return self._perform_post(self._get_hosted_service_path(service_name),
_XmlSerializer.swap_deployment_to_xml(
production, source_deployment),
as_async=True)
|
Initiates a virtual IP swap between the staging and production
deployment environments for a service. If the service is currently
running in the staging environment, it will be swapped to the
production environment. If it is running in the production
environment, it will be swapped to staging.
service_name:
Name of the hosted service.
production:
The name of the production deployment.
source_deployment:
The name of the source deployment.
|
def model(self):
"""Initialize and cache MigrationHistory model."""
MigrateHistory._meta.database = self.database
MigrateHistory._meta.table_name = self.migrate_table
MigrateHistory._meta.schema = self.schema
MigrateHistory.create_table(True)
return MigrateHistory
|
Initialize and cache MigrationHistory model.
|
def decryption(self, ciphertext, key):
"""
Builds a single cycle AES Decryption circuit
:param WireVector ciphertext: data to decrypt
:param WireVector key: AES key to use to encrypt (AES is symmetric)
:return: a WireVector containing the plaintext
"""
if len(ciphertext) != self._key_len:
raise pyrtl.PyrtlError("Ciphertext length is invalid")
if len(key) != self._key_len:
raise pyrtl.PyrtlError("key length is invalid")
key_list = self._key_gen(key)
t = self._add_round_key(ciphertext, key_list[10])
for round in range(1, 11):
t = self._inv_shift_rows(t)
t = self._sub_bytes(t, True)
t = self._add_round_key(t, key_list[10 - round])
if round != 10:
t = self._mix_columns(t, True)
return t
|
Builds a single cycle AES Decryption circuit
:param WireVector ciphertext: data to decrypt
:param WireVector key: AES key to use to encrypt (AES is symmetric)
:return: a WireVector containing the plaintext
|
def search(self, search_phrase, limit=None):
""" Finds datasets by search phrase.
Args:
search_phrase (str or unicode):
limit (int, optional): how many results to return. None means without limit.
Returns:
list of DatasetSearchResult instances.
"""
query, query_params = self._make_query_from_terms(search_phrase, limit=limit)
self._parsed_query = (str(query), query_params)
assert isinstance(query, TextClause)
datasets = {}
def make_result(vid=None, b_score=0, p_score=0):
res = DatasetSearchResult()
res.b_score = b_score
res.p_score = p_score
res.partitions = set()
res.vid = vid
return res
if query_params:
results = self.execute(query, **query_params)
for result in results:
vid, dataset_score = result
datasets[vid] = make_result(vid, b_score=dataset_score)
logger.debug('Extending datasets with partitions.')
for partition in self.backend.partition_index.search(search_phrase):
if partition.dataset_vid not in datasets:
datasets[partition.dataset_vid] = make_result(partition.dataset_vid)
datasets[partition.dataset_vid].p_score += partition.score
datasets[partition.dataset_vid].partitions.add(partition)
return list(datasets.values())
|
Finds datasets by search phrase.
Args:
search_phrase (str or unicode):
limit (int, optional): how many results to return. None means without limit.
Returns:
list of DatasetSearchResult instances.
|
def _scale_tile(self, value, width, height):
"""Return the prescaled tile if already exists, otherwise scale and store it."""
try:
return self._scale_cache[value, width, height]
except KeyError:
tile = pygame.transform.smoothscale(self.tiles[value], (width, height))
self._scale_cache[value, width, height] = tile
return tile
|
Return the prescaled tile if it already exists, otherwise scale and store it.
|
def parseFilteringOptions(cls, args, filterRead=None, storeQueryIds=False):
"""
Parse command line options (added in C{addSAMFilteringOptions}).
@param args: The command line arguments, as returned by
C{argparse.parse_args}.
@param filterRead: A one-argument function that accepts a read
and returns C{None} if the read should be omitted in filtering
or else a C{Read} instance.
@param storeQueryIds: If C{True}, query ids will be stored as the
SAM/BAM file is read.
@return: A C{SAMFilter} instance.
"""
referenceIds = (set(chain.from_iterable(args.referenceId))
if args.referenceId else None)
return cls(
args.samfile,
filterRead=filterRead,
referenceIds=referenceIds,
storeQueryIds=storeQueryIds,
dropUnmapped=args.dropUnmapped,
dropSecondary=args.dropSecondary,
dropSupplementary=args.dropSupplementary,
dropDuplicates=args.dropDuplicates,
keepQCFailures=args.keepQCFailures,
minScore=args.minScore,
maxScore=args.maxScore)
|
Parse command line options (added in C{addSAMFilteringOptions}).
@param args: The command line arguments, as returned by
C{argparse.parse_args}.
@param filterRead: A one-argument function that accepts a read
and returns C{None} if the read should be omitted in filtering
or else a C{Read} instance.
@param storeQueryIds: If C{True}, query ids will be stored as the
SAM/BAM file is read.
@return: A C{SAMFilter} instance.
|
def union(input, **params):
"""
Union transformation
:param input:
:param params:
:return:
"""
res = []
for col in input:
res.extend(input[col])
return res
|
Union transformation
:param input:
:param params:
:return:
|
def enumiter(self, other, rmax, bunch=100000):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
for r, i, j in A.enumiter(...):
...
where r is the distance, i and j are the original
input array index of the data.
This uses a thread to convert from KDNode.enum.
"""
def feeder(process):
self.enum(other, rmax, process, bunch)
for r, i, j in makeiter(feeder):
yield r, i, j
|
cross correlate with other, for all pairs
closer than rmax, iterate.
for r, i, j in A.enumiter(...):
...
where r is the distance, i and j are the original
input array index of the data.
This uses a thread to convert from KDNode.enum.
|
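makeiter is not shown above; a schematic sketch of how such a callback-to-generator bridge can be built with a worker thread and a queue (the toy feeder stands in for KDNode.enum):
import queue
import threading

def makeiter(feeder):
    q = queue.Queue(maxsize=4)
    done = object()                                  # sentinel marking end of stream
    def process(*args):
        q.put(args)                                  # callback invoked by the producer
    def run():
        try:
            feeder(process)
        finally:
            q.put(done)
    threading.Thread(target=run, daemon=True).start()
    while True:
        item = q.get()
        if item is done:
            return
        yield item

def toy_feeder(process):                             # stands in for self.enum(other, rmax, process, bunch)
    for k in range(3):
        process(0.1 * k, k, k + 1)

for r, i, j in makeiter(toy_feeder):
    print(r, i, j)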
def get_extended_attribute(value):
""" [CFWS] 1*extended_attrtext [CFWS]
This is like the non-extended version except we allow % characters, so that
we can pick up an encoded value as a single string.
"""
# XXX: should we have an ExtendedAttribute TokenList?
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in EXTENDED_ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_extended_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
|
[CFWS] 1*extended_attrtext [CFWS]
This is like the non-extended version except we allow % characters, so that
we can pick up an encoded value as a single string.
|
def updateRole(self, *args, **kwargs):
"""
Update Role
Update an existing role.
The caller's scopes must satisfy all of the new scopes being added, but
need not satisfy all of the role's existing scopes.
An update of a role that will generate an infinite expansion will result
in an error response.
This method takes input: ``v1/create-role-request.json#``
This method gives output: ``v1/get-role-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["updateRole"], *args, **kwargs)
|
Update Role
Update an existing role.
The caller's scopes must satisfy all of the new scopes being added, but
need not satisfy all of the role's existing scopes.
An update of a role that will generate an infinite expansion will result
in an error response.
This method takes input: ``v1/create-role-request.json#``
This method gives output: ``v1/get-role-response.json#``
This method is ``stable``
|