max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
wemake_python_styleguide/logic/variables.py | cmppoon/wemake-python-styleguide | 1 | 6625151 | <filename>wemake_python_styleguide/logic/variables.py
# -*- coding: utf-8 -*-
import ast
from typing import Union
VarDefinition = Union[ast.AST, ast.expr]
def _is_valid_single(node: VarDefinition) -> bool:
if isinstance(node, ast.Name):
return True
if isinstance(node, ast.Starred) and isinstance(node.value, ast.Name):
return True
return False
def is_valid_block_variable_definition(node: VarDefinition) -> bool:
"""Is used to check either block variables are correctly defined."""
if isinstance(node, ast.Tuple):
for var_definition in node.elts:
if not _is_valid_single(var_definition):
return False
return True
return _is_valid_single(node)
| <filename>wemake_python_styleguide/logic/variables.py
# -*- coding: utf-8 -*-
import ast
from typing import Union
VarDefinition = Union[ast.AST, ast.expr]
def _is_valid_single(node: VarDefinition) -> bool:
if isinstance(node, ast.Name):
return True
if isinstance(node, ast.Starred) and isinstance(node.value, ast.Name):
return True
return False
def is_valid_block_variable_definition(node: VarDefinition) -> bool:
"""Is used to check either block variables are correctly defined."""
if isinstance(node, ast.Tuple):
for var_definition in node.elts:
if not _is_valid_single(var_definition):
return False
return True
return _is_valid_single(node)
| en | 0.813389 | # -*- coding: utf-8 -*- Is used to check either block variables are correctly defined. | 3.271555 | 3 |
sklearn_pandas/dataframe_mapper.py | govorunov/sklearn-pandas | 0 | 6625152 | import contextlib
from datetime import datetime
import pandas as pd
import numpy as np
from scipy import sparse
from sklearn.base import BaseEstimator, TransformerMixin
from .cross_validation import DataWrapper
from .pipeline import make_transformer_pipeline, _call_fit, TransformerPipeline
from . import logger
string_types = text_type = str
def _handle_feature(fea):
"""
Convert 1-dimensional arrays to 2-dimensional column vectors.
"""
if len(fea.shape) == 1:
fea = np.array([fea]).T
return fea
def _build_transformer(transformers):
if isinstance(transformers, list):
transformers = make_transformer_pipeline(*transformers)
return transformers
def _build_feature(columns, transformers, options={}, X=None):
if X is None:
return (columns, _build_transformer(transformers), options)
return (
columns(X) if callable(columns) else columns,
_build_transformer(transformers),
options
)
def _elapsed_secs(t1):
return (datetime.now()-t1).total_seconds()
def _get_feature_names(estimator):
"""
Attempt to extract feature names based on a given estimator
"""
if hasattr(estimator, 'classes_'):
return estimator.classes_
elif hasattr(estimator, 'get_feature_names'):
return estimator.get_feature_names()
return None
@contextlib.contextmanager
def add_column_names_to_exception(column_names):
    # Prefix any escaping exception's message with the offending column
    # names so mapper errors point at the feature that caused them.
    # (Adapted from https://stackoverflow.com/a/17677938/356729)
    try:
        yield
    except Exception as ex:
        prefix = str(column_names)
        if ex.args:
            ex.args = ('{}: {}'.format(prefix, ex.args[0]),) + ex.args[1:]
        else:
            ex.args = (prefix,)
        raise
class DataFrameMapper(BaseEstimator, TransformerMixin):
    """
    Map pandas DataFrame column subsets to their own
    sklearn transformation.
    """

    def __init__(self, features, default=False, sparse=False, df_out=False,
                 input_df=False, drop_cols=None):
        """
        Params:

        features    list of ``(column-selector, transformer[, options])``
                    tuples.  The selector is a column name or a list of
                    names; the transformer is any object exposing the
                    sklearn transform interface (or a list of them,
                    applied as a pipeline); the optional options dict
                    tweaks the transformation, e.g.
                    ``{'alias': 'day_of_week'}``.

        default     transformer for columns not selected above.  False
                    (default) discards them, None passes them through
                    untouched, and any other transformer is applied to
                    all unselected columns as a single 2d-array.

        sparse      if True, return a sparse matrix whenever any
                    extracted feature is sparse.  Defaults to False.

        df_out      if True, return a pandas DataFrame whose columns are
                    named after the inputs that produced them.  Cannot
                    be combined with *default* or *sparse*.

        input_df    if True, hand the selected columns to transformers
                    as pandas objects instead of numpy arrays.
                    Defaults to False.

        drop_cols   list of columns to drop.  Defaults to None.
        """
        self.features = features
        self.default = default
        self.built_default = None
        self.sparse = sparse
        self.df_out = df_out
        self.input_df = input_df
        self.drop_cols = [] if drop_cols is None else drop_cols
        self.transformed_names_ = []

        # df_out cannot label columns of a sparse matrix, nor of the
        # catch-all default transformer's output.
        if df_out and (sparse or default):
            raise ValueError("Can not use df_out with sparse or default")
def _build(self, X=None):
"""
Build attributes built_features and built_default.
"""
if isinstance(self.features, list):
self.built_features = [
_build_feature(*f, X=X) for f in self.features
]
else:
self.built_features = _build_feature(*self.features, X=X)
self.built_default = _build_transformer(self.default)
@property
def _selected_columns(self):
"""
Return a set of selected columns in the feature list.
"""
selected_columns = set()
for feature in self.features:
columns = feature[0]
if isinstance(columns, list):
selected_columns = selected_columns.union(set(columns))
else:
selected_columns.add(columns)
return selected_columns
def _unselected_columns(self, X):
"""
Return list of columns present in X and not selected explicitly in the
mapper.
Unselected columns are returned in the order they appear in the
dataframe to avoid issues with different ordering during default fit
and transform steps.
"""
X_columns = list(X.columns)
return [column for column in X_columns if
column not in self._selected_columns
and column not in self.drop_cols]
def __setstate__(self, state):
# compatibility for older versions of sklearn-pandas
super().__setstate__(state)
self.features = [_build_feature(*feat) for feat in state['features']]
self.sparse = state.get('sparse', False)
self.default = state.get('default', False)
self.df_out = state.get('df_out', False)
self.input_df = state.get('input_df', False)
self.drop_cols = state.get('drop_cols', [])
self.built_features = state.get('built_features', self.features)
self.built_default = state.get('built_default', self.default)
self.transformed_names_ = state.get('transformed_names_', [])
def __getstate__(self):
state = super().__getstate__()
state['features'] = self.features
state['sparse'] = self.sparse
state['default'] = self.default
state['df_out'] = self.df_out
state['input_df'] = self.input_df
state['drop_cols'] = self.drop_cols
state['build_features'] = getattr(self, 'built_features', None)
state['built_default'] = self.built_default
state['transformed_names_'] = self.transformed_names_
return state
def _get_col_subset(self, X, cols, input_df=False):
"""
Get a subset of columns from the given table X.
X a Pandas dataframe; the table to select columns from
cols a string or list of strings representing the columns to select.
It can also be a callable that returns True or False, i.e.
compatible with the built-in filter function.
Returns a numpy array with the data from the selected columns
"""
if isinstance(cols, string_types):
return_vector = True
cols = [cols]
else:
return_vector = False
# Needed when using the cross-validation compatibility
# layer for sklearn<0.16.0.
# Will be dropped on sklearn-pandas 2.0.
if isinstance(X, list):
X = [x[cols] for x in X]
X = pd.DataFrame(X)
elif isinstance(X, DataWrapper):
X = X.df # fetch underlying data
if return_vector:
t = X[cols[0]]
else:
t = X[cols]
# return either a DataFrame/Series or a numpy array
if input_df:
return t
else:
return t.values
    def fit(self, X, y=None):
        """
        Fit a transformation from the pipeline

        X       the data to fit

        y       the target vector relative to X, optional
        """
        # Resolve callable selectors and list-pipelines against X first.
        self._build(X=X)

        for columns, transformers, options in self.built_features:
            t1 = datetime.now()
            # Per-feature option overrides the mapper-wide input_df flag.
            input_df = options.get('input_df', self.input_df)

            if transformers is not None:
                # Attach the column names to any exception raised while
                # fitting, so failures point at the offending feature.
                with add_column_names_to_exception(columns):
                    Xt = self._get_col_subset(X, columns, input_df)
                    _call_fit(transformers.fit, Xt, y)
            logger.info(f"[FIT] {columns}: {_elapsed_secs(t1)} secs")

        # handle features not explicitly selected
        if self.built_default:  # not False and not None
            unsel_cols = self._unselected_columns(X)
            with add_column_names_to_exception(unsel_cols):
                Xt = self._get_col_subset(X, unsel_cols, self.input_df)
                _call_fit(self.built_default.fit, Xt, y)
        return self
def get_names(self, columns, transformer, x, alias=None, prefix='',
suffix=''):
"""
Return verbose names for the transformed columns.
columns name (or list of names) of the original column(s)
transformer transformer - can be a TransformerPipeline
x transformed columns (numpy.ndarray)
alias base name to use for the selected columns
"""
if alias is not None:
name = alias
elif isinstance(columns, list):
name = '_'.join(map(str, columns))
else:
name = columns
num_cols = x.shape[1] if len(x.shape) > 1 else 1
output = []
if num_cols > 1:
# If there are as many columns as classes in the transformer,
# infer column names from classes names.
# If we are dealing with multiple transformers for these columns
# attempt to extract the names from each of them, starting from the
# last one
if isinstance(transformer, TransformerPipeline):
inverse_steps = transformer.steps[::-1]
estimators = (estimator for name, estimator in inverse_steps)
names_steps = (_get_feature_names(e) for e in estimators)
names = next((n for n in names_steps if n is not None), None)
# Otherwise use the only estimator present
else:
names = _get_feature_names(transformer)
if names is not None and len(names) == num_cols:
output = [f"{name}_{o}" for o in names]
# otherwise, return name concatenated with '_1', '_2', etc.
else:
output = [name + '_' + str(o) for o in range(num_cols)]
else:
output = [name]
if prefix == suffix == "":
return output
return ['{}{}{}'.format(prefix, x, suffix) for x in output]
def get_dtypes(self, extracted):
dtypes_features = [self.get_dtype(ex) for ex in extracted]
return [dtype for dtype_feature in dtypes_features
for dtype in dtype_feature]
def get_dtype(self, ex):
if isinstance(ex, np.ndarray) or sparse.issparse(ex):
return [ex.dtype] * ex.shape[1]
elif isinstance(ex, pd.DataFrame):
return list(ex.dtypes)
else:
raise TypeError(type(ex))
    def _transform(self, X, y=None, do_fit=False):
        """
        Transform the given data with possibility to fit in advance.
        Avoids code duplication for implementation of transform and
        fit_transform.
        """
        if do_fit:
            self._build(X=X)

        extracted = []
        transformed_names_ = []
        for columns, transformers, options in self.built_features:
            # Per-feature option overrides the mapper-wide input_df flag.
            input_df = options.get('input_df', self.input_df)

            # columns could be a string or list of
            # strings; we don't care because pandas
            # will handle either.
            Xt = self._get_col_subset(X, columns, input_df)
            if transformers is not None:
                with add_column_names_to_exception(columns):
                    # Prefer the (possibly optimized) fit_transform when
                    # fitting; otherwise fit and transform separately.
                    if do_fit and hasattr(transformers, 'fit_transform'):
                        t1 = datetime.now()
                        Xt = _call_fit(transformers.fit_transform, Xt, y)
                        logger.info(f"[FIT_TRANSFORM] {columns}: {_elapsed_secs(t1)} secs")  # NOQA
                    else:
                        if do_fit:
                            t1 = datetime.now()
                            _call_fit(transformers.fit, Xt, y)
                            logger.info(
                                f"[FIT] {columns}: {_elapsed_secs(t1)} secs")
                        t1 = datetime.now()
                        Xt = transformers.transform(Xt)
                        logger.info(f"[TRANSFORM] {columns}: {_elapsed_secs(t1)} secs")  # NOQA
            extracted.append(_handle_feature(Xt))

            alias = options.get('alias')
            prefix = options.get('prefix', '')
            suffix = options.get('suffix', '')
            transformed_names_ += self.get_names(
                columns, transformers, Xt, alias, prefix, suffix)

        # handle features not explicitly selected
        if self.built_default is not False:
            unsel_cols = self._unselected_columns(X)
            Xt = self._get_col_subset(X, unsel_cols, self.input_df)
            if self.built_default is not None:
                with add_column_names_to_exception(unsel_cols):
                    if do_fit and hasattr(self.built_default, 'fit_transform'):
                        Xt = _call_fit(self.built_default.fit_transform, Xt, y)
                    else:
                        if do_fit:
                            _call_fit(self.built_default.fit, Xt, y)
                        Xt = self.built_default.transform(Xt)
                transformed_names_ += self.get_names(
                    unsel_cols, self.built_default, Xt)
            else:
                # if not applying a default transformer,
                # keep column names unmodified
                transformed_names_ += unsel_cols
            extracted.append(_handle_feature(Xt))

        self.transformed_names_ = transformed_names_

        # combine the feature outputs into one array.
        # at this point we lose track of which features
        # were created from which input columns, so it's
        # assumed that that doesn't matter to the model.
        #
        # actually we lose track of columns during transformation
        # stage as generally n_input_features != n_transformed_features
        if self.df_out:  # Output as pandas DataFrame
            # output different data types, if appropriate
            # NOTE(review): get_dtypes returns one dtype per output
            # *column*, but it is zipped against per-feature arrays here;
            # alignment looks off when a feature yields multiple columns
            # — TODO confirm against multi-output transformers.
            dtypes = self.get_dtypes(extracted)
            extracted_df = [
                pd.DataFrame(data).astype(dtype)
                for data, dtype in zip(extracted, dtypes)
            ]
            # NOTE(review): for a plain-string selector '_'.join joins
            # its characters (e.g. 'abc' -> 'a_b_c') — verify intended.
            col_names = ['_'.join(feature[0])
                         for feature in self.built_features]
            df_out = pd.concat(extracted_df, keys=col_names, axis=1)
            # if no rows were dropped preserve the original index,
            # otherwise use a new integer one
            if len(X) == len(df_out):  # No rows dropped
                df_out.index = X.index
            return df_out
        else:  # Output as Numpy or sparse array
            # If any of the extracted features is sparse, combine sparsely.
            # Otherwise, combine as normal arrays.
            if any(sparse.issparse(fea) for fea in extracted):
                stacked = sparse.hstack(extracted).tocsr()
                # return a sparse matrix only if the mapper was initialized
                # with sparse=True
                if not self.sparse:
                    stacked = stacked.toarray()
            else:
                stacked = np.hstack(extracted)
            return stacked
def transform(self, X):
"""
Transform the given data. Assumes that fit has already been called.
X the data to transform
"""
return self._transform(X)
def fit_transform(self, X, y=None):
"""
Fit a transformation from the pipeline and directly apply
it to the given data.
X the data to fit
y the target vector relative to X, optional
"""
return self._transform(X, y, True)
| import contextlib
from datetime import datetime
import pandas as pd
import numpy as np
from scipy import sparse
from sklearn.base import BaseEstimator, TransformerMixin
from .cross_validation import DataWrapper
from .pipeline import make_transformer_pipeline, _call_fit, TransformerPipeline
from . import logger
string_types = text_type = str
def _handle_feature(fea):
"""
Convert 1-dimensional arrays to 2-dimensional column vectors.
"""
if len(fea.shape) == 1:
fea = np.array([fea]).T
return fea
def _build_transformer(transformers):
if isinstance(transformers, list):
transformers = make_transformer_pipeline(*transformers)
return transformers
def _build_feature(columns, transformers, options={}, X=None):
if X is None:
return (columns, _build_transformer(transformers), options)
return (
columns(X) if callable(columns) else columns,
_build_transformer(transformers),
options
)
def _elapsed_secs(t1):
return (datetime.now()-t1).total_seconds()
def _get_feature_names(estimator):
"""
Attempt to extract feature names based on a given estimator
"""
if hasattr(estimator, 'classes_'):
return estimator.classes_
elif hasattr(estimator, 'get_feature_names'):
return estimator.get_feature_names()
return None
@contextlib.contextmanager
def add_column_names_to_exception(column_names):
# Stolen from https://stackoverflow.com/a/17677938/356729
try:
yield
except Exception as ex:
if ex.args:
msg = u'{}: {}'.format(column_names, ex.args[0])
else:
msg = text_type(column_names)
ex.args = (msg,) + ex.args[1:]
raise
class DataFrameMapper(BaseEstimator, TransformerMixin):
"""
Map Pandas data frame column subsets to their own
sklearn transformation.
"""
def __init__(self, features, default=False, sparse=False, df_out=False,
input_df=False, drop_cols=None):
"""
Params:
features a list of tuples with features definitions.
The first element is the pandas column selector. This can
be a string (for one column) or a list of strings.
The second element is an object that supports
sklearn's transform interface, or a list of such objects
The third element is optional and, if present, must be
a dictionary with the options to apply to the
transformation. Example: {'alias': 'day_of_week'}
default default transformer to apply to the columns not
explicitly selected in the mapper. If False (default),
discard them. If None, pass them through untouched. Any
other transformer will be applied to all the unselected
columns as a whole, taken as a 2d-array.
sparse will return sparse matrix if set True and any of the
extracted features is sparse. Defaults to False.
df_out return a pandas data frame, with each column named using
the pandas column that created it (if there's only one
input and output) or the input columns joined with '_'
if there's multiple inputs, and the name concatenated with
'_1', '_2' etc if there's multiple outputs. NB: does not
work if *default* or *sparse* are true
input_df If ``True`` pass the selected columns to the transformers
as a pandas DataFrame or Series. Otherwise pass them as a
numpy array. Defaults to ``False``.
drop_cols List of columns to be dropped. Defaults to None.
"""
self.features = features
self.default = default
self.built_default = None
self.sparse = sparse
self.df_out = df_out
self.input_df = input_df
self.drop_cols = [] if drop_cols is None else drop_cols
self.transformed_names_ = []
if (df_out and (sparse or default)):
raise ValueError("Can not use df_out with sparse or default")
def _build(self, X=None):
"""
Build attributes built_features and built_default.
"""
if isinstance(self.features, list):
self.built_features = [
_build_feature(*f, X=X) for f in self.features
]
else:
self.built_features = _build_feature(*self.features, X=X)
self.built_default = _build_transformer(self.default)
@property
def _selected_columns(self):
"""
Return a set of selected columns in the feature list.
"""
selected_columns = set()
for feature in self.features:
columns = feature[0]
if isinstance(columns, list):
selected_columns = selected_columns.union(set(columns))
else:
selected_columns.add(columns)
return selected_columns
def _unselected_columns(self, X):
"""
Return list of columns present in X and not selected explicitly in the
mapper.
Unselected columns are returned in the order they appear in the
dataframe to avoid issues with different ordering during default fit
and transform steps.
"""
X_columns = list(X.columns)
return [column for column in X_columns if
column not in self._selected_columns
and column not in self.drop_cols]
def __setstate__(self, state):
# compatibility for older versions of sklearn-pandas
super().__setstate__(state)
self.features = [_build_feature(*feat) for feat in state['features']]
self.sparse = state.get('sparse', False)
self.default = state.get('default', False)
self.df_out = state.get('df_out', False)
self.input_df = state.get('input_df', False)
self.drop_cols = state.get('drop_cols', [])
self.built_features = state.get('built_features', self.features)
self.built_default = state.get('built_default', self.default)
self.transformed_names_ = state.get('transformed_names_', [])
def __getstate__(self):
state = super().__getstate__()
state['features'] = self.features
state['sparse'] = self.sparse
state['default'] = self.default
state['df_out'] = self.df_out
state['input_df'] = self.input_df
state['drop_cols'] = self.drop_cols
state['build_features'] = getattr(self, 'built_features', None)
state['built_default'] = self.built_default
state['transformed_names_'] = self.transformed_names_
return state
def _get_col_subset(self, X, cols, input_df=False):
"""
Get a subset of columns from the given table X.
X a Pandas dataframe; the table to select columns from
cols a string or list of strings representing the columns to select.
It can also be a callable that returns True or False, i.e.
compatible with the built-in filter function.
Returns a numpy array with the data from the selected columns
"""
if isinstance(cols, string_types):
return_vector = True
cols = [cols]
else:
return_vector = False
# Needed when using the cross-validation compatibility
# layer for sklearn<0.16.0.
# Will be dropped on sklearn-pandas 2.0.
if isinstance(X, list):
X = [x[cols] for x in X]
X = pd.DataFrame(X)
elif isinstance(X, DataWrapper):
X = X.df # fetch underlying data
if return_vector:
t = X[cols[0]]
else:
t = X[cols]
# return either a DataFrame/Series or a numpy array
if input_df:
return t
else:
return t.values
def fit(self, X, y=None):
"""
Fit a transformation from the pipeline
X the data to fit
y the target vector relative to X, optional
"""
self._build(X=X)
for columns, transformers, options in self.built_features:
t1 = datetime.now()
input_df = options.get('input_df', self.input_df)
if transformers is not None:
with add_column_names_to_exception(columns):
Xt = self._get_col_subset(X, columns, input_df)
_call_fit(transformers.fit, Xt, y)
logger.info(f"[FIT] {columns}: {_elapsed_secs(t1)} secs")
# handle features not explicitly selected
if self.built_default: # not False and not None
unsel_cols = self._unselected_columns(X)
with add_column_names_to_exception(unsel_cols):
Xt = self._get_col_subset(X, unsel_cols, self.input_df)
_call_fit(self.built_default.fit, Xt, y)
return self
def get_names(self, columns, transformer, x, alias=None, prefix='',
suffix=''):
"""
Return verbose names for the transformed columns.
columns name (or list of names) of the original column(s)
transformer transformer - can be a TransformerPipeline
x transformed columns (numpy.ndarray)
alias base name to use for the selected columns
"""
if alias is not None:
name = alias
elif isinstance(columns, list):
name = '_'.join(map(str, columns))
else:
name = columns
num_cols = x.shape[1] if len(x.shape) > 1 else 1
output = []
if num_cols > 1:
# If there are as many columns as classes in the transformer,
# infer column names from classes names.
# If we are dealing with multiple transformers for these columns
# attempt to extract the names from each of them, starting from the
# last one
if isinstance(transformer, TransformerPipeline):
inverse_steps = transformer.steps[::-1]
estimators = (estimator for name, estimator in inverse_steps)
names_steps = (_get_feature_names(e) for e in estimators)
names = next((n for n in names_steps if n is not None), None)
# Otherwise use the only estimator present
else:
names = _get_feature_names(transformer)
if names is not None and len(names) == num_cols:
output = [f"{name}_{o}" for o in names]
# otherwise, return name concatenated with '_1', '_2', etc.
else:
output = [name + '_' + str(o) for o in range(num_cols)]
else:
output = [name]
if prefix == suffix == "":
return output
return ['{}{}{}'.format(prefix, x, suffix) for x in output]
def get_dtypes(self, extracted):
dtypes_features = [self.get_dtype(ex) for ex in extracted]
return [dtype for dtype_feature in dtypes_features
for dtype in dtype_feature]
def get_dtype(self, ex):
if isinstance(ex, np.ndarray) or sparse.issparse(ex):
return [ex.dtype] * ex.shape[1]
elif isinstance(ex, pd.DataFrame):
return list(ex.dtypes)
else:
raise TypeError(type(ex))
def _transform(self, X, y=None, do_fit=False):
"""
Transform the given data with possibility to fit in advance.
Avoids code duplication for implementation of transform and
fit_transform.
"""
if do_fit:
self._build(X=X)
extracted = []
transformed_names_ = []
for columns, transformers, options in self.built_features:
input_df = options.get('input_df', self.input_df)
# columns could be a string or list of
# strings; we don't care because pandas
# will handle either.
Xt = self._get_col_subset(X, columns, input_df)
if transformers is not None:
with add_column_names_to_exception(columns):
if do_fit and hasattr(transformers, 'fit_transform'):
t1 = datetime.now()
Xt = _call_fit(transformers.fit_transform, Xt, y)
logger.info(f"[FIT_TRANSFORM] {columns}: {_elapsed_secs(t1)} secs") # NOQA
else:
if do_fit:
t1 = datetime.now()
_call_fit(transformers.fit, Xt, y)
logger.info(
f"[FIT] {columns}: {_elapsed_secs(t1)} secs")
t1 = datetime.now()
Xt = transformers.transform(Xt)
logger.info(f"[TRANSFORM] {columns}: {_elapsed_secs(t1)} secs") # NOQA
extracted.append(_handle_feature(Xt))
alias = options.get('alias')
prefix = options.get('prefix', '')
suffix = options.get('suffix', '')
transformed_names_ += self.get_names(
columns, transformers, Xt, alias, prefix, suffix)
# handle features not explicitly selected
if self.built_default is not False:
unsel_cols = self._unselected_columns(X)
Xt = self._get_col_subset(X, unsel_cols, self.input_df)
if self.built_default is not None:
with add_column_names_to_exception(unsel_cols):
if do_fit and hasattr(self.built_default, 'fit_transform'):
Xt = _call_fit(self.built_default.fit_transform, Xt, y)
else:
if do_fit:
_call_fit(self.built_default.fit, Xt, y)
Xt = self.built_default.transform(Xt)
transformed_names_ += self.get_names(
unsel_cols, self.built_default, Xt)
else:
# if not applying a default transformer,
# keep column names unmodified
transformed_names_ += unsel_cols
extracted.append(_handle_feature(Xt))
self.transformed_names_ = transformed_names_
# combine the feature outputs into one array.
# at this point we lose track of which features
# were created from which input columns, so it's
# assumed that that doesn't matter to the model.
#
# actually we lose track of columns during transformation
# stage as generally n_input_features != n_transformed_features
if self.df_out: # Output as pandas DataFrame
# output different data types, if appropriate
dtypes = self.get_dtypes(extracted)
extracted_df = [
pd.DataFrame(data).astype(dtype)
for data, dtype in zip(extracted, dtypes)
]
col_names = ['_'.join(feature[0])
for feature in self.built_features]
df_out = pd.concat(extracted_df, keys=col_names, axis=1)
# if no rows were dropped preserve the original index,
# otherwise use a new integer one
if len(X) == len(df_out): # No rows dropped
df_out.index = X.index
return df_out
else: # Output as Numpy or sparse array
# If any of the extracted features is sparse, combine sparsely.
# Otherwise, combine as normal arrays.
if any(sparse.issparse(fea) for fea in extracted):
stacked = sparse.hstack(extracted).tocsr()
# return a sparse matrix only if the mapper was initialized
# with sparse=True
if not self.sparse:
stacked = stacked.toarray()
else:
stacked = np.hstack(extracted)
return stacked
def transform(self, X):
"""
Transform the given data. Assumes that fit has already been called.
X the data to transform
"""
return self._transform(X)
def fit_transform(self, X, y=None):
"""
Fit a transformation from the pipeline and directly apply
it to the given data.
X the data to fit
y the target vector relative to X, optional
"""
return self._transform(X, y, True)
| en | 0.817855 | Convert 1-dimensional arrays to 2-dimensional column vectors. Attempt to extract feature names based on a given estimator # Stolen from https://stackoverflow.com/a/17677938/356729 Map Pandas data frame column subsets to their own sklearn transformation. Params: features a list of tuples with features definitions. The first element is the pandas column selector. This can be a string (for one column) or a list of strings. The second element is an object that supports sklearn's transform interface, or a list of such objects The third element is optional and, if present, must be a dictionary with the options to apply to the transformation. Example: {'alias': 'day_of_week'} default default transformer to apply to the columns not explicitly selected in the mapper. If False (default), discard them. If None, pass them through untouched. Any other transformer will be applied to all the unselected columns as a whole, taken as a 2d-array. sparse will return sparse matrix if set True and any of the extracted features is sparse. Defaults to False. df_out return a pandas data frame, with each column named using the pandas column that created it (if there's only one input and output) or the input columns joined with '_' if there's multiple inputs, and the name concatenated with '_1', '_2' etc if there's multiple outputs. NB: does not work if *default* or *sparse* are true input_df If ``True`` pass the selected columns to the transformers as a pandas DataFrame or Series. Otherwise pass them as a numpy array. Defaults to ``False``. drop_cols List of columns to be dropped. Defaults to None. Build attributes built_features and built_default. Return a set of selected columns in the feature list. Return list of columns present in X and not selected explicitly in the mapper. Unselected columns are returned in the order they appear in the dataframe to avoid issues with different ordering during default fit and transform steps. 
# compatibility for older versions of sklearn-pandas Get a subset of columns from the given table X. X a Pandas dataframe; the table to select columns from cols a string or list of strings representing the columns to select. It can also be a callable that returns True or False, i.e. compatible with the built-in filter function. Returns a numpy array with the data from the selected columns # Needed when using the cross-validation compatibility # layer for sklearn<0.16.0. # Will be dropped on sklearn-pandas 2.0. # fetch underlying data # return either a DataFrame/Series or a numpy array Fit a transformation from the pipeline X the data to fit y the target vector relative to X, optional # handle features not explicitly selected # not False and not None Return verbose names for the transformed columns. columns name (or list of names) of the original column(s) transformer transformer - can be a TransformerPipeline x transformed columns (numpy.ndarray) alias base name to use for the selected columns # If there are as many columns as classes in the transformer, # infer column names from classes names. # If we are dealing with multiple transformers for these columns # attempt to extract the names from each of them, starting from the # last one # Otherwise use the only estimator present # otherwise, return name concatenated with '_1', '_2', etc. Transform the given data with possibility to fit in advance. Avoids code duplication for implementation of transform and fit_transform. # columns could be a string or list of # strings; we don't care because pandas # will handle either. # NOQA # NOQA # handle features not explicitly selected # if not applying a default transformer, # keep column names unmodified # combine the feature outputs into one array. # at this point we lose track of which features # were created from which input columns, so it's # assumed that that doesn't matter to the model. 
# # actually we lose track of columns during transformation # stage as generally n_input_features != n_transformed_features # Output as pandas DataFrame # output different data types, if appropriate # if no rows were dropped preserve the original index, # otherwise use a new integer one # No rows dropped # Output as Numpy or sparse array # If any of the extracted features is sparse, combine sparsely. # Otherwise, combine as normal arrays. # return a sparse matrix only if the mapper was initialized # with sparse=True Transform the given data. Assumes that fit has already been called. X the data to transform Fit a transformation from the pipeline and directly apply it to the given data. X the data to fit y the target vector relative to X, optional | 2.419929 | 2 |
src/trydjango/product/models.py | nickk2002/django-web-site | 0 | 6625153 | from django.db import models
# Create your models here.
class Product(models.Model):
    """A sellable product with a short title and an optional description."""

    # NOTE(review): max_length=10 is very short for a title — confirm intended.
    title = models.CharField(max_length = 10)
    description = models.TextField(blank = False,null=True)

    def __str__(self):
        # Human-readable representation (used by the Django admin and shell).
        return self.title
| from django.db import models
# Create your models here.
class Product(models.Model):
title = models.CharField(max_length = 10)
description = models.TextField(blank = False,null=True)
| en | 0.963489 | # Create your models here. | 2.232291 | 2 |
fastapi_users/router/auth.py | eltociear/fastapi-users | 0 | 6625154 | from fastapi import APIRouter, Depends, HTTPException, Response, status
from fastapi.security import OAuth2PasswordRequestForm
from fastapi_users import models
from fastapi_users.authentication import Authenticator, BaseAuthentication
from fastapi_users.manager import BaseUserManager, UserManagerDependency
from fastapi_users.router.common import ErrorCode
def get_auth_router(
    backend: BaseAuthentication,
    get_user_manager: UserManagerDependency[models.UC, models.UD],
    authenticator: Authenticator,
    requires_verification: bool = False,
) -> APIRouter:
    """Generate a router with login/logout routes for an authentication backend.

    :param backend: backend that builds the login response (and, when it
        supports it, the logout response).
    :param get_user_manager: dependency callable yielding the user manager.
    :param authenticator: resolves the currently authenticated user (logout).
    :param requires_verification: if True, unverified users cannot log in.
    """
    router = APIRouter()
    # Dependency for /logout: requires an active (and, optionally, verified) user.
    get_current_user = authenticator.current_user(
        active=True, verified=requires_verification
    )

    @router.post("/login", name="auth:login")
    async def login(
        response: Response,
        credentials: OAuth2PasswordRequestForm = Depends(),
        user_manager: BaseUserManager[models.UC, models.UD] = Depends(get_user_manager),
    ):
        user = await user_manager.authenticate(credentials)

        # Same error for unknown and inactive users, so the endpoint does not
        # leak which accounts exist.
        if user is None or not user.is_active:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=ErrorCode.LOGIN_BAD_CREDENTIALS,
            )
        if requires_verification and not user.is_verified:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=ErrorCode.LOGIN_USER_NOT_VERIFIED,
            )
        # Response shape (token body, cookie, ...) is backend-specific.
        return await backend.get_login_response(user, response, user_manager)

    # Only expose /logout when the backend implements a logout mechanism.
    if backend.logout:

        @router.post("/logout", name="auth:logout")
        async def logout(
            response: Response,
            user=Depends(get_current_user),
            user_manager: BaseUserManager[models.UC, models.UD] = Depends(
                get_user_manager
            ),
        ):
            return await backend.get_logout_response(user, response, user_manager)

    return router
| from fastapi import APIRouter, Depends, HTTPException, Response, status
from fastapi.security import OAuth2PasswordRequestForm
from fastapi_users import models
from fastapi_users.authentication import Authenticator, BaseAuthentication
from fastapi_users.manager import BaseUserManager, UserManagerDependency
from fastapi_users.router.common import ErrorCode
def get_auth_router(
backend: BaseAuthentication,
get_user_manager: UserManagerDependency[models.UC, models.UD],
authenticator: Authenticator,
requires_verification: bool = False,
) -> APIRouter:
"""Generate a router with login/logout routes for an authentication backend."""
router = APIRouter()
get_current_user = authenticator.current_user(
active=True, verified=requires_verification
)
@router.post("/login", name="auth:login")
async def login(
response: Response,
credentials: OAuth2PasswordRequestForm = Depends(),
user_manager: BaseUserManager[models.UC, models.UD] = Depends(get_user_manager),
):
user = await user_manager.authenticate(credentials)
if user is None or not user.is_active:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=ErrorCode.LOGIN_BAD_CREDENTIALS,
)
if requires_verification and not user.is_verified:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=ErrorCode.LOGIN_USER_NOT_VERIFIED,
)
return await backend.get_login_response(user, response, user_manager)
if backend.logout:
@router.post("/logout", name="auth:logout")
async def logout(
response: Response,
user=Depends(get_current_user),
user_manager: BaseUserManager[models.UC, models.UD] = Depends(
get_user_manager
),
):
return await backend.get_logout_response(user, response, user_manager)
return router
| en | 0.751958 | Generate a router with login/logout routes for an authentication backend. | 2.582062 | 3 |
Classy.Classifier/Classy.Classifier.ClassifierWrapper/classifier/views.py | undeadspez/classy | 0 | 6625155 | from django.http import HttpResponse, HttpResponseBadRequest, JsonResponse
from rest_framework.decorators import api_view
from classifier.logic import process_and_classify, IMAGE_SIZE
@api_view(['GET'])
def test5(request):
    """
    # Endpoint description:
    ## Params:
    `arg0::int`
    ## Returns:
    `arg0 + 5::int`
    """
    # Validate the query parameter explicitly instead of letting the missing
    # key / bad int surface as an unhandled exception (HTTP 500).
    try:
        arg0 = int(request.GET['arg0'])
    except KeyError:
        return HttpResponseBadRequest("Missing required query parameter <arg0>.")
    except ValueError:
        return HttpResponseBadRequest("<arg0> must be an integer.")
    return HttpResponse(arg0 + 5)
@api_view(['GET'])
def get_image_size(request):
    """Report the model's expected image geometry.

    Returns application/json such as
    ``{"width": 224, "height": 224, "channels": 3}``.  Only relevant for
    `classify_multiple` called with ``scale == false(0)``.
    """
    width, height = IMAGE_SIZE[0], IMAGE_SIZE[1]
    payload = {"width": width, "height": height, "channels": 3}
    return JsonResponse(payload)
@api_view(['POST'])
def classify_multiple(request):
    """Classify every image uploaded in the multipart form body.

    Query params:
        scale: one of ``true/1/false/0`` (case-insensitive, default "true").
            When false, images are assumed to already have the model's input
            size and are not resized by the server.

    Body: multipart/form-data with any number of image files (keys ignored).

    Returns application/json mapping each filename to its predicted class.
    Responds 422 when an image file is corrupted / cannot be read.
    """
    scale_arg = request.GET.get('scale', "True").lower()
    # Dispatch table instead of an if/elif chain over the accepted spellings.
    accepted = {"true": True, "1": True, "false": False, "0": False}
    if scale_arg not in accepted:
        return HttpResponseBadRequest(
            f"scale should be one of [true, 1, false, 0] (case-insensitive)\n"
            f"Yours was {scale_arg}")
    should_scale = accepted[scale_arg]
    return process_and_classify(request.FILES.values(), should_scale)
@api_view(['POST'])
def classify_single(request):
    """Classify a single image uploaded under the form key ``image``.

    Body: multipart/form-data with one image file under ``image``.

    Returns application/json mapping the filename to its predicted class.
    Responds 400 when the ``image`` field is missing and 422 when the file
    is corrupted / cannot be read.
    """
    image_file = request.FILES.get('image')
    if image_file is None:
        return HttpResponseBadRequest("<image> field not found in form.")
    # Single-file uploads are always rescaled to the model's input size.
    return process_and_classify([image_file], True)
| from django.http import HttpResponse, HttpResponseBadRequest, JsonResponse
from rest_framework.decorators import api_view
from classifier.logic import process_and_classify, IMAGE_SIZE
@api_view(['GET'])
def test5(request):
"""
# Endpoint description:
## Params:
`arg0::int`
## Returns:
`arg0 + 5::int`
"""
return HttpResponse(int(request.GET['arg0']) + 5)
@api_view(['GET'])
def get_image_size(request):
"""
# Endpoint description:
## Params:
## Returns:
JSON containing image spatial characteristics. Only important for `classify_multiple` with `scale == false(0)`.
**content-type**: application/json
**example**:
```
{
"width": 224,
"height": 224,
"channels": 3
}
```
"""
return JsonResponse({
"width": IMAGE_SIZE[0],
"height": IMAGE_SIZE[1],
"channels": 3
})
@api_view(['POST'])
def classify_multiple(request):
"""
# Endpoint description:
## Params:
`scale::Union[bool, int]`: Should be either `true/1` or `false/0` (case-insensitive). If `true/1`,
indicates that the images sent for classification are already of needed size and server should not resize them.
## Request Body:
Receives multiple image files in form-data. Keys are not important.
**content-type**: multipart/form-data
## Returns:
JSON containing a class predicted for each image.
**content-type**: application/json
**special status codes**:
- 422: Uprocessable entity, image file is corrupted/cannot be read. Returns the filename of the image file.
**example**:
```
{
"cat_picture.jpg": "Cat',
"bicycle_picture.jpg": "Mountain bike"
}
```
"""
scale_arg = request.GET.get('scale', "True").lower()
if scale_arg in ("true", "1"):
should_scale = True
elif scale_arg in ("false", "0"):
should_scale = False
else:
return HttpResponseBadRequest("scale should be one of [true, 1, false, 0] (case-insensitive)\n"
+ "Yours was {}".format(scale_arg))
image_files = request.FILES.values()
response = process_and_classify(image_files, should_scale)
return response
@api_view(['POST'])
def classify_single(request):
"""
# Endpoint description:
## Params:
## Request Body:
Receives a single image file in form-data with the key `"image"`.
**content-type**: multipart/form-data
## Returns:
JSON containing a class predicted for the image.
**content-type**: application/json
**special status codes**:
- 422: Uprocessable entity, image file is corrupted/cannot be read. Returns the filename of the image file.
**example**:
```
{
"dog_picture.jpg": "Dog',
}
```
"""
image_file = request.FILES.get('image', None)
if image_file is None:
return HttpResponseBadRequest("<image> field not found in form.")
response = process_and_classify([image_file], True)
return response
| en | 0.63829 | # Endpoint description: ## Params: `arg0::int` ## Returns: `arg0 + 5::int` # Endpoint description: ## Params: ## Returns: JSON containing image spatial characteristics. Only important for `classify_multiple` with `scale == false(0)`. **content-type**: application/json **example**: ``` { "width": 224, "height": 224, "channels": 3 } ``` # Endpoint description: ## Params: `scale::Union[bool, int]`: Should be either `true/1` or `false/0` (case-insensitive). If `true/1`, indicates that the images sent for classification are already of needed size and server should not resize them. ## Request Body: Receives multiple image files in form-data. Keys are not important. **content-type**: multipart/form-data ## Returns: JSON containing a class predicted for each image. **content-type**: application/json **special status codes**: - 422: Uprocessable entity, image file is corrupted/cannot be read. Returns the filename of the image file. **example**: ``` { "cat_picture.jpg": "Cat', "bicycle_picture.jpg": "Mountain bike" } ``` # Endpoint description: ## Params: ## Request Body: Receives a single image file in form-data with the key `"image"`. **content-type**: multipart/form-data ## Returns: JSON containing a class predicted for the image. **content-type**: application/json **special status codes**: - 422: Uprocessable entity, image file is corrupted/cannot be read. Returns the filename of the image file. **example**: ``` { "dog_picture.jpg": "Dog', } ``` | 2.40412 | 2 |
airac/__init__.py | scls19fr/python-airac | 1 | 6625156 | <filename>airac/__init__.py
import datetime
AIRAC_DELAY = {
    # Lead times of the AIRAC production chain relative to a cycle's
    # effective date (presumably the ICAO AIRAC publication schedule —
    # confirm against the authoritative source).
    "publication_date_major_changes": datetime.timedelta(days=56),
    "publication_date_normal": datetime.timedelta(days=42),
    "latest_delivery_date": datetime.timedelta(days=28),
    "cut_off_date": datetime.timedelta(days=20),
    "fms_data_production": datetime.timedelta(days=15),
    "delivery_to_operator": datetime.timedelta(days=7),
}
# AIRAC cycles form a fixed 28-day grid anchored at AIRAC_INITIAL_DATE.
AIRAC_INTERVAL = datetime.timedelta(days=28)
AIRAC_INITIAL_DATE = datetime.date(2015, 1, 8)
# NOTE(review): evaluated once at import time and unused in this module —
# looks like a leftover; confirm no external code imports `airac.date`
# before removing.
date = datetime.date.today()
def airac_date(date=None):
    """Return the start date of the AIRAC cycle containing *date*.

    :param date: any ``datetime.date``; defaults to today's date, evaluated
        at call time (the previous ``date=datetime.date.today()`` default was
        frozen at import time).
    :return: the ``datetime.date`` on which that 28-day cycle begins.
    """
    if date is None:
        date = datetime.date.today()
    # Python floor division rounds toward -infinity, so a single expression
    # covers dates before AIRAC_INITIAL_DATE as well.  This also fixes an
    # off-by-one in the old negative branch: a date exactly 28*k days before
    # the anchor is itself a cycle start and must map to itself, not to the
    # cycle before it.
    diff_cycle = (date - AIRAC_INITIAL_DATE).days // AIRAC_INTERVAL.days
    return AIRAC_INITIAL_DATE + diff_cycle * AIRAC_INTERVAL
def airac_first_cycle_date(year):
    # First cycle start falling in *year*: one interval after the cycle
    # containing Dec 31 of the previous year.
    return airac_date(datetime.date(year - 1, 12, 31)) + AIRAC_INTERVAL
def airac_last_cycle_date(year):
    # Last cycle start falling in *year*: the cycle containing Dec 31.
    return airac_date(datetime.date(year, 12, 31))
def airac_cycle_dates(year):
    """Yield every AIRAC cycle start date falling within *year*, in order."""
    current = airac_first_cycle_date(year)
    last = airac_last_cycle_date(year)
    while current <= last:
        yield current
        current += AIRAC_INTERVAL
def number_airac_cycles(year):
    """Return how many AIRAC cycles start in *year*."""
    # Count lazily instead of materializing the whole list.
    return sum(1 for _ in airac_cycle_dates(year))
def airac_cycle_tuple(date=None):
    """Return ``(year, cycle_number)`` for the AIRAC cycle containing *date*.

    Cycle numbers start at 1 for the first cycle of the year.  The default
    is today's date evaluated at call time (the old
    ``date=datetime.date.today()`` default was frozen at import time).
    """
    if date is None:
        date = datetime.date.today()
    date = airac_date(date)
    airac_year = date.year
    cycle = (date - airac_first_cycle_date(airac_year)).days // AIRAC_INTERVAL.days + 1
    return (airac_year, cycle)
def airac_cycle(year, cycle):
    """Pack *year* and *cycle* into the numeric AIRAC ident YYCC."""
    return 100 * (year - 2000) + cycle
def airac_cycle_ident(date=None):
    """Return the numeric AIRAC ident (YYCC) of the cycle containing *date*.

    Defaults to today's date evaluated at call time (the old
    ``date=datetime.date.today()`` default was frozen at import time).
    """
    if date is None:
        date = datetime.date.today()
    year, cycle = airac_cycle_tuple(date)
    return airac_cycle(year, cycle)
class Airac:
    """One AIRAC cycle, identified by its start date.

    Instances are hashable, totally ordered by start date, and equal iff
    their start dates are equal.
    """

    def __init__(self, date=None):
        # None means "the cycle containing today", evaluated at call time
        # (the old ``date=datetime.date.today()`` default was frozen at
        # import time).
        if date is None:
            date = datetime.date.today()
        self.date = airac_date(date)
        airac_year, cycle = airac_cycle_tuple(self.date)
        self.year = airac_year
        self.cycle = cycle
        self.ident = airac_cycle(airac_year, cycle)

    def __repr__(self) -> str:
        return "<Airac %s %s>" % (self.ident, self.date)

    def __hash__(self) -> int:
        return hash(self.date)

    def __eq__(self, o: object) -> bool:
        # Compare dates, not hashes: hash equality does not imply equality,
        # and the old hash comparison also equated Airac with unrelated
        # objects whose hash happened to collide.
        if not isinstance(o, Airac):
            return NotImplemented
        return self.date == o.date

    def __lt__(self, o: object) -> bool:
        if not isinstance(o, Airac):
            return NotImplemented
        return self.date < o.date

    def __le__(self, o: object) -> bool:
        if not isinstance(o, Airac):
            return NotImplemented
        return self.date <= o.date

    def __gt__(self, o: object) -> bool:
        if not isinstance(o, Airac):
            return NotImplemented
        return self.date > o.date

    def __ge__(self, o: object) -> bool:
        if not isinstance(o, Airac):
            return NotImplemented
        return self.date >= o.date

    @staticmethod
    def from_year(year):
        """Return the first AIRAC cycle of *year*."""
        return Airac(airac_first_cycle_date(year))

    def move(self, cycles_number):
        """Return the cycle *cycles_number* intervals away (may be negative)."""
        return Airac(self.date + cycles_number * AIRAC_INTERVAL)

    def next(self):
        return self.move(1)

    def previous(self):
        return self.move(-1)

    @staticmethod
    def from_ident(ident):
        """Build an Airac from a numeric ident YYCC; raise ValueError if the
        cycle number does not exist in that year."""
        ident = int(ident)
        cycle = ident % 100
        airac_year = 2000 + ident // 100
        date = airac_first_cycle_date(airac_year) + (cycle - 1) * AIRAC_INTERVAL
        # A cycle number past the end of the year spills into the next year.
        if date.year != airac_year:
            raise ValueError("can't parse Airac ident %s" % ident)
        return Airac(date)
| <filename>airac/__init__.py
import datetime
AIRAC_DELAY = {
"publication_date_major_changes": datetime.timedelta(days=56),
"publication_date_normal": datetime.timedelta(days=42),
"latest_delivery_date": datetime.timedelta(days=28),
"cut_off_date": datetime.timedelta(days=20),
"fms_data_production": datetime.timedelta(days=15),
"delivery_to_operator": datetime.timedelta(days=7),
}
AIRAC_INTERVAL = datetime.timedelta(days=28)
AIRAC_INITIAL_DATE = datetime.date(2015, 1, 8)
date = datetime.date.today()
def airac_date(date=datetime.date.today()):
if date >= AIRAC_INITIAL_DATE:
diff_cycle = (date - AIRAC_INITIAL_DATE).days // AIRAC_INTERVAL.days
else:
diff_cycle = -((AIRAC_INITIAL_DATE - date).days // AIRAC_INTERVAL.days + 1)
return AIRAC_INITIAL_DATE + diff_cycle * AIRAC_INTERVAL
def airac_first_cycle_date(year):
return airac_date(datetime.date(year - 1, 12, 31)) + AIRAC_INTERVAL
def airac_last_cycle_date(year):
return airac_date(datetime.date(year, 12, 31))
def airac_cycle_dates(year):
start = airac_first_cycle_date(year)
stop = airac_last_cycle_date(year)
while start <= stop:
yield start
start += AIRAC_INTERVAL
def number_airac_cycles(year):
return len(list(airac_cycle_dates(year)))
def airac_cycle_tuple(date=datetime.date.today()):
date = airac_date(date)
airac_year = date.year
cycle = (date - airac_first_cycle_date(airac_year)).days // AIRAC_INTERVAL.days + 1
return (airac_year, cycle)
def airac_cycle(year, cycle):
return (year - 2000) * 100 + cycle
def airac_cycle_ident(date=datetime.date.today()):
t = airac_cycle_tuple(date)
return airac_cycle(t[0], t[1])
class Airac:
def __init__(self, date=datetime.date.today()):
self.date = airac_date(date)
airac_year, cycle = airac_cycle_tuple(date)
ident = airac_cycle(airac_year, cycle)
self.year = airac_year
self.cycle = cycle
self.ident = ident
def __repr__(self) -> str:
return "<Airac %s %s>" % (self.ident, self.date)
def __hash__(self) -> int:
return hash(self.date)
def __eq__(self, o: object) -> bool:
return hash(self) == hash(o)
def __lt__(self, o: object) -> bool:
return self.date < o.date
def __le__(self, o: object) -> bool:
return self.date <= o.date
def __gt__(self, o: object) -> bool:
return self.date > o.date
def __ge__(self, o: object) -> bool:
return self.date >= o.date
@staticmethod
def from_year(year):
return Airac(airac_first_cycle_date(year))
def move(self, cycles_number):
return Airac(self.date + cycles_number * AIRAC_INTERVAL)
def next(self):
return self.move(1)
def previous(self):
return self.move(-1)
@staticmethod
def from_ident(ident):
ident = int(ident)
cycle = ident % 100
airac_year = 2000 + ident // 100
date = airac_first_cycle_date(airac_year) + (cycle - 1) * AIRAC_INTERVAL
if date.year != airac_year:
raise ValueError("can't parse Airac ident %s" % ident)
return Airac(date)
| none | 1 | 2.654766 | 3 | |
functions/signs_and_figures.py | EUFAR/asmm-eufar | 0 | 6625157 | from reportlab.platypus import Flowable
class tick(Flowable):
    """A square checkbox of side *s* at (w1, h1), optionally drawn checked.

    :param w1, h1: top-left anchor of the box (the box extends right/down).
    :param s: side length of the box.
    :param t: 1 to draw the check mark, any other value for an empty box.
    :param w: stroke width of the check mark.
    :param color: stroke color of the check mark.
    """

    def __init__(self, w1, h1, s, t, w, color='black'):
        Flowable.__init__(self)
        self.w1 = w1
        self.h1 = h1
        self.s = s
        self.t = t
        self.w = w
        self.color = color

    def draw(self):
        s = float(self.s)
        # NOTE(review): reportlab's canvas.rect signature is
        # rect(x, y, width, height); passing (w1 + s, h1 - s) as the last two
        # arguments makes the box size depend on its position.  Looks like it
        # should be rect(self.w1, self.h1, s, -s) — confirm against rendered
        # output before changing.
        self.canv.rect(self.w1, self.h1, self.w1 + s, self.h1 - s)
        if self.t == 1:
            self.canv.setStrokeColor(self.color)
            self.canv.setLineWidth(self.w)
            # Two segments forming the check mark, proportional to the box.
            self.canv.line(self.w1+(s*0.15), self.h1-(s*0.35), self.w1+(s*0.35), self.h1-(s*0.15))
            self.canv.line(self.w1+(s*0.35), self.h1-(s*0.15), self.w1+(s*0.85), self.h1-(s*0.85))
class tick_2(Flowable):
    """A rectangular checkbox at (x1, y1), optionally drawn checked.

    :param x1, y1: anchor of the box.
    :param x2, y2: passed to canvas.rect as the last two arguments
        (reportlab treats them as width/height — the mark placement below
        uses |x1-x2| and |y1-y2| as the box extents; confirm callers agree).
    :param t: 1 to draw the check mark, any other value for an empty box.
    :param w: stroke width of the check mark.
    :param color: stroke color of the check mark.
    """

    def __init__(self, x1, y1, x2, y2, t, w, color='black'):
        Flowable.__init__(self)
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
        self.t = t
        self.w = w
        # Box extents used to scale the check-mark segments.
        self.d1 = float(abs(x1 - x2))
        self.d2 = float(abs(y1 - y2))
        self.color = color

    def draw(self):
        self.canv.rect(self.x1, self.y1, self.x2, self.y2)
        if self.t == 1:
            self.canv.setStrokeColor(self.color)
            self.canv.setLineWidth(self.w)
            # Two segments forming the check mark, proportional to the box.
            self.canv.line(self.x1 + (self.d1 * 0.15), self.y1 + (self.d2 * 0.55),
                           self.x1 + (self.d1 * 0.35), self.y1 + (self.d2 * 0.25))
            self.canv.line(self.x1 + (self.d1 * 0.35), self.y1 + (self.d2 * 0.25),
                           self.x1 + (self.d1 * 0.85), self.y1 + (self.d2 * 1.05))
class line(Flowable):
    """A straight stroke from (w1, h1) to (w2, h2) in the given color."""

    def __init__(self, w1, h1, w2, h2, color):
        Flowable.__init__(self)
        self.w1, self.h1 = w1, h1
        self.w2, self.h2 = w2, h2
        self.color = color

    def draw(self):
        canvas = self.canv
        canvas.setStrokeColor(self.color)
        canvas.line(self.w1, self.h1, self.w2, self.h2)
class square(Flowable):
    """A stroked rectangle anchored at (w1, h1), with stroke width s."""

    def __init__(self, w1, h1, w2, h2, s, color):
        Flowable.__init__(self)
        self.w1, self.h1 = w1, h1
        self.w2, self.h2 = w2, h2
        self.s = s
        self.color = color

    def draw(self):
        canvas = self.canv
        canvas.setStrokeColor(self.color)
        canvas.setLineWidth(self.s)
        canvas.rect(self.w1, self.h1, self.w2, self.h2)
class semi_square(Flowable):
    """Two strokes sharing the origin (wo, ho): one up to (wup, hup) and one
    left/down to (wl, hl), with stroke width s."""

    def __init__(self, wo, ho, wup, hup, wl, hl, s, color):
        Flowable.__init__(self)
        self.wo, self.ho = wo, ho
        self.wup, self.hup = wup, hup
        self.wl, self.hl = wl, hl
        self.s = s
        self.color = color

    def draw(self):
        canvas = self.canv
        canvas.setStrokeColor(self.color)
        canvas.setLineWidth(self.s)
        canvas.line(self.wo, self.ho, self.wup, self.hup)
        canvas.line(self.wo, self.ho, self.wl, self.hl)
| from reportlab.platypus import Flowable
class tick(Flowable):
def __init__(self, w1, h1, s, t, w, color='black'):
Flowable.__init__(self)
self.w1 = w1
self.h1 = h1
self.s = s
self.t = t
self.w = w
self.color = color
def draw(self):
s = float(self.s)
self.canv.rect(self.w1, self.h1, self.w1 + s, self.h1 - s)
if self.t == 1:
self.canv.setStrokeColor(self.color)
self.canv.setLineWidth(self.w)
self.canv.line(self.w1+(s*0.15), self.h1-(s*0.35), self.w1+(s*0.35), self.h1-(s*0.15))
self.canv.line(self.w1+(s*0.35), self.h1-(s*0.15), self.w1+(s*0.85), self.h1-(s*0.85))
class tick_2(Flowable):
def __init__(self, x1, y1, x2, y2, t, w, color='black'):
Flowable.__init__(self)
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.t = t
self.w = w
self.d1 = float(abs(x1 - x2))
self.d2 = float(abs(y1 - y2))
self.color = color
def draw(self):
self.canv.rect(self.x1, self.y1, self.x2, self.y2)
if self.t == 1:
self.canv.setStrokeColor(self.color)
self.canv.setLineWidth(self.w)
self.canv.line(self.x1 + (self.d1 * 0.15), self.y1 + (self.d2 * 0.55),
self.x1 + (self.d1 * 0.35), self.y1 + (self.d2 * 0.25))
self.canv.line(self.x1 + (self.d1 * 0.35), self.y1 + (self.d2 * 0.25),
self.x1 + (self.d1 * 0.85), self.y1 + (self.d2 * 1.05))
class line(Flowable):
def __init__(self, w1, h1, w2, h2, color):
Flowable.__init__(self)
self.w1 = w1
self.h1 = h1
self.w2 = w2
self.h2 = h2
self.color = color
def draw(self):
self.canv.setStrokeColor(self.color)
self.canv.line(self.w1, self.h1, self.w2, self.h2)
class square(Flowable):
def __init__(self, w1, h1, w2, h2, s, color):
Flowable.__init__(self)
self.w1 = w1
self.h1 = h1
self.w2 = w2
self.h2 = h2
self.s = s
self.color = color
def draw(self):
self.canv.setStrokeColor(self.color)
self.canv.setLineWidth(self.s)
self.canv.rect(self.w1, self.h1, self.w2, self.h2)
class semi_square(Flowable):
def __init__(self, wo, ho, wup, hup, wl, hl, s, color):
Flowable.__init__(self)
self.wo = wo
self.ho = ho
self.wup = wup
self.hup = hup
self.wl = wl
self.hl = hl
self.s = s
self.color = color
def draw(self):
self.canv.setStrokeColor(self.color)
self.canv.setLineWidth(self.s)
self.canv.line(self.wo, self.ho, self.wup, self.hup)
self.canv.line(self.wo, self.ho, self.wl, self.hl)
| none | 1 | 2.981061 | 3 | |
www/sitemaps.py | eyolfson/eyolfson.com | 0 | 6625158 | <reponame>eyolfson/eyolfson.com
from django.contrib import sitemaps
from django.urls import reverse
class StaticViewSitemap(sitemaps.Sitemap):
    """Sitemap entries for the site's static, named views."""

    protocol = 'https'

    def items(self):
        # Named URL patterns to include in the sitemap.
        return ['index']

    def location(self, item):
        # Resolve each named pattern to its URL.
        return reverse(item)
| from django.contrib import sitemaps
from django.urls import reverse
class StaticViewSitemap(sitemaps.Sitemap):
protocol = 'https'
def items(self):
return ['index']
def location(self, item):
return reverse(item) | none | 1 | 1.999149 | 2 | |
src/darknet53.py | Tshzzz/jinnan_yolo_baseline | 24 | 6625159 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: tshzzz
"""
import torch
import torch.nn as nn
import numpy as np
from src.layers import conv_block,residual_block
from src.utils import load_conv_bn
class darknet53(nn.Module):
    """Darknet-53 backbone (YOLOv3 feature extractor).

    ``forward`` returns the three intermediate feature maps consumed by the
    detection heads: after 3, 4 and 5 stride-2 convolutions respectively
    (spatial strides 8, 16 and 32).
    """

    def __init__(self, in_planes=3):
        super(darknet53, self).__init__()
        self.conv1 = conv_block(in_planes, 32, 3)
        self.conv2 = conv_block(32, 64, 3, stride=2, pad=1)
        self.block1 = residual_block(64, 64)

        self.conv3 = conv_block(64, 128, 3, stride=2, pad=1)
        self.block2 = nn.ModuleList(
            [residual_block(128, 128) for _ in range(2)])

        self.conv4 = conv_block(128, 256, 3, stride=2, pad=1)
        self.block3 = nn.ModuleList(
            [residual_block(256, 256) for _ in range(8)])

        self.conv5 = conv_block(256, 512, 3, stride=2, pad=1)
        self.block4 = nn.ModuleList(
            [residual_block(512, 512) for _ in range(8)])

        self.conv6 = conv_block(512, 1024, 3, stride=2, pad=1)
        self.block5 = nn.ModuleList(
            [residual_block(1024, 1024) for _ in range(4)])

    def load_part(self, buf, start, part):
        """Copy weights from ``buf`` (flat float32 array) into every
        Conv2d + BatchNorm2d pair found in ``part``, starting at offset
        ``start``.  Returns the offset past the consumed weights.

        Assumes the module order interleaves each Conv2d with the
        BatchNorm2d that follows it (Darknet layout).
        """
        for idx, m in enumerate(part.modules()):
            if isinstance(m, nn.Conv2d):
                conv = m
            if isinstance(m, nn.BatchNorm2d):
                bn = m
                start = load_conv_bn(buf, start, conv, bn)
        return start

    def load_weight(self, weight_file):
        """Load Darknet-format pretrained weights from ``weight_file``.

        No-op when ``weight_file`` is None.  Prints the final offset and the
        buffer length so a mismatch (wrong weight file) is visible.
        """
        if weight_file is None:
            return
        print("Load pretrained models !")
        # Fix: the file handle was previously opened and never closed.
        with open(weight_file, 'rb') as fp:
            # Skip the 5-int32 Darknet header (version info / seen counter).
            np.fromfile(fp, count=5, dtype=np.int32)
            buf = np.fromfile(fp, dtype=np.float32)
        start = 0
        # Parts must be loaded in the exact order they appear in the network.
        for part in (self.conv1, self.conv2, self.block1, self.conv3,
                     self.block2, self.conv4, self.block3, self.conv5,
                     self.block4, self.conv6, self.block5):
            start = self.load_part(buf, start, part)
        print(start, buf.shape[0])

    def forward(self, x):
        detect_feat = []
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.block1(out)
        out = self.conv3(out)
        for modu in self.block2:
            out = modu(out)
        out = self.conv4(out)
        for modu in self.block3:
            out = modu(out)
        detect_feat.append(out)  # after 3 stride-2 convs -> stride 8
        out = self.conv5(out)
        for modu in self.block4:
            out = modu(out)
        detect_feat.append(out)  # after 4 stride-2 convs -> stride 16
        out = self.conv6(out)
        for modu in self.block5:
            out = modu(out)
        detect_feat.append(out)  # after 5 stride-2 convs -> stride 32
        return detect_feat
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: tshzzz
"""
import torch
import torch.nn as nn
import numpy as np
from src.layers import conv_block,residual_block
from src.utils import load_conv_bn
class darknet53(nn.Module):
def __init__(self, in_planes=3):
super(darknet53, self).__init__()
self.conv1 = conv_block(in_planes, 32, 3)
self.conv2 = conv_block(32, 64, 3, stride=2, pad=1)
self.block1 = residual_block(64, 64)
self.conv3 = conv_block(64, 128, 3, stride=2, pad=1)
self.block2 = nn.ModuleList()
self.block2.append(residual_block(128, 128))
self.block2.append(residual_block(128, 128))
self.conv4 = conv_block(128, 256, 3, stride=2, pad=1)
self.block3 = nn.ModuleList()
for i in range(8):
self.block3.append(residual_block(256, 256))
self.conv5 = conv_block(256, 512, 3, stride=2, pad=1)
self.block4 = nn.ModuleList()
for i in range(8):
self.block4.append(residual_block(512, 512))
self.conv6 = conv_block(512, 1024, 3, stride=2, pad=1)
self.block5 = nn.ModuleList()
for i in range(4):
self.block5.append(residual_block(1024, 1024))
def load_part(self, buf, start, part):
for idx, m in enumerate(part.modules()):
if isinstance(m, nn.Conv2d):
conv = m
if isinstance(m, nn.BatchNorm2d):
bn = m
start = load_conv_bn(buf, start, conv, bn)
return start
def load_weight(self, weight_file):
if weight_file is not None:
print("Load pretrained models !")
fp = open(weight_file, 'rb')
header = np.fromfile(fp, count=5, dtype=np.int32)
header = torch.from_numpy(header)
buf = np.fromfile(fp, dtype=np.float32)
start = 0
start = self.load_part(buf, start, self.conv1)
start = self.load_part(buf, start, self.conv2)
start = self.load_part(buf, start, self.block1)
start = self.load_part(buf, start, self.conv3)
start = self.load_part(buf, start, self.block2)
start = self.load_part(buf, start, self.conv4)
start = self.load_part(buf, start, self.block3)
start = self.load_part(buf, start, self.conv5)
start = self.load_part(buf, start, self.block4)
start = self.load_part(buf, start, self.conv6)
start = self.load_part(buf, start, self.block5)
print(start, buf.shape[0])
def forward(self, x):
detect_feat = []
out = self.conv1(x)
out = self.conv2(out)
out = self.block1(out)
out = self.conv3(out)
for modu in self.block2:
out = modu(out)
out = self.conv4(out)
for modu in self.block3:
out = modu(out)
detect_feat.append(out)
out = self.conv5(out)
for modu in self.block4:
out = modu(out)
detect_feat.append(out)
out = self.conv6(out)
for modu in self.block5:
out = modu(out)
detect_feat.append(out)
return detect_feat | en | 0.300401 | #!/usr/bin/env python # -*- coding: utf-8 -*- @author: tshzzz | 2.25335 | 2 |
models.py | dogeplusplus/haiku-transformer | 0 | 6625160 | <filename>models.py
import jax
import typing as t
import haiku as hk
import numpy as np
import jax.numpy as jnp
from einops import rearrange, repeat, reduce
class SelfAttention(hk.Module):
    """Multi-head self-attention.

    ``k`` is the per-head key/query/value width, ``heads`` the number of
    heads.  ``__call__`` takes a (batch, tokens, features) array and returns
    ``(output, attention)`` where ``output`` is (batch, tokens, k) and
    ``attention`` holds the pre-softmax head scores
    (batch, heads, tokens, tokens) for visualization.
    """

    def __init__(self, k: int, heads: int):
        super().__init__()
        self.k = k
        self.heads = heads
        # One projection of width k per head, computed as a single Linear.
        self.to_queries = hk.Linear(k*heads, with_bias=False)
        self.to_keys = hk.Linear(k*heads, with_bias=False)
        self.to_values = hk.Linear(k*heads, with_bias=False)
        # Projects the concatenated head outputs back to width k.
        self.unify_heads = hk.Linear(k)

    def __call__(self, x: jnp.ndarray):
        h = self.heads
        k = self.k
        queries = self.to_queries(x)
        keys = self.to_keys(x)
        values = self.to_values(x)
        # Fold the head dimension into the batch so one batched matmul
        # handles all heads.
        queries = rearrange(queries, "b t (k h) -> (b h) t k", h=h)
        keys = rearrange(keys, "b t (k h) -> (b h) t k", h=h)
        values = rearrange(values, "b t (k h) -> (b h) t k", h=h)
        # Split the usual 1/sqrt(k) scaling across queries and keys
        # (k^-1/4 each) to keep intermediate magnitudes small.
        queries = queries / (k ** (1/4))
        keys = keys / (k ** (1/4))
        dot = jax.lax.batch_matmul(queries, rearrange(keys, "b t k -> b k t"))
        # send attention heads as additional output
        heads = rearrange(dot, "(b h) t k -> b h t k", h=h)
        dot = jax.nn.softmax(dot, axis=2)
        out = rearrange(jax.lax.batch_matmul(dot, values),
                        "(b h) t k -> b t (h k)", h=h)
        attention = self.unify_heads(out)
        return attention, heads
class TransformerBlock(hk.Module):
    """Transformer encoder block: self-attention + 2-layer MLP (4x widening),
    with layer norm and dropout.

    ``__call__`` returns ``(output, attention_heads)``; pass
    ``inference=True`` to disable dropout.
    """

    def __init__(self, k: int, heads: int, dropout: float):
        super().__init__()
        self.k = k
        self.heads = heads
        self.dropout = dropout
        self.attention = SelfAttention(self.k, self.heads)
        self.layer_norm_1 = hk.LayerNorm(
            axis=[-2, -1], create_scale=True, create_offset=True)
        self.linear_1 = hk.Linear(4*self.k)
        self.linear_2 = hk.Linear(self.k)
        self.layer_norm_2 = hk.LayerNorm(
            axis=[-2, -1], create_scale=True, create_offset=True)

    def __call__(self, x: jnp.ndarray, inference=False):
        # Dropout rate 0 at inference; hk.dropout with rate 0 is identity.
        dropout = 0. if inference else self.dropout
        # NOTE(review): x is overwritten by the attention output, so the
        # residual below is around the layer norm only — there is no skip
        # connection from the block *input* across the attention sublayer,
        # unlike the standard transformer.  Confirm this is intentional.
        x, heads = self.attention(x)
        x = self.layer_norm_1(x) + x
        key1 = hk.next_rng_key()
        key2 = hk.next_rng_key()
        forward = self.linear_1(x)
        forward = jax.nn.gelu(forward)
        forward = hk.dropout(key1, dropout, forward)
        forward = self.linear_2(forward)
        # Residual connection across the MLP, then the second layer norm.
        forward = self.layer_norm_2(forward + x)
        out = hk.dropout(key2, dropout, forward)
        return out, heads
class VisionTransformer(hk.Module):
    """ViT classifier: patch embedding + learned positional embedding + CLS
    token, a stack of ``depth`` transformer blocks, and a linear head.

    ``__call__`` takes a (batch, height, width, channels) image batch and
    returns ``(logits, attention_rollout)``.
    """

    def __init__(
        self,
        k,
        heads: int,
        depth: int,
        num_classes: int,
        patch_size: int,
        image_size: t.Tuple[int, int],
        dropout: float
    ):
        super().__init__()
        self.k = k
        self.heads = heads
        self.depth = depth
        self.num_classes = num_classes
        self.patch_size = patch_size
        self.image_size = image_size
        self.dropout = dropout
        # Patch embedding is just a dense layer mapping a flattened patch to another array
        self.token_emb = hk.Linear(self.k)
        self.blocks = [
            TransformerBlock(self.k, self.heads, dropout) for _ in range(self.depth)
        ]
        # NOTE: this bare Linear head is never called — it is immediately
        # shadowed by the Sequential below.  It is kept (not deleted) because
        # haiku names modules by creation order, so removing it would rename
        # the parameters of later modules and break existing checkpoints.
        self.classification = hk.Linear(self.num_classes)
        height, width = image_size
        # One position per patch, +1 for the prepended CLS token.
        num_patches = (height // patch_size) * (width // patch_size) + 1
        self.pos_emb = hk.Embed(vocab_size=num_patches, embed_dim=self.k)
        self.cls_token = hk.get_parameter(
            "cls", shape=[k], init=hk.initializers.RandomNormal())
        self.classification = hk.Sequential([
            hk.LayerNorm(axis=[-2, -1], create_scale=True, create_offset=True),
            hk.Linear(self.num_classes),
        ])

    def __call__(self, x, inference=False):
        dropout = 0. if inference else self.dropout
        batch_size = x.shape[0]
        # (B, H, W, C) -> (B, num_patches, patch_size*patch_size*C)
        x = rearrange(x, "b (h p1) (w p2) c -> b (h w) (p1 p2 c)",
                      p1=self.patch_size, p2=self.patch_size)
        tokens = self.token_emb(x)
        cls_token = repeat(self.cls_token, "k -> b 1 k", b=batch_size)
        combined_tokens = jnp.concatenate([cls_token, tokens], axis=1)
        positions = jnp.arange(combined_tokens.shape[1])
        pos_emb = self.pos_emb(positions)
        x = pos_emb + combined_tokens
        x = hk.dropout(hk.next_rng_key(), dropout, x)
        attention_heads = []
        for block in self.blocks:
            # Bug fix: propagate the inference flag so block-level dropout is
            # disabled at inference time (it previously always used the
            # training rate because the blocks' default is inference=False).
            x, heads = block(x, inference=inference)
            attention_heads.append(heads)
        rollout = attention_rollout(attention_heads, head_fusion="max", discard_ratio=0.5)
        # Classify from the CLS token only.
        x = x[:, 0]
        x = self.classification(x)
        return x, rollout
def attention_rollout(
    attention_heads: t.List[jnp.ndarray],
    head_fusion: str,
    discard_ratio: float = 0,
) -> jnp.ndarray:
    """Attention rollout (Abnar & Zuidema style): multiply fused per-block
    attention matrices together to estimate token-to-token attribution.

    :param attention_heads: one (batch, heads, tokens, tokens) array per block.
    :param head_fusion: "mean", "max" or "min" — how heads are combined.
    :param discard_ratio: fraction of the weakest attention entries zeroed
        per block (the CLS column is always kept).
    :return: the (batch, tokens, tokens) rollout matrix.

    NOTE(review): the per-image ``masks`` maps computed at the end are never
    returned — ``rollout`` is.  Either the last four lines are dead code or
    the function was meant to return ``masks``; callers currently consume
    ``rollout``, so this is flagged rather than changed.
    """
    batch, _, tokens, _ = attention_heads[0].shape
    rollout = repeat(jnp.eye(tokens), "h1 h2 -> b h1 h2", b=batch)
    # Multiply attention in each block together
    for attention in attention_heads:
        if head_fusion == "mean":
            attention_heads_fused = attention.mean(axis=1)
        elif head_fusion == "max":
            attention_heads_fused = attention.max(axis=1)
        elif head_fusion == "min":
            attention_heads_fused = attention.min(axis=1)
        else:
            raise ValueError("Attention head fusion type Not supported")
        if discard_ratio != 0:
            flat_attn = rearrange(attention_heads_fused, "b h w -> b (h w)")
            # Take the top percentile across the last axis
            threshold = jnp.percentile(flat_attn, (1 - discard_ratio) * 100, axis=-1, keepdims=True)
            # Mask to keep the class token
            cls_indices = np.zeros(flat_attn.shape)
            cls_indices[:, 0] = 1
            cls_indices = jnp.array(cls_indices)
            # Keep values that are in the top percentile or are the cls indices
            keep_mask = jnp.logical_or(flat_attn > threshold, cls_indices)
            flat_attn = jnp.where(keep_mask, flat_attn, 0)
            filtered_attn = rearrange(flat_attn, "b (h w) -> b h w", h=tokens, w=tokens)
        else:
            filtered_attn = attention_heads_fused
        # Compute attention rollout
        # Average with the identity to model the residual connection.
        identity = repeat(jnp.eye(tokens), "x y -> b x y", b=batch)
        a = (filtered_attn + 1.0 * identity) / 2
        # Normalize values over embedding axis
        a = a / reduce(a, "b h1 h2 -> b h1 1", "sum")
        rollout = jax.lax.batch_matmul(a, rollout)
    masks = rollout[:, 0, 1:]
    width = int((tokens - 1) ** 0.5)
    masks = rearrange(masks, "b (w1 w2) -> b w1 w2", w1=width, w2=width)
    masks = masks / reduce(masks, "b w1 w2 -> b 1 1", "max")
    return rollout
| <filename>models.py
import jax
import typing as t
import haiku as hk
import numpy as np
import jax.numpy as jnp
from einops import rearrange, repeat, reduce
class SelfAttention(hk.Module):
    """Multi-head self-attention that also exposes the raw attention logits.

    Each of ``heads`` heads attends in a ``k``-dimensional subspace; head
    outputs are concatenated and mixed back down to ``k`` features by a final
    linear layer.
    """

    def __init__(self, k: int, heads: int):
        super().__init__()
        self.k = k          # per-head embedding size
        self.heads = heads  # number of attention heads
        # Q/K/V projections produce all heads at once (k * heads features).
        self.to_queries = hk.Linear(k*heads, with_bias=False)
        self.to_keys = hk.Linear(k*heads, with_bias=False)
        self.to_values = hk.Linear(k*heads, with_bias=False)
        self.unify_heads = hk.Linear(k)

    def __call__(self, x: jnp.ndarray):
        """Apply self-attention to ``x``.

        Args:
            x: token batch; assumed shape (batch, tokens, k) — TODO confirm
               against callers.

        Returns:
            ``(attention, heads)``: the (batch, tokens, k) attended output and
            the pre-softmax attention logits with shape
            (batch, heads, tokens, tokens).
        """
        h = self.heads
        k = self.k
        queries = self.to_queries(x)
        keys = self.to_keys(x)
        values = self.to_values(x)
        # Fold the head dimension into the batch so a single batched matmul
        # computes all heads at once.
        queries = rearrange(queries, "b t (k h) -> (b h) t k", h=h)
        keys = rearrange(keys, "b t (k h) -> (b h) t k", h=h)
        values = rearrange(values, "b t (k h) -> (b h) t k", h=h)
        # Scale queries and keys by k**(1/4) each — together equivalent to the
        # usual 1/sqrt(k) scaling of the dot product.
        queries = queries / (k ** (1/4))
        keys = keys / (k ** (1/4))
        dot = jax.lax.batch_matmul(queries, rearrange(keys, "b t k -> b k t"))
        # send attention heads as additional output (captured before softmax)
        heads = rearrange(dot, "(b h) t k -> b h t k", h=h)
        dot = jax.nn.softmax(dot, axis=2)
        out = rearrange(jax.lax.batch_matmul(dot, values),
                        "(b h) t k -> b t (h k)", h=h)
        attention = self.unify_heads(out)
        return attention, heads
class TransformerBlock(hk.Module):
    """Transformer block: self-attention followed by a two-layer GELU MLP.

    Also returns the attention logits of its ``SelfAttention`` sublayer so
    callers can build attention rollouts.
    """

    def __init__(self, k: int, heads: int, dropout: float):
        super().__init__()
        self.k = k
        self.heads = heads
        self.dropout = dropout  # dropout rate used during training
        self.attention = SelfAttention(self.k, self.heads)
        self.layer_norm_1 = hk.LayerNorm(
            axis=[-2, -1], create_scale=True, create_offset=True)
        # MLP expands to 4k features, then projects back to k.
        self.linear_1 = hk.Linear(4*self.k)
        self.linear_2 = hk.Linear(self.k)
        self.layer_norm_2 = hk.LayerNorm(
            axis=[-2, -1], create_scale=True, create_offset=True)

    def __call__(self, x: jnp.ndarray, inference=False):
        """Run the block; dropout is disabled when ``inference`` is True.

        Returns:
            ``(out, heads)``: transformed tokens and the attention logits
            from the self-attention sublayer.
        """
        dropout = 0. if inference else self.dropout
        x, heads = self.attention(x)
        # NOTE(review): the residual here adds the attention output to its own
        # normalized version — the block input was overwritten on the line
        # above. Confirm this is intentional.
        x = self.layer_norm_1(x) + x
        # Draw both dropout keys up front; the order of next_rng_key() calls
        # fixes the random streams, so keep these two lines in place.
        key1 = hk.next_rng_key()
        key2 = hk.next_rng_key()
        forward = self.linear_1(x)
        forward = jax.nn.gelu(forward)
        forward = hk.dropout(key1, dropout, forward)
        forward = self.linear_2(forward)
        forward = self.layer_norm_2(forward + x)
        out = hk.dropout(key2, dropout, forward)
        return out, heads
class VisionTransformer(hk.Module):
    """ViT-style classifier that also returns an attention rollout.

    The image is split into ``patch_size`` x ``patch_size`` patches, each
    patch is linearly embedded, a learned class token is prepended, learned
    position embeddings are added, and the sequence runs through ``depth``
    transformer blocks. Classification reads the class-token output.
    """

    def __init__(
        self,
        k,
        heads: int,
        depth: int,
        num_classes: int,
        patch_size: int,
        image_size: t.Tuple[int, int],
        dropout: float
    ):
        super().__init__()
        self.k = k                    # token embedding size
        self.heads = heads            # attention heads per block
        self.depth = depth            # number of transformer blocks
        self.num_classes = num_classes
        self.patch_size = patch_size
        self.image_size = image_size
        self.dropout = dropout
        # Patch embedding is just a dense layer mapping a flattened patch to another array
        self.token_emb = hk.Linear(self.k)
        self.blocks = [
            TransformerBlock(self.k, self.heads, dropout) for _ in range(self.depth)
        ]
        # NOTE(review): this first ``classification`` head is immediately
        # overwritten by the Sequential below — dead assignment, confirm.
        self.classification = hk.Linear(self.num_classes)
        height, width = image_size
        # One position per patch, plus one for the prepended class token.
        num_patches = (height // patch_size) * (width // patch_size) + 1
        self.pos_emb = hk.Embed(vocab_size=num_patches, embed_dim=self.k)
        self.cls_token = hk.get_parameter(
            "cls", shape=[k], init=hk.initializers.RandomNormal())
        self.classification = hk.Sequential([
            hk.LayerNorm(axis=[-2, -1], create_scale=True, create_offset=True),
            hk.Linear(self.num_classes),
        ])

    def __call__(self, x, inference=False):
        """Classify a batch of images.

        Args:
            x: image batch laid out (batch, H, W, C) per the rearrange pattern
               below; H and W must be divisible by ``patch_size``.
            inference: disables dropout when True.

        Returns:
            ``(logits, rollout)``: class logits and the attention rollout
            matrix built from all blocks' attention maps.
        """
        dropout = 0. if inference else self.dropout
        batch_size = x.shape[0]
        # Split into non-overlapping patches and flatten each patch.
        x = rearrange(x, "b (h p1) (w p2) c -> b (h w) (p1 p2 c)",
                      p1=self.patch_size, p2=self.patch_size)
        tokens = self.token_emb(x)
        cls_token = repeat(self.cls_token, "k -> b 1 k", b=batch_size)
        combined_tokens = jnp.concatenate([cls_token, tokens], axis=1)
        positions = jnp.arange(combined_tokens.shape[1])
        pos_emb = self.pos_emb(positions)
        x = pos_emb + combined_tokens
        x = hk.dropout(hk.next_rng_key(), dropout, x)
        attention_heads = []
        for block in self.blocks:
            x, heads = block(x)
            attention_heads.append(heads)
        rollout = attention_rollout(attention_heads, head_fusion="max", discard_ratio=0.5)
        # Class-token output feeds the classifier head.
        x = x[:, 0]
        x = self.classification(x)
        return x, rollout
def attention_rollout(
    attention_heads: t.List[jnp.ndarray],
    head_fusion: str,
    discard_ratio: float = 0,
) -> jnp.ndarray:
    """Combine per-block attention maps into a single rollout matrix.

    Per block: fuse heads into one (tokens x tokens) map, optionally zero the
    lowest-weight entries, average with the identity (residual path),
    row-normalize, and multiply into the running product.

    Args:
        attention_heads: one (batch, heads, tokens, tokens) array per block.
        head_fusion: "mean", "max" or "min".
        discard_ratio: fraction of lowest attention weights to zero out; the
            flattened position 0 (class-token entry) is always kept. 0
            disables filtering.

    Returns:
        The (batch, tokens, tokens) rollout matrix.

    Raises:
        ValueError: if ``head_fusion`` is not a supported mode.
    """
    batch, _, tokens, _ = attention_heads[0].shape
    # Running product starts as the identity for every batch element.
    rollout = repeat(jnp.eye(tokens), "h1 h2 -> b h1 h2", b=batch)
    # Multiply attention in each block together
    for attention in attention_heads:
        if head_fusion == "mean":
            attention_heads_fused = attention.mean(axis=1)
        elif head_fusion == "max":
            attention_heads_fused = attention.max(axis=1)
        elif head_fusion == "min":
            attention_heads_fused = attention.min(axis=1)
        else:
            raise ValueError("Attention head fusion type Not supported")
        if discard_ratio != 0:
            flat_attn = rearrange(attention_heads_fused, "b h w -> b (h w)")
            # Take the top percentile across the last axis
            threshold = jnp.percentile(flat_attn, (1 - discard_ratio) * 100, axis=-1, keepdims=True)
            # Mask to keep the class token
            cls_indices = np.zeros(flat_attn.shape)
            cls_indices[:, 0] = 1
            cls_indices = jnp.array(cls_indices)
            # Keep values that are in the top percentile or are the cls indices
            keep_mask = jnp.logical_or(flat_attn > threshold, cls_indices)
            flat_attn = jnp.where(keep_mask, flat_attn, 0)
            filtered_attn = rearrange(flat_attn, "b (h w) -> b h w", h=tokens, w=tokens)
        else:
            filtered_attn = attention_heads_fused
        # Compute attention rollout
        # Average with the identity to account for the residual (skip) path.
        identity = repeat(jnp.eye(tokens), "x y -> b x y", b=batch)
        a = (filtered_attn + 1.0 * identity) / 2
        # Normalize values over embedding axis
        a = a / reduce(a, "b h1 h2 -> b h1 1", "sum")
        rollout = jax.lax.batch_matmul(a, rollout)
    # NOTE(review): the per-patch masks below are computed but never used —
    # the function returns the full rollout matrix. Confirm whether ``masks``
    # was meant to be returned instead.
    masks = rollout[:, 0, 1:]
    width = int((tokens - 1) ** 0.5)
    masks = rearrange(masks, "b (w1 w2) -> b w1 w2", w1=width, w2=width)
    masks = masks / reduce(masks, "b w1 w2 -> b 1 1", "max")
    return rollout
| en | 0.792177 | # send attention heads as additional output # Patch embedding is just a dense layer mapping a flattened patch to another array # Multiply attention in each block together # Take the top percentile across the last axis # Mask to keep the class token # Keep values that are in the top percentile or are the cls indices # Compute attention rollout # Normalize values over embedding axis | 2.248858 | 2 |
GAN.py | rberezdivin/DCGAN-CIFAR10 | 20 | 6625161 | #-*- coding: utf-8 -*-
from __future__ import division
import os
import time
import tensorflow as tf
import numpy as np
from ops import *
from utils import *
#from datetime import datetime
#import matplotlib.pyplot as plt
class GAN(object):
    """DCGAN-style generative adversarial network (TF1 graph mode).

    Supports MNIST, Fashion-MNIST and CIFAR-10. ``build_model`` constructs the
    graph, ``train`` runs alternating discriminator/generator Adam steps and
    periodically writes checkpoints, TensorBoard summaries and sample images.

    Fix applied: ``build_model`` reads ``self.learningRateD``/``self.learningRateG``
    unconditionally, but the original ``__init__`` only set them in the cifar10
    branch (the mnist branch set ``self.learning_rate``), so building the graph
    for MNIST-style datasets raised AttributeError. The mnist branch now also
    sets both attributes (keeping ``self.learning_rate`` for compatibility).
    """

    def __init__(self, sess, epoch, batch_size, dataset_name, checkpoint_dir, result_dir, log_dir):
        """Store hyper-parameters and load the chosen dataset into memory.

        Args:
            sess: an open ``tf.Session`` used for all graph execution.
            epoch: number of training epochs.
            batch_size: minibatch size.
            dataset_name: 'mnist', 'fashion-mnist' or 'cifar10'.
            checkpoint_dir: root directory for model checkpoints.
            result_dir: directory for generated sample images.
            log_dir: directory for TensorBoard summaries.

        Raises:
            NotImplementedError: for any other dataset name.
        """
        self.sess = sess
        self.dataset_name = dataset_name
        self.checkpoint_dir = checkpoint_dir
        self.result_dir = result_dir
        self.log_dir = log_dir
        self.epoch = epoch
        self.batch_size = batch_size
        self.model_name = "GAN"     # name for checkpoint

        if dataset_name == 'mnist' or dataset_name == 'fashion-mnist':  # fix
            # parameters
            self.input_height = 28
            self.input_width = 28
            self.output_height = 28
            self.output_width = 28

            self.z_dim = 62         # dimension of noise-vector
            self.c_dim = 1          # grayscale

            # train
            self.learning_rate = 0.0002
            # FIX: build_model reads learningRateD / learningRateG; use the
            # same rate for both networks on MNIST-style data.
            self.learningRateD = self.learning_rate
            self.learningRateG = self.learning_rate
            self.beta1 = 0.5

            # test
            self.sample_num = 64    # number of generated images to be saved

            # load mnist
            self.data_X, self.data_y = load_mnist(self.dataset_name)

            # get number of batches for a single epoch
            self.num_batches = len(self.data_X) // self.batch_size  # 700 = 70000 / 100
        elif dataset_name == 'cifar10':
            # parameters
            self.input_height = 32
            self.input_width = 32
            self.output_height = 32
            self.output_width = 32

            self.z_dim = 100        # dimension of noise-vector
            self.c_dim = 3          # color dimension

            # train
            #self.learning_rate = 0.0002 # 1e-3, 1e-4
            self.learningRateD = 1e-3
            self.learningRateG = 1e-4
            self.beta1 = 0.5

            # test
            self.sample_num = 64    # number of generated images to be saved

            # load cifar10
            self.data_X, self.data_y = load_cifar10()

            #validatin images
            '''
            # revice image data // M*N*3 // RGB float32 : value must set between 0. with 1.
            vMin = np.amin(self.data_X[0])
            vMax = np.amax(self.data_X[0])
            img_arr = self.data_X[0].reshape(32*32*3,1) # flatten
            for i, v in enumerate(img_arr):
                img_arr[i] = (v-vMin)/(vMax-vMin)
            img_arr = img_arr.reshape(32,32,3) # M*N*3
            # matplot display
            plt.subplot(1,1,1),plt.imshow(img_arr, interpolation='nearest')
            plt.title("pred.:{}".format(np.argmax(self.data_y[0]),fontsize=10))
            plt.axis("off")
            imgName = "{}.png".format(datetime.now())
            imgName = imgName.replace(":","_")
            #plt.savefig(os.path.join(".\\pic_result",imgName))
            plt.savefig(imgName)
            plt.show()
            '''

            # get number of batches for a single epoch
            #print(len(self.data_X),len(self.data_y))
            #self.num_batches = self.data_X.get_shape()[0] // self.batch_size
            self.num_batches = len(self.data_X) // self.batch_size
            #print(self.num_batches)
        else:
            raise NotImplementedError

    def discriminator(self, x, is_training=True, reuse=False):
        """Map an image batch to the probability of being real.

        Args:
            x: input image batch (NHWC float32).
            is_training: controls batch-norm behaviour.
            reuse: reuse variables; the discriminator is instantiated twice
                (once on real, once on generated images).

        Returns:
            ``(out, out_logit, net)``: sigmoid probability, raw logit, and
            the last feature layer before the classifier.
        """
        # Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657)
        # Architecture : (64)4c2s-(128)4c2s_BL-FC1024_BL-FC1_S
        with tf.variable_scope("discriminator", reuse=reuse):
            if self.dataset_name == 'cifar10':
                print("D:", x.get_shape())  # 32, 32, 3 = 3072
                net = lrelu(conv2d(x, 64, 5, 5, 2, 2, name='d_conv1'+'_'+self.dataset_name))
                print("D:", net.get_shape())
                net = lrelu(bn(conv2d(net, 128, 5, 5, 2, 2, name='d_conv2'+'_'+self.dataset_name), is_training=is_training, scope='d_bn2'))
                print("D:", net.get_shape())
                net = lrelu(bn(conv2d(net, 256, 5, 5, 2, 2, name='d_conv3'+'_'+self.dataset_name), is_training=is_training, scope='d_bn3'))
                print("D:", net.get_shape())
                net = lrelu(bn(conv2d(net, 512, 5, 5, 2, 2, name='d_conv4'+'_'+self.dataset_name), is_training=is_training, scope='d_bn4'))
                print("D:", net.get_shape())
                net = tf.reshape(net, [self.batch_size, -1])
                print("D:", net.get_shape())
                out_logit = linear(net, 1, scope='d_fc5'+'_'+self.dataset_name)
                print("D:", net.get_shape())
                out = tf.nn.sigmoid(out_logit)
                print("D:", out.get_shape())
                print("------------------------")
            else:  # mnist / fashion mnist
                #print(x.get_shape())
                net = lrelu(conv2d(x, 64, 4, 4, 2, 2, name='d_conv1'+'_'+self.dataset_name))
                net = lrelu(bn(conv2d(net, 128, 4, 4, 2, 2, name='d_conv2'+'_'+self.dataset_name), is_training=is_training, scope='d_bn2'))
                net = tf.reshape(net, [self.batch_size, -1])
                net = lrelu(bn(linear(net, 1024, scope='d_fc3'+'_'+self.dataset_name), is_training=is_training, scope='d_bn3'))
                out_logit = linear(net, 1, scope='d_fc4'+'_'+self.dataset_name)
                out = tf.nn.sigmoid(out_logit)

            return out, out_logit, net

    def generator(self, z, is_training=True, reuse=False):
        """Map a noise batch ``z`` to an image batch.

        CIFAR-10 uses a tanh output (values in [-1, 1]); MNIST-style uses a
        sigmoid output (values in [0, 1]).

        Args:
            z: (batch, z_dim) noise vectors.
            is_training: controls batch-norm behaviour.
            reuse: reuse variables when instantiating a second (test) copy.

        Returns:
            Generated image batch of shape (batch, H, W, c_dim).
        """
        # Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657)
        # Architecture : FC1024_BR-FC7x7x128_BR-(64)4dc2s_BR-(1)4dc2s_S
        with tf.variable_scope("generator", reuse=reuse):
            if self.dataset_name == 'cifar10':
                # feature-map side lengths at successive upsampling stages
                h_size = 32
                h_size_2 = 16
                h_size_4 = 8
                h_size_8 = 4
                h_size_16 = 2

                print("G:", z.get_shape())
                net = linear(z, 512*h_size_16*h_size_16, scope='g_fc1'+'_'+self.dataset_name)
                print("G:", net.get_shape())
                net = tf.nn.relu(
                    bn(tf.reshape(net, [self.batch_size, h_size_16, h_size_16, 512]), is_training=is_training, scope='g_bn1')
                )
                print("G:", net.get_shape())
                net = tf.nn.relu(
                    bn(deconv2d(net, [self.batch_size, h_size_8, h_size_8, 256], 5, 5, 2, 2, name='g_dc2'+'_'+self.dataset_name), is_training=is_training, scope='g_bn2')
                )
                print("G:", net.get_shape())
                net = tf.nn.relu(
                    bn(deconv2d(net, [self.batch_size, h_size_4, h_size_4, 128], 5, 5, 2, 2, name='g_dc3'+'_'+self.dataset_name), is_training=is_training, scope='g_bn3')
                )
                print("G:", net.get_shape())
                net = tf.nn.relu(
                    bn(deconv2d(net, [self.batch_size, h_size_2, h_size_2, 64], 5, 5, 2, 2, name='g_dc4'+'_'+self.dataset_name), is_training=is_training, scope='g_bn4')
                )
                print("G:", net.get_shape())
                out = tf.nn.tanh(
                    deconv2d(net, [self.batch_size, self.output_height, self.output_width, self.c_dim], 5, 5, 2, 2, name='g_dc5'+'_'+self.dataset_name)
                )
                print("G:", out.get_shape())
                print("------------------------")
            else:  # mnist / fashon mnist
                h_size = 28
                h_size_2 = 14
                h_size_4 = 7

                net = tf.nn.relu(bn(linear(z, 1024, scope='g_fc1'+'_'+self.dataset_name), is_training=is_training, scope='g_bn1'))
                net = tf.nn.relu(bn(linear(net, 128 * h_size_4 * h_size_4, scope='g_fc2'+'_'+self.dataset_name), is_training=is_training, scope='g_bn2'))
                net = tf.reshape(net, [self.batch_size, h_size_4, h_size_4, 128])  # 8 8 128
                net = tf.nn.relu(
                    bn(deconv2d(net, [self.batch_size, h_size_2, h_size_2, 64], 4, 4, 2, 2, name='g_dc3'+'_'+self.dataset_name), is_training=is_training, scope='g_bn3')
                )
                out = tf.nn.sigmoid(deconv2d(net, [self.batch_size, self.output_height, self.output_width, self.c_dim], 4, 4, 2, 2, name='g_dc4'+'_'+self.dataset_name))

            return out

    def build_model(self):
        """Build the TF1 graph: placeholders, losses, optimizers, summaries."""
        # some parameters
        image_dims = [self.input_height, self.input_width, self.c_dim]
        bs = self.batch_size  # 100

        """ Graph Input """
        # images
        self.inputs = tf.placeholder(tf.float32, [bs] + image_dims, name='real_images')

        # noises
        self.z = tf.placeholder(tf.float32, [bs, self.z_dim], name='z')

        """ Loss Function """
        # output of D for real images
        D_real, D_real_logits, _ = self.discriminator(self.inputs, is_training=True, reuse=False)

        # output of D for fake images
        G = self.generator(self.z, is_training=True, reuse=False)
        D_fake, D_fake_logits, _ = self.discriminator(G, is_training=True, reuse=True)

        # get loss for discriminator
        d_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real_logits, labels=tf.ones_like(D_real)))
        d_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.zeros_like(D_fake)))
        self.d_loss = d_loss_real + d_loss_fake

        # get loss for generator (non-saturating: fakes labelled as real)
        self.g_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.ones_like(D_fake)))

        """ Training """
        # divide trainable variables into a group for D and a group for G
        # (relies on the 'd_'/'g_' prefixes used in the layer names above)
        t_vars = tf.trainable_variables()
        d_vars = [var for var in t_vars if 'd_' in var.name]
        g_vars = [var for var in t_vars if 'g_' in var.name]

        # optimizers (UPDATE_OPS dependency keeps batch-norm stats updating)
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            self.d_optim = tf.train.AdamOptimizer(self.learningRateD, beta1=self.beta1).minimize(self.d_loss, var_list=d_vars)
            self.g_optim = tf.train.AdamOptimizer(self.learningRateG, beta1=self.beta1).minimize(self.g_loss, var_list=g_vars)
            #self.g_optim = tf.train.AdamOptimizer(self.learning_rate*5, beta1=self.beta1).minimize(self.g_loss, var_list=g_vars)

        """ Testing """
        # for test: a non-training generator sharing the trained weights
        self.fake_images = self.generator(self.z, is_training=False, reuse=True)

        """ Summary """
        d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real)
        d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake)
        d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
        g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)

        # final summary operations
        self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum])
        self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum])

    def train(self):
        """Run the training loop, resuming from a checkpoint if one exists."""
        # initialize all variables
        tf.global_variables_initializer().run()

        # graph inputs for visualize training results
        self.sample_z = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))  # 100, 62
        self.test_images = self.data_X[0:self.batch_size]

        # saver to save model
        self.saver = tf.train.Saver()

        # summary writer
        self.writer = tf.summary.FileWriter(self.log_dir + '\\' + self.model_name, self.sess.graph)

        # restore check-point if it exits
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)
        if could_load:
            start_epoch = (int)(checkpoint_counter / self.num_batches)
            start_batch_id = checkpoint_counter - start_epoch * self.num_batches
            counter = checkpoint_counter
            print(" [*] Load SUCCESS")
            print(" [!] START_EPOCH is ", start_epoch, " START_BATCH_ID is ", start_batch_id)
        else:
            start_epoch = 0
            start_batch_id = 0
            counter = 1
            print(" [!] Load failed...")

        # loop for epoch
        start_time = time.time()
        for epoch in range(start_epoch, self.epoch):

            # get batch data
            for idx in range(start_batch_id, self.num_batches):
                batch_images = self.data_X[idx*self.batch_size:(idx+1)*self.batch_size]
                batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_dim]).astype(np.float32)

                # update D network
                _, summary_str, d_loss = self.sess.run([self.d_optim, self.d_sum, self.d_loss],
                                                       feed_dict={self.inputs: batch_images, self.z: batch_z})
                self.writer.add_summary(summary_str, counter)

                # update G network
                #self.sess.run([self.g_optim], feed_dict={self.inputs: batch_images, self.z: batch_z})
                # update G twice to make sure that d_loss does not go to zero
                # NOTE(review): fetching g_optim twice in a single sess.run
                # does not execute the op twice — confirm whether a second
                # separate sess.run call was intended here.
                _, _, summary_str, g_loss = self.sess.run([self.g_optim, self.g_optim, self.g_sum, self.g_loss], feed_dict={self.inputs: batch_images, self.z: batch_z})
                self.writer.add_summary(summary_str, counter)

                # display training status
                counter += 1
                print("Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f" % (epoch, idx, self.num_batches, time.time() - start_time, d_loss, g_loss))

                # save training results for every 300 steps
                if np.mod(counter, 300) == 0:
                    samples = self.sess.run(self.fake_images,
                                            feed_dict={self.z: self.sample_z, self.inputs: self.test_images})
                    tot_num_samples = min(self.sample_num, self.batch_size)  # 64
                    manifold_h = int(np.floor(np.sqrt(tot_num_samples)))  # 8
                    manifold_w = int(np.floor(np.sqrt(tot_num_samples)))  # 8
                    save_images(samples[:manifold_h * manifold_w, :, :, :], [manifold_h, manifold_w],
                                '.\\' + self.result_dir + '\\' + self.model_name + '_train_{:02d}_{:04d}.png'.format(
                                    epoch, idx))

            # After an epoch, start_batch_id is set to zero
            # non-zero value is only for the first epoch after loading pre-trained model
            start_batch_id = 0

            # save model
            self.save(self.checkpoint_dir, counter)

            # show temporal results
            self.visualize_results(epoch)

        # save model for final step
        self.save(self.checkpoint_dir, counter)

    def visualize_results(self, epoch):
        """Generate and save a grid of sample images for this epoch."""
        tot_num_samples = min(self.sample_num, self.batch_size)    # 64, 100
        image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))  # 8

        """ random condition, random noise """
        z_sample = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))  # 100, 100

        samples = self.sess.run(self.fake_images, feed_dict={self.z: z_sample})

        save_matplot_img(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                         self.result_dir + '/' + self.model_name + '_epoch%03d' % epoch + '_test_all_classes.png')
        #save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
        #            self.result_dir + '/' + self.model_name + '_epoch%03d' % epoch + '_test_all_classes.png')

    @property
    def model_dir(self):
        """Checkpoint subdirectory name: dataset_batchsize_height_width."""
        return "{}_{}_{}_{}".format(
            self.dataset_name, self.batch_size,
            self.output_height, self.output_width)

    def save(self, checkpoint_dir, step):
        """Save a checkpoint at global step ``step``, creating dirs as needed."""
        checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir, self.model_name)

        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)

        self.saver.save(self.sess, os.path.join(checkpoint_dir, self.model_name+'.model'), global_step=step)

    def load(self, checkpoint_dir):
        """Restore the latest checkpoint if present.

        Returns:
            ``(True, counter)`` with the global step parsed from the
            checkpoint filename, or ``(False, 0)`` if none was found.
        """
        import re
        print(" [*] Reading checkpoints...")
        checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir, self.model_name)

        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            # the trailing integer in the checkpoint name is the global step
            counter = int(next(re.finditer("(\d+)(?!.*\d)", ckpt_name)).group(0))
            print(" [*] Success to read [{}], counter [{}]".format(ckpt_name, counter))
            return True, counter
        else:
            print(" [*] Failed to find a checkpoint")
            return False, 0
| #-*- coding: utf-8 -*-
from __future__ import division
import os
import time
import tensorflow as tf
import numpy as np
from ops import *
from utils import *
#from datetime import datetime
#import matplotlib.pyplot as plt
class GAN(object):
def __init__(self, sess, epoch, batch_size, dataset_name, checkpoint_dir, result_dir, log_dir):
self.sess = sess
self.dataset_name = dataset_name
self.checkpoint_dir = checkpoint_dir
self.result_dir = result_dir
self.log_dir = log_dir
self.epoch = epoch
self.batch_size = batch_size
self.model_name = "GAN" # name for checkpoint
if dataset_name == 'mnist' or dataset_name == 'fashion-mnist': # fix
# parameters
self.input_height = 28
self.input_width = 28
self.output_height = 28
self.output_width = 28
self.z_dim = 62 # dimension of noise-vector
self.c_dim = 1
# train
self.learning_rate = 0.0002
self.beta1 = 0.5
# test
self.sample_num = 64 # number of generated images to be saved
# load mnist
self.data_X, self.data_y = load_mnist(self.dataset_name)
# get number of batches for a single epoch
self.num_batches = len(self.data_X) // self.batch_size # 700 = 70000 / 100
elif dataset_name == 'cifar10':
# parameters
self.input_height = 32
self.input_width = 32
self.output_height = 32
self.output_width = 32
self.z_dim = 100 # dimension of noise-vector
self.c_dim = 3 # color dimension
# train
#self.learning_rate = 0.0002 # 1e-3, 1e-4
self.learningRateD = 1e-3
self.learningRateG = 1e-4
self.beta1 = 0.5
# test
self.sample_num = 64 # number of generated images to be saved
# load cifar10
self.data_X, self.data_y = load_cifar10()
#validatin images
'''
# revice image data // M*N*3 // RGB float32 : value must set between 0. with 1.
vMin = np.amin(self.data_X[0])
vMax = np.amax(self.data_X[0])
img_arr = self.data_X[0].reshape(32*32*3,1) # flatten
for i, v in enumerate(img_arr):
img_arr[i] = (v-vMin)/(vMax-vMin)
img_arr = img_arr.reshape(32,32,3) # M*N*3
# matplot display
plt.subplot(1,1,1),plt.imshow(img_arr, interpolation='nearest')
plt.title("pred.:{}".format(np.argmax(self.data_y[0]),fontsize=10))
plt.axis("off")
imgName = "{}.png".format(datetime.now())
imgName = imgName.replace(":","_")
#plt.savefig(os.path.join(".\\pic_result",imgName))
plt.savefig(imgName)
plt.show()
'''
# get number of batches for a single epoch
#print(len(self.data_X),len(self.data_y))
#self.num_batches = self.data_X.get_shape()[0] // self.batch_size
self.num_batches = len(self.data_X) // self.batch_size
#print(self.num_batches)
else:
raise NotImplementedError
def discriminator(self, x, is_training=True, reuse=False):
# Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657)
# Architecture : (64)4c2s-(128)4c2s_BL-FC1024_BL-FC1_S
with tf.variable_scope("discriminator", reuse=reuse):
if self.dataset_name == 'cifar10':
print("D:",x.get_shape()) # 32, 32, 3 = 3072
net = lrelu(conv2d(x, 64, 5, 5, 2, 2, name='d_conv1'+'_'+self.dataset_name))
print("D:",net.get_shape())
net = lrelu(bn(conv2d(net, 128, 5, 5, 2, 2, name='d_conv2'+'_'+self.dataset_name), is_training=is_training, scope='d_bn2'))
print("D:",net.get_shape())
net = lrelu(bn(conv2d(net, 256, 5, 5, 2, 2, name='d_conv3'+'_'+self.dataset_name), is_training=is_training, scope='d_bn3'))
print("D:",net.get_shape())
net = lrelu(bn(conv2d(net, 512, 5, 5, 2, 2, name='d_conv4'+'_'+self.dataset_name), is_training=is_training, scope='d_bn4'))
print("D:",net.get_shape())
net = tf.reshape(net, [self.batch_size, -1])
print("D:",net.get_shape())
out_logit = linear(net, 1, scope='d_fc5'+'_'+self.dataset_name)
print("D:",net.get_shape())
out = tf.nn.sigmoid(out_logit)
print("D:",out.get_shape())
print("------------------------")
else: # mnist / fashion mnist
#print(x.get_shape())
net = lrelu(conv2d(x, 64, 4, 4, 2, 2, name='d_conv1'+'_'+self.dataset_name))
net = lrelu(bn(conv2d(net, 128, 4, 4, 2, 2, name='d_conv2'+'_'+self.dataset_name), is_training=is_training, scope='d_bn2'))
net = tf.reshape(net, [self.batch_size, -1])
net = lrelu(bn(linear(net, 1024, scope='d_fc3'+'_'+self.dataset_name), is_training=is_training, scope='d_bn3'))
out_logit = linear(net, 1, scope='d_fc4'+'_'+self.dataset_name)
out = tf.nn.sigmoid(out_logit)
return out, out_logit, net
def generator(self, z, is_training=True, reuse=False):
# Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657)
# Architecture : FC1024_BR-FC7x7x128_BR-(64)4dc2s_BR-(1)4dc2s_S
with tf.variable_scope("generator", reuse=reuse):
if self.dataset_name == 'cifar10':
h_size = 32
h_size_2 = 16
h_size_4 = 8
h_size_8 = 4
h_size_16 = 2
print("G:",z.get_shape())
net = linear(z, 512*h_size_16*h_size_16, scope='g_fc1'+'_'+self.dataset_name)
print("G:",net.get_shape())
net = tf.nn.relu(
bn(tf.reshape(net, [self.batch_size, h_size_16, h_size_16, 512]),is_training=is_training, scope='g_bn1')
)
print("G:",net.get_shape())
net = tf.nn.relu(
bn(deconv2d(net, [self.batch_size, h_size_8, h_size_8, 256], 5, 5, 2, 2, name='g_dc2'+'_'+self.dataset_name),is_training=is_training, scope='g_bn2')
)
print("G:",net.get_shape())
net = tf.nn.relu(
bn(deconv2d(net, [self.batch_size, h_size_4, h_size_4, 128], 5, 5, 2, 2, name='g_dc3'+'_'+self.dataset_name),is_training=is_training, scope='g_bn3')
)
print("G:",net.get_shape())
net = tf.nn.relu(
bn(deconv2d(net, [self.batch_size, h_size_2, h_size_2, 64], 5, 5, 2, 2, name='g_dc4'+'_'+self.dataset_name),is_training=is_training, scope='g_bn4')
)
print("G:",net.get_shape())
out = tf.nn.tanh(
deconv2d(net, [self.batch_size, self.output_height, self.output_width, self.c_dim], 5, 5, 2, 2, name='g_dc5'+'_'+self.dataset_name)
)
print("G:",out.get_shape())
print("------------------------")
else: # mnist / fashon mnist
h_size = 28
h_size_2 = 14
h_size_4 = 7
net = tf.nn.relu(bn(linear(z, 1024, scope='g_fc1'+'_'+self.dataset_name), is_training=is_training, scope='g_bn1'))
net = tf.nn.relu(bn(linear(net, 128 * h_size_4 * h_size_4, scope='g_fc2'+'_'+self.dataset_name), is_training=is_training, scope='g_bn2'))
net = tf.reshape(net, [self.batch_size, h_size_4, h_size_4, 128]) # 8 8 128
net = tf.nn.relu(
bn(deconv2d(net, [self.batch_size, h_size_2, h_size_2, 64], 4, 4, 2, 2, name='g_dc3'+'_'+self.dataset_name), is_training=is_training,scope='g_bn3')
)
out = tf.nn.sigmoid(deconv2d(net, [self.batch_size, self.output_height, self.output_width, self.c_dim], 4, 4, 2, 2, name='g_dc4'+'_'+self.dataset_name))
return out
def build_model(self):
# some parameters
image_dims = [self.input_height, self.input_width, self.c_dim]
bs = self.batch_size # 100
""" Graph Input """
# images
self.inputs = tf.placeholder(tf.float32, [bs] + image_dims, name='real_images')
# noises
self.z = tf.placeholder(tf.float32, [bs, self.z_dim], name='z')
""" Loss Function """
# output of D for real images
D_real, D_real_logits, _ = self.discriminator(self.inputs, is_training=True, reuse=False)
# output of D for fake images
G = self.generator(self.z, is_training=True, reuse=False)
D_fake, D_fake_logits, _ = self.discriminator(G, is_training=True, reuse=True)
# get loss for discriminator
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real_logits, labels=tf.ones_like(D_real)))
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.zeros_like(D_fake)))
self.d_loss = d_loss_real + d_loss_fake
# get loss for generator
self.g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.ones_like(D_fake)))
""" Training """
# divide trainable variables into a group for D and a group for G
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'd_' in var.name]
g_vars = [var for var in t_vars if 'g_' in var.name]
# optimizers
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
self.d_optim = tf.train.AdamOptimizer(self.learningRateD, beta1=self.beta1).minimize(self.d_loss, var_list=d_vars)
self.g_optim = tf.train.AdamOptimizer(self.learningRateG, beta1=self.beta1).minimize(self.g_loss, var_list=g_vars)
#self.g_optim = tf.train.AdamOptimizer(self.learning_rate*5, beta1=self.beta1).minimize(self.g_loss, var_list=g_vars)
"""" Testing """
# for test
self.fake_images = self.generator(self.z, is_training=False, reuse=True)
""" Summary """
d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real)
d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake)
d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
# final summary operations
self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum])
self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum])
def train(self):
# initialize all variables
tf.global_variables_initializer().run()
# graph inputs for visualize training results
self.sample_z = np.random.uniform(-1, 1, size=(self.batch_size , self.z_dim)) # 100, 62
self.test_images = self.data_X[0:self.batch_size]
# saver to save model
self.saver = tf.train.Saver()
# summary writer
self.writer = tf.summary.FileWriter(self.log_dir + '\\' + self.model_name, self.sess.graph)
# restore check-point if it exits
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
start_epoch = (int)(checkpoint_counter / self.num_batches)
start_batch_id = checkpoint_counter - start_epoch * self.num_batches
counter = checkpoint_counter
print(" [*] Load SUCCESS")
print(" [!] START_EPOCH is ",start_epoch," START_BATCH_ID is ", start_batch_id)
else:
start_epoch = 0
start_batch_id = 0
counter = 1
print(" [!] Load failed...")
# loop for epoch
start_time = time.time()
for epoch in range(start_epoch, self.epoch):
# get batch data
for idx in range(start_batch_id, self.num_batches):
batch_images = self.data_X[idx*self.batch_size:(idx+1)*self.batch_size]
batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_dim]).astype(np.float32)
# update D network
_, summary_str, d_loss = self.sess.run([self.d_optim, self.d_sum, self.d_loss],
feed_dict={self.inputs: batch_images, self.z: batch_z})
self.writer.add_summary(summary_str, counter)
# update G network
#self.sess.run([self.g_optim], feed_dict={self.inputs: batch_images, self.z: batch_z})
# update G twice to make sure that d_loss does not go to zero
_, _, summary_str, g_loss = self.sess.run([self.g_optim, self.g_optim, self.g_sum, self.g_loss], feed_dict={self.inputs: batch_images, self.z: batch_z})
self.writer.add_summary(summary_str, counter)
# display training status
counter += 1
print("Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f" % (epoch, idx, self.num_batches, time.time() - start_time, d_loss, g_loss))
# save training results for every 300 steps
if np.mod(counter, 300) == 0:
samples = self.sess.run(self.fake_images,
feed_dict={self.z: self.sample_z, self.inputs: self.test_images})
tot_num_samples = min(self.sample_num, self.batch_size) # 64
manifold_h = int(np.floor(np.sqrt(tot_num_samples))) # 8
manifold_w = int(np.floor(np.sqrt(tot_num_samples))) # 8
save_images(samples[:manifold_h * manifold_w, :, :, :], [manifold_h, manifold_w],
'.\\' + self.result_dir + '\\' + self.model_name + '_train_{:02d}_{:04d}.png'.format(
epoch, idx))
# After an epoch, start_batch_id is set to zero
# non-zero value is only for the first epoch after loading pre-trained model
start_batch_id = 0
# save model
self.save(self.checkpoint_dir, counter)
# show temporal results
self.visualize_results(epoch)
# save model for final step
self.save(self.checkpoint_dir, counter)
def visualize_results(self, epoch):
tot_num_samples = min(self.sample_num, self.batch_size) # 64, 100
image_frame_dim = int(np.floor(np.sqrt(tot_num_samples))) # 8
""" random condition, random noise """
z_sample = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim)) # 100, 100
samples = self.sess.run(self.fake_images, feed_dict={self.z: z_sample})
save_matplot_img(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
self.result_dir + '/' + self.model_name + '_epoch%03d' % epoch + '_test_all_classes.png')
#save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
# self.result_dir + '/' + self.model_name + '_epoch%03d' % epoch + '_test_all_classes.png')
@property
def model_dir(self):
return "{}_{}_{}_{}".format(
self.dataset_name, self.batch_size,
self.output_height, self.output_width)
def save(self, checkpoint_dir, step):
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir, self.model_name)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,os.path.join(checkpoint_dir, self.model_name+'.model'), global_step=step)
def load(self, checkpoint_dir):
import re
print(" [*] Reading checkpoints...")
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir, self.model_name)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
counter = int(next(re.finditer("(\d+)(?!.*\d)",ckpt_name)).group(0))
print(" [*] Success to read [{}], counter [{}]".format(ckpt_name,counter))
return True, counter
else:
print(" [*] Failed to find a checkpoint")
return False, 0
| en | 0.483444 | #-*- coding: utf-8 -*- #from datetime import datetime #import matplotlib.pyplot as plt # name for checkpoint # fix # parameters # dimension of noise-vector # train # test # number of generated images to be saved # load mnist # get number of batches for a single epoch # 700 = 70000 / 100 # parameters # dimension of noise-vector # color dimension # train #self.learning_rate = 0.0002 # 1e-3, 1e-4 # test # number of generated images to be saved # load cifar10 #validatin images # revice image data // M*N*3 // RGB float32 : value must set between 0. with 1. vMin = np.amin(self.data_X[0]) vMax = np.amax(self.data_X[0]) img_arr = self.data_X[0].reshape(32*32*3,1) # flatten for i, v in enumerate(img_arr): img_arr[i] = (v-vMin)/(vMax-vMin) img_arr = img_arr.reshape(32,32,3) # M*N*3 # matplot display plt.subplot(1,1,1),plt.imshow(img_arr, interpolation='nearest') plt.title("pred.:{}".format(np.argmax(self.data_y[0]),fontsize=10)) plt.axis("off") imgName = "{}.png".format(datetime.now()) imgName = imgName.replace(":","_") #plt.savefig(os.path.join(".\\pic_result",imgName)) plt.savefig(imgName) plt.show() # get number of batches for a single epoch #print(len(self.data_X),len(self.data_y)) #self.num_batches = self.data_X.get_shape()[0] // self.batch_size #print(self.num_batches) # Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657) # Architecture : (64)4c2s-(128)4c2s_BL-FC1024_BL-FC1_S # 32, 32, 3 = 3072 # mnist / fashion mnist #print(x.get_shape()) # Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657) # Architecture : FC1024_BR-FC7x7x128_BR-(64)4dc2s_BR-(1)4dc2s_S # mnist / fashon mnist # 8 8 128 # some parameters # 100 Graph Input # images # noises Loss Function # output of D for real images # output of D for fake images # get loss for discriminator # get loss for generator Training # divide trainable variables into a group for D and a group for G # optimizers #self.g_optim = 
tf.train.AdamOptimizer(self.learning_rate*5, beta1=self.beta1).minimize(self.g_loss, var_list=g_vars) " Testing # for test Summary # final summary operations # initialize all variables # graph inputs for visualize training results # 100, 62 # saver to save model # summary writer # restore check-point if it exits # loop for epoch # get batch data # update D network # update G network #self.sess.run([self.g_optim], feed_dict={self.inputs: batch_images, self.z: batch_z}) # update G twice to make sure that d_loss does not go to zero # display training status # save training results for every 300 steps # 64 # 8 # 8 # After an epoch, start_batch_id is set to zero # non-zero value is only for the first epoch after loading pre-trained model # save model # show temporal results # save model for final step # 64, 100 # 8 random condition, random noise # 100, 100 #save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim], # self.result_dir + '/' + self.model_name + '_epoch%03d' % epoch + '_test_all_classes.png') | 2.627783 | 3 |
examples/gradient_magnitude.py | rqssouza/opencv-gui-parameter-tuner | 1 | 6625162 | <reponame>rqssouza/opencv-gui-parameter-tuner<gh_stars>1-10
#!/bin/env python3
import cv2 as cv
import numpy as np
import argparse
import tuner.tuner as tuner
def mag(gradient_x, gradient_y):
    """Return the gradient magnitude of (gradient_x, gradient_y) scaled to uint8.

    The Euclidean magnitude is normalized by its maximum and stretched to
    [0, 255].  A completely flat input (all-zero gradients) returns an
    all-zero image instead of dividing by zero.
    """
    gradient_mag = np.sqrt(np.square(gradient_x) + np.square(gradient_y))
    peak = np.max(gradient_mag)
    if peak == 0:
        # Flat image: the original normalization would divide by zero.
        return np.zeros_like(gradient_mag, dtype=np.uint8)
    return np.uint8(255 * (gradient_mag / peak))
def ths(img, ths_min, ths_max):
    """Binary-threshold *img*: 255 where ths_min <= pixel <= ths_max, else 0."""
    mask = (img >= ths_min) & (img <= ths_max)
    out = np.zeros_like(img)
    out[mask] = 255
    return out
def process(image, args):
    """Compute a thresholded Sobel gradient-magnitude image.

    Converts *image* to grayscale, takes x/y Sobel derivatives with an
    odd-adjusted kernel size, scales the magnitude to uint8 via ``mag`` and
    binarizes it with ``ths``.  Returns a ``((rows, cols), [images])`` layout
    tuple as expected by the tuner GUI.
    """
    def odd(ksize):
        # Sobel requires an odd aperture; bump even sizes up by one.
        return ksize + (ksize + 1) % 2

    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    ksize = odd(args.kernel_size)
    gradient_x = cv.Sobel(src=gray, ddepth=cv.CV_64F, dx=1, dy=0, ksize=ksize)
    gradient_y = cv.Sobel(src=gray, ddepth=cv.CV_64F, dx=0, dy=1, ksize=ksize)
    gradient_mag = ths(mag(gradient_x, gradient_y), args.ths_min, args.ths_max)
    return ((1, 1), [gradient_mag])
# Tuner slider configuration: [parameter name, initial value, maximum value].
CFG = [
    ['kernel_size', 3, 30],
    ['ths_min', 20, 255],
    ['ths_max', 100, 255],
]

if __name__ == '__main__':
    # Launch the interactive tuner GUI with the processing callback above.
    tuner.Tuner_App(
        process,
        CFG,
        'Gradient Magnitude',
        'Tune gradient magnitude parameters',
    )
import cv2 as cv
import numpy as np
import argparse
import tuner.tuner as tuner
def mag(gradient_x, gradient_y):
gradient_mag = np.sqrt(np.square(gradient_x) + np.square(gradient_y))
return np.uint8(255 * (gradient_mag / np.max(gradient_mag)))
def ths(img, ths_min, ths_max):
ret = np.zeros_like(img)
ret[(img >= ths_min) & (img <= ths_max)] = 255
return ret
def process(image, args):
adj_k = lambda ksize : ksize + (ksize + 1) % 2
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
gradient_x = cv.Sobel(
src = gray,
ddepth = cv.CV_64F,
dx = 1,
dy = 0,
ksize = adj_k(args.kernel_size),
)
gradient_y = cv.Sobel(
src = gray,
ddepth = cv.CV_64F,
dx = 0,
dy = 1,
ksize = adj_k(args.kernel_size),
)
gradient_mag = ths(mag(gradient_x, gradient_y), args.ths_min, args.ths_max)
return ((1, 1), [gradient_mag])
CFG = [
['kernel_size', 3, 30],
['ths_min', 20, 255],
['ths_max', 100, 255],
]
if __name__ == '__main__':
tuner.Tuner_App(
process,
CFG,
'Gradient Magnitude',
'Tune gradient magnitude parameters',
) | ru | 0.167759 | #!/bin/env python3 | 2.303523 | 2 |
pygbe/compute_boundary_force.py | barbagroup/pygbe | 36 | 6625163 | <filename>pygbe/compute_boundary_force.py<gh_stars>10-100
"""
This function reads in a phi.txt resulting from the linear system
in a regular pygbe run, and computing the reaction field
"""
import os
import re
import sys
import time
import glob
import numpy
import pickle
import subprocess
from datetime import datetime
from argparse import ArgumentParser
# Import self made modules
import pygbe
from pygbe.gmres import gmres_mgs
from pygbe.classes import Timing, Parameters, IndexConstant
from pygbe.gpuio import dataTransfer
from pygbe.class_initialization import initialize_surface, initialize_field
from pygbe.output import print_summary
from pygbe.matrixfree import (generateRHS, generateRHS_gpu, calculate_solvation_energy,
coulomb_energy, calculate_surface_energy)
from pygbe.projection import get_dphirdr_gpu
from pygbe.util.read_data import read_parameters, read_electric_field
from pygbe.tree.FMMutils import computeIndices, precomputeTerms, generateList
try:
from pygbe.tree.cuda_kernels import kernels
except:
pass
#courtesy of http://stackoverflow.com/a/5916874
class Logger(object):
    """Tee writes to both the screen (STDOUT) and a log file.

    Install with ``sys.stdout = Logger("run.log")``; every ``write`` then
    lands on the original terminal stream and in the file.
    """

    def __init__(self, filename="Default.log"):
        self.terminal = sys.stdout        # original stream, kept for echoing
        self.log = open(filename, "a")    # append so repeated runs accumulate

    def write(self, message):
        # Duplicate the payload to both sinks.
        for sink in (self.terminal, self.log):
            sink.write(message)

    def flush(self):
        """No-op; present so the object quacks like a file under Python 3."""
        pass
def read_inputs(args):
    """Build the PyGBe argument parser and parse *args*.

    When -c/-p are omitted, the .config/.param files are discovered later
    inside the problem folder, which is expected to resemble:

        lys/
          - lys.param
          - lys.config
          - built_parse.pqr
          - geometry/Lys1.face
          - geometry/Lys1.vert
          - output/
    """
    cli = ArgumentParser(description='Manage PyGBe command line arguments')
    cli.add_argument('problem_folder', type=str,
                     help="Path to folder containing problem files")
    cli.add_argument('-c', '--config', dest='config', type=str, default=None,
                     help="Path to problem config file")
    cli.add_argument('-p', '--param', dest='param', type=str, default=None,
                     help="Path to problem param file")
    cli.add_argument('-o', '--output', dest='output', type=str, default='output',
                     help="Output folder")
    cli.add_argument('-g', '--geometry', dest='geometry', type=str,
                     help="Custom geometry folder prefix")
    cli.add_argument('-x0', '--initial_guess', dest='initial_guess', type=str,
                     help="File containing an initial guess for the linear solver")
    cli.add_argument('-phi', '--phi_sol', dest='phi_file', type=str,
                     help="File containing solution of phi for boundary force calculation")
    return cli.parse_args(args)
def check_file_exists(filename):
    """Return True when *filename* names an existing path (file or directory).

    Note: this only stats the path via ``os.path.exists``; it never opens
    the file (the previous docstring claimed otherwise).
    """
    return os.path.exists(filename)
def find_config_files(cliargs):
    """Locate the problem's .config and .param files.

    Missing -c/-p arguments fall back to globbing the problem folder for the
    first matching file; explicitly given paths are resolved by
    ``resolve_relative_config_file``.  Also exports PYGBE_PROBLEM_FOLDER so
    downstream code can find the problem directory.

    Arguments
    ---------
    cliargs: parser
        parser containing cli arguments passed to PyGBe

    Returns
    -------
    cliargs.config: string
        path to config file
    cliargs.param: string
        path to param file
    """
    full_path = os.path.abspath(cliargs.problem_folder)
    os.environ['PYGBE_PROBLEM_FOLDER'] = full_path

    if cliargs.config is None:
        cliargs.config = next(glob.iglob(os.path.join(full_path, '*.config')))
    else:
        cliargs.config = resolve_relative_config_file(cliargs.config, full_path)

    if cliargs.param is None:
        cliargs.param = next(glob.iglob(os.path.join(full_path, '*.param')))
    else:
        cliargs.param = resolve_relative_config_file(cliargs.param, full_path)

    return cliargs.config, cliargs.param
def resolve_relative_config_file(config_file, full_path):
    """Resolve a user-supplied .param/.config path against likely locations.

    Tries, in order: the path exactly as given, the path relative to the
    current working directory, then the path inside the problem folder.
    Exits the program when none of these exist.

    Arguments
    ---------
    config_file: str
        the given path to a .param or .config file from the command line
    full_path: str
        the full path to the problem folder
    """
    if check_file_exists(config_file):
        return config_file
    if check_file_exists(os.path.abspath(config_file)):
        return os.path.join(os.getcwd(), config_file)
    if check_file_exists(os.path.join(full_path, config_file)):
        return os.path.join(full_path, config_file)
    sys.exit('Did not find expected config files\n'
             'Could not find {}'.format(config_file))
def check_for_nvcc():
    """Return True when the `nvcc` CUDA compiler is on PATH; warn and return False otherwise.

    Uses ``shutil.which`` instead of spawning the external ``which`` command:
    ``which`` does not exist on Windows, where ``subprocess.check_output``
    would raise FileNotFoundError -- which the old ``except
    CalledProcessError`` clause did not catch, crashing the program.
    """
    import shutil

    if shutil.which('nvcc') is not None:
        return True
    print(
        "Could not find `nvcc` on your PATH. Is cuda installed? "
        "PyGBe will continue to run but will run significantly slower. "
        "For optimal performance, add `nvcc` to your PATH"
    )
    return False
def main(argv=sys.argv, log_output=True, return_output_fname=False,
         return_results_dict=False, field=None):
    """
    Run a PyGBe problem, write outputs to STDOUT and to log file in
    problem directory, then compute total boundary and ionic forces from a
    previously saved phi solution (the file passed via ``-phi``).

    Arguments
    ----------
    log_output : Bool, default True.
        If False, output is written only to STDOUT and not to a log file.
    return_output_fname: Bool, default False.
        If True, function main() returns the name of the
        output log file. This is used for the regression tests.
    return_results_dict: Bool, default False.
        If True, function main() returns the results of the run
        packed in a dictionary. Used in testing and programmatic
        use of PyGBe
    field : Dictionary, defaults to None.
        If passed, this dictionary will supersede any config file found,
        useful in programmatically stepping through slight changes in a
        problem

    Returns
    --------
    output_fname : str, if kwarg is True.
        The name of the log file containing problem output
    """
    # Parse CLI args (argv[0] is the program name) and locate the problem's
    # .config/.param files; find_config_files() also exports the env var
    # read back just below.
    args = read_inputs(argv[1:])
    configFile, paramfile = find_config_files(args)
    full_path = os.environ.get('PYGBE_PROBLEM_FOLDER')

    #check if a custom geometry location has been specified
    #if it has, add an ENV_VAR to handle it
    if args.geometry:
        geo_path = os.path.abspath(args.geometry)
        if os.path.isdir(geo_path):
            os.environ['PYGBE_GEOMETRY'] = geo_path
        else:
            sys.exit('Invalid geometry prefix provided (Folder not found)')
    else:
        geo_path = os.path.join(full_path, 'geometry')

    #try to expand ~ if present in output path
    args.output = os.path.expanduser(args.output)
    #if output path is absolute, use that, otherwise prepend
    #problem path
    if not os.path.isdir(args.output):
        output_dir = os.path.join(full_path, args.output)
    else:
        output_dir = args.output
    # create output directory if it doesn't already exist
    try:
        os.makedirs(output_dir)
    except OSError:
        # Directory already exists -- nothing to do.
        pass

    results_dict = {}

    # Timestamped log file name, e.g. 2020-01-31-120000-output.log
    timestamp = time.localtime()
    outputfname = '{:%Y-%m-%d-%H%M%S}-output.log'.format(datetime.now())
    results_dict['output_file'] = outputfname

    if log_output:
        # Tee all subsequent prints to both the terminal and the log file.
        # NOTE(review): restore_stdout is never used below -- presumably kept
        # so callers could restore sys.stdout; confirm before removing.
        restore_stdout = sys.stdout
        sys.stdout = Logger(os.path.join(output_dir, outputfname))
    # Time stamp
    print('Run started on:')
    print('\tDate: {}/{}/{}'.format(timestamp.tm_year, timestamp.tm_mon,
                                    timestamp.tm_mday))
    print('\tTime: {}:{}:{}'.format(timestamp.tm_hour, timestamp.tm_min,
                                    timestamp.tm_sec))
    print('\tPyGBe version: {}'.format(pygbe.__version__))

    TIC = time.time()

    # Echo the resolved input locations and record them for the caller.
    print('Config file: {}'.format(configFile))
    print('Parameter file: {}'.format(paramfile))
    print('Geometry folder: {}'.format(geo_path))
    print('Running in: {}'.format(full_path))
    results_dict['config_file'] = configFile
    results_dict['param_file'] = paramfile
    results_dict['geo_file'] = geo_path
    results_dict['full_path'] = full_path

    ### Read parameters
    param = Parameters()
    precision = read_parameters(param, paramfile)

    param.Nm = (param.P + 1) * (param.P + 2) * (
        param.P + 3) // 6  # Number of terms in Taylor expansion
    param.BlocksPerTwig = int(numpy.ceil(param.NCRIT / float(param.BSZ))
                              )  # CUDA blocks that fit per twig

    # Fall back to CPU when the param file requests GPU but nvcc is absent.
    HAS_GPU = check_for_nvcc()
    if param.GPU == 1 and not HAS_GPU:
        print('\n\n\n\n')
        print('{:-^{}}'.format('No GPU DETECTED', 60))
        print("Your param file has `GPU = 1` but CUDA was not detected.\n"
              "Continuing using CPU.  If you do not want this, use Ctrl-C\n"
              "to stop the program and check that your CUDA installation\n"
              "is on your $PATH")
        print('{:-^{}}'.format('No GPU DETECTED', 60))
        print('\n\n\n\n')
        param.GPU = 0

    ### Generate array of fields
    # A `field` dict passed programmatically supersedes the config file.
    if field:
        field_array = initialize_field(configFile, param, field)
    else:
        field_array = initialize_field(configFile, param)

    ### Generate array of surfaces and read in elements
    surf_array = initialize_surface(field_array, configFile, param)

    ### Fill surface class
    time_sort = 0.
    for i in range(len(surf_array)):
        time_sort += surf_array[i].fill_surface(param)

    ### Output setup summary
    # Count elements and equations: single-equation surface types contribute
    # one unknown per element, all others contribute two.
    param.N = 0
    param.Neq = 0
    for s in surf_array:
        N_aux = len(s.triangle)
        param.N += N_aux
        if s.surf_type in ['dirichlet_surface', 'neumann_surface', 'asc_surface']:
            param.Neq += N_aux
        else:
            param.Neq += 2 * N_aux
    print('\nTotal elements : {}'.format(param.N))
    print('Total equations: {}'.format(param.Neq))

    results_dict['total_elements'] = param.N
    results_dict['N_equation'] = param.Neq

    results_dict = print_summary(surf_array, field_array, param, results_dict)

    ### Precomputation
    # Multipole index tables for the FMM tree code.
    ind0 = IndexConstant()
    computeIndices(param.P, ind0)
    precomputeTerms(param.P, ind0)

    ### Load CUDA code
    if param.GPU == 1:
        kernel = kernels(param.BSZ, param.Nm, param.K_fine, param.P, precision)
    else:
        # Placeholder so `kernel` can be passed around on the CPU path.
        kernel = 1

    ### Generate interaction list
    print('Generate interaction list')
    tic = time.time()
    generateList(surf_array, field_array, param)
    toc = time.time()
    list_time = toc - tic

    ### Transfer data to GPU
    print('Transfer data to GPU')
    tic = time.time()
    if param.GPU == 1:
        dataTransfer(surf_array, field_array, ind0, param, kernel)
    toc = time.time()
    transfer_time = toc - tic

    # Load the precomputed solution: the slicing below implies the file holds
    # phi on surface 0 in its first len(xi) entries and the interior normal
    # derivative in the next len(xi) entries.
    phi_sol = numpy.loadtxt(args.phi_file)

    phi_vals = phi_sol[:len(surf_array[0].xi)]
    dphi_int = phi_sol[len(surf_array[0].xi):2*len(surf_array[0].xi)]
    # Jump condition across the dielectric interface: E_hat scales the
    # interior derivative to the exterior one.
    dphi_ext = surf_array[0].E_hat * dphi_int

    # Per-element boundary (Maxwell-stress-like) force density along each
    # normal component -- NOTE(review): formula assumed from the code; confirm
    # against the PyGBe force derivation.
    boundary_force_x = -1/2.*(surf_array[0].Eout-surf_array[0].Ein)*dphi_ext*dphi_int*surf_array[0].normal[:,0]
    boundary_force_y = -1/2.*(surf_array[0].Eout-surf_array[0].Ein)*dphi_ext*dphi_int*surf_array[0].normal[:,1]
    boundary_force_z = -1/2.*(surf_array[0].Eout-surf_array[0].Ein)*dphi_ext*dphi_int*surf_array[0].normal[:,2]

    # Integrate the densities over the surface (sum of density * panel area).
    total_boundary_force_x = numpy.sum(boundary_force_x*surf_array[0].area)
    total_boundary_force_y = numpy.sum(boundary_force_y*surf_array[0].area)
    total_boundary_force_z = numpy.sum(boundary_force_z*surf_array[0].area)

    print("Total boundary force:")
    print(total_boundary_force_x, total_boundary_force_y, total_boundary_force_z)

    # Ionic (osmotic-pressure) contribution, proportional to kappa^2 * phi^2
    # -- NOTE(review): consistent with a linearized PB term; confirm.
    ionic_force_x = -1/2.*(surf_array[0].Eout)*phi_vals**2*surf_array[0].normal[:,0]*field_array[0].kappa**2
    ionic_force_y = -1/2.*(surf_array[0].Eout)*phi_vals**2*surf_array[0].normal[:,1]*field_array[0].kappa**2
    ionic_force_z = -1/2.*(surf_array[0].Eout)*phi_vals**2*surf_array[0].normal[:,2]*field_array[0].kappa**2

    total_ionic_force_x = numpy.sum(ionic_force_x*surf_array[0].area)
    total_ionic_force_y = numpy.sum(ionic_force_y*surf_array[0].area)
    total_ionic_force_z = numpy.sum(ionic_force_z*surf_array[0].area)

    print("Total ionic force:")
    print(total_ionic_force_x, total_ionic_force_y, total_ionic_force_z)
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main(sys.argv))
| <filename>pygbe/compute_boundary_force.py<gh_stars>10-100
"""
This function reads in a phi.txt resulting from the linear system
in a regular pygbe run, and computing the reaction field
"""
import os
import re
import sys
import time
import glob
import numpy
import pickle
import subprocess
from datetime import datetime
from argparse import ArgumentParser
# Import self made modules
import pygbe
from pygbe.gmres import gmres_mgs
from pygbe.classes import Timing, Parameters, IndexConstant
from pygbe.gpuio import dataTransfer
from pygbe.class_initialization import initialize_surface, initialize_field
from pygbe.output import print_summary
from pygbe.matrixfree import (generateRHS, generateRHS_gpu, calculate_solvation_energy,
coulomb_energy, calculate_surface_energy)
from pygbe.projection import get_dphirdr_gpu
from pygbe.util.read_data import read_parameters, read_electric_field
from pygbe.tree.FMMutils import computeIndices, precomputeTerms, generateList
try:
from pygbe.tree.cuda_kernels import kernels
except:
pass
#courtesy of http://stackoverflow.com/a/5916874
class Logger(object):
"""
Allow writing both to STDOUT on screen and sending text to file
in conjunction with the command
`sys.stdout = Logger("desired_log_file.txt")`
"""
def __init__(self, filename="Default.log"):
self.terminal = sys.stdout
self.log = open(filename, "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
"""Required for Python 3"""
pass
def read_inputs(args):
"""
Parse command-line arguments to determine which config and param files to run
Assumes that in the absence of specific command line arguments that pygbe
problem folder resembles the following structure
lys/
- lys.param
- lys.config
- built_parse.pqr
- geometry/Lys1.face
- geometry/Lys1.vert
- output/
"""
parser = ArgumentParser(description='Manage PyGBe command line arguments')
parser.add_argument('problem_folder',
type=str,
help="Path to folder containing problem files")
parser.add_argument('-c',
'--config',
dest='config',
type=str,
default=None,
help="Path to problem config file")
parser.add_argument('-p',
'--param',
dest='param',
type=str,
default=None,
help="Path to problem param file")
parser.add_argument('-o',
'--output',
dest='output',
type=str,
default='output',
help="Output folder")
parser.add_argument('-g',
'--geometry',
dest='geometry',
type=str,
help="Custom geometry folder prefix")
parser.add_argument('-x0',
'--initial_guess',
dest='initial_guess',
type=str,
help="File containing an initial guess for the linear solver")
parser.add_argument('-phi',
'--phi_sol',
dest='phi_file',
type=str,
help="File containing solution of phi for boundary force calculation")
return parser.parse_args(args)
def check_file_exists(filename):
"""Try to open the file `filename` and return True if it's valid """
return os.path.exists(filename)
def find_config_files(cliargs):
"""
Check that .config and .param files exist and can be opened.
If either file isn't found, PyGBe exits (and should print which
file was not found). Otherwise return the path to the config and
param files
Arguments
---------
cliargs: parser
parser containing cli arguments passed to PyGBe
Returns
-------
cliargs.config: string
path to config file
cliargs.param: string
path to param file
"""
prob_path = cliargs.problem_folder
full_path = os.path.abspath(prob_path)
os.environ['PYGBE_PROBLEM_FOLDER'] = full_path
if cliargs.config is None:
cliargs.config = next(glob.iglob(os.path.join(full_path, '*.config')))
else:
cliargs.config = resolve_relative_config_file(cliargs.config, full_path)
if cliargs.param is None:
cliargs.param = next(glob.iglob(os.path.join(full_path, '*.param')))
else:
cliargs.param = resolve_relative_config_file(cliargs.param, full_path)
return cliargs.config, cliargs.param
def resolve_relative_config_file(config_file, full_path):
"""
Does its level-headed best to find the config files specified by the user
Arguments
---------
config_file: str
the given path to a .param or .config file from the command line
full_path: str
the full path to the problem folder
"""
if check_file_exists(config_file):
return config_file
elif check_file_exists(os.path.abspath(config_file)):
return os.path.join(os.getcwd(), config_file)
elif check_file_exists(os.path.join(full_path, config_file)):
return os.path.join(full_path, config_file)
else:
sys.exit('Did not find expected config files\n'
'Could not find {}'.format(config_file))
def check_for_nvcc():
"""Check system PATH for nvcc, exit if not found"""
try:
subprocess.check_output(['which', 'nvcc'])
return True
except subprocess.CalledProcessError:
print(
"Could not find `nvcc` on your PATH. Is cuda installed? "
"PyGBe will continue to run but will run significantly slower. "
"For optimal performance, add `nvcc` to your PATH"
)
return False
def main(argv=sys.argv, log_output=True, return_output_fname=False,
return_results_dict=False, field=None):
"""
Run a PyGBe problem, write outputs to STDOUT and to log file in
problem directory
Arguments
----------
log_output : Bool, default True.
If False, output is written only to STDOUT and not to a log file.
return_output_fname: Bool, default False.
If True, function main() returns the name of the
output log file. This is used for the regression tests.
return_results_dict: Bool, default False.
If True, function main() returns the results of the run
packed in a dictionary. Used in testing and programmatic
use of PyGBe
field : Dictionary, defaults to None.
If passed, this dictionary will supercede any config file found, useful in
programmatically stepping through slight changes in a problem
Returns
--------
output_fname : str, if kwarg is True.
The name of the log file containing problem output
"""
args = read_inputs(argv[1:])
configFile, paramfile = find_config_files(args)
full_path = os.environ.get('PYGBE_PROBLEM_FOLDER')
#check if a custom geometry location has been specified
#if it has, add an ENV_VAR to handle it
if args.geometry:
geo_path = os.path.abspath(args.geometry)
if os.path.isdir(geo_path):
os.environ['PYGBE_GEOMETRY'] = geo_path
else:
sys.exit('Invalid geometry prefix provided (Folder not found)')
else:
geo_path = os.path.join(full_path, 'geometry')
#try to expand ~ if present in output path
args.output = os.path.expanduser(args.output)
#if output path is absolute, use that, otherwise prepend
#problem path
if not os.path.isdir(args.output):
output_dir = os.path.join(full_path, args.output)
else:
output_dir = args.output
# create output directory if it doesn't already exist
try:
os.makedirs(output_dir)
except OSError:
pass
results_dict = {}
timestamp = time.localtime()
outputfname = '{:%Y-%m-%d-%H%M%S}-output.log'.format(datetime.now())
results_dict['output_file'] = outputfname
if log_output:
restore_stdout = sys.stdout
sys.stdout = Logger(os.path.join(output_dir, outputfname))
# Time stamp
print('Run started on:')
print('\tDate: {}/{}/{}'.format(timestamp.tm_year, timestamp.tm_mon,
timestamp.tm_mday))
print('\tTime: {}:{}:{}'.format(timestamp.tm_hour, timestamp.tm_min,
timestamp.tm_sec))
print('\tPyGBe version: {}'.format(pygbe.__version__))
TIC = time.time()
print('Config file: {}'.format(configFile))
print('Parameter file: {}'.format(paramfile))
print('Geometry folder: {}'.format(geo_path))
print('Running in: {}'.format(full_path))
results_dict['config_file'] = configFile
results_dict['param_file'] = paramfile
results_dict['geo_file'] = geo_path
results_dict['full_path'] = full_path
### Read parameters
param = Parameters()
precision = read_parameters(param, paramfile)
param.Nm = (param.P + 1) * (param.P + 2) * (
param.P + 3) // 6 # Number of terms in Taylor expansion
param.BlocksPerTwig = int(numpy.ceil(param.NCRIT / float(param.BSZ))
) # CUDA blocks that fit per twig
HAS_GPU = check_for_nvcc()
if param.GPU == 1 and not HAS_GPU:
print('\n\n\n\n')
print('{:-^{}}'.format('No GPU DETECTED', 60))
print("Your param file has `GPU = 1` but CUDA was not detected.\n"
"Continuing using CPU. If you do not want this, use Ctrl-C\n"
"to stop the program and check that your CUDA installation\n"
"is on your $PATH")
print('{:-^{}}'.format('No GPU DETECTED', 60))
print('\n\n\n\n')
param.GPU = 0
### Generate array of fields
if field:
field_array = initialize_field(configFile, param, field)
else:
field_array = initialize_field(configFile, param)
### Generate array of surfaces and read in elements
surf_array = initialize_surface(field_array, configFile, param)
### Fill surface class
time_sort = 0.
for i in range(len(surf_array)):
time_sort += surf_array[i].fill_surface(param)
### Output setup summary
param.N = 0
param.Neq = 0
for s in surf_array:
N_aux = len(s.triangle)
param.N += N_aux
if s.surf_type in ['dirichlet_surface', 'neumann_surface', 'asc_surface']:
param.Neq += N_aux
else:
param.Neq += 2 * N_aux
print('\nTotal elements : {}'.format(param.N))
print('Total equations: {}'.format(param.Neq))
results_dict['total_elements'] = param.N
results_dict['N_equation'] = param.Neq
results_dict = print_summary(surf_array, field_array, param, results_dict)
### Precomputation
ind0 = IndexConstant()
computeIndices(param.P, ind0)
precomputeTerms(param.P, ind0)
### Load CUDA code
if param.GPU == 1:
kernel = kernels(param.BSZ, param.Nm, param.K_fine, param.P, precision)
else:
kernel = 1
### Generate interaction list
print('Generate interaction list')
tic = time.time()
generateList(surf_array, field_array, param)
toc = time.time()
list_time = toc - tic
### Transfer data to GPU
print('Transfer data to GPU')
tic = time.time()
if param.GPU == 1:
dataTransfer(surf_array, field_array, ind0, param, kernel)
toc = time.time()
transfer_time = toc - tic
phi_sol = numpy.loadtxt(args.phi_file)
phi_vals = phi_sol[:len(surf_array[0].xi)]
dphi_int = phi_sol[len(surf_array[0].xi):2*len(surf_array[0].xi)]
dphi_ext = surf_array[0].E_hat * dphi_int
boundary_force_x = -1/2.*(surf_array[0].Eout-surf_array[0].Ein)*dphi_ext*dphi_int*surf_array[0].normal[:,0]
boundary_force_y = -1/2.*(surf_array[0].Eout-surf_array[0].Ein)*dphi_ext*dphi_int*surf_array[0].normal[:,1]
boundary_force_z = -1/2.*(surf_array[0].Eout-surf_array[0].Ein)*dphi_ext*dphi_int*surf_array[0].normal[:,2]
total_boundary_force_x = numpy.sum(boundary_force_x*surf_array[0].area)
total_boundary_force_y = numpy.sum(boundary_force_y*surf_array[0].area)
total_boundary_force_z = numpy.sum(boundary_force_z*surf_array[0].area)
print("Total boundary force:")
print(total_boundary_force_x, total_boundary_force_y, total_boundary_force_z)
ionic_force_x = -1/2.*(surf_array[0].Eout)*phi_vals**2*surf_array[0].normal[:,0]*field_array[0].kappa**2
ionic_force_y = -1/2.*(surf_array[0].Eout)*phi_vals**2*surf_array[0].normal[:,1]*field_array[0].kappa**2
ionic_force_z = -1/2.*(surf_array[0].Eout)*phi_vals**2*surf_array[0].normal[:,2]*field_array[0].kappa**2
total_ionic_force_x = numpy.sum(ionic_force_x*surf_array[0].area)
total_ionic_force_y = numpy.sum(ionic_force_y*surf_array[0].area)
total_ionic_force_z = numpy.sum(ionic_force_z*surf_array[0].area)
print("Total ionic force:")
print(total_ionic_force_x, total_ionic_force_y, total_ionic_force_z)
if __name__ == "__main__":
sys.exit(main(sys.argv))
| en | 0.713093 | This function reads in a phi.txt resulting from the linear system in a regular pygbe run, and computing the reaction field # Import self made modules #courtesy of http://stackoverflow.com/a/5916874 Allow writing both to STDOUT on screen and sending text to file in conjunction with the command `sys.stdout = Logger("desired_log_file.txt")` Required for Python 3 Parse command-line arguments to determine which config and param files to run Assumes that in the absence of specific command line arguments that pygbe problem folder resembles the following structure lys/ - lys.param - lys.config - built_parse.pqr - geometry/Lys1.face - geometry/Lys1.vert - output/ Try to open the file `filename` and return True if it's valid Check that .config and .param files exist and can be opened. If either file isn't found, PyGBe exits (and should print which file was not found). Otherwise return the path to the config and param files Arguments --------- cliargs: parser parser containing cli arguments passed to PyGBe Returns ------- cliargs.config: string path to config file cliargs.param: string path to param file Does its level-headed best to find the config files specified by the user Arguments --------- config_file: str the given path to a .param or .config file from the command line full_path: str the full path to the problem folder Check system PATH for nvcc, exit if not found Run a PyGBe problem, write outputs to STDOUT and to log file in problem directory Arguments ---------- log_output : Bool, default True. If False, output is written only to STDOUT and not to a log file. return_output_fname: Bool, default False. If True, function main() returns the name of the output log file. This is used for the regression tests. return_results_dict: Bool, default False. If True, function main() returns the results of the run packed in a dictionary. Used in testing and programmatic use of PyGBe field : Dictionary, defaults to None. 
If passed, this dictionary will supercede any config file found, useful in programmatically stepping through slight changes in a problem Returns -------- output_fname : str, if kwarg is True. The name of the log file containing problem output #check if a custom geometry location has been specified #if it has, add an ENV_VAR to handle it #try to expand ~ if present in output path #if output path is absolute, use that, otherwise prepend #problem path # create output directory if it doesn't already exist # Time stamp ### Read parameters # Number of terms in Taylor expansion # CUDA blocks that fit per twig ### Generate array of fields ### Generate array of surfaces and read in elements ### Fill surface class ### Output setup summary ### Precomputation ### Load CUDA code ### Generate interaction list ### Transfer data to GPU | 2.851232 | 3 |
proselint/checks/misc/greylist.py | ankita240796/proselint | 4,163 | 6625164 | <reponame>ankita240796/proselint
"""Use of greylisted words.
---
layout: post
source: Strunk & White
source_url: ???
title: Use of greylisted words
date: 2014-06-10 12:31:19
categories: writing
---
Strunk & White say:
"""
import re
from proselint.tools import memoize
@memoize
def check(text):
    """Flag greylisted words and explain why each is discouraged.

    Returns a list of ``(start, end, error_code, message, replacement)``
    tuples, one per case-insensitive occurrence; ``replacement`` is always
    None (no automatic suggestion).
    """
    err = "strunk_white.greylist"
    msg = "Use of '{}'. {}"

    # Single source of truth: the keys are the greylisted words, so the word
    # list and its explanations can never drift apart (the old parallel
    # `bad_words` list risked a KeyError on mismatch).
    explanations = {
        "obviously":
            "This is obviously an inadvisable word to use.",
        "utilize":
            r"Do you know anyone who *needs* to utilize the word utilize?"
    }

    errors = []
    lowered = text.lower()  # lowercase once, not once per word
    for word, explanation in explanations.items():
        # Iterate matches lazily; no need to materialize the list first.
        for match in re.finditer(word, lowered):
            errors.append((
                match.start(),
                match.end(),
                err,
                msg.format(word, explanation),
                None))
    return errors
| """Use of greylisted words.
---
layout: post
source: Strunk & White
source_url: ???
title: Use of greylisted words
date: 2014-06-10 12:31:19
categories: writing
---
Strunk & White say:
"""
import re
from proselint.tools import memoize
@memoize
def check(text):
"""Check the text."""
err = "strunk_white.greylist"
msg = "Use of '{}'. {}"
bad_words = [
"obviously",
"utilize"
]
explanations = {
"obviously":
"This is obviously an inadvisable word to use.",
"utilize":
r"Do you know anyone who *needs* to utilize the word utilize?"
}
errors = []
for word in bad_words:
occ = [m for m in re.finditer(word, text.lower())]
for o in occ:
errors.append((
o.start(),
o.end(),
err,
msg.format(word, explanations[word]),
None))
return errors | en | 0.695155 | Use of greylisted words. --- layout: post source: Strunk & White source_url: ??? title: Use of greylisted words date: 2014-06-10 12:31:19 categories: writing --- Strunk & White say: Check the text. | 2.808532 | 3 |
youtube_dl_gui/downloaders.py | oleksis/youtube-dl-gui | 527 | 6625165 | # type: ignore[misc]
"""Python module to download videos.
This module contains the actual downloaders responsible
for downloading the video files.
"""
# -*- coding: future_annotations -*-
import os
import signal
import subprocess
from pathlib import Path
from queue import Queue
from threading import Thread
from time import sleep
from typing import IO, Any, Callable
from .utils import IS_WINDOWS, get_encoding
# noinspection PyUnresolvedReferences
class PipeReader(Thread):
    """Helper class to avoid deadlocks when reading from subprocess pipes.

    This class uses python threads and queues in order to read from subprocess
    pipes in an asynchronous way.

    Attributes:
        WAIT_TIME (float): Time in seconds to sleep.

    Args:
        queue (Queue): Python queue to store the output of the subprocess.

    Warnings:
        All the operations are based on 'str' types.

    """

    WAIT_TIME = 0.1

    def __init__(self, queue: Queue):
        super().__init__()
        # Pipe to read from; set later via attach_filedescriptor().
        self._filedescriptor: "IO[str] | None" = None
        # Loop flag for run(); cleared by join() to stop the thread.
        self._running: bool = True
        self._queue: Queue[str] = queue
        # The thread starts immediately on construction.
        self.start()

    def run(self) -> None:
        """Poll the attached pipe and push its lines onto the queue.

        Runs until join() clears self._running.  Note that ignore_line is
        reset after every line, so only the line containing
        "ffmpeg version" itself is dropped, not subsequent output.
        """
        # Flag to ignore specific lines
        ignore_line: bool = False

        while self._running:
            if self._filedescriptor is not None and not self._filedescriptor.closed:
                # read() returns whatever is available ("" at EOF).
                pipedata: str = self._filedescriptor.read()

                for line in pipedata.splitlines():
                    # Ignore ffmpeg stderr
                    if "ffmpeg version" in line:
                        ignore_line = True

                    if not ignore_line and line:
                        self._queue.put_nowait(line)

                    ignore_line = False

            # Avoid a busy loop while waiting for more output.
            sleep(self.WAIT_TIME)

    def attach_filedescriptor(self, filedesc: "IO[str] | None" = None) -> None:
        """Attach a filedescriptor to the PipeReader."""
        self._filedescriptor = filedesc

    def join(self, timeout=None) -> None:
        """Stop the polling loop, then wait for the thread to finish."""
        self._running = False
        super().join(timeout)
class YoutubeDLDownloader:
    """Python class for downloading videos using youtube-dl & subprocess.

    Attributes:
        OK, ERROR, STOPPED, ALREADY, FILESIZE_ABORT, WARNING (int): Integers
            that describe the return code from the download() method. The
            larger the number the higher is the hierarchy of the code.
            Codes with smaller hierachy cannot overwrite codes with higher
            hierarchy.

    Args:
        youtubedl_path (str): Absolute path to youtube-dl binary.

        data_hook (Callable): Optional callback function to retrieve download
            process data.

        log_data (Callable): Optional callback function to write data to
            the log file.

    Warnings:
        The caller is responsible for calling the close() method after he has
        finished with the object in order for the object to be able to properly
        close down itself.

    Example:
        How to use YoutubeDLDownloader from a python script.

            from downloaders import YoutubeDLDownloader

            def data_hook(data):
                print(data)

            downloader = YoutubeDLDownloader('/usr/bin/youtube-dl', data_hook)

            downloader.download(<URL STRING>, ['-f', 'flv'])

    """

    # Return codes, ordered by priority (see _set_returncode).
    OK = 0
    WARNING = 1
    ERROR = 2
    FILESIZE_ABORT = 3
    ALREADY = 4
    STOPPED = 5

    def __init__(
        self,
        youtubedl_path: str,
        data_hook: "Callable[[dict[str, Any]], None] | None" = None,
        log_data: "Callable[[str], None] | None" = None,
    ):
        self.youtubedl_path: str = youtubedl_path
        self.data_hook = data_hook
        self.log_data = log_data

        self._return_code: int = self.OK
        self._proc: "subprocess.Popen | None" = None

        # Background reader drains the child's stderr to avoid pipe deadlocks.
        self._stderr_queue: Queue = Queue()
        self._stderr_reader = PipeReader(self._stderr_queue)

    def download(self, url: str, options: "list[str] | None" = None) -> int:
        """Download url using given options.

        Args:
            url (str): URL string to download.
            options (list): Python list that contains youtube-dl options.

        Returns:
            An integer that shows the status of the download process.
            There are 6 different return codes.

            OK (0): The download process completed successfully.
            WARNING (1): A warning occured during the download process.
            ERROR (2): An error occured during the download process.
            FILESIZE_ABORT (3): The corresponding url video file was larger or
                smaller from the given filesize limit.
            ALREADY (4): The given url is already downloaded.
            STOPPED (5): The download process was stopped by the user.

        """
        self._return_code = self.OK

        cmd = self._get_cmd(url, options)
        self._create_process(cmd)

        if self._proc is not None:
            self._stderr_reader.attach_filedescriptor(self._proc.stderr)

        # Parse the child's stdout line by line while it is running.
        while self._proc_is_alive():
            stdout: str = ""

            if not self._proc.stdout.closed:
                try:
                    stdout = self._proc.stdout.readline().rstrip()
                except ValueError:
                    # I/O operation on closed file
                    pass

            if stdout:
                data_dict = extract_data(stdout)
                self._extract_info(data_dict)
                self._hook_data(data_dict)

        # Read stderr after download process has been completed
        # We don't need to read stderr in real time
        while not self._stderr_queue.empty():
            stderr = str(self._stderr_queue.get_nowait()).rstrip()

            self._log(stderr)

            if self._is_warning(stderr):
                self._set_returncode(self.WARNING)

        if self._proc and self._proc.returncode > 0:
            proc_return_code = self._proc.returncode
            self._log(f"Child process exited with non-zero code: {proc_return_code}")
            self._set_returncode(self.ERROR)

        # Report the final status through the data hook before returning.
        self._last_data_hook()

        return self._return_code

    def stop(self) -> None:
        """Stop the download process and set return code to STOPPED."""
        if self._proc_is_alive():
            self._proc.stdout.close()
            self._proc.stderr.close()

            try:
                if IS_WINDOWS:
                    # os.killpg is not available on Windows
                    # See: https://bugs.python.org/issue5115
                    self._proc.kill()

                    # When we kill the child process on Windows the return code
                    # gets set to 1, so we want to reset the return code back to 0
                    # in order to avoid creating logging output in the download(...)
                    # method
                    self._proc.returncode = 0
                else:
                    # TODO: Test in Unix os.killpg ?
                    os.killpg(self._proc.pid, signal.SIGKILL)  # type: ignore
            except ProcessLookupError:
                pass

            self._set_returncode(self.STOPPED)

    def close(self) -> None:
        """Destructor like function for the object."""
        self._stderr_reader.join()

    def _set_returncode(self, code) -> None:
        """Set self._return_code only if the hierarchy of the given code is
        higher than the current self._return_code."""
        if code >= self._return_code:
            self._return_code = code

    @staticmethod
    def _is_warning(stderr: str) -> bool:
        # NOTE: despite the name, this also matches lines prefixed "ERROR:";
        # both map to the WARNING return code in download().
        warning_error = str(stderr).split(":")[0]
        warning_error = warning_error.strip()
        return warning_error in ["WARNING", "ERROR"]

    def _last_data_hook(self) -> None:
        """Set the last data information based on the return code."""
        data_dictionary: "dict[str, str]" = {
            "status": "",
            "speed": "",
            "eta": "",
        }

        # Map the final return code to a human-readable status string.
        if self._return_code == self.OK:
            data_dictionary["status"] = "Finished"
        elif self._return_code == self.ERROR:
            data_dictionary["status"] = "Error"
        elif self._return_code == self.WARNING:
            data_dictionary["status"] = "Warning"
        elif self._return_code == self.STOPPED:
            data_dictionary["status"] = "Stopped"
        elif self._return_code == self.ALREADY:
            data_dictionary["status"] = "Already Downloaded"
        else:
            data_dictionary["status"] = "Filesize Abort"

        self._hook_data(data_dictionary)

    def _extract_info(self, data: "dict[str, Any]") -> None:
        """Extract informations about the download process from the given data.

        Args:
            data (dict): Python dictionary that contains different
                keys. The keys are not standar the dictionary can also be
                empty when there are no data to extract. See extract_data().

        """
        if "status" in data:
            if data["status"] == "Already Downloaded":
                # Set self._return_code to already downloaded
                # and trash that key
                self._set_returncode(self.ALREADY)
                data["status"] = None

            if data["status"] == "Filesize Abort":
                # Set self._return_code to filesize abort
                # and trash that key
                self._set_returncode(self.FILESIZE_ABORT)
                data["status"] = None

    def _log(self, data: str) -> None:
        """Log data using the callback function."""
        if self.log_data is not None:
            self.log_data(data)

    def _hook_data(self, data: "dict[str, Any]"):
        """Pass data back to the caller."""
        if self.data_hook is not None:
            self.data_hook(data)

    def _proc_is_alive(self) -> bool:
        """Returns True if self._proc is alive else False."""
        if self._proc is None:
            return False

        # poll() returns None while the child is still running.
        return self._proc.poll() is None

    def _get_cmd(self, url: str, options: "list[str] | None" = None) -> "list[str]":
        """Build the subprocess command.

        Args:
            url (str): URL string to download.
            options (list): Python list that contains youtube-dl options.

        Returns:
            Python list that contains the command to execute.

        """
        cmd_list: "list[str]" = [self.youtubedl_path]

        if options:
            cmd_list.extend(options)

        cmd_list.append(url)

        return cmd_list

    def _create_process(self, cmd: "list[str]") -> None:
        """Create new subprocess.

        Args:
            cmd (list): Python list that contains the command to execute.

        """
        info = None

        kwargs = dict(
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
            encoding=get_encoding(),
            creationflags=0,
        )

        if os.name == "nt":
            # Hide subprocess window
            info = subprocess.STARTUPINFO()
            info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            info.wShowWindow = subprocess.SW_HIDE
            # New process group so stop() can signal the child independently.
            kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
        else:
            kwargs["start_new_session"] = True

        try:
            self._proc = subprocess.Popen(cmd, startupinfo=info, **kwargs)  # type: ignore
        except (ValueError, OSError) as error:
            # Leave self._proc as None; download() treats that as a dead child.
            self._log(f"Failed to start process: {cmd}")
            self._log(str(error))
def extract_filename(input_data: str) -> "tuple[str, str, str]":
    """Split a (possibly quoted) filename into its components.

    Args:
        input_data (str): Filename, optionally wrapped in double quotes.

    Returns:
        Tuple ``(path, stem, extension)``; *path* is "" when the name has
        no directory part.
    """
    target = Path(input_data.strip('"'))

    parent = str(target.parent)
    directory: str = "" if parent == "." else parent

    return directory, target.stem, target.suffix
def extract_data(stdout: str) -> "dict[str, str]":
    """Extract data from youtube-dl stdout.

    Args:
        stdout (str): String that contains the youtube-dl stdout.

    Returns:
        Python dictionary. The returned dictionary can be empty if there are
        no data to extract else it may contain one or more of the
        following keys:

        'status'         : Contains the status of the download process.
        'path'           : Destination path.
        'extension'      : The file extension.
        'filename'       : The filename without the extension.
        'percent'        : The percentage of the video being downloaded.
        'eta'            : Estimated time for the completion of the download process.
        'speed'          : Download speed.
        'filesize'       : The size of the video file being downloaded.
        'playlist_index' : The playlist index of the current video file being downloaded.
        'playlist_size'  : The number of videos in the playlist.

    All positional token accesses are length-guarded, so malformed or
    truncated lines yield partial data instead of raising IndexError.
    """
    # noinspection PyShadowingNames
    data_dictionary: "dict[str, str]" = {}

    if not stdout:
        return data_dictionary

    # We want to keep the spaces in order to extract filenames with
    # multiple whitespaces correctly.
    stdout_list: "list[str]" = stdout.split()

    # Whitespace-only input splits into nothing; previously this crashed
    # on stdout_list[0] below.
    if not stdout_list:
        return data_dictionary

    stdout_list[0] = stdout_list[0].lstrip("\r")

    if stdout_list[0] == "[download]":
        data_dictionary["status"] = "Downloading"

        # Get path, filename & extension
        if len(stdout_list) > 2 and stdout_list[1] == "Destination:":
            path, filename, extension = extract_filename(" ".join(stdout_list[2:]))

            data_dictionary["path"] = path
            data_dictionary["filename"] = filename
            data_dictionary["extension"] = extension

        # Get progress info ("<percent>% of <size> at <speed> ETA <eta>")
        if len(stdout_list) > 1 and "%" in stdout_list[1]:
            if stdout_list[1] == "100%":
                data_dictionary["speed"] = ""
                data_dictionary["eta"] = ""
                data_dictionary["percent"] = "100%"
                if len(stdout_list) > 3:
                    data_dictionary["filesize"] = stdout_list[3]
            else:
                data_dictionary["percent"] = stdout_list[1]
                if len(stdout_list) > 7:
                    data_dictionary["filesize"] = stdout_list[3]
                    data_dictionary["speed"] = stdout_list[5]
                    data_dictionary["eta"] = stdout_list[7]

        # Get playlist info ("[download] Downloading video <i> of <n>")
        if (
            len(stdout_list) > 5
            and stdout_list[1] == "Downloading"
            and stdout_list[2] == "video"
        ):
            data_dictionary["playlist_index"] = stdout_list[3]
            data_dictionary["playlist_size"] = stdout_list[5]

        # Remove the 'and merged' part from stdout when using ffmpeg to merge the formats
        if (
            len(stdout_list) > 2
            and stdout_list[-3] == "downloaded"
            and stdout_list[-1] == "merged"
        ):
            stdout_list = stdout_list[:-2]
            data_dictionary["percent"] = "100%"

        # Get file already downloaded status
        if stdout_list[-1] == "downloaded":
            data_dictionary["status"] = "Already Downloaded"
            path, filename, extension = extract_filename(" ".join(stdout_list[1:-4]))

            data_dictionary["path"] = path
            data_dictionary["filename"] = filename
            data_dictionary["extension"] = extension

        # Get filesize abort status
        if stdout_list[-1] == "Aborting.":
            data_dictionary["status"] = "Filesize Abort"

    elif stdout_list[0] == "[hlsnative]":
        # native hls extractor
        # see: https://github.com/rg3/youtube-dl/blob/master/youtube_dl/downloader/hls.py#L54
        data_dictionary["status"] = "Downloading"

        if len(stdout_list) == 7:
            segment_no = float(stdout_list[6])
            current_segment = float(stdout_list[4])

            # Get the percentage
            percent = f"{current_segment / segment_no * 100:.1f}%"
            data_dictionary["percent"] = percent

    elif stdout_list[0] == "[ffmpeg]":
        data_dictionary["status"] = "Post Processing"

        if len(stdout_list) > 1:
            # Get final extension after merging process
            if stdout_list[1] == "Merging":
                path, filename, extension = extract_filename(" ".join(stdout_list[4:]))

                data_dictionary["path"] = path
                data_dictionary["filename"] = filename
                data_dictionary["extension"] = extension

            # Get final extension ffmpeg post process simple (not file merge)
            if stdout_list[1] == "Destination:":
                path, filename, extension = extract_filename(" ".join(stdout_list[2:]))

                data_dictionary["path"] = path
                data_dictionary["filename"] = filename
                data_dictionary["extension"] = extension

            # Get final extension after recoding process
            if stdout_list[1] == "Converting":
                path, filename, extension = extract_filename(" ".join(stdout_list[8:]))

                data_dictionary["path"] = path
                data_dictionary["filename"] = filename
                data_dictionary["extension"] = extension

    elif stdout_list[0][0] == "[" and stdout_list[0] != "[debug]":
        data_dictionary["status"] = "Pre Processing"

    return data_dictionary
| # type: ignore[misc]
"""Python module to download videos.
This module contains the actual downloaders responsible
for downloading the video files.
"""
# -*- coding: future_annotations -*-
import os
import signal
import subprocess
from pathlib import Path
from queue import Queue
from threading import Thread
from time import sleep
from typing import IO, Any, Callable
from .utils import IS_WINDOWS, get_encoding
# noinspection PyUnresolvedReferences
class PipeReader(Thread):
"""Helper class to avoid deadlocks when reading from subprocess pipes.
This class uses python threads and queues in order to read from subprocess
pipes in an asynchronous way.
Attributes:
WAIT_TIME (float): Time in seconds to sleep.
Args:
queue (Queue): Python queue to store the output of the subprocess.
Warnings:
All the operations are based on 'str' types.
"""
WAIT_TIME = 0.1
def __init__(self, queue: Queue):
super().__init__()
self._filedescriptor: "IO[str] | None" = None
self._running: bool = True
self._queue: Queue[str] = queue
self.start()
def run(self) -> None:
# Flag to ignore specific lines
ignore_line: bool = False
while self._running:
if self._filedescriptor is not None and not self._filedescriptor.closed:
pipedata: str = self._filedescriptor.read()
for line in pipedata.splitlines():
# Ignore ffmpeg stderr
if "ffmpeg version" in line:
ignore_line = True
if not ignore_line and line:
self._queue.put_nowait(line)
ignore_line = False
sleep(self.WAIT_TIME)
def attach_filedescriptor(self, filedesc: "IO[str] | None" = None) -> None:
"""Attach a filedescriptor to the PipeReader."""
self._filedescriptor = filedesc
def join(self, timeout=None) -> None:
self._running = False
super().join(timeout)
class YoutubeDLDownloader:
"""Python class for downloading videos using youtube-dl & subprocess.
Attributes:
OK, ERROR, STOPPED, ALREADY, FILESIZE_ABORT, WARNING (int): Integers
that describe the return code from the download() method. The
larger the number the higher is the hierarchy of the code.
Codes with smaller hierachy cannot overwrite codes with higher
hierarchy.
Args:
youtubedl_path (str): Absolute path to youtube-dl binary.
data_hook (Callable): Optional callback function to retrieve download
process data.
log_data (Callable): Optional callback function to write data to
the log file.
Warnings:
The caller is responsible for calling the close() method after he has
finished with the object in order for the object to be able to properly
close down itself.
Example:
How to use YoutubeDLDownloader from a python script.
from downloaders import YoutubeDLDownloader
def data_hook(data):
print(data)
downloader = YoutubeDLDownloader('/usr/bin/youtube-dl', data_hook)
downloader.download(<URL STRING>, ['-f', 'flv'])
"""
OK = 0
WARNING = 1
ERROR = 2
FILESIZE_ABORT = 3
ALREADY = 4
STOPPED = 5
def __init__(
self,
youtubedl_path: str,
data_hook: "Callable[[dict[str, Any]], None] | None" = None,
log_data: "Callable[[str], None] | None" = None,
):
self.youtubedl_path: str = youtubedl_path
self.data_hook = data_hook
self.log_data = log_data
self._return_code: int = self.OK
self._proc: "subprocess.Popen | None" = None
self._stderr_queue: Queue = Queue()
self._stderr_reader = PipeReader(self._stderr_queue)
def download(self, url: str, options: "list[str] | None" = None) -> int:
"""Download url using given options.
Args:
url (str): URL string to download.
options (list): Python list that contains youtube-dl options.
Returns:
An integer that shows the status of the download process.
There are 6 different return codes.
OK (0): The download process completed successfully.
WARNING (1): A warning occured during the download process.
ERROR (2): An error occured during the download process.
FILESIZE_ABORT (3): The corresponding url video file was larger or
smaller from the given filesize limit.
ALREADY (4): The given url is already downloaded.
STOPPED (5): The download process was stopped by the user.
"""
self._return_code = self.OK
cmd = self._get_cmd(url, options)
self._create_process(cmd)
if self._proc is not None:
self._stderr_reader.attach_filedescriptor(self._proc.stderr)
while self._proc_is_alive():
stdout: str = ""
if not self._proc.stdout.closed:
try:
stdout = self._proc.stdout.readline().rstrip()
except ValueError:
# I/O operation on closed file
pass
if stdout:
data_dict = extract_data(stdout)
self._extract_info(data_dict)
self._hook_data(data_dict)
# Read stderr after download process has been completed
# We don't need to read stderr in real time
while not self._stderr_queue.empty():
stderr = str(self._stderr_queue.get_nowait()).rstrip()
self._log(stderr)
if self._is_warning(stderr):
self._set_returncode(self.WARNING)
if self._proc and self._proc.returncode > 0:
proc_return_code = self._proc.returncode
self._log(f"Child process exited with non-zero code: {proc_return_code}")
self._set_returncode(self.ERROR)
self._last_data_hook()
return self._return_code
def stop(self) -> None:
"""Stop the download process and set return code to STOPPED."""
if self._proc_is_alive():
self._proc.stdout.close()
self._proc.stderr.close()
try:
if IS_WINDOWS:
# os.killpg is not available on Windows
# See: https://bugs.python.org/issue5115
self._proc.kill()
# When we kill the child process on Windows the return code
# gets set to 1, so we want to reset the return code back to 0
# in order to avoid creating logging output in the download(...)
# method
self._proc.returncode = 0
else:
# TODO: Test in Unix os.killpg ?
os.killpg(self._proc.pid, signal.SIGKILL) # type: ignore
except ProcessLookupError:
pass
self._set_returncode(self.STOPPED)
def close(self) -> None:
"""Destructor like function for the object."""
self._stderr_reader.join()
def _set_returncode(self, code) -> None:
"""Set self._return_code only if the hierarchy of the given code is
higher than the current self._return_code."""
if code >= self._return_code:
self._return_code = code
@staticmethod
def _is_warning(stderr: str) -> bool:
warning_error = str(stderr).split(":")[0]
warning_error = warning_error.strip()
return warning_error in ["WARNING", "ERROR"]
def _last_data_hook(self) -> None:
"""Set the last data information based on the return code."""
data_dictionary: "dict[str, str]" = {
"status": "",
"speed": "",
"eta": "",
}
if self._return_code == self.OK:
data_dictionary["status"] = "Finished"
elif self._return_code == self.ERROR:
data_dictionary["status"] = "Error"
elif self._return_code == self.WARNING:
data_dictionary["status"] = "Warning"
elif self._return_code == self.STOPPED:
data_dictionary["status"] = "Stopped"
elif self._return_code == self.ALREADY:
data_dictionary["status"] = "Already Downloaded"
else:
data_dictionary["status"] = "Filesize Abort"
self._hook_data(data_dictionary)
def _extract_info(self, data: "dict[str, Any]") -> None:
"""Extract informations about the download process from the given data.
Args:
data (dict): Python dictionary that contains different
keys. The keys are not standar the dictionary can also be
empty when there are no data to extract. See extract_data().
"""
if "status" in data:
if data["status"] == "Already Downloaded":
# Set self._return_code to already downloaded
# and trash that key
self._set_returncode(self.ALREADY)
data["status"] = None
if data["status"] == "Filesize Abort":
# Set self._return_code to filesize abort
# and trash that key
self._set_returncode(self.FILESIZE_ABORT)
data["status"] = None
def _log(self, data: str) -> None:
"""Log data using the callback function."""
if self.log_data is not None:
self.log_data(data)
def _hook_data(self, data: "dict[str, Any]"):
"""Pass data back to the caller."""
if self.data_hook is not None:
self.data_hook(data)
def _proc_is_alive(self) -> bool:
"""Returns True if self._proc is alive else False."""
if self._proc is None:
return False
return self._proc.poll() is None
def _get_cmd(self, url: str, options: "list[str] | None" = None) -> "list[str]":
"""Build the subprocess command.
Args:
url (str): URL string to download.
options (list): Python list that contains youtube-dl options.
Returns:
Python list that contains the command to execute.
"""
cmd_list: "list[str]" = [self.youtubedl_path]
if options:
cmd_list.extend(options)
cmd_list.append(url)
return cmd_list
def _create_process(self, cmd: "list[str]") -> None:
"""Create new subprocess.
Args:
cmd (list): Python list that contains the command to execute.
"""
info = None
kwargs = dict(
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
encoding=get_encoding(),
creationflags=0,
)
if os.name == "nt":
# Hide subprocess window
info = subprocess.STARTUPINFO()
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = subprocess.SW_HIDE
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
else:
kwargs["start_new_session"] = True
try:
self._proc = subprocess.Popen(cmd, startupinfo=info, **kwargs) # type: ignore
except (ValueError, OSError) as error:
self._log(f"Failed to start process: {cmd}")
self._log(str(error))
def extract_filename(input_data: str) -> "tuple[str, str, str]":
"""Extract the component of the filename
Args:
input_data (str): Filename with extension
Returns:
Python tuple with path, filename and extension
"""
_filename = Path(input_data.strip('"'))
path: str = str(_filename.parent) if str(_filename.parent) != "." else ""
filename: str = _filename.stem
extension: str = _filename.suffix
return path, filename, extension
def extract_data(stdout: str) -> "dict[str, str]":
"""Extract data from youtube-dl stdout.
Args:
stdout (str): String that contains the youtube-dl stdout.
Returns:
Python dictionary. The returned dictionary can be empty if there are
no data to extract else it may contain one or more of the
following keys:
'status' : Contains the status of the download process.
'path' : Destination path.
'extension' : The file extension.
'filename' : The filename without the extension.
'percent' : The percentage of the video being downloaded.
'eta' : Estimated time for the completion of the download process.
'speed' : Download speed.
'filesize' : The size of the video file being downloaded.
'playlist_index' : The playlist index of the current video file being downloaded.
'playlist_size' : The number of videos in the playlist.
"""
# REFACTOR
# noinspection PyShadowingNames
data_dictionary: "dict[str, str]" = {}
if not stdout:
return data_dictionary
# We want to keep the spaces in order to extract filenames with
# multiple whitespaces correctly.
stdout_list: "list[str]" = stdout.split()
stdout_list[0] = stdout_list[0].lstrip("\r")
if stdout_list[0] == "[download]":
data_dictionary["status"] = "Downloading"
# Get path, filename & extension
if stdout_list[1] == "Destination:":
path, filename, extension = extract_filename(" ".join(stdout_list[2:]))
data_dictionary["path"] = path
data_dictionary["filename"] = filename
data_dictionary["extension"] = extension
# Get progress info
if "%" in stdout_list[1]:
if stdout_list[1] == "100%":
data_dictionary["speed"] = ""
data_dictionary["eta"] = ""
data_dictionary["percent"] = "100%"
data_dictionary["filesize"] = stdout_list[3]
else:
data_dictionary["percent"] = stdout_list[1]
data_dictionary["filesize"] = stdout_list[3]
data_dictionary["speed"] = stdout_list[5]
data_dictionary["eta"] = stdout_list[7]
# Get playlist info
if stdout_list[1] == "Downloading" and stdout_list[2] == "video":
data_dictionary["playlist_index"] = stdout_list[3]
data_dictionary["playlist_size"] = stdout_list[5]
# Remove the 'and merged' part from stdout when using ffmpeg to merge the formats
if stdout_list[-3] == "downloaded" and stdout_list[-1] == "merged":
stdout_list = stdout_list[:-2]
data_dictionary["percent"] = "100%"
# Get file already downloaded status
if stdout_list[-1] == "downloaded":
data_dictionary["status"] = "Already Downloaded"
path, filename, extension = extract_filename(" ".join(stdout_list[1:-4]))
data_dictionary["path"] = path
data_dictionary["filename"] = filename
data_dictionary["extension"] = extension
# Get filesize abort status
if stdout_list[-1] == "Aborting.":
data_dictionary["status"] = "Filesize Abort"
elif stdout_list[0] == "[hlsnative]":
# native hls extractor
# see: https://github.com/rg3/youtube-dl/blob/master/youtube_dl/downloader/hls.py#L54
data_dictionary["status"] = "Downloading"
if len(stdout_list) == 7:
segment_no = float(stdout_list[6])
current_segment = float(stdout_list[4])
# Get the percentage
percent = f"{current_segment / segment_no * 100:.1f}%"
data_dictionary["percent"] = percent
elif stdout_list[0] == "[ffmpeg]":
data_dictionary["status"] = "Post Processing"
# Get final extension after merging process
if stdout_list[1] == "Merging":
path, filename, extension = extract_filename(" ".join(stdout_list[4:]))
data_dictionary["path"] = path
data_dictionary["filename"] = filename
data_dictionary["extension"] = extension
# Get final extension ffmpeg post process simple (not file merge)
if stdout_list[1] == "Destination:":
path, filename, extension = extract_filename(" ".join(stdout_list[2:]))
data_dictionary["path"] = path
data_dictionary["filename"] = filename
data_dictionary["extension"] = extension
# Get final extension after recoding process
if stdout_list[1] == "Converting":
path, filename, extension = extract_filename(" ".join(stdout_list[8:]))
data_dictionary["path"] = path
data_dictionary["filename"] = filename
data_dictionary["extension"] = extension
elif stdout_list[0][0] == "[" and stdout_list[0] != "[debug]":
data_dictionary["status"] = "Pre Processing"
return data_dictionary
| en | 0.748058 | # type: ignore[misc] Python module to download videos. This module contains the actual downloaders responsible for downloading the video files. # -*- coding: future_annotations -*- # noinspection PyUnresolvedReferences Helper class to avoid deadlocks when reading from subprocess pipes. This class uses python threads and queues in order to read from subprocess pipes in an asynchronous way. Attributes: WAIT_TIME (float): Time in seconds to sleep. Args: queue (Queue): Python queue to store the output of the subprocess. Warnings: All the operations are based on 'str' types. # Flag to ignore specific lines # Ignore ffmpeg stderr Attach a filedescriptor to the PipeReader. Python class for downloading videos using youtube-dl & subprocess. Attributes: OK, ERROR, STOPPED, ALREADY, FILESIZE_ABORT, WARNING (int): Integers that describe the return code from the download() method. The larger the number the higher is the hierarchy of the code. Codes with smaller hierachy cannot overwrite codes with higher hierarchy. Args: youtubedl_path (str): Absolute path to youtube-dl binary. data_hook (Callable): Optional callback function to retrieve download process data. log_data (Callable): Optional callback function to write data to the log file. Warnings: The caller is responsible for calling the close() method after he has finished with the object in order for the object to be able to properly close down itself. Example: How to use YoutubeDLDownloader from a python script. from downloaders import YoutubeDLDownloader def data_hook(data): print(data) downloader = YoutubeDLDownloader('/usr/bin/youtube-dl', data_hook) downloader.download(<URL STRING>, ['-f', 'flv']) Download url using given options. Args: url (str): URL string to download. options (list): Python list that contains youtube-dl options. Returns: An integer that shows the status of the download process. There are 6 different return codes. OK (0): The download process completed successfully. 
WARNING (1): A warning occured during the download process. ERROR (2): An error occured during the download process. FILESIZE_ABORT (3): The corresponding url video file was larger or smaller from the given filesize limit. ALREADY (4): The given url is already downloaded. STOPPED (5): The download process was stopped by the user. # I/O operation on closed file # Read stderr after download process has been completed # We don't need to read stderr in real time Stop the download process and set return code to STOPPED. # os.killpg is not available on Windows # See: https://bugs.python.org/issue5115 # When we kill the child process on Windows the return code # gets set to 1, so we want to reset the return code back to 0 # in order to avoid creating logging output in the download(...) # method # TODO: Test in Unix os.killpg ? # type: ignore Destructor like function for the object. Set self._return_code only if the hierarchy of the given code is higher than the current self._return_code. Set the last data information based on the return code. Extract informations about the download process from the given data. Args: data (dict): Python dictionary that contains different keys. The keys are not standar the dictionary can also be empty when there are no data to extract. See extract_data(). # Set self._return_code to already downloaded # and trash that key # Set self._return_code to filesize abort # and trash that key Log data using the callback function. Pass data back to the caller. Returns True if self._proc is alive else False. Build the subprocess command. Args: url (str): URL string to download. options (list): Python list that contains youtube-dl options. Returns: Python list that contains the command to execute. Create new subprocess. Args: cmd (list): Python list that contains the command to execute. 
# Hide subprocess window # type: ignore Extract the component of the filename Args: input_data (str): Filename with extension Returns: Python tuple with path, filename and extension Extract data from youtube-dl stdout. Args: stdout (str): String that contains the youtube-dl stdout. Returns: Python dictionary. The returned dictionary can be empty if there are no data to extract else it may contain one or more of the following keys: 'status' : Contains the status of the download process. 'path' : Destination path. 'extension' : The file extension. 'filename' : The filename without the extension. 'percent' : The percentage of the video being downloaded. 'eta' : Estimated time for the completion of the download process. 'speed' : Download speed. 'filesize' : The size of the video file being downloaded. 'playlist_index' : The playlist index of the current video file being downloaded. 'playlist_size' : The number of videos in the playlist. # REFACTOR # noinspection PyShadowingNames # We want to keep the spaces in order to extract filenames with # multiple whitespaces correctly. # Get path, filename & extension # Get progress info # Get playlist info # Remove the 'and merged' part from stdout when using ffmpeg to merge the formats # Get file already downloaded status # Get filesize abort status # native hls extractor # see: https://github.com/rg3/youtube-dl/blob/master/youtube_dl/downloader/hls.py#L54 # Get the percentage # Get final extension after merging process # Get final extension ffmpeg post process simple (not file merge) # Get final extension after recoding process | 2.734824 | 3 |
Assignments/Assignment 2/DS_Assignment2_201911189/A4-4.py | h0han/SE274_2020_spring | 0 | 6625166 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
#A4-4(1 point)
from positional_list import PositionalList
class personal_PL_4(PositionalList):
    """PositionalList extended with a max() helper over numeric elements."""
    def max(self):
        """Return the largest element of the list.

        Raises:
            TypeError: if any element is not an int or a float.
            ValueError: if the list is empty (mirrors builtin max()).
        """
        # BUG FIX: the original tested `type(i) != int or type(i) != float`,
        # which is true for EVERY element (nothing is both int and float),
        # so TypeError was raised unconditionally.  It also started the
        # running maximum at 0, which is wrong for all-negative lists.
        best = None
        for item in self:
            if not isinstance(item, (int, float)):
                raise TypeError('Check your element type')
            if best is None or item > best:
                best = item
        if best is None:
            raise ValueError('max() of an empty list')
        return best
| #!/usr/bin/env python
# coding: utf-8
# In[ ]:
#A4-4(1 point)
from positional_list import PositionalList
class personal_PL_4(PositionalList):
    """PositionalList extended with a max() helper over numeric elements."""
    def max(self):
        """Return the largest element of the list.

        Raises:
            TypeError: if any element is not an int or a float.
            ValueError: if the list is empty (mirrors builtin max()).
        """
        # BUG FIX: the original tested `type(i) != int or type(i) != float`,
        # which is true for EVERY element (nothing is both int and float),
        # so TypeError was raised unconditionally.  It also started the
        # running maximum at 0, which is wrong for all-negative lists.
        best = None
        for item in self:
            if not isinstance(item, (int, float)):
                raise TypeError('Check your element type')
            if best is None or item > best:
                best = item
        if best is None:
            raise ValueError('max() of an empty list')
        return best
| en | 0.48048 | #!/usr/bin/env python # coding: utf-8 # In[ ]: #A4-4(1 point) | 3.250236 | 3 |
setup.py | jwrichardson/dtt2hdf | 0 | 6625167 | <reponame>jwrichardson/dtt2hdf
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import os
import sys
from distutils.sysconfig import get_python_lib
import setup_helper
from setuptools import find_packages, setup
# Package version; setup_helper cross-checks it against the source tree
# and supplies the distutils command classes to use.
version = '1.0.1'
cmdclass = setup_helper.version_checker(version, 'dtt2hdf')
# Trove classifiers: Apache-2.0 licensed, OS independent, Python 2.7 / 3.4+.
classifiers = [
    'License :: OSI Approved :: Apache Software License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Topic :: Software Development :: Libraries :: Python Modules',
]
setup(
    name='dtt2hdf',
    version=version,
    url='',
    author='<NAME>',
    author_email='<EMAIL>',
    description=(
        'Extract data from LIGO Diagnostics test tools XML format.'
    ),
    license='Apache v2',
    packages=find_packages(exclude=['doc']),
    entry_points={
        'console_scripts': [
            'dtt2hdf=dtt2hdf.dtt2hdf:main',
        ]},
    install_requires=[
        'declarative[hdf]',
    ],
    cmdclass=cmdclass,
    extras_require={},
    zip_safe=False,
    keywords='LIGO diagnostics file reader',
    classifiers=classifiers,
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import os
import sys
from distutils.sysconfig import get_python_lib
import setup_helper
from setuptools import find_packages, setup
# Package version; setup_helper cross-checks it against the source tree
# and supplies the distutils command classes to use.
version = '1.0.1'
cmdclass = setup_helper.version_checker(version, 'dtt2hdf')
# Trove classifiers: Apache-2.0 licensed, OS independent, Python 2.7 / 3.4+.
classifiers = [
    'License :: OSI Approved :: Apache Software License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Topic :: Software Development :: Libraries :: Python Modules',
]
setup(
    name='dtt2hdf',
    version=version,
    url='',
    author='<NAME>',
    author_email='<EMAIL>',
    description=(
        'Extract data from LIGO Diagnostics test tools XML format.'
    ),
    license='Apache v2',
    packages=find_packages(exclude=['doc']),
    entry_points={
        'console_scripts': [
            'dtt2hdf=dtt2hdf.dtt2hdf:main',
        ]},
    install_requires=[
        'declarative[hdf]',
    ],
    cmdclass=cmdclass,
    extras_require={},
    zip_safe=False,
    keywords='LIGO diagnostics file reader',
    classifiers=classifiers,
)
examples/lof_example.py | marcelflygare/pyod | 2 | 6625168 | <filename>examples/lof_example.py
# -*- coding: utf-8 -*-
"""Example of using LOF for outlier detection
"""
# Author: <NAME> <<EMAIL>>
# License: BSD 2 clause
from __future__ import division
from __future__ import print_function
import os
import sys
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname("__file__"), '..')))
import matplotlib.pyplot as plt
from pyod.models.lof import LOF
from pyod.utils.data import generate_data
from pyod.utils.data import get_outliers_inliers
from pyod.utils.data import check_consistent_shape
from pyod.utils.data import evaluate_print
def visualize(clf_name, X_train, y_train, X_test, y_test, y_train_pred,
              y_test_pred, show_figure=True, save_figure=False):
    """Utility function for visualizing the results in examples.
    Internal use only.  Draws a 2x2 grid: train/test ground truth on the
    left column vs. train/test predictions on the right column.
    Parameters
    ----------
    clf_name : str
        The name of the detector.
    X_train : numpy array of shape (n_samples, n_features)
        The training samples.
    y_train : list or array of shape (n_samples,)
        The ground truth of training samples.
    X_test : numpy array of shape (n_samples, n_features)
        The test samples.
    y_test : list or array of shape (n_samples,)
        The ground truth of test samples.
    y_train_pred : numpy array of shape (n_samples,)
        The predicted binary labels of the training samples.
    y_test_pred : numpy array of shape (n_samples,)
        The predicted binary labels of the test samples.
    show_figure : bool, optional (default=True)
        If set to True, show the figure.
    save_figure : bool, optional (default=False)
        If set to True, save the figure to the local.
    """
    def _add_sub_plot(X_inliers, X_outliers, sub_plot_title,
                      inlier_color='blue', outlier_color='orange'):
        """Internal method to add subplot of inliers and outliers.
        Parameters
        ----------
        X_inliers : numpy array of shape (n_samples, n_features)
            Inliers.
        X_outliers : numpy array of shape (n_samples, n_features)
            Outliers.
        sub_plot_title : str
            Subplot title.
        inlier_color : str, optional (default='blue')
            The color of inliers.
        outlier_color : str, optional (default='orange')
            The color of outliers.
        """
        plt.axis("equal")
        plt.scatter(X_inliers[:, 0], X_inliers[:, 1], label='inliers',
                    color=inlier_color, s=40)
        plt.scatter(X_outliers[:, 0], X_outliers[:, 1],
                    label='outliers', color=outlier_color, s=50, marker='^')
        plt.title(sub_plot_title, fontsize=15)
        plt.xticks([])
        plt.yticks([])
        plt.legend(loc=3, prop={'size': 10})
        return
    # check input data shapes are consistent
    X_train, y_train, X_test, y_test, y_train_pred, y_test_pred = \
        check_consistent_shape(X_train, y_train, X_test, y_test, y_train_pred,
                               y_test_pred)
    # scatter plots only make sense for 2-d feature spaces
    if X_train.shape[1] != 2:
        raise ValueError("Input data has to be 2-d for visualization. The "
                         "input data has {shape}.".format(shape=X_train.shape))
    # split each data set into inliers/outliers, once by ground truth and
    # once by the detector's predictions
    X_train_outliers, X_train_inliers = get_outliers_inliers(X_train, y_train)
    X_train_outliers_pred, X_train_inliers_pred = get_outliers_inliers(
        X_train, y_train_pred)
    X_test_outliers, X_test_inliers = get_outliers_inliers(X_test, y_test)
    X_test_outliers_pred, X_test_inliers_pred = get_outliers_inliers(
        X_test, y_test_pred)
    # plot ground truth vs. predicted results
    fig = plt.figure(figsize=(12, 10))
    plt.suptitle("Demo of {clf_name} Detector".format(clf_name=clf_name),
                 fontsize=15)
    fig.add_subplot(221)
    _add_sub_plot(X_train_inliers, X_train_outliers, 'Train Set Ground Truth',
                  inlier_color='blue', outlier_color='orange')
    fig.add_subplot(222)
    _add_sub_plot(X_train_inliers_pred, X_train_outliers_pred,
                  'Train Set Prediction', inlier_color='blue',
                  outlier_color='orange')
    fig.add_subplot(223)
    _add_sub_plot(X_test_inliers, X_test_outliers, 'Test Set Ground Truth',
                  inlier_color='green', outlier_color='red')
    fig.add_subplot(224)
    _add_sub_plot(X_test_inliers_pred, X_test_outliers_pred,
                  'Test Set Prediction', inlier_color='green',
                  outlier_color='red')
    if save_figure:
        plt.savefig('{clf_name}.png'.format(clf_name=clf_name), dpi=300)
    if show_figure:
        plt.show()
    return
if __name__ == "__main__":
contamination = 0.1 # percentage of outliers
n_train = 200 # number of training points
n_test = 100 # number of testing points
# Generate sample data
X_train, y_train, X_test, y_test = \
generate_data(n_train=n_train,
n_test=n_test,
n_features=2,
contamination=contamination,
random_state=42)
# train LOF detector
clf_name = 'LOF'
clf = LOF()
clf.fit(X_train)
# get the prediction labels and outlier scores of the training data
y_train_pred = clf.labels_ # binary labels (0: inliers, 1: outliers)
y_train_scores = clf.decision_scores_ # raw outlier scores
# get the prediction on the test data
y_test_pred = clf.predict(X_test) # outlier labels (0 or 1)
y_test_scores = clf.decision_function(X_test) # outlier scores
# evaluate and print the results
print("\nOn Training Data:")
evaluate_print(clf_name, y_train, y_train_scores)
print("\nOn Test Data:")
evaluate_print(clf_name, y_test, y_test_scores)
# visualize the results
visualize(clf_name, X_train, y_train, X_test, y_test, y_train_pred,
y_test_pred, show_figure=True, save_figure=False)
| <filename>examples/lof_example.py
# -*- coding: utf-8 -*-
"""Example of using LOF for outlier detection
"""
# Author: <NAME> <<EMAIL>>
# License: BSD 2 clause
from __future__ import division
from __future__ import print_function
import os
import sys
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname("__file__"), '..')))
import matplotlib.pyplot as plt
from pyod.models.lof import LOF
from pyod.utils.data import generate_data
from pyod.utils.data import get_outliers_inliers
from pyod.utils.data import check_consistent_shape
from pyod.utils.data import evaluate_print
def visualize(clf_name, X_train, y_train, X_test, y_test, y_train_pred,
              y_test_pred, show_figure=True, save_figure=False):
    """Utility function for visualizing the results in examples.
    Internal use only.  Draws a 2x2 grid: train/test ground truth on the
    left column vs. train/test predictions on the right column.
    Parameters
    ----------
    clf_name : str
        The name of the detector.
    X_train : numpy array of shape (n_samples, n_features)
        The training samples.
    y_train : list or array of shape (n_samples,)
        The ground truth of training samples.
    X_test : numpy array of shape (n_samples, n_features)
        The test samples.
    y_test : list or array of shape (n_samples,)
        The ground truth of test samples.
    y_train_pred : numpy array of shape (n_samples,)
        The predicted binary labels of the training samples.
    y_test_pred : numpy array of shape (n_samples,)
        The predicted binary labels of the test samples.
    show_figure : bool, optional (default=True)
        If set to True, show the figure.
    save_figure : bool, optional (default=False)
        If set to True, save the figure to the local.
    """
    def _add_sub_plot(X_inliers, X_outliers, sub_plot_title,
                      inlier_color='blue', outlier_color='orange'):
        """Internal method to add subplot of inliers and outliers.
        Parameters
        ----------
        X_inliers : numpy array of shape (n_samples, n_features)
            Inliers.
        X_outliers : numpy array of shape (n_samples, n_features)
            Outliers.
        sub_plot_title : str
            Subplot title.
        inlier_color : str, optional (default='blue')
            The color of inliers.
        outlier_color : str, optional (default='orange')
            The color of outliers.
        """
        plt.axis("equal")
        plt.scatter(X_inliers[:, 0], X_inliers[:, 1], label='inliers',
                    color=inlier_color, s=40)
        plt.scatter(X_outliers[:, 0], X_outliers[:, 1],
                    label='outliers', color=outlier_color, s=50, marker='^')
        plt.title(sub_plot_title, fontsize=15)
        plt.xticks([])
        plt.yticks([])
        plt.legend(loc=3, prop={'size': 10})
        return
    # check input data shapes are consistent
    X_train, y_train, X_test, y_test, y_train_pred, y_test_pred = \
        check_consistent_shape(X_train, y_train, X_test, y_test, y_train_pred,
                               y_test_pred)
    # scatter plots only make sense for 2-d feature spaces
    if X_train.shape[1] != 2:
        raise ValueError("Input data has to be 2-d for visualization. The "
                         "input data has {shape}.".format(shape=X_train.shape))
    # split each data set into inliers/outliers, once by ground truth and
    # once by the detector's predictions
    X_train_outliers, X_train_inliers = get_outliers_inliers(X_train, y_train)
    X_train_outliers_pred, X_train_inliers_pred = get_outliers_inliers(
        X_train, y_train_pred)
    X_test_outliers, X_test_inliers = get_outliers_inliers(X_test, y_test)
    X_test_outliers_pred, X_test_inliers_pred = get_outliers_inliers(
        X_test, y_test_pred)
    # plot ground truth vs. predicted results
    fig = plt.figure(figsize=(12, 10))
    plt.suptitle("Demo of {clf_name} Detector".format(clf_name=clf_name),
                 fontsize=15)
    fig.add_subplot(221)
    _add_sub_plot(X_train_inliers, X_train_outliers, 'Train Set Ground Truth',
                  inlier_color='blue', outlier_color='orange')
    fig.add_subplot(222)
    _add_sub_plot(X_train_inliers_pred, X_train_outliers_pred,
                  'Train Set Prediction', inlier_color='blue',
                  outlier_color='orange')
    fig.add_subplot(223)
    _add_sub_plot(X_test_inliers, X_test_outliers, 'Test Set Ground Truth',
                  inlier_color='green', outlier_color='red')
    fig.add_subplot(224)
    _add_sub_plot(X_test_inliers_pred, X_test_outliers_pred,
                  'Test Set Prediction', inlier_color='green',
                  outlier_color='red')
    if save_figure:
        plt.savefig('{clf_name}.png'.format(clf_name=clf_name), dpi=300)
    if show_figure:
        plt.show()
    return
if __name__ == "__main__":
contamination = 0.1 # percentage of outliers
n_train = 200 # number of training points
n_test = 100 # number of testing points
# Generate sample data
X_train, y_train, X_test, y_test = \
generate_data(n_train=n_train,
n_test=n_test,
n_features=2,
contamination=contamination,
random_state=42)
# train LOF detector
clf_name = 'LOF'
clf = LOF()
clf.fit(X_train)
# get the prediction labels and outlier scores of the training data
y_train_pred = clf.labels_ # binary labels (0: inliers, 1: outliers)
y_train_scores = clf.decision_scores_ # raw outlier scores
# get the prediction on the test data
y_test_pred = clf.predict(X_test) # outlier labels (0 or 1)
y_test_scores = clf.decision_function(X_test) # outlier scores
# evaluate and print the results
print("\nOn Training Data:")
evaluate_print(clf_name, y_train, y_train_scores)
print("\nOn Test Data:")
evaluate_print(clf_name, y_test, y_test_scores)
# visualize the results
visualize(clf_name, X_train, y_train, X_test, y_test, y_train_pred,
y_test_pred, show_figure=True, save_figure=False)
| en | 0.629902 | # -*- coding: utf-8 -*- Example of using LOF for outlier detection # Author: <NAME> <<EMAIL>> # License: BSD 2 clause # temporary solution for relative imports in case pyod is not installed # if pyod is installed, no need to use the following line Utility function for visualizing the results in examples. Internal use only. Parameters ---------- clf_name : str The name of the detector. X_train : numpy array of shape (n_samples, n_features) The training samples. y_train : list or array of shape (n_samples,) The ground truth of training samples. X_test : numpy array of shape (n_samples, n_features) The test samples. y_test : list or array of shape (n_samples,) The ground truth of test samples. y_train_pred : numpy array of shape (n_samples, n_features) The predicted binary labels of the training samples. y_test_pred : numpy array of shape (n_samples, n_features) The predicted binary labels of the test samples. show_figure : bool, optional (default=True) If set to True, show the figure. save_figure : bool, optional (default=False) If set to True, save the figure to the local. Internal method to add subplot of inliers and outliers. Parameters ---------- X_inliers : numpy array of shape (n_samples, n_features) Outliers. X_outliers : numpy array of shape (n_samples, n_features) Inliers. sub_plot_title : str Subplot title. inlier_color : str, optional (default='blue') The color of inliers. outlier_color : str, optional (default='orange') The color of outliers. # check input data shapes are consistent # plot ground truth vs. predicted results # percentage of outliers # number of training points # number of testing points # Generate sample data # train LOF detector # get the prediction labels and outlier scores of the training data # binary labels (0: inliers, 1: outliers) # raw outlier scores # get the prediction on the test data # outlier labels (0 or 1) # outlier scores # evaluate and print the results # visualize the results | 3.115832 | 3 |
donation/forms.py | 9sneha-n/pari | 0 | 6625169 | from django import forms
from django.utils.translation import ugettext_lazy as _
from .fields import AmountField
from .helpers import DonationOptions
class DonateForm(forms.Form):
    """Donation form collecting donor details, amount, frequency and term.

    Validation enforces two business rules: donations are accepted from
    Indian citizens only (regulatory requirement), and yearly donations
    must run for at least 2 years.
    """
    name = forms.CharField(
        label=_("NAME"),
        max_length=100,
        widget=forms.TextInput(attrs={"class": "form-control"})
    )
    email = forms.EmailField(
        label=_("EMAIL"),
        widget=forms.EmailInput(attrs={"class": "form-control"})
    )
    phone = forms.CharField(
        label=_("PHONE NUMBER"),
        widget=forms.TextInput(attrs={"class": "form-control"})
    )
    # PAN is mandatory for donations under Indian tax regulations.
    pan = forms.CharField(
        label=_("PAN NUMBER"),
        max_length=10,
        widget=forms.TextInput(attrs={"class": "form-control"}),
        help_text=_("PAN is required as per government regulations.")
    )
    amount = AmountField(
        choices=DonationOptions.Amount.CHOICES,
        label=_('AMOUNT')
    )
    frequency = forms.ChoiceField(
        choices=DonationOptions.Frequency.FORM_CHOICES,
        widget=forms.RadioSelect,
        label=_('TYPE')
    )
    term = forms.ChoiceField(
        choices=DonationOptions.Term.CHOICES,
        initial=DonationOptions.Term.Y5,
        widget=forms.Select(attrs={"class": "form-control term-select"}),
        label=_('DURATION')
    )
    is_indian = forms.BooleanField(
        initial=False,
        label=_("I declare that I am an Indian citizen"),
        widget=forms.CheckboxInput()
    )
    def clean_is_indian(self):
        """Reject the submission unless Indian citizenship was declared."""
        data = self.cleaned_data["is_indian"]
        # Idiom fix: the original compared against the True literal
        # (`data != True`, flake8 E712); a plain truthiness test is
        # equivalent for the bool produced by BooleanField and clearer.
        if not data:
            raise forms.ValidationError(_("Sorry, we can accept donations "
                                          "from Indians only."))
        return data
    def clean_term(self):
        """Ensure a yearly donation runs for at least 2 years."""
        # 6-month and 1-year terms are incompatible with yearly billing.
        if self.cleaned_data.get('frequency', '') == DonationOptions.Frequency.Y and \
                self.cleaned_data['term'] in (DonationOptions.Term.M6, DonationOptions.Term.Y1):
            raise forms.ValidationError(_('Term should be at least 2 years for Yearly donation'))
        return self.cleaned_data['term']
| from django import forms
from django.utils.translation import ugettext_lazy as _
from .fields import AmountField
from .helpers import DonationOptions
class DonateForm(forms.Form):
    """Donation form collecting donor details, amount, frequency and term.

    Validation enforces two business rules: donations are accepted from
    Indian citizens only (regulatory requirement), and yearly donations
    must run for at least 2 years.
    """
    name = forms.CharField(
        label=_("NAME"),
        max_length=100,
        widget=forms.TextInput(attrs={"class": "form-control"})
    )
    email = forms.EmailField(
        label=_("EMAIL"),
        widget=forms.EmailInput(attrs={"class": "form-control"})
    )
    phone = forms.CharField(
        label=_("PHONE NUMBER"),
        widget=forms.TextInput(attrs={"class": "form-control"})
    )
    # PAN is mandatory for donations under Indian tax regulations.
    pan = forms.CharField(
        label=_("PAN NUMBER"),
        max_length=10,
        widget=forms.TextInput(attrs={"class": "form-control"}),
        help_text=_("PAN is required as per government regulations.")
    )
    amount = AmountField(
        choices=DonationOptions.Amount.CHOICES,
        label=_('AMOUNT')
    )
    frequency = forms.ChoiceField(
        choices=DonationOptions.Frequency.FORM_CHOICES,
        widget=forms.RadioSelect,
        label=_('TYPE')
    )
    term = forms.ChoiceField(
        choices=DonationOptions.Term.CHOICES,
        initial=DonationOptions.Term.Y5,
        widget=forms.Select(attrs={"class": "form-control term-select"}),
        label=_('DURATION')
    )
    is_indian = forms.BooleanField(
        initial=False,
        label=_("I declare that I am an Indian citizen"),
        widget=forms.CheckboxInput()
    )
    def clean_is_indian(self):
        """Reject the submission unless Indian citizenship was declared."""
        data = self.cleaned_data["is_indian"]
        # Idiom fix: the original compared against the True literal
        # (`data != True`, flake8 E712); a plain truthiness test is
        # equivalent for the bool produced by BooleanField and clearer.
        if not data:
            raise forms.ValidationError(_("Sorry, we can accept donations "
                                          "from Indians only."))
        return data
    def clean_term(self):
        """Ensure a yearly donation runs for at least 2 years."""
        # 6-month and 1-year terms are incompatible with yearly billing.
        if self.cleaned_data.get('frequency', '') == DonationOptions.Frequency.Y and \
                self.cleaned_data['term'] in (DonationOptions.Term.M6, DonationOptions.Term.Y1):
            raise forms.ValidationError(_('Term should be at least 2 years for Yearly donation'))
        return self.cleaned_data['term']
| none | 1 | 2.199996 | 2 | |
classify.py | bosecodes/cautious-spork | 1 | 6625170 | <reponame>bosecodes/cautious-spork
import tensorflow as tf
import sys
import os
import urllib
import final
# Disable tensorflow compilation warnings
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
def prediction(image_path):
    """Classify the image at ``image_path`` with the retrained graph.

    Prints every candidate label with its softmax score (most confident
    first) and returns the top prediction.

    Args:
        image_path: path to a JPEG image file.

    Returns:
        Tuple ``(label, score)`` for the highest-confidence class, with
        ``score`` expressed as a percentage rounded to 2 decimals.
    """
    final.done()
    # Read the raw JPEG bytes; use a context manager so the file handle
    # is closed (the original leaked it).
    with tf.compat.v1.gfile.FastGFile(image_path, 'rb') as img_file:
        image_data = img_file.read()
    print('This is image path', image_path)
    # Loads label file, strips off carriage return
    label_lines = [line.rstrip() for line
                   in tf.io.gfile.GFile(r"./models/tf_files/retrained_labels.txt")]
    # Unpersists graph from file
    with tf.compat.v1.gfile.FastGFile(r"./models/tf_files/retrained_graph.pb", 'rb') as f:
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
    with tf.compat.v1.Session() as sess:
        # Feed the image_data as input to the graph and get the predictions
        softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
        predictions = sess.run(softmax_tensor,
                               {'DecodeJpeg/contents:0': image_data})
        # Label indices sorted by descending confidence
        top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
        # BUG FIX: the original reset its rank counter to 1 inside the
        # loop (so it always printed 1) and, after the loop, returned the
        # values left over from the *last* — i.e. least confident —
        # iteration.  Track the top prediction explicitly instead.
        best_label = None
        best_score = None
        for rank, node_id in enumerate(top_k, start=1):
            human_string = label_lines[node_id]
            score = predictions[0][node_id]
            print(rank)
            print('%s (score = %.5f)' % (human_string, score))
            if best_label is None:
                best_label = human_string
                best_score = round(score * 100, 2)
        return best_label, best_score
| import tensorflow as tf
import sys
import os
import urllib
import final
# Disable tensorflow compilation warnings
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
def prediction(image_path):
    """Classify the image at ``image_path`` with the retrained graph.

    Prints every candidate label with its softmax score (most confident
    first) and returns the top prediction.

    Args:
        image_path: path to a JPEG image file.

    Returns:
        Tuple ``(label, score)`` for the highest-confidence class, with
        ``score`` expressed as a percentage rounded to 2 decimals.
    """
    final.done()
    # Read the raw JPEG bytes; use a context manager so the file handle
    # is closed (the original leaked it).
    with tf.compat.v1.gfile.FastGFile(image_path, 'rb') as img_file:
        image_data = img_file.read()
    print('This is image path', image_path)
    # Loads label file, strips off carriage return
    label_lines = [line.rstrip() for line
                   in tf.io.gfile.GFile(r"./models/tf_files/retrained_labels.txt")]
    # Unpersists graph from file
    with tf.compat.v1.gfile.FastGFile(r"./models/tf_files/retrained_graph.pb", 'rb') as f:
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
    with tf.compat.v1.Session() as sess:
        # Feed the image_data as input to the graph and get the predictions
        softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
        predictions = sess.run(softmax_tensor,
                               {'DecodeJpeg/contents:0': image_data})
        # Label indices sorted by descending confidence
        top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
        # BUG FIX: the original reset its rank counter to 1 inside the
        # loop (so it always printed 1) and, after the loop, returned the
        # values left over from the *last* — i.e. least confident —
        # iteration.  Track the top prediction explicitly instead.
        best_label = None
        best_score = None
        for rank, node_id in enumerate(top_k, start=1):
            human_string = label_lines[node_id]
            score = predictions[0][node_id]
            print(rank)
            print('%s (score = %.5f)' % (human_string, score))
            if best_label is None:
                best_label = human_string
                best_score = round(score * 100, 2)
        return best_label, best_score
graphics_editor/editor/gui_rectangles.py | foobar167/junkyard | 60 | 6625171 | <reponame>foobar167/junkyard
# -*- coding: utf-8 -*-
import operator
import tkinter as tk
from .gui_canvas import CanvasImage
class Rectangles(CanvasImage):
    """ Class of Rectangles. Inherit CanvasImage class.

    Adds mouse-driven ROI (region of interest) rectangles on top of the
    zoomable image canvas: left-drag draws a rectangle, hovering over an
    edge selects it, right-click opens a popup menu and <Delete> removes
    the selection.  ROI coordinates are keyed in *image* space (see
    self.roi_dict) so they survive zooming; canvas items are addressed
    via tkinter tags derived from those coordinates.
    """
    def __init__(self, placeholder, path):
        """ Initialize the Rectangles.

        Args:
            placeholder: parent widget that hosts the canvas frame.
            path: path to the image file displayed on the canvas.
        """
        CanvasImage.__init__(self, placeholder, path)  # call __init__ of the CanvasImage class
        self.canvas.bind('<ButtonPress-1>', self.start_rect)  # start new rectangle
        self.canvas.bind('<ButtonRelease-1>', self.finish_rect)  # finish new rectangle
        self.canvas.bind('<ButtonRelease-3>', self.popup)  # call popup menu
        self.canvas.bind('<Motion>', self.motion)  # handle mouse motion
        self.canvas.bind('<Delete>', lambda event: self.delete_rect())  # delete selected rectangle
        # Create a popup menu for Rectangles
        self.hold_menu1 = False  # popup menu is closed
        self.hold_menu2 = False
        self.menu = tk.Menu(self.canvas, tearoff=0)
        self.menu.add_command(label='Delete', command=self.delete_rect, accelerator=u'Delete')
        # Rectangle parameters
        self.width_line = 2  # lines width
        self.dash = (1, 1)  # dash pattern (used to mark out-of-scope drawing)
        self.color_roi = {'draw': 'red',  # draw roi color
                          'point': 'blue',  # point roi color
                          'back': 'yellow',  # background roi color
                          'stipple': 'gray12'}  # stipple value for roi
        self.current_rect = None  # current rectangle to draw on the canvas
        self.current_rect_coords = None  # current rectangle coordinates
        self.tag_roi = 'roi'  # roi tag
        self.tag_const = 'rect'  # constant tag for rectangle
        self.tag_poly_line = 'poly_line'  # edge of the rectangle
        self.selected_rect = []  # selected rectangles
        self.roi_dict = {}  # dictionary of all rectangles and their coords on the canvas
    def start_rect(self, event):
        """ Start to draw rectangle on left-button press. """
        if self.hold_menu2:  # popup menu is opened
            self.hold_menu2 = False  # popup menu closes automatically
            self.motion(event)  # motion event for popup menu
            return  # exit from drawing new rectangle
        self.motion(event)  # generate motion event. It's needed for menu bar, bug otherwise!
        x = self.canvas.canvasx(event.x)  # get coordinates of the event on the canvas
        y = self.canvas.canvasy(event.y)
        if self.outside(x, y): return  # starting point is out of scope
        # Start to draw current rectangle
        self.current_rect = self.canvas.create_rectangle(
            (x, y, x, y), width=self.width_line, outline=self.color_roi['draw'])
        self.current_rect_coords = (x, y)  # save (x, y)
    def finish_rect(self, event):
        """ Finish to draw rectangle on left-button release. """
        if not self.current_rect:
            return  # there is no current rectangle
        # A dashed outline means the pointer left the image area (see motion)
        if ' '.join(map(str, self.dash)) == self.canvas.itemcget(self.current_rect, 'dash'):
            self.delete_current_rect()
            return  # release button is out of scope
        # Get rectangle coordinates on the zoomed image
        bbox1 = self.canvas.coords(self.current_rect)  # get rectangle area
        bbox2 = self.canvas.coords(self.container)  # get image area
        # Get rectangle coordinates on the image
        x1 = int((bbox1[0] - bbox2[0]) / self.imscale)
        y1 = int((bbox1[1] - bbox2[1]) / self.imscale)
        x2 = int((bbox1[2] - bbox2[0]) / self.imscale)
        y2 = int((bbox1[3] - bbox2[1]) / self.imscale)
        if x1 == x2 or y1 == y2:
            self.delete_current_rect()
            return  # rectangle has no area, so exit and don't draw it
        bbox = (x1, y1, x2, y2)  # coords on the image
        self.draw_rect(bbox1, bbox)  # draw rectangle
        self.delete_current_rect()
    def delete_current_rect(self):
        """ Delete current (in-progress) rectangle. """
        self.canvas.delete(self.current_rect)  # delete from the canvas
        self.current_rect = None
        self.current_rect_coords = None
    def draw_rect(self, bbox1, bbox2):
        """ Draw rectangle.
        bbox1 - rectangle coordinates on the canvas.
        bbox2 - rectangle coordinates on the image (used as the unique key). """
        # Create rectangle unique ID tag
        tag_uid = '{}-{}-{}-{}'.format(bbox2[0], bbox2[1], bbox2[2], bbox2[3])
        if tag_uid not in self.roi_dict:  # save only unique rectangles with different coords
            # Create rectangle. 2nd tag is ALWAYS a unique tag ID + constant string.
            self.canvas.create_rectangle(bbox1, fill=self.color_roi['point'],
                                         stipple=self.color_roi['stipple'],
                                         width=0, state='hidden',
                                         tags=(self.tag_roi, tag_uid + self.tag_const))
            # Create polyline. 2nd tag is ALWAYS a unique tag ID.
            vertices = [(bbox1[0], bbox1[1]), (bbox1[2], bbox1[1]),
                        (bbox1[2], bbox1[3]), (bbox1[0], bbox1[3]),]
            for j in range(-1, len(vertices) - 1):
                self.canvas.create_line(vertices[j], vertices[j + 1], width=self.width_line,
                                        fill=self.color_roi['back'],
                                        tags=(self.tag_poly_line, tag_uid))
            self.roi_dict[tag_uid] = bbox2  # remember rectangle coordinates in the dictionary
            # Print rectangles number into console (overwrites the same line via '\r')
            print('Images: {n}'.format(n=len(self.roi_dict)) + (20 * ' ') + '\r', end='')
    def popup(self, event):
        """ Popup menu on right-button release. """
        self.motion(event)  # select rectangle with popup menu explicitly to be sure it is selected
        if self.selected_rect:  # show popup menu only for selected rectangle
            self.hold_menu1 = True  # popup menu is opened
            self.hold_menu2 = True
            self.menu.post(event.x_root, event.y_root)  # show popup menu
            self.hold_menu1 = False  # popup menu is closed
    def motion(self, event):
        """ Track mouse position over the canvas; redraw the in-progress
        rectangle and update the hover selection. """
        if self.hold_menu1: return  # popup menu is opened
        # Redraw current rectangle if it exists
        if self.current_rect:
            x = self.canvas.canvasx(event.x)  # get coordinates of the event on the canvas
            y = self.canvas.canvasy(event.y)
            if self.outside(x, y):  # outside of the canvas
                self.canvas.itemconfigure(self.current_rect, dash=self.dash)  # set dashed line
            else:
                self.canvas.itemconfigure(self.current_rect, dash='')  # set solid line
            # Relocate (change) rectangle
            self.canvas.coords(self.current_rect, (min(self.current_rect_coords[0], x),
                                                   min(self.current_rect_coords[1], y),
                                                   max(self.current_rect_coords[0], x),
                                                   max(self.current_rect_coords[1], y),))
            self.canvas.lift(self.current_rect)  # set roi into foreground
        # Handle rectangles on the canvas
        self.deselect_rect()  # change color and zeroize selected rectangle
        self.select_rect()  # change color and select rectangle
    def select_rect(self):
        """ Select and change color of the roi object under the pointer. """
        i = self.canvas.find_withtag('current')  # id of the current object
        tags = self.canvas.gettags(i)  # get tags of the current object
        if self.tag_poly_line in tags:  # if it's a polyline. 2nd tag is ALWAYS a unique tag ID
            j = tags[1] + self.tag_const  # unique tag of the rectangle
            self.canvas.itemconfigure(tags[1], fill=self.color_roi['point'])  # select lines
            self.canvas.itemconfigure(j, state='normal')  # show rectangle
            self.selected_rect.append(tags[1])  # remember 2nd unique tag_id
    def deselect_rect(self):
        """ Deselect all currently selected roi objects. """
        if not self.selected_rect: return  # selected rectangles list is empty
        for i in self.selected_rect:
            j = i + self.tag_const  # unique tag of the rectangle
            self.canvas.itemconfigure(i, fill=self.color_roi['back'])  # deselect lines
            self.canvas.itemconfigure(j, state='hidden')  # hide rectangle
        self.selected_rect.clear()  # clear the list
    def delete_rect(self):
        """ Delete selected rectangle(s) from the canvas and roi_dict. """
        if self.selected_rect:  # delete selected rectangle
            for i in self.selected_rect:
                j = i + self.tag_const  # unique tag of the rectangle
                del(self.roi_dict[i])  # delete ROI from the dictionary of all rectangles
                self.canvas.delete(i)  # delete lines
                self.canvas.delete(j)  # delete rectangle
            self.selected_rect.clear()  # clear selection list
            # print rectangles number into console
            print('Images: {n}'.format(n=len(self.roi_dict)) + (20 * ' ') + '\r', end='')
        self.hold_menu2 = False  # popup menu is closed
    def delete_all(self):
        """ Delete all rectangles from the canvas and clear variables """
        self.canvas.delete(self.tag_roi)  # delete all rectangles
        self.canvas.delete(self.tag_poly_line)  # delete all poly-lines
        self.selected_rect.clear()  # clear selection list
        self.hold_menu2 = False  # popup menu is closed
        self.roi_dict.clear()  # clear dictionary of ROI
    def reset(self, roi):
        """ Reset ROI rectangles on the image.

        Args:
            roi: iterable of (x1, y1, x2, y2) boxes in image coordinates.
        """
        self.delete_all()  # delete old rectangles
        bbox2 = self.canvas.coords(self.container)  # get canvas coordinates
        for bbox in roi:  # draw roi rectangles
            bbox1 = (int(bbox[0] * self.imscale) + bbox2[0],
                     int(bbox[1] * self.imscale) + bbox2[1],
                     int(bbox[2] * self.imscale) + bbox2[0],
                     int(bbox[3] * self.imscale) + bbox2[1])
            self.draw_rect(bbox1, bbox)
| # -*- coding: utf-8 -*-
import operator
import tkinter as tk
from .gui_canvas import CanvasImage
class Rectangles(CanvasImage):
""" Class of Rectangles. Inherit CanvasImage class """
def __init__(self, placeholder, path):
""" Initialize the Rectangles """
CanvasImage.__init__(self, placeholder, path) # call __init__ of the CanvasImage class
self.canvas.bind('<ButtonPress-1>', self.start_rect) # start new rectangle
self.canvas.bind('<ButtonRelease-1>', self.finish_rect) # finish new rectangle
self.canvas.bind('<ButtonRelease-3>', self.popup) # call popup menu
self.canvas.bind('<Motion>', self.motion) # handle mouse motion
self.canvas.bind('<Delete>', lambda event: self.delete_rect()) # delete selected rectangle
# Create a popup menu for Rectangles
self.hold_menu1 = False # popup menu is closed
self.hold_menu2 = False
self.menu = tk.Menu(self.canvas, tearoff=0)
self.menu.add_command(label='Delete', command=self.delete_rect, accelerator=u'Delete')
# Rectangle parameters
self.width_line = 2 # lines width
self.dash = (1, 1) # dash pattern
self.color_roi = {'draw': 'red', # draw roi color
'point': 'blue', # point roi color
'back': 'yellow', # background roi color
'stipple': 'gray12'} # stipple value for roi
self.current_rect = None # current rectangle to draw on the canvas
self.current_rect_coords = None # current rectangle coordinates
self.tag_roi = 'roi' # roi tag
self.tag_const = 'rect' # constant tag for rectangle
self.tag_poly_line = 'poly_line' # edge of the rectangle
self.selected_rect = [] # selected rectangles
self.roi_dict = {} # dictionary of all rectangles and their coords on the canvas
def start_rect(self, event):
""" Start to draw rectangle """
if self.hold_menu2: # popup menu is opened
self.hold_menu2 = False # popup menu closes automatically
self.motion(event) # motion event for popup menu
return # exit from drawing new rectangle
self.motion(event) # generate motion event. It's needed for menu bar, bug otherwise!
x = self.canvas.canvasx(event.x) # get coordinates of the event on the canvas
y = self.canvas.canvasy(event.y)
if self.outside(x, y): return # starting point is out of scope
# Start to draw current rectangle
self.current_rect = self.canvas.create_rectangle(
(x, y, x, y), width=self.width_line, outline=self.color_roi['draw'])
self.current_rect_coords = (x, y) # save (x, y)
def finish_rect(self, event):
""" Finish to draw rectangle """
if not self.current_rect:
return # there is no current rectangle
if ' '.join(map(str, self.dash)) == self.canvas.itemcget(self.current_rect, 'dash'):
self.delete_current_rect()
return # release button is out of scope
# Get rectangle coordinates on the zoomed image
bbox1 = self.canvas.coords(self.current_rect) # get rectangle area
bbox2 = self.canvas.coords(self.container) # get image area
# Get rectangle coordinates on the image
x1 = int((bbox1[0] - bbox2[0]) / self.imscale)
y1 = int((bbox1[1] - bbox2[1]) / self.imscale)
x2 = int((bbox1[2] - bbox2[0]) / self.imscale)
y2 = int((bbox1[3] - bbox2[1]) / self.imscale)
if x1 == x2 or y1 == y2:
self.delete_current_rect()
return # rectangle has no area, so exit and don't draw it
bbox = (x1, y1, x2, y2) # coords on the image
self.draw_rect(bbox1, bbox) # draw rectangle
self.delete_current_rect()
def delete_current_rect(self):
""" Delete current rectangle """
self.canvas.delete(self.current_rect) # delete from the canvas
self.current_rect = None
self.current_rect_coords = None
def draw_rect(self, bbox1, bbox2):
""" Draw rectangle.
bbox1 - rectangle coordinates on the canvas.
bbox2 - rectangle coordinates on the image. """
# Create rectangle unique ID tag
tag_uid = '{}-{}-{}-{}'.format(bbox2[0], bbox2[1], bbox2[2], bbox2[3])
if tag_uid not in self.roi_dict: # save only unique rectangles with different coords
# Create rectangle. 2nd tag is ALWAYS a unique tag ID + constant string.
self.canvas.create_rectangle(bbox1, fill=self.color_roi['point'],
stipple=self.color_roi['stipple'],
width=0, state='hidden',
tags=(self.tag_roi, tag_uid + self.tag_const))
# Create polyline. 2nd tag is ALWAYS a unique tag ID.
vertices = [(bbox1[0], bbox1[1]), (bbox1[2], bbox1[1]),
(bbox1[2], bbox1[3]), (bbox1[0], bbox1[3]),]
for j in range(-1, len(vertices) - 1):
self.canvas.create_line(vertices[j], vertices[j + 1], width=self.width_line,
fill=self.color_roi['back'],
tags=(self.tag_poly_line, tag_uid))
self.roi_dict[tag_uid] = bbox2 # remember rectangle coordinates in the dictionary
# Print rectangles number into console
print('Images: {n}'.format(n=len(self.roi_dict)) + (20 * ' ') + '\r', end='')
def popup(self, event):
""" Popup menu """
self.motion(event) # select rectangle with popup menu explicitly to be sure it is selected
if self.selected_rect: # show popup menu only for selected rectangle
self.hold_menu1 = True # popup menu is opened
self.hold_menu2 = True
self.menu.post(event.x_root, event.y_root) # show popup menu
self.hold_menu1 = False # popup menu is closed
def motion(self, event):
""" Track mouse position over the canvas """
if self.hold_menu1: return # popup menu is opened
# Redraw current rectangle if it exists
if self.current_rect:
x = self.canvas.canvasx(event.x) # get coordinates of the event on the canvas
y = self.canvas.canvasy(event.y)
if self.outside(x, y): # outside of the canvas
self.canvas.itemconfigure(self.current_rect, dash=self.dash) # set dashed line
else:
self.canvas.itemconfigure(self.current_rect, dash='') # set solid line
# Relocate (change) rectangle
self.canvas.coords(self.current_rect, (min(self.current_rect_coords[0], x),
min(self.current_rect_coords[1], y),
max(self.current_rect_coords[0], x),
max(self.current_rect_coords[1], y),))
self.canvas.lift(self.current_rect) # set roi into foreground
# Handle rectangles on the canvas
self.deselect_rect() # change color and zeroize selected rectangle
self.select_rect() # change color and select rectangle
def select_rect(self):
""" Select and change color of the current roi object """
i = self.canvas.find_withtag('current') # id of the current object
tags = self.canvas.gettags(i) # get tags of the current object
if self.tag_poly_line in tags: # if it's a polyline. 2nd tag is ALWAYS a unique tag ID
j = tags[1] + self.tag_const # unique tag of the rectangle
self.canvas.itemconfigure(tags[1], fill=self.color_roi['point']) # select lines
self.canvas.itemconfigure(j, state='normal') # show rectangle
self.selected_rect.append(tags[1]) # remember 2nd unique tag_id
def deselect_rect(self):
""" Deselect current roi object """
if not self.selected_rect: return # selected rectangles list is empty
for i in self.selected_rect:
j = i + self.tag_const # unique tag of the rectangle
self.canvas.itemconfigure(i, fill=self.color_roi['back']) # deselect lines
self.canvas.itemconfigure(j, state='hidden') # hide rectangle
self.selected_rect.clear() # clear the list
def delete_rect(self):
""" Delete selected rectangle """
if self.selected_rect: # delete selected rectangle
for i in self.selected_rect:
j = i + self.tag_const # unique tag of the rectangle
del(self.roi_dict[i]) # delete ROI from the dictionary of all rectangles
self.canvas.delete(i) # delete lines
self.canvas.delete(j) # delete rectangle
self.selected_rect.clear() # clear selection list
# print rectangles number into console
print('Images: {n}'.format(n=len(self.roi_dict)) + (20 * ' ') + '\r', end='')
self.hold_menu2 = False # popup menu is closed
def delete_all(self):
""" Delete all rectangles from the canvas and clear variables """
self.canvas.delete(self.tag_roi) # delete all rectangles
self.canvas.delete(self.tag_poly_line) # delete all poly-lines
self.selected_rect.clear() # clear selection list
self.hold_menu2 = False # popup menu is closed
self.roi_dict.clear() # clear dictionary of ROI
def reset(self, roi):
""" Reset ROI and holes on the image """
self.delete_all() # delete old rectangles
bbox2 = self.canvas.coords(self.container) # get canvas coordinates
for bbox in roi: # draw roi rectangles
bbox1 = (int(bbox[0] * self.imscale) + bbox2[0],
int(bbox[1] * self.imscale) + bbox2[1],
int(bbox[2] * self.imscale) + bbox2[0],
int(bbox[3] * self.imscale) + bbox2[1])
self.draw_rect(bbox1, bbox) | en | 0.785774 | # -*- coding: utf-8 -*- Class of Rectangles. Inherit CanvasImage class Initialize the Rectangles # call __init__ of the CanvasImage class # start new rectangle # finish new rectangle # call popup menu # handle mouse motion # delete selected rectangle # Create a popup menu for Rectangles # popup menu is closed # Rectangle parameters # lines width # dash pattern # draw roi color # point roi color # background roi color # stipple value for roi # current rectangle to draw on the canvas # current rectangle coordinates # roi tag # constant tag for rectangle # edge of the rectangle # selected rectangles # dictionary of all rectangles and their coords on the canvas Start to draw rectangle # popup menu is opened # popup menu closes automatically # motion event for popup menu # exit from drawing new rectangle # generate motion event. It's needed for menu bar, bug otherwise! # get coordinates of the event on the canvas # starting point is out of scope # Start to draw current rectangle # save (x, y) Finish to draw rectangle # there is no current rectangle # release button is out of scope # Get rectangle coordinates on the zoomed image # get rectangle area # get image area # Get rectangle coordinates on the image # rectangle has no area, so exit and don't draw it # coords on the image # draw rectangle Delete current rectangle # delete from the canvas Draw rectangle. bbox1 - rectangle coordinates on the canvas. bbox2 - rectangle coordinates on the image. # Create rectangle unique ID tag # save only unique rectangles with different coords # Create rectangle. 2nd tag is ALWAYS a unique tag ID + constant string. # Create polyline. 2nd tag is ALWAYS a unique tag ID. 
# remember rectangle coordinates in the dictionary # Print rectangles number into console Popup menu # select rectangle with popup menu explicitly to be sure it is selected # show popup menu only for selected rectangle # popup menu is opened # show popup menu # popup menu is closed Track mouse position over the canvas # popup menu is opened # Redraw current rectangle if it exists # get coordinates of the event on the canvas # outside of the canvas # set dashed line # set solid line # Relocate (change) rectangle # set roi into foreground # Handle rectangles on the canvas # change color and zeroize selected rectangle # change color and select rectangle Select and change color of the current roi object # id of the current object # get tags of the current object # if it's a polyline. 2nd tag is ALWAYS a unique tag ID # unique tag of the rectangle # select lines # show rectangle # remember 2nd unique tag_id Deselect current roi object # selected rectangles list is empty # unique tag of the rectangle # deselect lines # hide rectangle # clear the list Delete selected rectangle # delete selected rectangle # unique tag of the rectangle # delete ROI from the dictionary of all rectangles # delete lines # delete rectangle # clear selection list # print rectangles number into console # popup menu is closed Delete all rectangles from the canvas and clear variables # delete all rectangles # delete all poly-lines # clear selection list # popup menu is closed # clear dictionary of ROI Reset ROI and holes on the image # delete old rectangles # get canvas coordinates # draw roi rectangles | 3.315573 | 3 |
app/request.py | mukjos30/News_Articles | 0 | 6625172 | import urllib.request,json
from .models import Source,Article
api_key = None
base_url = None
base_url_articles=None
def config_request(app):
global api_key,base_url,articleUrl
api_key=app.config['NEWS_API_KEY']
base_url=app.config['NEWS_API_WEB_URL']
articleUrl=app.config['ARTICLES_URL']
print(base_url)
def get_sources(category):
"""
Function that gets the json response to our url request
"""
get_sources_url = base_url.format(category,api_key)
print(get_sources_url)
with urllib.request.urlopen(get_sources_url) as url:
get_sources_data = url.read()
get_sources_response = json.loads(get_sources_data)
sources_results = None
if get_sources_response['sources']:
sources_results_list = get_sources_response['sources']
sources_results = process_results(sources_results_list)
return sources_results
def process_results(source_list):
"""
Function that proceeses that the sources result and transform them to a list of Objects
Args:
source_list:A list of dictionaries that contain source details
Returns:
source_results:A list of source Objects
"""
source_results=[]
for source_item in source_list:
id = source_item.get('id')
name = source_item.get('name')
description = source_item.get('description')
source_object = Source(id,name,description)
source_results.append(source_object)
# print(source_list)
return source_results
def get_articles(id):
"""
Function that gets the json response to our url request
"""
get_articles_url = base_url_articles.format(id,api_key)
with urllib.request.urlopen(get_articles_url) as url:
get_articles_data = url.read()
get_articles_response = json.loads(get_articles_data)
articles_results = None
if get_articles_response['articles']:
articles_result_list=get_articles_response['articles']
articles_results=process_article_results(articles_result_list)
return articles_results
def process_article_results(articles_list):
articles_results=[]
for article_item in articles_list:
source=article_item.get('source')
author=article_item.get('author')
title=article_item.get('title')
description=article_item.get('description')
url=article_item.get('url')
urlToImage=article_item.get('urlToImage')
publishedAt=article_item.get('publishedAt')
articles_object = Article(source,author,title,description,publishedAt,url,urlToImage)
articles_results.append(articles_object)
return articles_results
| import urllib.request,json
from .models import Source,Article
api_key = None
base_url = None
base_url_articles=None
def config_request(app):
global api_key,base_url,articleUrl
api_key=app.config['NEWS_API_KEY']
base_url=app.config['NEWS_API_WEB_URL']
articleUrl=app.config['ARTICLES_URL']
print(base_url)
def get_sources(category):
"""
Function that gets the json response to our url request
"""
get_sources_url = base_url.format(category,api_key)
print(get_sources_url)
with urllib.request.urlopen(get_sources_url) as url:
get_sources_data = url.read()
get_sources_response = json.loads(get_sources_data)
sources_results = None
if get_sources_response['sources']:
sources_results_list = get_sources_response['sources']
sources_results = process_results(sources_results_list)
return sources_results
def process_results(source_list):
"""
Function that proceeses that the sources result and transform them to a list of Objects
Args:
source_list:A list of dictionaries that contain source details
Returns:
source_results:A list of source Objects
"""
source_results=[]
for source_item in source_list:
id = source_item.get('id')
name = source_item.get('name')
description = source_item.get('description')
source_object = Source(id,name,description)
source_results.append(source_object)
# print(source_list)
return source_results
def get_articles(id):
"""
Function that gets the json response to our url request
"""
get_articles_url = base_url_articles.format(id,api_key)
with urllib.request.urlopen(get_articles_url) as url:
get_articles_data = url.read()
get_articles_response = json.loads(get_articles_data)
articles_results = None
if get_articles_response['articles']:
articles_result_list=get_articles_response['articles']
articles_results=process_article_results(articles_result_list)
return articles_results
def process_article_results(articles_list):
articles_results=[]
for article_item in articles_list:
source=article_item.get('source')
author=article_item.get('author')
title=article_item.get('title')
description=article_item.get('description')
url=article_item.get('url')
urlToImage=article_item.get('urlToImage')
publishedAt=article_item.get('publishedAt')
articles_object = Article(source,author,title,description,publishedAt,url,urlToImage)
articles_results.append(articles_object)
return articles_results
| en | 0.744253 | Function that gets the json response to our url request Function that proceeses that the sources result and transform them to a list of Objects Args: source_list:A list of dictionaries that contain source details Returns: source_results:A list of source Objects # print(source_list) Function that gets the json response to our url request | 2.947476 | 3 |
tensorforce/agents/dqfd_agent.py | youlei202/tensorforce-lei | 0 | 6625173 | # Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import numpy as np
from tensorforce import TensorForceError
from tensorforce.agents import MemoryAgent
from tensorforce.core.memories import Replay
from tensorforce.models import QDemoModel
class DQFDAgent(MemoryAgent):
"""
Deep Q-learning from demonstration (DQFD) agent ([Hester et al., 2017](https://arxiv.org/abs/1704.03732)).
This agent uses DQN to pre-train from demonstration data via an additional supervised loss term.
"""
def __init__(
self,
states_spec,
actions_spec,
batched_observe=1000,
scope='dqfd',
# parameters specific to LearningAgents
summary_spec=None,
network_spec=None,
device=None,
session_config=None,
saver_spec=None,
distributed_spec=None,
optimizer=None,
discount=0.99,
variable_noise=None,
states_preprocessing_spec=None,
explorations_spec=None,
reward_preprocessing_spec=None,
distributions_spec=None,
entropy_regularization=None,
# parameters specific to MemoryAgents
batch_size=32,
memory=None,
first_update=10000,
update_frequency=4,
repeat_update=1,
# parameters specific to DQFD agents
target_sync_frequency=10000,
target_update_weight=1.0,
huber_loss=None,
expert_margin=0.5,
supervised_weight=0.1,
demo_memory_capacity=10000,
demo_sampling_ratio=0.2
):
"""
Deep Q-learning from demonstration (DQFD) agent ([Hester et al., 2017](https://arxiv.org/abs/1704.03732)).
This agent uses DQN to pre-train from demonstration data in combination with a supervised loss.
Args:
target_sync_frequency: Interval between optimization calls synchronizing the target network.
target_update_weight: Update weight, 1.0 meaning a full assignment to target network from training network.
huber_loss: Optional flat specifying Huber-loss clipping.
expert_margin: Positive float specifying enforced supervised margin between expert action Q-value and other
Q-values.
supervised_weight: Weight of supervised loss term.
demo_memory_capacity: Int describing capacity of expert demonstration memory.
demo_sampling_ratio: Runtime sampling ratio of expert data.
"""
self.target_sync_frequency = target_sync_frequency
self.target_update_weight = target_update_weight
self.huber_loss = huber_loss
self.expert_margin = expert_margin
self.supervised_weight = supervised_weight
super(DQFDAgent, self).__init__(
states_spec=states_spec,
actions_spec=actions_spec,
batched_observe=batched_observe,
scope=scope,
# parameters specific to LearningAgent
summary_spec=summary_spec,
network_spec=network_spec,
discount=discount,
device=device,
session_config=session_config,
saver_spec=saver_spec,
distributed_spec=distributed_spec,
optimizer=optimizer,
variable_noise=variable_noise,
states_preprocessing_spec=states_preprocessing_spec,
explorations_spec=explorations_spec,
reward_preprocessing_spec=reward_preprocessing_spec,
distributions_spec=distributions_spec,
entropy_regularization=entropy_regularization,
# parameters specific to MemoryAgents
batch_size=batch_size,
memory=memory,
first_update=first_update,
update_frequency=update_frequency,
repeat_update=repeat_update
)
# The demo_sampling_ratio, called p in paper, controls ratio of expert vs online training samples
# p = n_demo / (n_demo + n_replay) => n_demo = p * n_replay / (1 - p)
self.demo_memory_capacity = demo_memory_capacity
self.demo_batch_size = int(demo_sampling_ratio * batch_size / (1.0 - demo_sampling_ratio))
assert self.demo_batch_size > 0, 'Check DQFD sampling parameters to ensure ' \
'demo_batch_size is positive. (Calculated {} based on current' \
' parameters)'.format(self.demo_batch_size)
# This is the demonstration memory that we will fill with observations before starting
# the main training loop
self.demo_memory = Replay(self.states_spec, self.actions_spec, self.demo_memory_capacity)
def initialize_model(self):
return QDemoModel(
states_spec=self.states_spec,
actions_spec=self.actions_spec,
network_spec=self.network_spec,
device=self.device,
session_config=self.session_config,
scope=self.scope,
saver_spec=self.saver_spec,
summary_spec=self.summary_spec,
distributed_spec=self.distributed_spec,
optimizer=self.optimizer,
discount=self.discount,
variable_noise=self.variable_noise,
states_preprocessing_spec=self.states_preprocessing_spec,
explorations_spec=self.explorations_spec,
reward_preprocessing_spec=self.reward_preprocessing_spec,
distributions_spec=self.distributions_spec,
entropy_regularization=self.entropy_regularization,
target_sync_frequency=self.target_sync_frequency,
target_update_weight=self.target_update_weight,
# DQFD always uses double dqn, which is a required key for a q-model.
double_q_model=True,
huber_loss=self.huber_loss,
# TEMP: Random sampling fix
random_sampling_fix=True,
expert_margin=self.expert_margin,
supervised_weight=self.supervised_weight
)
def observe(self, reward, terminal):
"""
Adds observations, updates via sampling from memories according to update rate.
DQFD samples from the online replay memory and the demo memory with
the fractions controlled by a hyper parameter p called 'expert sampling ratio.
"""
super(DQFDAgent, self).observe(reward=reward, terminal=terminal)
if self.timestep >= self.first_update and self.timestep % self.update_frequency == 0:
for _ in xrange(self.repeat_update):
batch = self.demo_memory.get_batch(batch_size=self.demo_batch_size, next_states=True)
self.model.demonstration_update(
states={name: np.stack((batch['states'][name],
batch['next_states'][name])) for name in batch['states']},
internals=batch['internals'],
actions=batch['actions'],
terminal=batch['terminal'],
reward=batch['reward']
)
def import_demonstrations(self, demonstrations):
"""
Imports demonstrations, i.e. expert observations. Note that for large numbers of observations,
set_demonstrations is more appropriate, which directly sets memory contents to an array an expects
a different layout.
Args:
demonstrations: List of observation dicts
"""
for observation in demonstrations:
if self.unique_state:
state = dict(state=observation['states'])
else:
state = observation['states']
if self.unique_action:
action = dict(action=observation['actions'])
else:
action = observation['actions']
self.demo_memory.add_observation(
states=state,
internals=observation['internals'],
actions=action,
terminal=observation['terminal'],
reward=observation['reward']
)
def set_demonstrations(self, batch):
"""
Set all demonstrations from batch data. Expects a dict wherein each value contains an array
containing all states, actions, rewards, terminals and internals respectively.
Args:
batch:
"""
self.demo_memory.set_memory(
states=batch['states'],
internals=batch['internals'],
actions=batch['actions'],
terminal=batch['terminal'],
reward=batch['reward']
)
def pretrain(self, steps):
"""
Computes pre-train updates.
Args:
steps: Number of updates to execute.
"""
for _ in xrange(steps):
# Sample from demo memory.
batch = self.demo_memory.get_batch(batch_size=self.batch_size, next_states=True)
# Update using both double Q-learning and supervised double_q_loss.
self.model.demonstration_update(
states={name: np.stack((batch['states'][name],
batch['next_states'][name])) for name in batch['states']},
internals=batch['internals'],
actions=batch['actions'],
terminal=batch['terminal'],
reward=batch['reward']
)
| # Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import numpy as np
from tensorforce import TensorForceError
from tensorforce.agents import MemoryAgent
from tensorforce.core.memories import Replay
from tensorforce.models import QDemoModel
class DQFDAgent(MemoryAgent):
"""
Deep Q-learning from demonstration (DQFD) agent ([Hester et al., 2017](https://arxiv.org/abs/1704.03732)).
This agent uses DQN to pre-train from demonstration data via an additional supervised loss term.
"""
def __init__(
self,
states_spec,
actions_spec,
batched_observe=1000,
scope='dqfd',
# parameters specific to LearningAgents
summary_spec=None,
network_spec=None,
device=None,
session_config=None,
saver_spec=None,
distributed_spec=None,
optimizer=None,
discount=0.99,
variable_noise=None,
states_preprocessing_spec=None,
explorations_spec=None,
reward_preprocessing_spec=None,
distributions_spec=None,
entropy_regularization=None,
# parameters specific to MemoryAgents
batch_size=32,
memory=None,
first_update=10000,
update_frequency=4,
repeat_update=1,
# parameters specific to DQFD agents
target_sync_frequency=10000,
target_update_weight=1.0,
huber_loss=None,
expert_margin=0.5,
supervised_weight=0.1,
demo_memory_capacity=10000,
demo_sampling_ratio=0.2
):
"""
Deep Q-learning from demonstration (DQFD) agent ([Hester et al., 2017](https://arxiv.org/abs/1704.03732)).
This agent uses DQN to pre-train from demonstration data in combination with a supervised loss.
Args:
target_sync_frequency: Interval between optimization calls synchronizing the target network.
target_update_weight: Update weight, 1.0 meaning a full assignment to target network from training network.
huber_loss: Optional flat specifying Huber-loss clipping.
expert_margin: Positive float specifying enforced supervised margin between expert action Q-value and other
Q-values.
supervised_weight: Weight of supervised loss term.
demo_memory_capacity: Int describing capacity of expert demonstration memory.
demo_sampling_ratio: Runtime sampling ratio of expert data.
"""
self.target_sync_frequency = target_sync_frequency
self.target_update_weight = target_update_weight
self.huber_loss = huber_loss
self.expert_margin = expert_margin
self.supervised_weight = supervised_weight
super(DQFDAgent, self).__init__(
states_spec=states_spec,
actions_spec=actions_spec,
batched_observe=batched_observe,
scope=scope,
# parameters specific to LearningAgent
summary_spec=summary_spec,
network_spec=network_spec,
discount=discount,
device=device,
session_config=session_config,
saver_spec=saver_spec,
distributed_spec=distributed_spec,
optimizer=optimizer,
variable_noise=variable_noise,
states_preprocessing_spec=states_preprocessing_spec,
explorations_spec=explorations_spec,
reward_preprocessing_spec=reward_preprocessing_spec,
distributions_spec=distributions_spec,
entropy_regularization=entropy_regularization,
# parameters specific to MemoryAgents
batch_size=batch_size,
memory=memory,
first_update=first_update,
update_frequency=update_frequency,
repeat_update=repeat_update
)
# The demo_sampling_ratio, called p in paper, controls ratio of expert vs online training samples
# p = n_demo / (n_demo + n_replay) => n_demo = p * n_replay / (1 - p)
self.demo_memory_capacity = demo_memory_capacity
self.demo_batch_size = int(demo_sampling_ratio * batch_size / (1.0 - demo_sampling_ratio))
assert self.demo_batch_size > 0, 'Check DQFD sampling parameters to ensure ' \
'demo_batch_size is positive. (Calculated {} based on current' \
' parameters)'.format(self.demo_batch_size)
# This is the demonstration memory that we will fill with observations before starting
# the main training loop
self.demo_memory = Replay(self.states_spec, self.actions_spec, self.demo_memory_capacity)
def initialize_model(self):
return QDemoModel(
states_spec=self.states_spec,
actions_spec=self.actions_spec,
network_spec=self.network_spec,
device=self.device,
session_config=self.session_config,
scope=self.scope,
saver_spec=self.saver_spec,
summary_spec=self.summary_spec,
distributed_spec=self.distributed_spec,
optimizer=self.optimizer,
discount=self.discount,
variable_noise=self.variable_noise,
states_preprocessing_spec=self.states_preprocessing_spec,
explorations_spec=self.explorations_spec,
reward_preprocessing_spec=self.reward_preprocessing_spec,
distributions_spec=self.distributions_spec,
entropy_regularization=self.entropy_regularization,
target_sync_frequency=self.target_sync_frequency,
target_update_weight=self.target_update_weight,
# DQFD always uses double dqn, which is a required key for a q-model.
double_q_model=True,
huber_loss=self.huber_loss,
# TEMP: Random sampling fix
random_sampling_fix=True,
expert_margin=self.expert_margin,
supervised_weight=self.supervised_weight
)
def observe(self, reward, terminal):
"""
Adds observations, updates via sampling from memories according to update rate.
DQFD samples from the online replay memory and the demo memory with
the fractions controlled by a hyper parameter p called 'expert sampling ratio.
"""
super(DQFDAgent, self).observe(reward=reward, terminal=terminal)
if self.timestep >= self.first_update and self.timestep % self.update_frequency == 0:
for _ in xrange(self.repeat_update):
batch = self.demo_memory.get_batch(batch_size=self.demo_batch_size, next_states=True)
self.model.demonstration_update(
states={name: np.stack((batch['states'][name],
batch['next_states'][name])) for name in batch['states']},
internals=batch['internals'],
actions=batch['actions'],
terminal=batch['terminal'],
reward=batch['reward']
)
def import_demonstrations(self, demonstrations):
"""
Imports demonstrations, i.e. expert observations. Note that for large numbers of observations,
set_demonstrations is more appropriate, which directly sets memory contents to an array an expects
a different layout.
Args:
demonstrations: List of observation dicts
"""
for observation in demonstrations:
if self.unique_state:
state = dict(state=observation['states'])
else:
state = observation['states']
if self.unique_action:
action = dict(action=observation['actions'])
else:
action = observation['actions']
self.demo_memory.add_observation(
states=state,
internals=observation['internals'],
actions=action,
terminal=observation['terminal'],
reward=observation['reward']
)
def set_demonstrations(self, batch):
"""
Set all demonstrations from batch data. Expects a dict wherein each value contains an array
containing all states, actions, rewards, terminals and internals respectively.
Args:
batch:
"""
self.demo_memory.set_memory(
states=batch['states'],
internals=batch['internals'],
actions=batch['actions'],
terminal=batch['terminal'],
reward=batch['reward']
)
def pretrain(self, steps):
"""
Computes pre-train updates.
Args:
steps: Number of updates to execute.
"""
for _ in xrange(steps):
# Sample from demo memory.
batch = self.demo_memory.get_batch(batch_size=self.batch_size, next_states=True)
# Update using both double Q-learning and supervised double_q_loss.
self.model.demonstration_update(
states={name: np.stack((batch['states'][name],
batch['next_states'][name])) for name in batch['states']},
internals=batch['internals'],
actions=batch['actions'],
terminal=batch['terminal'],
reward=batch['reward']
)
| en | 0.775466 | # Copyright 2017 reinforce.io. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Deep Q-learning from demonstration (DQFD) agent ([Hester et al., 2017](https://arxiv.org/abs/1704.03732)). This agent uses DQN to pre-train from demonstration data via an additional supervised loss term. # parameters specific to LearningAgents # parameters specific to MemoryAgents # parameters specific to DQFD agents Deep Q-learning from demonstration (DQFD) agent ([Hester et al., 2017](https://arxiv.org/abs/1704.03732)). This agent uses DQN to pre-train from demonstration data in combination with a supervised loss. Args: target_sync_frequency: Interval between optimization calls synchronizing the target network. target_update_weight: Update weight, 1.0 meaning a full assignment to target network from training network. huber_loss: Optional flat specifying Huber-loss clipping. expert_margin: Positive float specifying enforced supervised margin between expert action Q-value and other Q-values. supervised_weight: Weight of supervised loss term. demo_memory_capacity: Int describing capacity of expert demonstration memory. demo_sampling_ratio: Runtime sampling ratio of expert data. 
# parameters specific to LearningAgent # parameters specific to MemoryAgents # The demo_sampling_ratio, called p in paper, controls ratio of expert vs online training samples # p = n_demo / (n_demo + n_replay) => n_demo = p * n_replay / (1 - p) # This is the demonstration memory that we will fill with observations before starting # the main training loop # DQFD always uses double dqn, which is a required key for a q-model. # TEMP: Random sampling fix Adds observations, updates via sampling from memories according to update rate. DQFD samples from the online replay memory and the demo memory with the fractions controlled by a hyper parameter p called 'expert sampling ratio. Imports demonstrations, i.e. expert observations. Note that for large numbers of observations, set_demonstrations is more appropriate, which directly sets memory contents to an array an expects a different layout. Args: demonstrations: List of observation dicts Set all demonstrations from batch data. Expects a dict wherein each value contains an array containing all states, actions, rewards, terminals and internals respectively. Args: batch: Computes pre-train updates. Args: steps: Number of updates to execute. # Sample from demo memory. # Update using both double Q-learning and supervised double_q_loss. | 1.599075 | 2 |
test/unit/mysql_class/gtidset_or.py | deepcoder42/mysql-lib | 1 | 6625174 | #!/usr/bin/python
# Classification (U)
"""Program: gtidset_or.py
Description: Unit testing of GTIDSet.__or__ method in mysql_class.py.
Usage:
test/unit/mysql_class/gtidset_or.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import mysql_class
import version
__version__ = version.__version__
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp -> Initialize testing environment.
test_or_not_subset -> Test set 1 is not subset of set 2.
test_or_subset -> Test set 1 is subset of set 2.
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.gtidset1 = "50ceee08-9500-11ea-b699-002170204789:1-43"
self.gtidset2 = "50ceee08-9500-11ea-b699-002170204789:43-43"
self.gtidset3 = "50ceee08-9500-11ea-b699-002170204789:1-41"
self.gtidset4 = "50ceee08-9500-11ea-b699-002170204789:1-41:43-43"
def test_or_not_subset(self):
"""Function: test_or_not_subset
Description: Test set 1 is not subset of set 2.
Arguments:
"""
gtid1 = mysql_class.GTIDSet(self.gtidset3)
gtid2 = mysql_class.GTIDSet(self.gtidset2)
results = mysql_class.GTIDSet(self.gtidset4)
data = gtid1 | gtid2
self.assertEqual(data.gtids, results.gtids)
def test_or_subset(self):
"""Function: test_or_subset
Description: Test set 1 is subset of set 2.
Arguments:
"""
gtid1 = mysql_class.GTIDSet(self.gtidset1)
gtid2 = mysql_class.GTIDSet(self.gtidset2)
data = gtid1 | gtid2
self.assertEqual(data.gtids, gtid1.gtids)
if __name__ == "__main__":
unittest.main()
| #!/usr/bin/python
# Classification (U)
"""Program: gtidset_or.py
Description: Unit testing of GTIDSet.__or__ method in mysql_class.py.
Usage:
test/unit/mysql_class/gtidset_or.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import mysql_class
import version
__version__ = version.__version__
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp -> Initialize testing environment.
test_or_not_subset -> Test set 1 is not subset of set 2.
test_or_subset -> Test set 1 is subset of set 2.
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.gtidset1 = "50ceee08-9500-11ea-b699-002170204789:1-43"
self.gtidset2 = "50ceee08-9500-11ea-b699-002170204789:43-43"
self.gtidset3 = "50ceee08-9500-11ea-b699-002170204789:1-41"
self.gtidset4 = "50ceee08-9500-11ea-b699-002170204789:1-41:43-43"
def test_or_not_subset(self):
"""Function: test_or_not_subset
Description: Test set 1 is not subset of set 2.
Arguments:
"""
gtid1 = mysql_class.GTIDSet(self.gtidset3)
gtid2 = mysql_class.GTIDSet(self.gtidset2)
results = mysql_class.GTIDSet(self.gtidset4)
data = gtid1 | gtid2
self.assertEqual(data.gtids, results.gtids)
def test_or_subset(self):
"""Function: test_or_subset
Description: Test set 1 is subset of set 2.
Arguments:
"""
gtid1 = mysql_class.GTIDSet(self.gtidset1)
gtid2 = mysql_class.GTIDSet(self.gtidset2)
data = gtid1 | gtid2
self.assertEqual(data.gtids, gtid1.gtids)
if __name__ == "__main__":
unittest.main()
| en | 0.720285 | #!/usr/bin/python # Classification (U) Program: gtidset_or.py Description: Unit testing of GTIDSet.__or__ method in mysql_class.py. Usage: test/unit/mysql_class/gtidset_or.py Arguments: # Libraries and Global Variables # Standard # Third-party # Local Class: UnitTest Description: Class which is a representation of a unit testing. Methods: setUp -> Initialize testing environment. test_or_not_subset -> Test set 1 is not subset of set 2. test_or_subset -> Test set 1 is subset of set 2. Function: setUp Description: Initialization for unit testing. Arguments: Function: test_or_not_subset Description: Test set 1 is not subset of set 2. Arguments: Function: test_or_subset Description: Test set 1 is subset of set 2. Arguments: | 2.830177 | 3 |
tests/components/sonarr/__init__.py | pszafer/core | 3 | 6625175 | """Tests for the Sonarr component."""
from socket import gaierror as SocketGIAError
from homeassistant.components.sonarr.const import (
CONF_BASE_PATH,
CONF_UPCOMING_DAYS,
CONF_WANTED_MAX_ITEMS,
DEFAULT_UPCOMING_DAYS,
DEFAULT_WANTED_MAX_ITEMS,
DOMAIN,
)
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_PORT,
CONF_SSL,
CONF_VERIFY_SSL,
)
from homeassistant.helpers.typing import HomeAssistantType
from tests.async_mock import patch
from tests.common import MockConfigEntry, load_fixture
from tests.test_util.aiohttp import AiohttpClientMocker
HOST = "192.168.1.189"
PORT = 8989
BASE_PATH = "/api"
API_KEY = "MOCK_API_KEY"
MOCK_SENSOR_CONFIG = {
"platform": DOMAIN,
"host": HOST,
"api_key": API_KEY,
"days": 3,
}
MOCK_USER_INPUT = {
CONF_HOST: HOST,
CONF_PORT: PORT,
CONF_BASE_PATH: BASE_PATH,
CONF_SSL: False,
CONF_API_KEY: API_KEY,
}
def mock_connection(
aioclient_mock: AiohttpClientMocker,
host: str = HOST,
port: str = PORT,
base_path: str = BASE_PATH,
error: bool = False,
invalid_auth: bool = False,
server_error: bool = False,
) -> None:
"""Mock Sonarr connection."""
if error:
mock_connection_error(
aioclient_mock, host=host, port=port, base_path=base_path,
)
return
if invalid_auth:
mock_connection_invalid_auth(
aioclient_mock, host=host, port=port, base_path=base_path,
)
return
if server_error:
mock_connection_server_error(
aioclient_mock, host=host, port=port, base_path=base_path,
)
return
sonarr_url = f"http://{host}:{port}{base_path}"
aioclient_mock.get(
f"{sonarr_url}/system/status",
text=load_fixture("sonarr/system-status.json"),
headers={"Content-Type": "application/json"},
)
aioclient_mock.get(
f"{sonarr_url}/diskspace",
text=load_fixture("sonarr/diskspace.json"),
headers={"Content-Type": "application/json"},
)
aioclient_mock.get(
f"{sonarr_url}/calendar",
text=load_fixture("sonarr/calendar.json"),
headers={"Content-Type": "application/json"},
)
aioclient_mock.get(
f"{sonarr_url}/command",
text=load_fixture("sonarr/command.json"),
headers={"Content-Type": "application/json"},
)
aioclient_mock.get(
f"{sonarr_url}/queue",
text=load_fixture("sonarr/queue.json"),
headers={"Content-Type": "application/json"},
)
aioclient_mock.get(
f"{sonarr_url}/series",
text=load_fixture("sonarr/series.json"),
headers={"Content-Type": "application/json"},
)
aioclient_mock.get(
f"{sonarr_url}/wanted/missing",
text=load_fixture("sonarr/wanted-missing.json"),
headers={"Content-Type": "application/json"},
)
def mock_connection_error(
aioclient_mock: AiohttpClientMocker,
host: str = HOST,
port: str = PORT,
base_path: str = BASE_PATH,
) -> None:
"""Mock Sonarr connection errors."""
sonarr_url = f"http://{host}:{port}{base_path}"
aioclient_mock.get(f"{sonarr_url}/system/status", exc=SocketGIAError)
aioclient_mock.get(f"{sonarr_url}/diskspace", exc=SocketGIAError)
aioclient_mock.get(f"{sonarr_url}/calendar", exc=SocketGIAError)
aioclient_mock.get(f"{sonarr_url}/command", exc=SocketGIAError)
aioclient_mock.get(f"{sonarr_url}/queue", exc=SocketGIAError)
aioclient_mock.get(f"{sonarr_url}/series", exc=SocketGIAError)
aioclient_mock.get(f"{sonarr_url}/missing/wanted", exc=SocketGIAError)
def mock_connection_invalid_auth(
aioclient_mock: AiohttpClientMocker,
host: str = HOST,
port: str = PORT,
base_path: str = BASE_PATH,
) -> None:
"""Mock Sonarr invalid auth errors."""
sonarr_url = f"http://{host}:{port}{base_path}"
aioclient_mock.get(f"{sonarr_url}/system/status", status=403)
aioclient_mock.get(f"{sonarr_url}/diskspace", status=403)
aioclient_mock.get(f"{sonarr_url}/calendar", status=403)
aioclient_mock.get(f"{sonarr_url}/command", status=403)
aioclient_mock.get(f"{sonarr_url}/queue", status=403)
aioclient_mock.get(f"{sonarr_url}/series", status=403)
aioclient_mock.get(f"{sonarr_url}/missing/wanted", status=403)
def mock_connection_server_error(
aioclient_mock: AiohttpClientMocker,
host: str = HOST,
port: str = PORT,
base_path: str = BASE_PATH,
) -> None:
"""Mock Sonarr server errors."""
sonarr_url = f"http://{host}:{port}{base_path}"
aioclient_mock.get(f"{sonarr_url}/system/status", status=500)
aioclient_mock.get(f"{sonarr_url}/diskspace", status=500)
aioclient_mock.get(f"{sonarr_url}/calendar", status=500)
aioclient_mock.get(f"{sonarr_url}/command", status=500)
aioclient_mock.get(f"{sonarr_url}/queue", status=500)
aioclient_mock.get(f"{sonarr_url}/series", status=500)
aioclient_mock.get(f"{sonarr_url}/missing/wanted", status=500)
async def setup_integration(
hass: HomeAssistantType,
aioclient_mock: AiohttpClientMocker,
host: str = HOST,
port: str = PORT,
base_path: str = BASE_PATH,
api_key: str = API_KEY,
unique_id: str = None,
skip_entry_setup: bool = False,
connection_error: bool = False,
invalid_auth: bool = False,
server_error: bool = False,
) -> MockConfigEntry:
"""Set up the Sonarr integration in Home Assistant."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=unique_id,
data={
CONF_HOST: host,
CONF_PORT: port,
CONF_BASE_PATH: base_path,
CONF_SSL: False,
CONF_VERIFY_SSL: False,
CONF_API_KEY: api_key,
CONF_UPCOMING_DAYS: DEFAULT_UPCOMING_DAYS,
CONF_WANTED_MAX_ITEMS: DEFAULT_WANTED_MAX_ITEMS,
},
options={
CONF_UPCOMING_DAYS: DEFAULT_UPCOMING_DAYS,
CONF_WANTED_MAX_ITEMS: DEFAULT_WANTED_MAX_ITEMS,
},
)
entry.add_to_hass(hass)
mock_connection(
aioclient_mock,
host=host,
port=port,
base_path=base_path,
error=connection_error,
invalid_auth=invalid_auth,
server_error=server_error,
)
if not skip_entry_setup:
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
def _patch_async_setup(return_value=True):
"""Patch the async setup of sonarr."""
return patch(
"homeassistant.components.sonarr.async_setup", return_value=return_value
)
def _patch_async_setup_entry(return_value=True):
"""Patch the async entry setup of sonarr."""
return patch(
"homeassistant.components.sonarr.async_setup_entry", return_value=return_value,
)
| """Tests for the Sonarr component."""
from socket import gaierror as SocketGIAError
from homeassistant.components.sonarr.const import (
CONF_BASE_PATH,
CONF_UPCOMING_DAYS,
CONF_WANTED_MAX_ITEMS,
DEFAULT_UPCOMING_DAYS,
DEFAULT_WANTED_MAX_ITEMS,
DOMAIN,
)
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_PORT,
CONF_SSL,
CONF_VERIFY_SSL,
)
from homeassistant.helpers.typing import HomeAssistantType
from tests.async_mock import patch
from tests.common import MockConfigEntry, load_fixture
from tests.test_util.aiohttp import AiohttpClientMocker
HOST = "192.168.1.189"
PORT = 8989
BASE_PATH = "/api"
API_KEY = "MOCK_API_KEY"
MOCK_SENSOR_CONFIG = {
"platform": DOMAIN,
"host": HOST,
"api_key": API_KEY,
"days": 3,
}
MOCK_USER_INPUT = {
CONF_HOST: HOST,
CONF_PORT: PORT,
CONF_BASE_PATH: BASE_PATH,
CONF_SSL: False,
CONF_API_KEY: API_KEY,
}
def mock_connection(
aioclient_mock: AiohttpClientMocker,
host: str = HOST,
port: str = PORT,
base_path: str = BASE_PATH,
error: bool = False,
invalid_auth: bool = False,
server_error: bool = False,
) -> None:
"""Mock Sonarr connection."""
if error:
mock_connection_error(
aioclient_mock, host=host, port=port, base_path=base_path,
)
return
if invalid_auth:
mock_connection_invalid_auth(
aioclient_mock, host=host, port=port, base_path=base_path,
)
return
if server_error:
mock_connection_server_error(
aioclient_mock, host=host, port=port, base_path=base_path,
)
return
sonarr_url = f"http://{host}:{port}{base_path}"
aioclient_mock.get(
f"{sonarr_url}/system/status",
text=load_fixture("sonarr/system-status.json"),
headers={"Content-Type": "application/json"},
)
aioclient_mock.get(
f"{sonarr_url}/diskspace",
text=load_fixture("sonarr/diskspace.json"),
headers={"Content-Type": "application/json"},
)
aioclient_mock.get(
f"{sonarr_url}/calendar",
text=load_fixture("sonarr/calendar.json"),
headers={"Content-Type": "application/json"},
)
aioclient_mock.get(
f"{sonarr_url}/command",
text=load_fixture("sonarr/command.json"),
headers={"Content-Type": "application/json"},
)
aioclient_mock.get(
f"{sonarr_url}/queue",
text=load_fixture("sonarr/queue.json"),
headers={"Content-Type": "application/json"},
)
aioclient_mock.get(
f"{sonarr_url}/series",
text=load_fixture("sonarr/series.json"),
headers={"Content-Type": "application/json"},
)
aioclient_mock.get(
f"{sonarr_url}/wanted/missing",
text=load_fixture("sonarr/wanted-missing.json"),
headers={"Content-Type": "application/json"},
)
def mock_connection_error(
aioclient_mock: AiohttpClientMocker,
host: str = HOST,
port: str = PORT,
base_path: str = BASE_PATH,
) -> None:
"""Mock Sonarr connection errors."""
sonarr_url = f"http://{host}:{port}{base_path}"
aioclient_mock.get(f"{sonarr_url}/system/status", exc=SocketGIAError)
aioclient_mock.get(f"{sonarr_url}/diskspace", exc=SocketGIAError)
aioclient_mock.get(f"{sonarr_url}/calendar", exc=SocketGIAError)
aioclient_mock.get(f"{sonarr_url}/command", exc=SocketGIAError)
aioclient_mock.get(f"{sonarr_url}/queue", exc=SocketGIAError)
aioclient_mock.get(f"{sonarr_url}/series", exc=SocketGIAError)
aioclient_mock.get(f"{sonarr_url}/missing/wanted", exc=SocketGIAError)
def mock_connection_invalid_auth(
aioclient_mock: AiohttpClientMocker,
host: str = HOST,
port: str = PORT,
base_path: str = BASE_PATH,
) -> None:
"""Mock Sonarr invalid auth errors."""
sonarr_url = f"http://{host}:{port}{base_path}"
aioclient_mock.get(f"{sonarr_url}/system/status", status=403)
aioclient_mock.get(f"{sonarr_url}/diskspace", status=403)
aioclient_mock.get(f"{sonarr_url}/calendar", status=403)
aioclient_mock.get(f"{sonarr_url}/command", status=403)
aioclient_mock.get(f"{sonarr_url}/queue", status=403)
aioclient_mock.get(f"{sonarr_url}/series", status=403)
aioclient_mock.get(f"{sonarr_url}/missing/wanted", status=403)
def mock_connection_server_error(
aioclient_mock: AiohttpClientMocker,
host: str = HOST,
port: str = PORT,
base_path: str = BASE_PATH,
) -> None:
"""Mock Sonarr server errors."""
sonarr_url = f"http://{host}:{port}{base_path}"
aioclient_mock.get(f"{sonarr_url}/system/status", status=500)
aioclient_mock.get(f"{sonarr_url}/diskspace", status=500)
aioclient_mock.get(f"{sonarr_url}/calendar", status=500)
aioclient_mock.get(f"{sonarr_url}/command", status=500)
aioclient_mock.get(f"{sonarr_url}/queue", status=500)
aioclient_mock.get(f"{sonarr_url}/series", status=500)
aioclient_mock.get(f"{sonarr_url}/missing/wanted", status=500)
async def setup_integration(
hass: HomeAssistantType,
aioclient_mock: AiohttpClientMocker,
host: str = HOST,
port: str = PORT,
base_path: str = BASE_PATH,
api_key: str = API_KEY,
unique_id: str = None,
skip_entry_setup: bool = False,
connection_error: bool = False,
invalid_auth: bool = False,
server_error: bool = False,
) -> MockConfigEntry:
"""Set up the Sonarr integration in Home Assistant."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=unique_id,
data={
CONF_HOST: host,
CONF_PORT: port,
CONF_BASE_PATH: base_path,
CONF_SSL: False,
CONF_VERIFY_SSL: False,
CONF_API_KEY: api_key,
CONF_UPCOMING_DAYS: DEFAULT_UPCOMING_DAYS,
CONF_WANTED_MAX_ITEMS: DEFAULT_WANTED_MAX_ITEMS,
},
options={
CONF_UPCOMING_DAYS: DEFAULT_UPCOMING_DAYS,
CONF_WANTED_MAX_ITEMS: DEFAULT_WANTED_MAX_ITEMS,
},
)
entry.add_to_hass(hass)
mock_connection(
aioclient_mock,
host=host,
port=port,
base_path=base_path,
error=connection_error,
invalid_auth=invalid_auth,
server_error=server_error,
)
if not skip_entry_setup:
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
def _patch_async_setup(return_value=True):
"""Patch the async setup of sonarr."""
return patch(
"homeassistant.components.sonarr.async_setup", return_value=return_value
)
def _patch_async_setup_entry(return_value=True):
"""Patch the async entry setup of sonarr."""
return patch(
"homeassistant.components.sonarr.async_setup_entry", return_value=return_value,
)
| en | 0.686574 | Tests for the Sonarr component. Mock Sonarr connection. Mock Sonarr connection errors. Mock Sonarr invalid auth errors. Mock Sonarr server errors. Set up the Sonarr integration in Home Assistant. Patch the async setup of sonarr. Patch the async entry setup of sonarr. | 2.103566 | 2 |
supriya/nonrealtime/bases.py | josiah-wolf-oberholtzer/supriya | 191 | 6625176 | import abc
import functools
from uqbar.objects import get_repr
from supriya.system import SupriyaObject
class SessionObject(SupriyaObject):
"""
A non-realtime session object, analogous to ServerObject.
"""
### CLASS VARIABLES ###
__documentation_section__ = "Session Internals"
__slots__ = ()
### INITIALIZER ###
@abc.abstractmethod
def __init__(self, session):
import supriya.nonrealtime
prototype = (supriya.nonrealtime.Session, type(None))
assert isinstance(session, prototype)
self._session = session
### SPECIAL METHODS ###
def __repr__(self):
return get_repr(self, multiline=False)
### PUBLIC METHODS ###
@staticmethod
def require_offset(function):
@functools.wraps(function)
def wrapper(self, *args, **kwargs):
import supriya.nonrealtime
if isinstance(self, supriya.nonrealtime.Session):
session = self
else:
session = self.session
if "offset" not in kwargs or kwargs["offset"] is None:
if not session._active_moments:
raise ValueError("No active moment.")
offset = session._active_moments[-1].offset
kwargs["offset"] = offset
if isinstance(self, SessionObject):
if not (self.start_offset <= kwargs["offset"] <= self.stop_offset):
raise ValueError(
"Offset {} must intersect [{}, {}]".format(
float(offset), self.start_offset, self.stop_offset
)
)
with session.at(kwargs["offset"]):
return function(self, *args, **kwargs)
return wrapper
### PUBLIC PROPERTIES ###
@property
def session(self):
return self._session
| import abc
import functools
from uqbar.objects import get_repr
from supriya.system import SupriyaObject
class SessionObject(SupriyaObject):
"""
A non-realtime session object, analogous to ServerObject.
"""
### CLASS VARIABLES ###
__documentation_section__ = "Session Internals"
__slots__ = ()
### INITIALIZER ###
@abc.abstractmethod
def __init__(self, session):
import supriya.nonrealtime
prototype = (supriya.nonrealtime.Session, type(None))
assert isinstance(session, prototype)
self._session = session
### SPECIAL METHODS ###
def __repr__(self):
return get_repr(self, multiline=False)
### PUBLIC METHODS ###
@staticmethod
def require_offset(function):
@functools.wraps(function)
def wrapper(self, *args, **kwargs):
import supriya.nonrealtime
if isinstance(self, supriya.nonrealtime.Session):
session = self
else:
session = self.session
if "offset" not in kwargs or kwargs["offset"] is None:
if not session._active_moments:
raise ValueError("No active moment.")
offset = session._active_moments[-1].offset
kwargs["offset"] = offset
if isinstance(self, SessionObject):
if not (self.start_offset <= kwargs["offset"] <= self.stop_offset):
raise ValueError(
"Offset {} must intersect [{}, {}]".format(
float(offset), self.start_offset, self.stop_offset
)
)
with session.at(kwargs["offset"]):
return function(self, *args, **kwargs)
return wrapper
### PUBLIC PROPERTIES ###
@property
def session(self):
return self._session
| en | 0.259807 | A non-realtime session object, analogous to ServerObject. ### CLASS VARIABLES ### ### INITIALIZER ### ### SPECIAL METHODS ### ### PUBLIC METHODS ### ### PUBLIC PROPERTIES ### | 2.443412 | 2 |
PyChanter/_models/directoryTreeModel.py | hariharan849/PyChanter | 3 | 6625177 | <reponame>hariharan849/PyChanter<filename>PyChanter/_models/directoryTreeModel.py
"""
Directory tree model to display directory Information
"""
from . import genericTreeModel as _genericTreeModel
class DirectoryTreeModel(_genericTreeModel.GenericTreeModel):
def headerData(self, section, orientation, role):
"""
Returns Header data to display in column
"""
if role == _QtCore.Qt.DisplayRole:
if section == 0:
return "Find In Files"
super(DirectoryTreeModel, self).headerData(section, orientation, role)
def flags(self, index):
"""
Returns Flags for the data
"""
return _QtCore.Qt.ItemIsEnabled | _QtCore.Qt.ItemIsSelectable
| """
Directory tree model to display directory Information
"""
from . import genericTreeModel as _genericTreeModel
class DirectoryTreeModel(_genericTreeModel.GenericTreeModel):
def headerData(self, section, orientation, role):
"""
Returns Header data to display in column
"""
if role == _QtCore.Qt.DisplayRole:
if section == 0:
return "Find In Files"
super(DirectoryTreeModel, self).headerData(section, orientation, role)
def flags(self, index):
"""
Returns Flags for the data
"""
return _QtCore.Qt.ItemIsEnabled | _QtCore.Qt.ItemIsSelectable | en | 0.482658 | Directory tree model to display directory Information Returns Header data to display in column Returns Flags for the data | 2.562515 | 3 |
setup.py | liordanon/dsbasic | 0 | 6625178 | <filename>setup.py
from distutils.core import setup
setup(
name='dsbasic',
version='0.1',
packages=['dsbasic'],
license='MIT'
)
| <filename>setup.py
from distutils.core import setup
setup(
name='dsbasic',
version='0.1',
packages=['dsbasic'],
license='MIT'
)
| none | 1 | 0.940619 | 1 | |
PCONV_operator/Mtimer.py | limuhit/pseudocylindrical_convolution | 6 | 6625179 | <reponame>limuhit/pseudocylindrical_convolution
import torch
class Timer():
def __init__(self, flag=False):
self.start_t = torch.cuda.Event(enable_timing=True)
self.end_t = torch.cuda.Event(enable_timing=True)
self.flag = flag
def start(self):
if self.flag:
self.start_t.record()
def end(self, out_string=''):
if self.flag:
self.end_t.record()
torch.cuda.synchronize()
print(out_string, self.start_t.elapsed_time(self.end_t)) | import torch
class Timer():
def __init__(self, flag=False):
self.start_t = torch.cuda.Event(enable_timing=True)
self.end_t = torch.cuda.Event(enable_timing=True)
self.flag = flag
def start(self):
if self.flag:
self.start_t.record()
def end(self, out_string=''):
if self.flag:
self.end_t.record()
torch.cuda.synchronize()
print(out_string, self.start_t.elapsed_time(self.end_t)) | none | 1 | 2.72647 | 3 | |
venv/lib/python3.8/site-packages/numpy/distutils/intelccompiler.py | Retraces/UkraineBot | 2 | 6625180 | <reponame>Retraces/UkraineBot
/home/runner/.cache/pip/pool/37/fa/6f/5a394b3917651f7e1cb3dee85382c136bfecf6be5d76c9a67bb5c4bece | /home/runner/.cache/pip/pool/37/fa/6f/5a394b3917651f7e1cb3dee85382c136bfecf6be5d76c9a67bb5c4bece | none | 1 | 0.892406 | 1 | |
python3/help/apihelper1.py | jtraver/dev | 0 | 6625181 | #!/usr/bin/env python3
#!/usr/bin/python
import apihelper
apihelper.info(apihelper)
| #!/usr/bin/env python3
#!/usr/bin/python
import apihelper
apihelper.info(apihelper)
| ru | 0.236488 | #!/usr/bin/env python3 #!/usr/bin/python | 1.136986 | 1 |
ML_Chinahadoop/04/code/test/test2.py | lsieun/learn-AI | 1 | 6625182 | #coding:utf-8
print('This is in test1.py')
print(__name__)
print(__file__) | #coding:utf-8
print('This is in test1.py')
print(__name__)
print(__file__) | en | 0.795494 | #coding:utf-8 | 1.385595 | 1 |
client/bahub/bahubapp/mapping/handlers.py | FeatureToggleStudy/file-repository | 0 | 6625183 |
from ..handler.dockervolumebackup import DockerVolumeHotBackup, DockerVolumeBackup
from ..handler.localfilebackup import LocalFileBackup
from ..handler.commandoutputbackup import CommandOutputBackup
from ..handler.dockeroutputbackup import DockerCommandOutputBackup
from ..handler.mysqlbackup import MySQLBackup
class HandlersMapping:
_mapping = {
'docker_hot_volumes': DockerVolumeHotBackup,
'docker_volumes': DockerVolumeBackup,
'mysql': MySQLBackup,
'docker_output': DockerCommandOutputBackup,
'command_output': CommandOutputBackup,
'directory': LocalFileBackup
}
def get(self, name: str):
""" Resolves "type" configuration key into object, on error throws KeyError """
return self._mapping[name]
def has_handler(self, name: str) -> bool:
return name in self._mapping
|
from ..handler.dockervolumebackup import DockerVolumeHotBackup, DockerVolumeBackup
from ..handler.localfilebackup import LocalFileBackup
from ..handler.commandoutputbackup import CommandOutputBackup
from ..handler.dockeroutputbackup import DockerCommandOutputBackup
from ..handler.mysqlbackup import MySQLBackup
class HandlersMapping:
_mapping = {
'docker_hot_volumes': DockerVolumeHotBackup,
'docker_volumes': DockerVolumeBackup,
'mysql': MySQLBackup,
'docker_output': DockerCommandOutputBackup,
'command_output': CommandOutputBackup,
'directory': LocalFileBackup
}
def get(self, name: str):
""" Resolves "type" configuration key into object, on error throws KeyError """
return self._mapping[name]
def has_handler(self, name: str) -> bool:
return name in self._mapping
| en | 0.856264 | Resolves "type" configuration key into object, on error throws KeyError | 2.131655 | 2 |
tools/importers/common/converters.py | shawncal/ELL | 2,094 | 6625184 | ####################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: converters.py (importers)
# Authors: <NAME>
#
# Requires: Python 3.x
#
####################################################################################################
import typing
import numpy as np
import ell
import logger
import common.memory_shapes as memory_shapes
_logger = logger.get()
class ImporterNode:
    """
    Common class for intermediate representation of nodes in the importer.
    The core importer engine can convert ImporterNodes into ELL Nodes
    and insert them into an ELL Model.
    """

    def __init__(self, id: str,
                 operation_type: str,
                 inputs: typing.Sequence[str] = None,
                 outputs: typing.Sequence[str] = None,
                 weights: typing.Mapping[str, typing.Any] = None,
                 attributes: typing.Mapping[str, typing.Any] = None,
                 padding: typing.Mapping[str, typing.Any] = None,
                 input_shapes: typing.Sequence[typing.Any] = None,
                 output_shapes: typing.Sequence[typing.Any] = None,
                 metadata: typing.Mapping[str, str] = None):
        """
        id: unique identifier for this node
        operation_type: string name of the operation type to be imported.
            This will get mapped to an ELL operation via the operation_map.
        inputs: array of strings representing where the input comes from.
            The string is the 'id' of another ImporterNode.
        outputs: array of strings representing the output tensors.
            The string is the 'id' of another ImporterNode.
        weights: dictionary of weight parameter labels to weight names e.g. a
            convolutional node may have {'weights': 'w123', 'bias': 'b832'}.
            Dictionary keys are specific to the ELL operation.
            The value is the id of a tensor in ImporterModel.tensors.
        attributes: dictionary of attribute names and values e.g. a
            convolutional node may have {'size': 3, 'step': 1, 'pad': 0 }.
            Dictionary keys are specific to the ELL operation.
        padding: dictionary of padding size and padding scheme e.g.
            {"size": 0, "scheme": ell.neural.PaddingScheme.zeros}
        input_shapes: array of tuples representing input shapes and ordering
            e.g. ((3,64,64), "channel_row_column").
            The ImporterEngine will take care of reshaping everything to
            match the order required by ELL.
        output_shapes: array of tuples representing output shapes and ordering
            e.g. ((32,8,8), "channel_row_column").
        metadata: optional additional metadata to store in the ell_nodes.

        All container arguments default to None, in which case a fresh empty
        container is created. (Using literal []/{} defaults would share one
        mutable object across every instance constructed with defaults.)
        Note: the parameter name 'id' shadows the builtin; it is kept for
        backward compatibility with callers that pass it by keyword.
        """
        self.id = id
        self.operation_type = operation_type
        self.weights = {} if weights is None else weights
        self.inputs = [] if inputs is None else inputs
        self.outputs = [] if outputs is None else outputs
        self.attributes = {} if attributes is None else attributes
        self.padding = {} if padding is None else padding
        # Output padding is filled in later by the engine; starts as "none".
        self.output_padding = {"size": 0, "scheme": ell.neural.PaddingScheme.zeros}
        self.input_shapes = [] if input_shapes is None else input_shapes
        self.output_shapes = [] if output_shapes is None else output_shapes
        self.metadata = {} if metadata is None else metadata

    def __repr__(self):
        """Multi-line debug summary of the node; inline tensor data is elided."""
        # Copy the attributes so the real dictionary is never mutated, then
        # replace any embedded tensor payload with a placeholder.
        attrs = dict((k, self.attributes[k]) for k in self.attributes)
        if "tensor" in attrs:
            attrs["tensor"] = "..."
        _print_line = ""
        _print_line += "{} {}: {} -> {}, attributes {}\n".format(self.operation_type, self.id, self.inputs,
                                                                 self.outputs, attrs)
        _print_line += " input_shape {}\n".format(self.input_shapes)
        _print_line += " output_shape {}\n".format(self.output_shapes)
        _print_line += " padding {}\n".format(self.padding)
        _print_line += " output_padding {}\n".format(self.output_padding)
        _print_line += " weights {}\n".format(self.weights.keys())
        return _print_line
class LookupTable:
    """
    A helper class that stores the mappings between:
    - tensor id and a tuple containing (tensor value, tensor order)
    - ELL id and ELL node. These get created during the conversion process.
    - importer node id and ELL ids. These get created during the conversion
      process. Note that one ImporterNode could get converted to multiple
      ELL nodes.
    In addition, there are convenience methods for accessing the tensors
    in appropriate ELL order.
    """
def __init__(self, tensors: typing.Mapping[str, typing.Any]):
# Stores mapping of ELL Node id string to ELL Node
self.ell_id_to_ell_nodes = {}
# Stores mapping of importer node id string to ELL Node id
self.importer_id_to_ell_ids = {}
# Stores mapping of ell node id string to Importer Node
self.ell_id_to_owning_importer_node = {}
# Stores mapping of output id string to owning ELL Node id
self.output_id_to_ell_ids = {}
# Stores mapping of tensor ids to numpy tensor instance
self.tensors = tensors
# Stores input nodes. When creating an ELL map from an ELL model,
# map inputs must be identified.
self.input_ell_nodes = []
# Stores output nodes When creating an ELL map from an ELL model,
# map inputs must be identified.
self.output_ell_nodes = []
def add_imported_ell_node(self, importer_node: ImporterNode, ell_node: ell.nodes.Node, set_group_id=True):
"""
Adds an ImporterNode and associated ELL node to the lookup.
"""
# Add to mapping of ELL Node id to ELL Node
ell_node_id = ell_node.GetId()
self.add_ell_node(ell_node)
# Add ImporterNode id to ELL Node id mapping
if importer_node.id in self.importer_id_to_ell_ids:
self.importer_id_to_ell_ids[importer_node.id].append(ell_node_id)
else:
self.importer_id_to_ell_ids[importer_node.id] = [ell_node_id]
_logger.debug("ImporterNode {} -> intermediate ELL nodes {}".format(
importer_node.id, self.importer_id_to_ell_ids[importer_node.id]))
# Add output id to owner mapping.
for output_id in importer_node.outputs:
self.set_owning_node_for_output(output_id, ell_node)
if set_group_id:
# Set the node's metadata to show where this node came from
ell_node.SetMetadataValue("GroupId", importer_node.id)
# Also use this as the node's friendly name (by default)
ell_node.SetMetadataValue("name", importer_node.id)
# concatenate any importer_node metadata provided by importer
if importer_node.metadata is not None:
for key in importer_node.metadata:
value = importer_node.metadata[key]
ell_node.SetMetadataValue(key, value)
# Add owning id mapping
self.ell_id_to_owning_importer_node[ell_node_id] = importer_node
def add_ell_node(self, ell_node: ell.nodes.Node):
"""
Adds an ELL node to the lookup.
"""
ell_node_id = ell_node.GetId()
self.ell_id_to_ell_nodes[ell_node_id] = ell_node
def get_ell_node_from_id(self, node_id: str):
return self.ell_id_to_ell_nodes[node_id]
def get_ell_id(self, importer_node_id: str):
"""
Return the id of the last ELL node associated with this importer node.
"""
id = None
if importer_node_id in self.importer_id_to_ell_ids:
id = self.importer_id_to_ell_ids[importer_node_id][-1]
return id
def get_ell_node_from_importer_node_id(self, importer_node_id: str):
"""
Return the last ELL node associated with this importer node.
"""
node = None
if importer_node_id in self.importer_id_to_ell_ids:
id = self.importer_id_to_ell_ids[importer_node_id][-1]
if id in self.ell_id_to_ell_nodes:
node = self.ell_id_to_ell_nodes[id]
return node
def get_tensor_in_ell_order(self, uid: str):
"""
Returns a numpy array in ELL order
"""
if uid not in self.tensors:
raise Exception("Required tensor {} not found".format(uid))
original_tensor, order = self.tensors[uid]
return memory_shapes.get_tensor_in_ell_order(original_tensor, order)
def get_vector_from_constant(self, uid: str, size: int):
"""
Returns a single dimensional numpy array containing the tensor weights.
If the tensor is actually a scalar, expand it to be a vector of length
'size'.
"""
original_vector, order = self.tensors[uid]
# Workaround: For some reason, np.full is not returning a type that SWIG can parse.
# So just manually walk the array setting the scalar
array = np.zeros(size, dtype=np.float)
for i in range(array.size):
array[i] = original_vector
return array
def get_vector_in_ell_order(self, uid: str):
"""
Returns a single dimensional numpy array containing the tensor weights.
"""
original_vector, order = self.tensors[uid]
ordered_weights = np.zeros(original_vector.size, dtype=np.float)
i = 0
for value in original_vector:
ordered_weights[i] = value
i += 1
return ordered_weights
def get_tensor_info(self, uid: str):
"""
Returns a tuple containing (shape, order) for the tensor.
"""
value, order = self.tensors[uid]
return (value.shape, order)
    def get_port_elements_for_input(self, importer_node: ImporterNode, input_index=0) -> ell.nodes.PortElements:
        """
        Returns an ell.nodes.PortElements for the corresponding ImporterNode.

        importer_node - the node whose input is being resolved.
        input_index - which of the importer node's inputs to resolve.
        Raises Exception when no owning ELL node can be found.
        """
        try:
            # First check whether this importer node has any corresponding
            # ELL nodes yet:
            # - If it does, grab the output of the last ELL node which
            # is designated as the input to this node.
            # - If it doesn't, grab the output of the last ELL node which
            # the Importer's input is tied to.
            owning_node = self.get_ell_node_from_importer_node_id(importer_node.id)
            if owning_node is None:
                owning_node_id = self.output_id_to_ell_ids[importer_node.inputs[input_index]]
                owning_node = self.ell_id_to_ell_nodes[owning_node_id]
        except BaseException:
            raise Exception("Cannot get input port elements for {}, missing ELL owning node".format(importer_node.id))
        return self.get_output_port_elements_for_node(owning_node)
    def get_port_elements_and_memory_layout_for_input(self, importer_node: ImporterNode, input_index=0) \
            -> (ell.nodes.PortElements, ell.model.PortMemoryLayout):
        """
        Returns an (ell.nodes.PortElements, ell.nodes.PortMemoryLayout) for the corresponding input of the ImporterNode.

        The memory layout is derived from the producing importer node's
        output shape plus whatever output padding that node emits.
        """
        try:
            owning_ell_node = self.get_owning_node_for_output(importer_node.inputs[input_index])
            owning_importer_node = self.ell_id_to_owning_importer_node[owning_ell_node.GetId()]
            padding = owning_importer_node.output_padding["size"]
            output_shape = owning_importer_node.output_shapes[0]
            port_elements = self.get_port_elements_for_input(importer_node, input_index)
            port_memory_layout = memory_shapes.get_ell_port_memory_layout(output_shape[0], output_shape[1], padding)
        except BaseException:
            raise Exception("Could not get PortMemoryElements or PortMemoryLayout for importer node {}, input {}"
                            .format(importer_node.id, input_index))
        return (port_elements, port_memory_layout)
    def get_output_port_elements_for_node(self, ell_node: ell.nodes.Node, output_label: str = "output"):
        """
        Returns an ell.nodes.PortElements for the corresponding ELL node's
        output port that corresponds to 'output_label'.

        output_label defaults to the conventional "output" port name.
        Raises Exception when the node has no port with that label.
        """
        try:
            output_link = ell_node.GetOutputPort(output_label)
        except BaseException:
            raise Exception("Cannot get output port {} for {}".format(output_label, ell_node.GetId()))
        return ell.nodes.PortElements(output_link)
def get_owning_node_for_output(self, output_id: str) -> ell.nodes.Node:
"""
Gets the ELL node that owns the output identified by output_id.
"""
try:
ell_node_id = self.output_id_to_ell_ids[output_id]
ell_node = self.ell_id_to_ell_nodes[ell_node_id]
except BaseException:
raise Exception("Cannot find owning ELL node for output {}".format(output_id))
return ell_node
    def get_originating_importer_node_for_output(self, output_id: str) -> ImporterNode:
        """
        Gets the originating ImporterNode for the output identified by output_id.
        """
        try:
            # Map output -> owning ELL node id -> importer node that produced it.
            ell_node_id = self.output_id_to_ell_ids[output_id]
            importer_node = self.ell_id_to_owning_importer_node[ell_node_id]
        except BaseException:
            raise Exception("Cannot find originating ImporterNode node for output {}".format(output_id))
        return importer_node
    def set_owning_node_for_output(self, output_id: str, ell_node: ell.nodes.Node):
        """
        Sets the mapping for the ELL node that owns the output identified
        by output_id.
        """
        # Only the node's id is stored; resolve the node itself via
        # ell_id_to_ell_nodes when needed.
        self.output_id_to_ell_ids[output_id] = ell_node.GetId()
    def add_ell_input(self, ell_node: ell.nodes.Node):
        """Register an ELL input node (prepended: most recently added comes first)."""
        self.input_ell_nodes = [ell_node] + self.input_ell_nodes
    def get_ell_inputs(self):
        """Return the list of registered ELL input nodes (most recently added first)."""
        return self.input_ell_nodes
    def add_ell_output(self, ell_node: ell.nodes.Node):
        """Register an ELL output node (prepended: most recently added comes first)."""
        self.output_ell_nodes = [ell_node] + self.output_ell_nodes
    def get_ell_outputs(self):
        """Return the list of registered ELL output nodes (most recently added first)."""
        return self.output_ell_nodes
class ConvertBase:
    """
    Base class for converting an ImporterNode into an ELL Node.

    Subclasses declare which weights/attributes they need (required_weights,
    required_attributes) and override convert() / convert_node() to emit the
    corresponding ELL layer or node.
    """
    def __init__(self, node: ImporterNode):
        """
        Derived classes should initialize their required_inputs,
        required_weights and required_attributes
        """
        self.required_weights = []
        self.required_attributes = []
        self.importer_node = node
        # When True, can_convert() returns False (skips the node) instead of
        # raising when required weights/attributes are missing.
        self.optional = False
    def can_convert(self) -> bool:
        """
        Verify that the node contains the necessary inputs, weights and
        attributes to convert. Nodes that cannot be converted due to
        missing weights or attributes are deemed optional and are skipped.
        See comments in operation_map for examples.
        """
        for w in self.required_weights:
            if w not in self.importer_node.weights:
                if not self.optional:
                    raise Exception("Missing required weight '{}' on node {}_{}".format(
                        w, self.importer_node.operation_type, self.importer_node.id))
                return False
        for attr in self.required_attributes:
            if attr not in self.importer_node.attributes:
                if not self.optional:
                    raise Exception("Missing required attribute {} on node {}_{}".format(
                        attr, self.importer_node.operation_type, self.importer_node.id))
                return False
        return True
    def get_input_parameters(self, first_in_block=True, input_index=0):
        """
        Return the input shape and padding parameters as a tuple.
        first_in_block - indicates whether this will be the first ell
        node in a block. If it is, it will have its padding requirements set
        differently.
        input_index - indicates the index of the input shape requested.
        """
        shape_entry = self.importer_node.input_shapes[input_index]
        padding = 0
        if first_in_block:
            padding = self.importer_node.padding["size"]
        else:
            # Interior nodes of a block take the block's output shape and
            # carry no input padding.
            shape_entry = self.importer_node.output_shapes[0]
        ell_shape = self.get_ell_shape(shape_entry[0], shape_entry[1], padding)
        ell_padding_parameter = ell.neural.PaddingParameters(self.importer_node.padding["scheme"], padding)
        return (ell_shape, ell_padding_parameter)
    def get_output_parameters(self, last_in_block=True, output_index=0):
        """
        Return the output shape and padding parameters as a tuple.
        last_in_block - indicates whether this will be the last ell
        node in a block. If it is, it will have its output padding set
        differently.
        """
        shape_entry = self.importer_node.output_shapes[output_index]
        padding = 0
        if last_in_block:
            padding = self.importer_node.output_padding["size"]
        ell_shape = self.get_ell_shape(shape_entry[0], shape_entry[1], padding)
        ell_padding_parameter = ell.neural.PaddingParameters(self.importer_node.output_padding["scheme"], padding)
        return (ell_shape, ell_padding_parameter)
    def get_layer_parameters(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the ELL layer parameters for this node.
        """
        input_shape, input_padding = self.get_input_parameters(conversion_parameters["first_in_block"])
        output_shape, output_padding = self.get_output_parameters(conversion_parameters["last_in_block"])
        return ell.neural.LayerParameters(input_shape, input_padding, output_shape, output_padding,
                                          ell.nodes.PortType.smallReal)
    def get_ell_shape(self, shape: tuple, order: str, padding: int = 0):
        """
        Return the shape in ELL canonical order
        """
        return memory_shapes.get_ell_shape(shape, order, padding)
    def get_ell_tensor(self, uid: str, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Returns a weight tensor as an ELL tensor
        """
        lookup_table = conversion_parameters["lookup_table"]
        return ell.math.DoubleTensor(lookup_table.get_tensor_in_ell_order(uid))
    def get_vector(self, uid: str, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Returns a weight tensor as a 1 dimensional numpy array. If the
        original tensor is a scalar, it will be expanded to a vector of size
        equal to the number of output channels.
        """
        lookup_table = conversion_parameters["lookup_table"]
        shape, order = lookup_table.get_tensor_info(uid)
        if len(shape) == 0:
            # Scalar tensor: broadcast it across the node's output channels.
            shape_entry = self.importer_node.output_shapes[0]
            ell_shape = self.get_ell_shape(shape_entry[0], shape_entry[1], 0)
            vector = lookup_table.get_vector_from_constant(uid, ell_shape.channels)
        else:
            vector = lookup_table.get_vector_in_ell_order(uid)
        return vector
    def get_ell_vector(self, uid: str, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Returns a weight tensor as an ELL vector. If the original tensor is a
        scalar, it will be expanded to a vector of size equal to the number of
        output channels.
        """
        return ell.math.DoubleVector(self.get_vector(uid, conversion_parameters))
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Derived classes override to return the appropriate ELL node
        """
        return None
class ConvertActivation(ConvertBase):
    """
    Converter for Activation
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["activation"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL node
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        activation = self.importer_node.attributes["activation"]
        # Negative-half slope for leaky ReLU; 0.01 is the default used when
        # the importer node did not supply an "alpha" attribute.
        alpha = 0.01
        if "alpha" in self.importer_node.attributes:
            alpha = self.importer_node.attributes["alpha"]
        if (activation == ell.neural.ActivationType.leaky):
            # Leaky ReLU gets its own layer type so the slope can be passed.
            return ell.neural.LeakyReLUActivationLayer(layer_parameters, alpha)
        else:
            return ell.neural.ActivationLayer(layer_parameters, activation)
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Derived classes override to convert the importer node to appropriate ELL node(s)
        and insert into the model
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Create the activation layer
        activation_layer = self.convert(conversion_parameters)
        # Get the port elements from the input
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # Add the ActivationLayerNode to the model
        ell_node = builder.AddActivationLayerNode(model, input_port_elements, activation_layer)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class OptionalConvertActivation(ConvertActivation):
    """
    Optional converter for Activation
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        # Marked optional: can_convert() returns False (node is skipped)
        # instead of raising when "activation" is missing.
        self.optional = True
class ConvertAveragePooling(ConvertBase):
    """
    Converts an importer AveragePooling node into an ELL mean-pooling layer.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["size", "stride"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Build and return the ELL mean PoolingLayer for this node.
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        node_attributes = self.importer_node.attributes
        window = ell.neural.PoolingParameters(
            node_attributes["size"], node_attributes["stride"])
        return ell.neural.PoolingLayer(layer_parameters, window, ell.neural.PoolingType.mean)
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Insert a PoolingLayerNode for this importer node into the model and
        record the importer-node -> ELL-node mapping.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        pooling_layer = self.convert(conversion_parameters)
        # Wire the new node to the output of the node feeding it.
        input_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        pooling_node = builder.AddPoolingLayerNode(model, input_elements, pooling_layer)
        lookup_table.add_imported_ell_node(self.importer_node, pooling_node)
class ConvertBatchNormalization(ConvertBase):
    """
    Converts an importer BatchNormalization node into an ELL
    batch-normalization layer.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["mean", "variance"]
        self.required_attributes = []
        # Small constant combined with the variance to avoid division by zero.
        self.epsilon = 1e-5
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Build and return the ELL BatchNormalizationLayer for this node.
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        node_weights = self.importer_node.weights
        mean = self.get_ell_vector(node_weights["mean"][0], conversion_parameters)
        variance = self.get_ell_vector(node_weights["variance"][0], conversion_parameters)
        return ell.neural.BatchNormalizationLayer(
            layer_parameters,
            mean, variance, self.epsilon,
            ell.neural.EpsilonSummand.variance)
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Insert a BatchNormalizationLayerNode into the model and record the
        importer-node -> ELL-node mapping.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        bn_layer = self.convert(conversion_parameters)
        # Wire the new node to the output of the node feeding it.
        input_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        bn_node = builder.AddBatchNormalizationLayerNode(model, input_elements, bn_layer)
        lookup_table.add_imported_ell_node(self.importer_node, bn_node)
class ConvertBias(ConvertBase):
    """
    Converts an importer Bias node into an ELL bias layer.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["bias"]
        self.required_attributes = []
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Build and return the ELL BiasLayer for this node.
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        bias_vector = self.get_ell_vector(
            self.importer_node.weights["bias"][0], conversion_parameters)
        return ell.neural.BiasLayer(layer_parameters, bias_vector)
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Insert a BiasLayerNode into the model and record the
        importer-node -> ELL-node mapping.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        bias_layer = self.convert(conversion_parameters)
        # Wire the new node to the output of the node feeding it.
        input_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        bias_node = builder.AddBiasLayerNode(model, input_elements, bias_layer)
        lookup_table.add_imported_ell_node(self.importer_node, bias_node)
class OptionalConvertBias(ConvertBias):
    """
    Optional converter for Bias
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        # Marked optional: can_convert() returns False (node is skipped)
        # instead of raising when the "bias" weight is missing.
        self.optional = True
class ConvertBinaryConvolution(ConvertBase):
    """
    Converter for BinaryConvolution
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["weights"]
        self.required_attributes = ["size", "stride"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Build and return the ELL BinaryConvolutionalLayer for this node.
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        # Fix: removed a stray "layer_parameters.inputPaddingParameters"
        # expression statement (a no-op attribute access, likely leftover
        # debugging code).
        weights = self.get_ell_tensor(
            self.importer_node.weights["weights"][0], conversion_parameters)
        attributes = self.importer_node.attributes
        convolutional_parameters = ell.neural.BinaryConvolutionalParameters(
            attributes["size"], attributes["stride"], ell.neural.BinaryConvolutionMethod.bitwise,
            ell.neural.BinaryWeightsScale.none)
        return ell.neural.BinaryConvolutionalLayer(layer_parameters, convolutional_parameters, weights)
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Insert a BinaryConvolutionalLayerNode into the model, inserting a
        ReorderDataNode first when the producing node's output padding does
        not match the padding this node requires.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Create the convolutional layer
        convolutional_layer = self.convert(conversion_parameters)
        # Get the port elements from the input
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # If we require padding but the input doesn't provide it
        # (which can happen when a single node output is used as input to
        # multiple nodes), ensure correct padding with a ReorderDataNode.
        owning_node_for_input = lookup_table.get_originating_importer_node_for_output(self.importer_node.inputs[0])
        padding = self.importer_node.padding["size"]
        if (owning_node_for_input.output_padding["size"] != padding):
            input_node = lookup_table.get_ell_node_from_importer_node_id(owning_node_for_input.id)
            port_elements = lookup_table.get_output_port_elements_for_node(input_node)
            shape_entry = owning_node_for_input.output_shapes[0]
            input_memory_layout = memory_shapes.get_ell_port_memory_layout(
                shape_entry[0], shape_entry[1], owning_node_for_input.output_padding["size"])
            output_memory_layout = memory_shapes.get_ell_port_memory_layout(shape_entry[0], shape_entry[1], padding)
            # Create the reorder node ([0, 1, 2] keeps the dimension order;
            # only the padding/layout changes)
            reorder_node = builder.AddReorderDataNode(model, port_elements, input_memory_layout, output_memory_layout,
                                                      [0, 1, 2])
            # Register the mapping
            lookup_table.add_imported_ell_node(self.importer_node, reorder_node)
            input_port_elements = lookup_table.get_output_port_elements_for_node(reorder_node)
        # Add the ConvolutionalLayerNode to the model
        ell_node = builder.AddBinaryConvolutionalLayerNode(model, input_port_elements, convolutional_layer)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertConvolution(ConvertBase):
    """
    Converter for Convolution
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["weights"]
        self.required_attributes = ["size", "stride"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Derived classes override to return the appropriate ELL node
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        weights = self.get_ell_tensor(
            self.importer_node.weights["weights"][0], conversion_parameters)
        attributes = self.importer_node.attributes
        # NOTE(review): the trailing 0, 1 are positional arguments to
        # ConvolutionalParameters -- confirm their meaning against the ELL API.
        convolutional_parameters = ell.neural.ConvolutionalParameters(
            attributes["size"], attributes["stride"], 0, 1)
        return ell.neural.ConvolutionalLayer(layer_parameters,
                                             convolutional_parameters, weights)
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Derived classes override to convert the importer node to appropriate ELL node(s)
        and insert into the model
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Create the convolutional layer
        convolutional_layer = self.convert(conversion_parameters)
        # Get the port elements from the input
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # If we require padding but the input doesn't provide it
        # (which can happen when a single node output is used as input to
        # multiple nodes), ensure correct padding with a ReorderDataNode.
        owning_node_for_input = lookup_table.get_originating_importer_node_for_output(self.importer_node.inputs[0])
        padding = self.importer_node.padding["size"]
        if (owning_node_for_input.output_padding["size"] != padding):
            input_node = lookup_table.get_ell_node_from_importer_node_id(owning_node_for_input.id)
            port_elements = lookup_table.get_output_port_elements_for_node(input_node)
            shape_entry = owning_node_for_input.output_shapes[0]
            input_memory_layout = memory_shapes.get_ell_port_memory_layout(
                shape_entry[0], shape_entry[1], owning_node_for_input.output_padding["size"])
            output_memory_layout = memory_shapes.get_ell_port_memory_layout(shape_entry[0], shape_entry[1], padding)
            # Create the reorder node ([0, 1, 2] keeps the dimension order;
            # only the padding/layout changes)
            reorder_node = builder.AddReorderDataNode(model, port_elements, input_memory_layout, output_memory_layout,
                                                      [0, 1, 2])
            # Register the mapping
            lookup_table.add_imported_ell_node(self.importer_node, reorder_node)
            input_port_elements = lookup_table.get_output_port_elements_for_node(reorder_node)
        # Add the ConvolutionalLayerNode to the model
        ell_node = builder.AddConvolutionalLayerNode(model, input_port_elements, convolutional_layer)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertFullyConnected(ConvertBase):
    """
    Converts an importer FullyConnected node into an ELL fully-connected
    (dense) layer.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["weights"]
        self.required_attributes = []
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Build and return the ELL FullyConnectedLayer for this node.
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        weight_tensor = self.get_ell_tensor(
            self.importer_node.weights["weights"][0], conversion_parameters)
        return ell.neural.FullyConnectedLayer(layer_parameters, weight_tensor)
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Insert a FullyConnectedLayerNode into the model and record the
        importer-node -> ELL-node mapping.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        dense_layer = self.convert(conversion_parameters)
        # Wire the new node to the output of the node feeding it.
        input_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        dense_node = builder.AddFullyConnectedLayerNode(model, input_elements, dense_layer)
        lookup_table.add_imported_ell_node(self.importer_node, dense_node)
class ConvertElementTimes(ConvertBase):
    """
    Converts an importer ElementTimes node into an ELL scaling layer
    (element-wise multiply by a per-channel scale vector).
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["scale"]
        self.required_attributes = []
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Build and return the ELL ScalingLayer for this node.
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        scale_vector = self.get_ell_vector(
            self.importer_node.weights["scale"][0], conversion_parameters)
        return ell.neural.ScalingLayer(layer_parameters, scale_vector)
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Insert a ScalingLayerNode into the model and record the
        importer-node -> ELL-node mapping.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        scaling_layer = self.convert(conversion_parameters)
        # Wire the new node to the output of the node feeding it.
        input_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        scaling_node = builder.AddScalingLayerNode(model, input_elements, scaling_layer)
        lookup_table.add_imported_ell_node(self.importer_node, scaling_node)
class ConvertGRU(ConvertBase):
    """
    Converter for Gated Recurrent Unit (GRU). If the GRU node has 2 inputs,
    the second input is used as the trigger, otherwise a constant node is inserted as the
    trigger.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["input_weights", "hidden_weights", "input_bias", "hidden_bias"]
        self.required_attributes = ["hidden_size", "activation", "recurrent_activation"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL layer
        """
        raise Exception("No corresponding ELL layer for GRU. Use node instead.")
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Convert the importer node into an ELL GRUNode (plus constant nodes
        holding its weights and biases) and insert it into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Get the port elements from the input
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # create constant nodes for the weights
        input_weights = self.get_ell_tensor(
            self.importer_node.weights["input_weights"][0], conversion_parameters)
        hidden_weights = self.get_ell_tensor(
            self.importer_node.weights["hidden_weights"][0], conversion_parameters)
        input_bias = self.get_ell_tensor(
            self.importer_node.weights["input_bias"][0], conversion_parameters)
        hidden_bias = self.get_ell_tensor(
            self.importer_node.weights["hidden_bias"][0], conversion_parameters)
        input_weights_node = builder.AddConstantNode(model, input_weights.data, ell.nodes.PortType.smallReal)
        hidden_weights_node = builder.AddConstantNode(model, hidden_weights.data, ell.nodes.PortType.smallReal)
        input_bias_node = builder.AddConstantNode(model, input_bias.data, ell.nodes.PortType.smallReal)
        # Fix: use a distinct *_node name instead of rebinding the
        # hidden_bias tensor variable (consistent with the other constant
        # nodes above and with ConvertFastGRNN).
        hidden_bias_node = builder.AddConstantNode(model, hidden_bias.data, ell.nodes.PortType.smallReal)
        hidden_size = self.importer_node.attributes["hidden_size"]
        activation = self.importer_node.attributes["activation"]
        recurrentActivation = self.importer_node.attributes["recurrent_activation"]
        # Get the port elements for the reset trigger
        if len(self.importer_node.inputs) > 1 and self.importer_node.inputs[1] != '':
            reset_port_elements, reset_memory_layout = lookup_table.get_port_elements_and_memory_layout_for_input(
                self.importer_node, 1)
        else:
            # Create a constant node as the trigger. The trigger fires on value change,
            # so will never fire in this case.
            reset_node = builder.AddConstantNode(model, [0], ell.nodes.PortType.integer)
            reset_port_elements = ell.nodes.PortElements(reset_node.GetOutputPort("output"))
        # Add the GRUNode to the model
        ell_node = builder.AddGRUNode(
            model, input_port_elements, reset_port_elements, hidden_size,
            ell.nodes.PortElements(input_weights_node.GetOutputPort("output")),
            ell.nodes.PortElements(hidden_weights_node.GetOutputPort("output")),
            ell.nodes.PortElements(input_bias_node.GetOutputPort("output")),
            ell.nodes.PortElements(hidden_bias_node.GetOutputPort("output")),
            activation, recurrentActivation)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertFastGRNN(ConvertBase):
    """
    Converter for Fast Gated Recurrent Neural Network (FastGRNN). If the FastGRNN node has 2 inputs,
    the second input is used as the trigger, otherwise a constant node is inserted as the
    trigger.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        # W1/W2 and U1/U2 are the input and hidden weight pairs (presumably a
        # low-rank factorization -- wRank/uRank give the ranks; TODO confirm
        # against the FastGRNN reference).
        self.required_weights = ['W1', 'W2', 'U1', 'U2', 'bias_gate', 'bias_update', 'zeta', 'nu']
        self.required_attributes = ["hidden_size", "gate_nonlinearity", "update_nonlinearity", "wRank", "uRank"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL layer
        """
        raise Exception("No corresponding ELL layer for FastGRNN. Use node instead.")
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Derived classes override to convert the importer node to appropriate ELL node(s)
        and insert into the model
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Get the port elements from the input
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # create constant nodes for the weights
        W1 = self.get_ell_tensor(
            self.importer_node.weights["W1"][0], conversion_parameters)
        W2 = self.get_ell_tensor(
            self.importer_node.weights["W2"][0], conversion_parameters)
        U1 = self.get_ell_tensor(
            self.importer_node.weights["U1"][0], conversion_parameters)
        U2 = self.get_ell_tensor(
            self.importer_node.weights["U2"][0], conversion_parameters)
        bias_gate = self.get_ell_tensor(
            self.importer_node.weights["bias_gate"][0], conversion_parameters)
        bias_update = self.get_ell_tensor(
            self.importer_node.weights["bias_update"][0], conversion_parameters)
        zeta = self.get_ell_tensor(
            self.importer_node.weights["zeta"][0], conversion_parameters)
        nu = self.get_ell_tensor(
            self.importer_node.weights["nu"][0], conversion_parameters)
        W1_node = builder.AddConstantNode(model, W1.data, ell.nodes.PortType.smallReal)
        W2_node = builder.AddConstantNode(model, W2.data, ell.nodes.PortType.smallReal)
        U1_node = builder.AddConstantNode(model, U1.data, ell.nodes.PortType.smallReal)
        U2_node = builder.AddConstantNode(model, U2.data, ell.nodes.PortType.smallReal)
        bias_gate_node = builder.AddConstantNode(model, bias_gate.data, ell.nodes.PortType.smallReal)
        bias_update_node = builder.AddConstantNode(model, bias_update.data, ell.nodes.PortType.smallReal)
        zeta_node = builder.AddConstantNode(model, zeta.data, ell.nodes.PortType.smallReal)
        nu_node = builder.AddConstantNode(model, nu.data, ell.nodes.PortType.smallReal)
        hidden_size = self.importer_node.attributes["hidden_size"]
        wRank = self.importer_node.attributes["wRank"]
        uRank = self.importer_node.attributes["uRank"]
        gate_nonlinearity = self.importer_node.attributes["gate_nonlinearity"]
        update_nonlinearity = self.importer_node.attributes["update_nonlinearity"]
        # Get the port elements for the reset trigger
        if len(self.importer_node.inputs) > 1 and self.importer_node.inputs[1] != '':
            reset_port_elements, reset_memory_layout = lookup_table.get_port_elements_and_memory_layout_for_input(
                self.importer_node, 1)
        else:
            # Create a constant node as the trigger. The trigger fires on value change,
            # so will never fire in this case.
            reset_node = builder.AddConstantNode(model, [0], ell.nodes.PortType.integer)
            reset_port_elements = ell.nodes.PortElements(reset_node.GetOutputPort("output"))
        # Add the GRUNode to the model
        ell_node = builder.AddFastGRNNNode(
            model, input_port_elements, reset_port_elements, hidden_size, wRank, uRank,
            ell.nodes.PortElements(W1_node.GetOutputPort("output")),
            ell.nodes.PortElements(W2_node.GetOutputPort("output")),
            ell.nodes.PortElements(U1_node.GetOutputPort("output")),
            ell.nodes.PortElements(U2_node.GetOutputPort("output")),
            ell.nodes.PortElements(bias_gate_node.GetOutputPort("output")),
            ell.nodes.PortElements(bias_update_node.GetOutputPort("output")),
            ell.nodes.PortElements(zeta_node.GetOutputPort("output")),
            ell.nodes.PortElements(nu_node.GetOutputPort("output")),
            gate_nonlinearity, update_nonlinearity)
        # Register the mappings (every constant node plus the FastGRNN node
        # itself is associated with this importer node)
        lookup_table.add_imported_ell_node(self.importer_node, W1_node)
        lookup_table.add_imported_ell_node(self.importer_node, W2_node)
        lookup_table.add_imported_ell_node(self.importer_node, U1_node)
        lookup_table.add_imported_ell_node(self.importer_node, U2_node)
        lookup_table.add_imported_ell_node(self.importer_node, bias_gate_node)
        lookup_table.add_imported_ell_node(self.importer_node, bias_update_node)
        lookup_table.add_imported_ell_node(self.importer_node, zeta_node)
        lookup_table.add_imported_ell_node(self.importer_node, nu_node)
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertInput(ConvertBase):
"""
Converter for Input
"""
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        # Input nodes carry no weights or attributes of their own.
        self.required_weights = []
        self.required_attributes = []
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Returns None: there is no ELL layer form for an input.
        """
        # Skip processing the input. It is implicit when using
        # ELL Layers; the node form is handled by convert_node.
        return None
def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
"""
Derived classes override to return the appropriate ELL node
"""
model = conversion_parameters["model"]
builder = conversion_parameters["builder"]
lookup_table = conversion_parameters["lookup_table"]
step_interval_msec = conversion_parameters["step_interval_msec"]
lag_threshold_msec = conversion_parameters["lag_threshold_msec"]
function_prefix = ""
# Add the InputNode to the model
shape_entry = self.importer_node.output_shapes[0]
ell_shape = self.get_ell_shape(shape_entry[0], shape_entry[1], 0)
original_input_node = None
if step_interval_msec is not None:
# in the steppable case the input is a clock ticks (which is a double)
input_node = builder.AddInputNode(
model, ell.model.PortMemoryLayout([1]), ell.nodes.PortType.real)
if lag_threshold_msec is None:
lag_threshold_msec = 2 * step_interval_msec
clock_node = builder.AddClockNode(
model, ell.nodes.PortElements(input_node.GetOutputPort("output")),
float(step_interval_msec), float(lag_threshold_msec),
"{}LagNotification".format(function_prefix))
source_node = builder.AddSourceNode(
model, ell.nodes.PortElements(clock_node.GetOutputPort("output")),
ell.nodes.PortType.smallReal, ell.model.PortMemoryLayout(ell_shape),
"{}InputCallback".format(function_prefix))
original_input_node = input_node
input_node = source_node
else:
input_node = builder.AddInputNode(
model, ell.model.PortMemoryLayout(ell_shape), ell.nodes.PortType.smallReal)
original_input_node = input_node
# Register the mapping
lookup_table.add_imported_ell_node(self.importer_node, input_node)
if step_interval_msec is not None:
lookup_table.add_imported_ell_node(self.importer_node, clock_node)
lookup_table.add_imported_ell_node(self.importer_node, source_node)
# Special case: If output requires padding e.g. Input is connected to a
# Convolutional node that requires padding, add a ReorderData node to
# ensure proper memory layout. This can be skipped once Input supports
# different memory layouts of the output.
padding = self.importer_node.output_padding["size"]
if padding > 0:
# Create the reorder node
port_elements = lookup_table.get_output_port_elements_for_node(input_node)
input_memory_layout = memory_shapes.get_ell_port_memory_layout(shape_entry[0], shape_entry[1], 0)
output_memory_layout = memory_shapes.get_ell_port_memory_layout(shape_entry[0], shape_entry[1], padding)
reorder_node = builder.AddReorderDataNode(model, port_elements, input_memory_layout, output_memory_layout,
[0, 1, 2])
# Register the mapping
lookup_table.add_imported_ell_node(self.importer_node, reorder_node)
lookup_table.add_ell_input(original_input_node)
class ConvertTypeCast(ConvertBase):
    """Converter for an explicit TypeCast node."""
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["cast_to"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL node
        """
        raise Exception("No corresponding ELL layer for TypeCast. Use node instead.")
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """Insert an ELL TypeCastNode converting the input to the 'cast_to' type."""
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Wire the node's single upstream output into the new cast node.
        source_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        target_type = self.importer_node.attributes["cast_to"]
        cast_node = builder.AddTypeCastNode(model, source_elements, target_type)
        # Record the importer-node -> ELL-node association.
        lookup_table.add_imported_ell_node(self.importer_node, cast_node)
class ConvertLeakyReLU(ConvertActivation):
    """Converter for LeakyReLU; delegates to the generic Activation converter."""
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = []
        # Tag the node so the Activation base class emits a leaky ReLU.
        self.importer_node.attributes["activation"] = ell.neural.ActivationType.leaky
class ConvertLSTM(ConvertBase):
    """
    Converter for Long Short-Term Memory (LSTM) unit. If the LSTM node has 2 inputs,
    the second input is used as the trigger, otherwise a constant node is inserted as the
    trigger.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["input_weights", "hidden_weights", "input_bias", "hidden_bias"]
        self.required_attributes = ["hidden_size", "activation", "recurrent_activation"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL layer
        """
        raise Exception("No corresponding ELL layer for LSTM. Use node instead.")
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Convert the importer node into an ELL LSTMNode and insert it into the model.
        The weights/biases are materialized as constant nodes feeding the LSTM.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Get the port elements from the input
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # create constant nodes for the weights
        input_weights = self.get_ell_tensor(
            self.importer_node.weights["input_weights"][0], conversion_parameters)
        hidden_weights = self.get_ell_tensor(
            self.importer_node.weights["hidden_weights"][0], conversion_parameters)
        input_bias = self.get_ell_tensor(
            self.importer_node.weights["input_bias"][0], conversion_parameters)
        hidden_bias = self.get_ell_tensor(
            self.importer_node.weights["hidden_bias"][0], conversion_parameters)
        input_weights_node = builder.AddConstantNode(model, input_weights.data, ell.nodes.PortType.smallReal)
        hidden_weights_node = builder.AddConstantNode(model, hidden_weights.data, ell.nodes.PortType.smallReal)
        input_bias_node = builder.AddConstantNode(model, input_bias.data, ell.nodes.PortType.smallReal)
        # Fix: this node was previously assigned back to 'hidden_bias', shadowing the
        # tensor above and breaking the *_node naming convention of its siblings.
        hidden_bias_node = builder.AddConstantNode(model, hidden_bias.data, ell.nodes.PortType.smallReal)
        hidden_size = self.importer_node.attributes["hidden_size"]
        activation = self.importer_node.attributes["activation"]
        recurrentActivation = self.importer_node.attributes["recurrent_activation"]
        # Get the port elements for the reset trigger
        if len(self.importer_node.inputs) > 1 and self.importer_node.inputs[1] != '':
            reset_port_elements, reset_memory_layout = lookup_table.get_port_elements_and_memory_layout_for_input(
                self.importer_node, 1)
        else:
            # Create a constant node as the trigger. The trigger fires on value change,
            # so will never fire in this case.
            reset_node = builder.AddConstantNode(model, [0], ell.nodes.PortType.integer)
            reset_port_elements = ell.nodes.PortElements(reset_node.GetOutputPort("output"))
        # Add the LSTMNode to the model
        ell_node = builder.AddLSTMNode(
            model, input_port_elements, reset_port_elements, hidden_size,
            ell.nodes.PortElements(input_weights_node.GetOutputPort("output")),
            ell.nodes.PortElements(hidden_weights_node.GetOutputPort("output")),
            ell.nodes.PortElements(input_bias_node.GetOutputPort("output")),
            ell.nodes.PortElements(hidden_bias_node.GetOutputPort("output")),
            activation, recurrentActivation)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertMaxPooling(ConvertBase):
    """Converter for Max Pooling."""
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["size", "stride"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """Build and return the ELL max-pooling layer."""
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        node_attributes = self.importer_node.attributes
        pool_params = ell.neural.PoolingParameters(
            node_attributes["size"], node_attributes["stride"])
        return ell.neural.PoolingLayer(
            layer_parameters, pool_params, ell.neural.PoolingType.max)
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """Insert a PoolingLayerNode wrapping the max-pooling layer into the model."""
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        pooling_layer = self.convert(conversion_parameters)
        # Connect the upstream output into the new pooling node.
        upstream = lookup_table.get_port_elements_for_input(self.importer_node)
        pooling_node = builder.AddPoolingLayerNode(model, upstream, pooling_layer)
        # Record the importer-node -> ELL-node association.
        lookup_table.add_imported_ell_node(self.importer_node, pooling_node)
class ConvertMinus(ConvertBase):
    """Converter for Minus, expressed as an additive Bias layer with negated values."""
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["bias"]
        self.required_attributes = []
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """Return an ELL BiasLayer whose bias is the negated 'bias' weight vector."""
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        # ELL only offers an additive bias layer, so negate the values to
        # turn the subtraction into an addition.
        negated_bias = -1.0 * self.get_vector(
            self.importer_node.weights["bias"][0], conversion_parameters)
        return ell.neural.BiasLayer(layer_parameters, negated_bias)
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """Insert a BiasLayerNode implementing the minus operation into the model."""
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        bias_layer = self.convert(conversion_parameters)
        upstream = lookup_table.get_port_elements_for_input(self.importer_node)
        bias_node = builder.AddBiasLayerNode(model, upstream, bias_layer)
        # Record the importer-node -> ELL-node association.
        lookup_table.add_imported_ell_node(self.importer_node, bias_node)
class ConvertPassthrough(ConvertBase):
    """
    Converter for Passthrough, which has information on inputs and outputs but
    doesn't produce any ELL nodes/layers.
    Its sole purpose is to preserve connections between nodes during the conversion
    process.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return nothing
        """
        return None
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Re-point this node's output to its input's owning ELL node so downstream
        lookups skip the passthrough entirely.
        """
        lookup_table = conversion_parameters["lookup_table"]
        # Set owner of this output to be the Passthrough node's input node
        if len(self.importer_node.inputs) == 0:
            raise Exception("### Passthrough node {}({}) has no inputs".format(self.importer_node.operation_type,
                                                                               self.importer_node.id))
        input_owner = lookup_table.get_owning_node_for_output(self.importer_node.inputs[0])
        lookup_table.add_imported_ell_node(self.importer_node, input_owner, set_group_id=False)
class ConvertBinaryOperation(ConvertBase):
    """
    Converter for Binary Operations (element-wise op between two inputs).
    Subclasses select the concrete ELL operator.
    """
    def __init__(self, node: ImporterNode, op: ell.nodes.BinaryOperationType):
        super().__init__(node)
        # The ELL binary operator this converter emits (supplied by subclasses).
        self.operator = op
        self.required_weights = []
        self.required_attributes = []
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL node
        """
        return None
    def add_reinterpret_node(self, builder, model, input_elements, memory_layout):
        """Insert a ReinterpretLayoutNode; return (its output elements, the node)."""
        node = builder.AddReinterpretLayoutNode(model, input_elements, memory_layout)
        return (ell.nodes.PortElements(node.GetOutputPort("output")), node)
    def reinterpret_input(self, builder, model, input_elements, memory_layout):
        """
        Ensure input_elements matches memory_layout, inserting a reinterpret
        node if needed. Returns (port elements, inserted node or None).
        """
        input_layout = input_elements.GetMemoryLayout()
        if not input_layout == memory_layout:
            # Reinterpreting is only valid when the element counts match;
            # broadcasting between differently sized operands is unsupported.
            # Fix: use np.prod — np.product was a deprecated alias removed in NumPy 2.0.
            if np.prod(list(input_layout.size)) != np.prod(list(memory_layout.size)):
                raise Exception("Binary operation {} does not yet support broadcasting".format(self.operator))
            return self.add_reinterpret_node(builder, model, input_elements, memory_layout)
        return (input_elements, None)
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Insert a BinaryOperationNode into the model, reinterpreting input and
        output layouts as needed so both operands and the result agree.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Get the port elements and memory layout from the two inputs.
        # Since the 2 inputs and output could have different padding,
        # we need both the port elements and the memory layouts for each.
        input1_port_elements, input1_port_memory_layout = lookup_table.get_port_elements_and_memory_layout_for_input(
            self.importer_node, 0)
        input2_port_elements, input2_port_memory_layout = lookup_table.get_port_elements_and_memory_layout_for_input(
            self.importer_node, 1)
        output_shape_tuple = self.importer_node.output_shapes[0]
        output_port_memory_layout = memory_shapes.get_ell_port_memory_layout(
            output_shape_tuple[0],
            output_shape_tuple[1],
            self.importer_node.output_padding["size"])
        # see if the shapes match
        input1_port_elements, _ = self.reinterpret_input(builder, model, input1_port_elements,
                                                         input1_port_memory_layout)
        input2_port_elements, _ = self.reinterpret_input(builder, model, input2_port_elements,
                                                         input2_port_memory_layout)
        # Add the BinaryOperationNode to the model.
        ell_node = builder.AddBinaryOperationNode(
            model,
            input1_port_elements,
            input2_port_elements,
            self.operator)
        output_elements = ell.nodes.PortElements(ell_node.GetOutputPort("output"))
        output_port_elements, new_output_node = self.reinterpret_input(builder, model, output_elements,
                                                                       output_port_memory_layout)
        if new_output_node is not None:
            # The final reinterpret node becomes the node downstream consumers see.
            ell_node = new_output_node
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertPlus(ConvertBinaryOperation):
    """Converter for Plus: element-wise addition of two inputs."""
    def __init__(self, node: ImporterNode):
        # Bind the binary-operation base converter to the ELL add operator.
        super().__init__(node, ell.nodes.BinaryOperationType.add)
class ConvertSubtract(ConvertBinaryOperation):
    """Converter for Subtract: element-wise subtraction of one output from another."""
    def __init__(self, node: ImporterNode):
        # Bind the binary-operation base converter to the ELL subtract operator.
        super().__init__(node, ell.nodes.BinaryOperationType.subtract)
class ConvertCoordinatewiseMultiply(ConvertBinaryOperation):
    """Converter for CoordinatewiseMultiply: element-wise product of two inputs."""
    def __init__(self, node: ImporterNode):
        # Bind the binary-operation base converter to the ELL multiply operator.
        super().__init__(node, ell.nodes.BinaryOperationType.multiply)
class ConvertCoordinatewiseDivide(ConvertBinaryOperation):
    """Converter for CoordinatewiseDivide: element-wise division of two inputs."""
    def __init__(self, node: ImporterNode):
        # Bind the binary-operation base converter to the ELL divide operator.
        super().__init__(node, ell.nodes.BinaryOperationType.divide)
class ConvertPooling(ConvertBase):
    """Converter for generic Pooling; declares the required attributes only."""
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["size", "stride", "poolingType"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """No ELL layer is produced by the generic pooling converter."""
        return None
class ConvertPReLU(ConvertBase):
    """Converter for PReLU, realized as an ELL PReLU activation layer."""
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["alpha"]
        self.required_attributes = []
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """Build and return the ELL PReLU activation layer."""
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        # 'alpha' carries the learned negative-slope coefficients.
        alpha_tensor = self.get_ell_tensor(
            self.importer_node.weights["alpha"][0], conversion_parameters)
        return ell.neural.PReLUActivationLayer(layer_parameters, alpha_tensor)
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """Insert an ActivationLayerNode wrapping the PReLU layer into the model."""
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        activation_layer = self.convert(conversion_parameters)
        # Connect the upstream output into the new activation node.
        upstream = lookup_table.get_port_elements_for_input(self.importer_node)
        activation_node = builder.AddActivationLayerNode(model, upstream, activation_layer)
        # Record the importer-node -> ELL-node association.
        lookup_table.add_imported_ell_node(self.importer_node, activation_node)
class ConvertReLU(ConvertActivation):
    """Converter for ReLU; delegates to the generic Activation converter."""
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = []
        # Tag the node so the Activation base class emits a ReLU.
        self.importer_node.attributes["activation"] = ell.neural.ActivationType.relu
class ConvertRegion(ConvertBase):
    """
    Converter for region detection layer
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["width", "height", "numBoxesPerCell", "numClasses", "numAnchors", "applySoftmax"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Build and return the ELL region detection layer.
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        attributes = self.importer_node.attributes
        region_detection_parameters = ell.neural.RegionDetectionParameters(
            attributes["width"],
            attributes["height"],
            attributes["numBoxesPerCell"],
            attributes["numClasses"],
            attributes["numAnchors"],
            attributes["applySoftmax"]
        )
        # Bug fix: this previously constructed a FullyConnectedLayer (copy/paste
        # error). Region detection parameters belong to a RegionDetectionLayer,
        # which is what convert_node adds via AddRegionDetectionLayerNode.
        return ell.neural.RegionDetectionLayer(
            layer_parameters, region_detection_parameters)
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Insert a RegionDetectionLayerNode into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Create the region detection layer
        region_layer = self.convert(conversion_parameters)
        # Get the port elements from the input
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # Add the RegionDetectionLayerNode to the model
        ell_node = builder.AddRegionDetectionLayerNode(model, input_port_elements, region_layer)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertScaling(ConvertBase):
    """Converter for Scaling."""
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["scale"]
        self.required_attributes = []
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """Build and return the ELL scaling layer from the 'scale' weight vector."""
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        scale_vector = self.get_ell_vector(
            self.importer_node.weights["scale"][0], conversion_parameters)
        return ell.neural.ScalingLayer(layer_parameters, scale_vector)
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """Insert a ScalingLayerNode wrapping the scaling layer into the model."""
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        scaling_layer = self.convert(conversion_parameters)
        # Connect the upstream output into the new scaling node.
        upstream = lookup_table.get_port_elements_for_input(self.importer_node)
        scaling_node = builder.AddScalingLayerNode(model, upstream, scaling_layer)
        # Record the importer-node -> ELL-node association.
        lookup_table.add_imported_ell_node(self.importer_node, scaling_node)
class OptionalConvertScaling(ConvertScaling):
    """Scaling converter variant whose presence in a block is optional."""
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        # Mark this converter as optional for the importer engine.
        self.optional = True
class ConvertSoftmax(ConvertBase):
    """
    Converter for Softmax
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        # Bug fix: this was misspelled 'required_atteamstributes' (corrupted
        # identifier), so 'required_attributes' was never set by this class.
        self.required_attributes = []
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL node
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        return ell.neural.SoftmaxLayer(layer_parameters)
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Insert a SoftmaxLayerNode into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Create the softmax layer
        softmax_layer = self.convert(conversion_parameters)
        # Get the port elements from the input
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # Add the SoftmaxLayerNode to the model
        ell_node = builder.AddSoftmaxLayerNode(model, input_port_elements, softmax_layer)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertUnaryOperation(ConvertBase):
    """Base converter for element-wise unary operations."""
    def __init__(self, node: ImporterNode, op: ell.nodes.UnaryOperationType):
        super().__init__(node)
        # The ELL unary operator this converter emits (supplied by subclasses).
        self.operator = op
        self.required_weights = []
        self.required_attributes = []
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL node
        """
        return None
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """Insert a UnaryOperationNode applying self.operator into the model."""
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Connect the upstream output into the new unary-operation node.
        upstream = lookup_table.get_port_elements_for_input(self.importer_node)
        op_node = builder.AddUnaryOperationNode(model, upstream, self.operator)
        # Record the importer-node -> ELL-node association.
        lookup_table.add_imported_ell_node(self.importer_node, op_node)
class ConvertSigmoid(ConvertUnaryOperation):
    """Converter for the element-wise sigmoid operation."""
    def __init__(self, node: ImporterNode):
        # Bind the unary-operation base converter to the ELL sigmoid operator.
        super().__init__(node, ell.nodes.UnaryOperationType.sigmoid)
class ConvertSign(ConvertUnaryOperation):
    """Converter for the element-wise sign operation."""
    def __init__(self, node: ImporterNode):
        # Bind the unary-operation base converter to the ELL sign operator.
        super().__init__(node, ell.nodes.UnaryOperationType.sign)
class ConvertHardSigmoid(ConvertUnaryOperation):
    """
    Converter for HardSigmoid operation
    """
    # Doc fix: the docstring previously said "Sigmoid" (copy/paste);
    # this class emits the hardSigmoid operator.
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.hardSigmoid)
class ConvertTanh(ConvertUnaryOperation):
    """Converter for the element-wise tanh operation."""
    def __init__(self, node: ImporterNode):
        # Bind the unary-operation base converter to the ELL tanh operator.
        super().__init__(node, ell.nodes.UnaryOperationType.tanh)
class ConvertHardTanh(ConvertUnaryOperation):
    """
    Converter for HardTanh operation
    """
    # Doc fix: the docstring previously said "Sigmoid" (copy/paste);
    # this class emits the hardTanh operator.
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.hardTanh)
class ConvertAbs(ConvertUnaryOperation):
    """Converter for the element-wise absolute-value operation."""
    def __init__(self, node: ImporterNode):
        # Bind the unary-operation base converter to the ELL abs operator.
        super().__init__(node, ell.nodes.UnaryOperationType.abs)
class ConvertSqrt(ConvertUnaryOperation):
    """Converter for the element-wise square-root operation."""
    def __init__(self, node: ImporterNode):
        # Bind the unary-operation base converter to the ELL sqrt operator.
        super().__init__(node, ell.nodes.UnaryOperationType.sqrt)
class ConvertSquare(ConvertUnaryOperation):
    """
    Converter for Square operation
    """
    # Doc fix: the docstring previously said "Sqrt" (copy/paste);
    # this class emits the square operator.
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.square)
class ConvertSin(ConvertUnaryOperation):
    """
    Converter for Sin operation
    """
    # Doc fix: the docstring previously said "Sqrt" (copy/paste);
    # this class emits the sin operator.
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.sin)
class ConvertCos(ConvertUnaryOperation):
    """
    Converter for Cos operation
    """
    # Doc fix: the docstring previously said "Sqrt" (copy/paste);
    # this class emits the cos operator.
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.cos)
class ConvertExp(ConvertUnaryOperation):
    """
    Converter for Exp operation
    """
    # Doc fix: the docstring previously said "Sigmoid" (copy/paste);
    # this class emits the exp operator.
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.exp)
class ConvertLog(ConvertUnaryOperation):
    """
    Converter for Log operation
    """
    # Doc fix: the docstring previously said "Sigmoid" (copy/paste);
    # this class emits the log operator.
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.log)
class ConvertSplice(ConvertBase):
    """
    Converter for Splice, which for now is Output followed by
    Reshape
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["dimension_to_stack"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL node
        """
        return None
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Stack multiple inputs along the requested dimension by reordering each
        input, splicing them, and reordering the result back to row/column/channel
        with the required output padding.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        last_in_block = conversion_parameters["last_in_block"]
        pre_order = [0, 1, 2]
        post_order = [0, 1, 2]
        if self.importer_node.attributes["dimension_to_stack"] == "channel":
            # When output from nodes are concatenated together in the
            # order (channel, row, column), they effectively stack in the
            # channel dimension.
            pre_order = [2, 0, 1]
        elif self.importer_node.attributes["dimension_to_stack"] == "row":
            # When output from nodes are concatenated together in the
            # order (row, column, channel), they effectively stack in the
            # row dimension.
            pre_order = [0, 1, 2]
        elif self.importer_node.attributes["dimension_to_stack"] == "column":
            # When output from nodes are concatenated together in the
            # order (column, row, channel), they effectively stack in the
            # column dimension.
            pre_order = [1, 0, 2]
        else:
            # Bug fix: this previously formatted self.required_attributes["dimension_to_stack"],
            # which is a list and raises TypeError instead of the intended message.
            raise Exception("Splice does not yet support stacking along dimension {}, just row, column or channel"
                            .format(self.importer_node.attributes["dimension_to_stack"]))
        # NOTE: The ReorderDataNodes that are inserted can be removed by the
        # optimizer if they're redundant
        # Loop over all inputs and for each, insert a reorder node to
        # put into specified order.
        reorder_nodes = []
        for input_index in range(len(self.importer_node.inputs)):
            # Create the reorder node
            input_node = lookup_table.get_owning_node_for_output(self.importer_node.inputs[input_index])
            input_port_elements = lookup_table.get_output_port_elements_for_node(input_node)
            # Take the active region of inputs
            port_elements, input_port_memory_layout = lookup_table.get_port_elements_and_memory_layout_for_input(
                self.importer_node, input_index)
            reorder_node = builder.AddReorderDataNode(model, input_port_elements, input_port_memory_layout,
                                                      input_port_memory_layout, pre_order)
            reorder_nodes.append(reorder_node)
            # Register the mapping
            lookup_table.add_imported_ell_node(self.importer_node, reorder_node)
        # Splice together the reorder nodes
        output_shape, output_padding = self.get_output_parameters(last_in_block)
        reordered_output_shape = ell.math.TensorShape(output_shape.channels, output_shape.rows, output_shape.columns)
        input_port_elements_list = []
        for ell_node in reorder_nodes:
            portElements = lookup_table.get_output_port_elements_for_node(ell_node)
            input_port_elements_list.append(portElements)
        splice_node = builder.AddSpliceNode(model, ell.nodes.PortElementsList(input_port_elements_list))
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, splice_node)
        # Insert a reorder node to to be in row, column, channel order with appropriate padding.
        port_elements = lookup_table.get_output_port_elements_for_node(splice_node)
        padding_size = output_padding.paddingSize
        reorderedPortMemoryLayout = ell.model.PortMemoryLayout(
            [reordered_output_shape.rows, reordered_output_shape.columns, reordered_output_shape.channels],
            [reordered_output_shape.rows, reordered_output_shape.columns, reordered_output_shape.channels],
            [0, 0, 0], pre_order)
        outputPortMemoryLayout = ell.model.PortMemoryLayout(
            [output_shape.rows, output_shape.columns, output_shape.channels],
            [output_shape.rows - 2 * padding_size, output_shape.columns - 2 * padding_size, output_shape.channels],
            [padding_size, padding_size, 0], post_order)
        final_reorder_node = builder.AddReorderDataNode(model, port_elements, reorderedPortMemoryLayout,
                                                        outputPortMemoryLayout, post_order, 0)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, final_reorder_node)
class ConvertReshape(ConvertBase):
    """Converter for Reshape."""
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = []
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL node
        """
        return None
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """Treat the reshape as a passthrough by re-pointing output ownership."""
        lookup_table = conversion_parameters["lookup_table"]
        # Quick workaround for unnecessary reshapes: alias this node's output
        # to the node that owns the reshape's input.
        upstream_owner = lookup_table.get_owning_node_for_output(self.importer_node.inputs[0])
        lookup_table.add_imported_ell_node(self.importer_node, upstream_owner, set_group_id=False)
class ConvertReorder(ConvertBase):
    """
    Converter for Reorder
    """
    # Doc fix: the docstring previously said "Reshape" (copy/paste).
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["order"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL node
        """
        return None
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Add a ReorderDataNode applying the node's 'order' dimension permutation.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # Fix: np.int was an alias for the builtin int and was removed in
        # NumPy 1.24; cast with the builtin directly (identical behavior).
        order = list(np.array(self.importer_node.attributes["order"]).astype(int))
        # Create the reorder node
        reorder_node = builder.AddReorderDataNode(model, input_port_elements, order)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, reorder_node)
        # (Removed a trailing dead assignment of the reorder node's output
        # elements to an unused local.)
class ConvertConstant(ConvertBase):
    """
    Converter for Constant nodes
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ['tensor']
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL node
        """
        return None
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Add an ELL ConstantNode holding this node's 'tensor' attribute to the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        tensor = self.importer_node.attributes["tensor"]
        # Map the numpy dtype to an ELL port type; default is double precision.
        # Fix: np.int / np.bool were aliases for the builtin int / bool and were
        # removed in NumPy 1.24, so compare against the builtins (same semantics).
        port_type = ell.nodes.PortType.real
        if tensor.dtype == np.float32:
            port_type = ell.nodes.PortType.smallReal
        elif tensor.dtype == int:
            port_type = ell.nodes.PortType.integer
        elif tensor.dtype == np.int64:
            port_type = ell.nodes.PortType.bigInt
        elif tensor.dtype == bool:
            port_type = ell.nodes.PortType.boolean
        # NOTE(review): on platforms whose default int is 64-bit, the int64 branch
        # is shadowed by the preceding check — behavior preserved from the original.
        ell_node = builder.AddConstantNode(model, tensor.ravel().astype(np.float64), port_type)
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertVAD(ConvertBase):
    """
    Converter for Voice Activity Detector.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["sampleRate", "frameDuration", "tauUp", "tauDown", "largeInput", "gainAtt",
                                    "thresholdUp", "thresholdDown", "levelThreshold"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL layer
        """
        # Fix: corrected "Actvitity" typo in the error message.
        raise Exception("No corresponding ELL layer for Voice Activity Detector (VAD). Use node instead.")
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Insert a VoiceActivityDetectorNode configured from the node's attributes.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Detector tuning parameters, taken verbatim from the importer node.
        sample_rate = self.importer_node.attributes["sampleRate"]
        frame_duration = self.importer_node.attributes["frameDuration"]
        tau_up = self.importer_node.attributes["tauUp"]
        tau_down = self.importer_node.attributes["tauDown"]
        large_input = self.importer_node.attributes["largeInput"]
        gain_att = self.importer_node.attributes["gainAtt"]
        threshold_up = self.importer_node.attributes["thresholdUp"]
        threshold_down = self.importer_node.attributes["thresholdDown"]
        level_threshold = self.importer_node.attributes["levelThreshold"]
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # Create the VAD node
        ell_node = builder.AddVoiceActivityDetectorNode(
            model, input_port_elements,
            sample_rate, frame_duration, tau_up, tau_down, large_input, gain_att,
            threshold_up, threshold_down, level_threshold)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
| ####################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: converters.py (importers)
# Authors: <NAME>
#
# Requires: Python 3.x
#
####################################################################################################
import typing
import numpy as np
import ell
import logger
import common.memory_shapes as memory_shapes
_logger = logger.get()
class ImporterNode:
    """
    Common class for intermediate representation of nodes in the importer.
    The core importer engine can convert ImporterNodes into ELL Nodes
    and insert them into an ELL Model.
    """

    def __init__(self, id: str,
                 operation_type: str,
                 inputs: typing.Sequence[str] = None,
                 outputs: typing.Sequence[str] = None,
                 weights: typing.Mapping[str, typing.Any] = None,
                 attributes: typing.Mapping[str, typing.Any] = None,
                 padding: typing.Mapping[str, typing.Any] = None,
                 input_shapes: typing.Sequence[typing.Any] = None,
                 output_shapes: typing.Sequence[typing.Any] = None,
                 metadata: typing.Mapping[str, str] = None):
        """
        id: unique identifier for this node
        operation_type: string name of the operation type to be imported.
            This will get mapped to an ELL operation via the operation_map.
        inputs: array of strings representing where the input comes from.
            The string is the 'id' of another ImporterNode.
        outputs: array of strings representing the output tensors.
            The string is the 'id' of another ImporterNode.
        weights: dictionary of weight parameter labels to weight names e.g. a
            convolutional node may have {'weights': 'w123', 'bias': 'b832'}.
            Dictionary keys are specific to the ELL operation.
            The value is the id of a tensor in ImporterModel.tensors.
        attributes: dictionary of attribute names and values e.g. a
            convolutional node may have {'size': 3, 'step': 1, 'pad': 0 }.
            Dictionary keys are specific to the ELL operation.
        padding: dictionary of padding size and padding scheme e.g.
            {"size": 0, "scheme": ell.neural.PaddingScheme.zeros}
        input_shapes: array of tuples representing input shapes and ordering
            e.g. ((3,64,64), "channel_row_column").
            The ImporterEngine will take care of reshaping everything to
            match the order required by ELL.
        output_shapes: array of tuples representing output shapes and ordering
            e.g. ((32,8,8), "channel_row_column").
        metadata: optional additional metadata to store in the ell_nodes.

        Omitted container arguments default to fresh empty containers.
        """
        self.id = id
        self.operation_type = operation_type
        # FIX: the original signature used mutable default arguments
        # ([] / {}), which are created once at function-definition time and
        # shared by every ImporterNode constructed without that argument.
        # Mutating one node's attributes/weights then silently leaked into
        # all the others.  Use None sentinels and allocate per-instance.
        self.weights = {} if weights is None else weights
        self.inputs = [] if inputs is None else inputs
        self.outputs = [] if outputs is None else outputs
        self.attributes = {} if attributes is None else attributes
        self.padding = {} if padding is None else padding
        # Output padding is refined later by the importer engine.
        self.output_padding = {"size": 0, "scheme": ell.neural.PaddingScheme.zeros}
        self.input_shapes = [] if input_shapes is None else input_shapes
        self.output_shapes = [] if output_shapes is None else output_shapes
        self.metadata = {} if metadata is None else metadata

    def __repr__(self):
        """Multi-line debug summary; elides tensor data, output is unchanged
        from the original implementation."""
        attrs = dict(self.attributes)
        if "tensor" in attrs:
            attrs["tensor"] = "..."
        parts = ["{} {}: {} -> {}, attributes {}\n".format(self.operation_type, self.id, self.inputs,
                                                           self.outputs, attrs)]
        parts.append(" input_shape {}\n".format(self.input_shapes))
        parts.append(" output_shape {}\n".format(self.output_shapes))
        parts.append(" padding {}\n".format(self.padding))
        parts.append(" output_padding {}\n".format(self.output_padding))
        parts.append(" weights {}\n".format(self.weights.keys()))
        return "".join(parts)
class LookupTable:
    """
    A helper class that stores the typing.Mappings between:
    - tensor id to a tuple containing (tensor value, tensor order)
    - ELL id to ELL node. These get created during the conversion process.
    - importer node id to ELL ids. These get created during the conversion
    process. Note that one ImporterNode could get converted to multiple
    ELL nodes.
    In addition, there are convenience methods for accessing the tensors
    in appropriate ELL order.
    """

    def __init__(self, tensors: typing.Mapping[str, typing.Any]):
        # Stores mapping of ELL Node id string to ELL Node
        self.ell_id_to_ell_nodes = {}
        # Stores mapping of importer node id string to ELL Node ids (one
        # importer node may expand into several ELL nodes)
        self.importer_id_to_ell_ids = {}
        # Stores mapping of ell node id string to Importer Node
        self.ell_id_to_owning_importer_node = {}
        # Stores mapping of output id string to owning ELL Node id
        self.output_id_to_ell_ids = {}
        # Stores mapping of tensor ids to (numpy tensor, order) tuples
        self.tensors = tensors
        # Stores input nodes. When creating an ELL map from an ELL model,
        # map inputs must be identified.
        self.input_ell_nodes = []
        # Stores output nodes. When creating an ELL map from an ELL model,
        # map outputs must be identified.
        self.output_ell_nodes = []

    def add_imported_ell_node(self, importer_node: ImporterNode, ell_node: ell.nodes.Node, set_group_id=True):
        """
        Adds an ImporterNode and associated ELL node to the lookup.
        Registers the ELL node, appends it to the importer node's list of
        ELL ids, claims ownership of the importer node's outputs, and
        (optionally) stamps group/name metadata onto the ELL node.
        """
        # Add to mapping of ELL Node id to ELL Node
        ell_node_id = ell_node.GetId()
        self.add_ell_node(ell_node)
        # Add ImporterNode id to ELL Node id mapping
        if importer_node.id in self.importer_id_to_ell_ids:
            self.importer_id_to_ell_ids[importer_node.id].append(ell_node_id)
        else:
            self.importer_id_to_ell_ids[importer_node.id] = [ell_node_id]
        _logger.debug("ImporterNode {} -> intermediate ELL nodes {}".format(
            importer_node.id, self.importer_id_to_ell_ids[importer_node.id]))
        # Add output id to owner mapping.
        for output_id in importer_node.outputs:
            self.set_owning_node_for_output(output_id, ell_node)
        if set_group_id:
            # Set the node's metadata to show where this node came from
            ell_node.SetMetadataValue("GroupId", importer_node.id)
            # Also use this as the node's friendly name (by default)
            ell_node.SetMetadataValue("name", importer_node.id)
        # concatenate any importer_node metadata provided by importer
        if importer_node.metadata is not None:
            for key in importer_node.metadata:
                value = importer_node.metadata[key]
                ell_node.SetMetadataValue(key, value)
        # Add owning id mapping
        self.ell_id_to_owning_importer_node[ell_node_id] = importer_node

    def add_ell_node(self, ell_node: ell.nodes.Node):
        """
        Adds an ELL node to the lookup.
        """
        ell_node_id = ell_node.GetId()
        self.ell_id_to_ell_nodes[ell_node_id] = ell_node

    def get_ell_node_from_id(self, node_id: str):
        """Return the ELL node registered under node_id (KeyError if absent)."""
        return self.ell_id_to_ell_nodes[node_id]

    def get_ell_id(self, importer_node_id: str):
        """
        Return the id of the last ELL node associated with this importer node.
        Returns None when the importer node has no ELL nodes yet.
        """
        id = None
        if importer_node_id in self.importer_id_to_ell_ids:
            id = self.importer_id_to_ell_ids[importer_node_id][-1]
        return id

    def get_ell_node_from_importer_node_id(self, importer_node_id: str):
        """
        Return the last ELL node associated with this importer node.
        Returns None when no association exists.
        """
        node = None
        if importer_node_id in self.importer_id_to_ell_ids:
            id = self.importer_id_to_ell_ids[importer_node_id][-1]
            if id in self.ell_id_to_ell_nodes:
                node = self.ell_id_to_ell_nodes[id]
        return node

    def get_tensor_in_ell_order(self, uid: str):
        """
        Returns a numpy array in ELL order

        Raises an Exception when the tensor id is unknown.
        """
        if uid not in self.tensors:
            raise Exception("Required tensor {} not found".format(uid))
        original_tensor, order = self.tensors[uid]
        return memory_shapes.get_tensor_in_ell_order(original_tensor, order)

    def get_vector_from_constant(self, uid: str, size: int):
        """
        Returns a single dimensional numpy array containing the tensor weights.
        If the tensor is actually a scalar, expand it to be a vector of length
        'size'.
        """
        original_vector, order = self.tensors[uid]
        # Workaround: For some reason, np.full is not returning a type that SWIG can parse.
        # So just manually walk the array setting the scalar.
        # FIX: np.float64 replaces the np.float alias, which was deprecated in
        # NumPy 1.20 and removed in NumPy 1.24 (it was just the builtin float).
        array = np.zeros(size, dtype=np.float64)
        for i in range(array.size):
            array[i] = original_vector
        return array

    def get_vector_in_ell_order(self, uid: str):
        """
        Returns a single dimensional numpy array containing the tensor weights.

        NOTE(review): assumes the stored tensor is 1-dimensional -- iterating
        a higher-rank array would yield sub-arrays, not scalars.
        """
        original_vector, order = self.tensors[uid]
        # FIX: np.float64 replaces the removed np.float alias (see above).
        ordered_weights = np.zeros(original_vector.size, dtype=np.float64)
        for i, value in enumerate(original_vector):
            ordered_weights[i] = value
        return ordered_weights

    def get_tensor_info(self, uid: str):
        """
        Returns a tuple containing (shape, order) for the tensor.
        """
        value, order = self.tensors[uid]
        return (value.shape, order)

    def get_port_elements_for_input(self, importer_node: ImporterNode, input_index=0) -> ell.nodes.PortElements:
        """
        Returns an ell.nodes.PortElements for the corresponding ImporterNode.
        """
        try:
            # First check whether this importer node has any corresponding
            # ELL nodes yet:
            # - If it does, grab the output of the last ELL node which
            #   is designated as the input to this node.
            # - If it doesn't, grab the output of the last ELL node which
            #   the Importer's input is tied to.
            owning_node = self.get_ell_node_from_importer_node_id(importer_node.id)
            if owning_node is None:
                owning_node_id = self.output_id_to_ell_ids[importer_node.inputs[input_index]]
                owning_node = self.ell_id_to_ell_nodes[owning_node_id]
        except BaseException:
            raise Exception("Cannot get input port elements for {}, missing ELL owning node".format(importer_node.id))
        return self.get_output_port_elements_for_node(owning_node)

    def get_port_elements_and_memory_layout_for_input(self, importer_node: ImporterNode, input_index=0) \
            -> (ell.nodes.PortElements, ell.model.PortMemoryLayout):
        """
        Returns an (ell.nodes.PortElements, ell.nodes.PortMemoryLayout) for the corresponding input of the ImporterNode.
        """
        try:
            owning_ell_node = self.get_owning_node_for_output(importer_node.inputs[input_index])
            owning_importer_node = self.ell_id_to_owning_importer_node[owning_ell_node.GetId()]
            padding = owning_importer_node.output_padding["size"]
            output_shape = owning_importer_node.output_shapes[0]
            port_elements = self.get_port_elements_for_input(importer_node, input_index)
            port_memory_layout = memory_shapes.get_ell_port_memory_layout(output_shape[0], output_shape[1], padding)
        except BaseException:
            raise Exception("Could not get PortMemoryElements or PortMemoryLayout for importer node {}, input {}"
                            .format(importer_node.id, input_index))
        return (port_elements, port_memory_layout)

    def get_output_port_elements_for_node(self, ell_node: ell.nodes.Node, output_label: str = "output"):
        """
        Returns an ell.nodes.PortElements for the corresponding ELL node's
        output port that corresponds to 'output_label'.
        """
        try:
            output_link = ell_node.GetOutputPort(output_label)
        except BaseException:
            raise Exception("Cannot get output port {} for {}".format(output_label, ell_node.GetId()))
        return ell.nodes.PortElements(output_link)

    def get_owning_node_for_output(self, output_id: str) -> ell.nodes.Node:
        """
        Gets the ELL node that owns the output identified by output_id.
        """
        try:
            ell_node_id = self.output_id_to_ell_ids[output_id]
            ell_node = self.ell_id_to_ell_nodes[ell_node_id]
        except BaseException:
            raise Exception("Cannot find owning ELL node for output {}".format(output_id))
        return ell_node

    def get_originating_importer_node_for_output(self, output_id: str) -> ImporterNode:
        """
        Gets the originating ImporterNode for the output identified by output_id.
        """
        try:
            ell_node_id = self.output_id_to_ell_ids[output_id]
            importer_node = self.ell_id_to_owning_importer_node[ell_node_id]
        except BaseException:
            raise Exception("Cannot find originating ImporterNode node for output {}".format(output_id))
        return importer_node

    def set_owning_node_for_output(self, output_id: str, ell_node: ell.nodes.Node):
        """
        Sets the mapping for the ELL node that owns the output identified
        by output_id.
        """
        self.output_id_to_ell_ids[output_id] = ell_node.GetId()

    def add_ell_input(self, ell_node: ell.nodes.Node):
        """Prepend an ELL node to the list of map inputs."""
        self.input_ell_nodes = [ell_node] + self.input_ell_nodes

    def get_ell_inputs(self):
        """Return the registered map input nodes."""
        return self.input_ell_nodes

    def add_ell_output(self, ell_node: ell.nodes.Node):
        """Prepend an ELL node to the list of map outputs."""
        self.output_ell_nodes = [ell_node] + self.output_ell_nodes

    def get_ell_outputs(self):
        """Return the registered map output nodes."""
        return self.output_ell_nodes
class ConvertBase:
    """
    Base class for converting an ImporterNode into an ELL Node
    """

    def __init__(self, node: ImporterNode):
        """
        Derived classes should initialize their required_inputs,
        required_weights and required_attributes
        """
        self.importer_node = node
        self.required_weights = []
        self.required_attributes = []
        self.optional = False

    def can_convert(self) -> bool:
        """
        Verify that the node contains the necessary inputs, weights and
        attributes to convert. Nodes that cannot be converted due to
        missing weights or attributes are deemed optional and are skipped.
        See comments in operation_map for examples.
        """
        node = self.importer_node
        for weight_name in self.required_weights:
            if weight_name not in node.weights:
                if self.optional:
                    return False
                raise Exception("Missing required weight '{}' on node {}_{}".format(
                    weight_name, node.operation_type, node.id))
        for attribute_name in self.required_attributes:
            if attribute_name not in node.attributes:
                if self.optional:
                    return False
                raise Exception("Missing required attribute {} on node {}_{}".format(
                    attribute_name, node.operation_type, node.id))
        return True

    def get_input_parameters(self, first_in_block=True, input_index=0):
        """
        Return the input shape and padding parameters as a tuple.
        first_in_block - indicates whether this will be the first ell
        node in a block. If it is, it will have its padding requirements set
        differently.
        input_index - indicates the index of the input shape requested.
        """
        if first_in_block:
            # First node of a block uses its declared input shape and padding.
            shape_entry = self.importer_node.input_shapes[input_index]
            pad_size = self.importer_node.padding["size"]
        else:
            # Interior nodes consume the unpadded output of the previous node.
            shape_entry = self.importer_node.output_shapes[0]
            pad_size = 0
        shape = self.get_ell_shape(shape_entry[0], shape_entry[1], pad_size)
        pad_params = ell.neural.PaddingParameters(self.importer_node.padding["scheme"], pad_size)
        return (shape, pad_params)

    def get_output_parameters(self, last_in_block=True, output_index=0):
        """
        Return the output shape and padding parameters as a tuple.
        last_in_block - indicates whether this will be the last ell
        node in a block. If it is, it will have its output padding set
        differently.
        """
        shape_entry = self.importer_node.output_shapes[output_index]
        pad_size = self.importer_node.output_padding["size"] if last_in_block else 0
        shape = self.get_ell_shape(shape_entry[0], shape_entry[1], pad_size)
        pad_params = ell.neural.PaddingParameters(self.importer_node.output_padding["scheme"], pad_size)
        return (shape, pad_params)

    def get_layer_parameters(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the ELL layer parameters for this node.
        """
        in_shape, in_padding = self.get_input_parameters(conversion_parameters["first_in_block"])
        out_shape, out_padding = self.get_output_parameters(conversion_parameters["last_in_block"])
        return ell.neural.LayerParameters(in_shape, in_padding, out_shape, out_padding,
                                          ell.nodes.PortType.smallReal)

    def get_ell_shape(self, shape: tuple, order: str, padding: int = 0):
        """
        Return the shape in ELL canonical order
        """
        return memory_shapes.get_ell_shape(shape, order, padding)

    def get_ell_tensor(self, uid: str, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Returns a weight tensor as an ELL tensor
        """
        lookup_table = conversion_parameters["lookup_table"]
        return ell.math.DoubleTensor(lookup_table.get_tensor_in_ell_order(uid))

    def get_vector(self, uid: str, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Returns a weight tensor as a 1 dimensional numpy array. If the
        original tensor is a scalar, it will be expanded to a vector of size
        equal to the number of output channels.
        """
        lookup_table = conversion_parameters["lookup_table"]
        shape, order = lookup_table.get_tensor_info(uid)
        if shape:
            return lookup_table.get_vector_in_ell_order(uid)
        # Empty shape means a scalar: broadcast it across the node's
        # output channels.
        shape_entry = self.importer_node.output_shapes[0]
        channels = self.get_ell_shape(shape_entry[0], shape_entry[1], 0).channels
        return lookup_table.get_vector_from_constant(uid, channels)

    def get_ell_vector(self, uid: str, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Returns a weight tensor as an ELL vector. If the original tensor is a
        scalar, it will be expanded to a vector of size equal to the number of
        output channels.
        """
        return ell.math.DoubleVector(self.get_vector(uid, conversion_parameters))

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Derived classes override to return the appropriate ELL node
        """
        return None
class ConvertActivation(ConvertBase):
    """
    Converter for Activation
    """

    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["activation"]

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Build the ELL activation layer; leaky ReLU gets its own layer type
        carrying the alpha slope (default 0.01).
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        attrs = self.importer_node.attributes
        activation = attrs["activation"]
        alpha = attrs["alpha"] if "alpha" in attrs else 0.01
        if activation == ell.neural.ActivationType.leaky:
            return ell.neural.LeakyReLUActivationLayer(layer_parameters, alpha)
        return ell.neural.ActivationLayer(layer_parameters, activation)

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Wrap the activation layer in an ActivationLayerNode and insert it
        into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]

        activation_layer = self.convert(conversion_parameters)
        source = lookup_table.get_port_elements_for_input(self.importer_node)
        node = builder.AddActivationLayerNode(model, source, activation_layer)
        # Record the importer-node -> ELL-node association.
        lookup_table.add_imported_ell_node(self.importer_node, node)
class OptionalConvertActivation(ConvertActivation):
    """
    Activation converter whose requirements may be absent: nodes that
    cannot be converted are skipped rather than raising.
    """

    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.optional = True
class ConvertAveragePooling(ConvertBase):
    """
    Converter for Average Pooling
    """

    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["size", "stride"]

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Build an ELL mean-pooling layer from the node's size/stride attributes.
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        attrs = self.importer_node.attributes
        pool_params = ell.neural.PoolingParameters(attrs["size"], attrs["stride"])
        return ell.neural.PoolingLayer(layer_parameters, pool_params, ell.neural.PoolingType.mean)

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Wrap the pooling layer in a PoolingLayerNode and insert it into
        the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]

        pooling_layer = self.convert(conversion_parameters)
        source = lookup_table.get_port_elements_for_input(self.importer_node)
        node = builder.AddPoolingLayerNode(model, source, pooling_layer)
        # Record the importer-node -> ELL-node association.
        lookup_table.add_imported_ell_node(self.importer_node, node)
class ConvertBatchNormalization(ConvertBase):
    """
    Converter for BatchNormalization
    """

    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["mean", "variance"]
        self.required_attributes = []
        # Numerical-stability constant added to the variance.
        self.epsilon = 1e-5

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Build the ELL BatchNormalizationLayer from the node's mean and
        variance weight vectors.
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        mean = self.get_ell_vector(self.importer_node.weights["mean"][0], conversion_parameters)
        variance = self.get_ell_vector(self.importer_node.weights["variance"][0], conversion_parameters)
        return ell.neural.BatchNormalizationLayer(layer_parameters, mean, variance, self.epsilon,
                                                  ell.neural.EpsilonSummand.variance)

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Wrap the batch-normalization layer in a node and insert it into
        the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]

        bn_layer = self.convert(conversion_parameters)
        source = lookup_table.get_port_elements_for_input(self.importer_node)
        node = builder.AddBatchNormalizationLayerNode(model, source, bn_layer)
        # Record the importer-node -> ELL-node association.
        lookup_table.add_imported_ell_node(self.importer_node, node)
class ConvertBias(ConvertBase):
    """
    Converter for Bias
    """

    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["bias"]
        self.required_attributes = []

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Build the ELL BiasLayer from the node's bias weight vector.
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        bias_vector = self.get_ell_vector(self.importer_node.weights["bias"][0], conversion_parameters)
        return ell.neural.BiasLayer(layer_parameters, bias_vector)

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Wrap the bias layer in a BiasLayerNode and insert it into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]

        bias_layer = self.convert(conversion_parameters)
        source = lookup_table.get_port_elements_for_input(self.importer_node)
        node = builder.AddBiasLayerNode(model, source, bias_layer)
        # Record the importer-node -> ELL-node association.
        lookup_table.add_imported_ell_node(self.importer_node, node)
class OptionalConvertBias(ConvertBias):
    """
    Bias converter whose requirements may be absent: nodes that cannot be
    converted are skipped rather than raising.
    """

    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.optional = True
class ConvertBinaryConvolution(ConvertBase):
    """
    Converter for BinaryConvolution
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["weights"]
        self.required_attributes = ["size", "stride"]

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the ELL BinaryConvolutionalLayer for this importer node,
        using the bitwise convolution method with unscaled binary weights.
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        # FIX: removed a bare `layer_parameters.inputPaddingParameters`
        # expression statement here -- it was a no-op attribute access with
        # no effect (likely leftover debugging code).
        weights = self.get_ell_tensor(
            self.importer_node.weights["weights"][0], conversion_parameters)
        attributes = self.importer_node.attributes
        convolutional_parameters = ell.neural.BinaryConvolutionalParameters(
            attributes["size"], attributes["stride"], ell.neural.BinaryConvolutionMethod.bitwise,
            ell.neural.BinaryWeightsScale.none)
        return ell.neural.BinaryConvolutionalLayer(layer_parameters, convolutional_parameters, weights)

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Insert a BinaryConvolutionalLayerNode into the model, preceded by a
        ReorderDataNode when the producer's output padding does not match
        this node's required input padding.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Create the convolutional layer
        convolutional_layer = self.convert(conversion_parameters)
        # Get the port elements from the input
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # If we require padding but the input doesn't provide it
        # (which can happen when a single node output is used as input to
        # multiple nodes), ensure correct padding with a ReorderDataNode.
        owning_node_for_input = lookup_table.get_originating_importer_node_for_output(self.importer_node.inputs[0])
        padding = self.importer_node.padding["size"]
        if (owning_node_for_input.output_padding["size"] != padding):
            input_node = lookup_table.get_ell_node_from_importer_node_id(owning_node_for_input.id)
            port_elements = lookup_table.get_output_port_elements_for_node(input_node)
            shape_entry = owning_node_for_input.output_shapes[0]
            input_memory_layout = memory_shapes.get_ell_port_memory_layout(
                shape_entry[0], shape_entry[1], owning_node_for_input.output_padding["size"])
            output_memory_layout = memory_shapes.get_ell_port_memory_layout(shape_entry[0], shape_entry[1], padding)
            # Create the reorder node
            reorder_node = builder.AddReorderDataNode(model, port_elements, input_memory_layout, output_memory_layout,
                                                      [0, 1, 2])
            # Register the mapping
            lookup_table.add_imported_ell_node(self.importer_node, reorder_node)
            input_port_elements = lookup_table.get_output_port_elements_for_node(reorder_node)
        # Add the ConvolutionalLayerNode to the model
        ell_node = builder.AddBinaryConvolutionalLayerNode(model, input_port_elements, convolutional_layer)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertConvolution(ConvertBase):
    """
    Converter for Convolution
    """

    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["weights"]
        self.required_attributes = ["size", "stride"]

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Build the ELL ConvolutionalLayer from the node's weight tensor and
        size/stride attributes.
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        weights = self.get_ell_tensor(self.importer_node.weights["weights"][0], conversion_parameters)
        attrs = self.importer_node.attributes
        conv_params = ell.neural.ConvolutionalParameters(attrs["size"], attrs["stride"], 0, 1)
        return ell.neural.ConvolutionalLayer(layer_parameters, conv_params, weights)

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Insert a ConvolutionalLayerNode into the model, preceded by a
        ReorderDataNode when the producer's output padding does not match
        this node's required input padding (which can happen when one
        output feeds several consumers).
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]

        convolutional_layer = self.convert(conversion_parameters)
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)

        producer = lookup_table.get_originating_importer_node_for_output(self.importer_node.inputs[0])
        required_padding = self.importer_node.padding["size"]
        actual_padding = producer.output_padding["size"]
        if actual_padding != required_padding:
            # Re-pad the producer's output via a ReorderDataNode.
            producer_ell_node = lookup_table.get_ell_node_from_importer_node_id(producer.id)
            producer_output = lookup_table.get_output_port_elements_for_node(producer_ell_node)
            shape_entry = producer.output_shapes[0]
            source_layout = memory_shapes.get_ell_port_memory_layout(
                shape_entry[0], shape_entry[1], actual_padding)
            target_layout = memory_shapes.get_ell_port_memory_layout(
                shape_entry[0], shape_entry[1], required_padding)
            reorder_node = builder.AddReorderDataNode(model, producer_output, source_layout, target_layout,
                                                      [0, 1, 2])
            lookup_table.add_imported_ell_node(self.importer_node, reorder_node)
            input_port_elements = lookup_table.get_output_port_elements_for_node(reorder_node)

        node = builder.AddConvolutionalLayerNode(model, input_port_elements, convolutional_layer)
        # Record the importer-node -> ELL-node association.
        lookup_table.add_imported_ell_node(self.importer_node, node)
class ConvertFullyConnected(ConvertBase):
    """
    Converter for FullyConnected
    """

    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["weights"]
        self.required_attributes = []

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Build the ELL FullyConnectedLayer from the node's weight tensor.
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        weights = self.get_ell_tensor(self.importer_node.weights["weights"][0], conversion_parameters)
        return ell.neural.FullyConnectedLayer(layer_parameters, weights)

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Wrap the fully-connected layer in a FullyConnectedLayerNode and
        insert it into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]

        fc_layer = self.convert(conversion_parameters)
        source = lookup_table.get_port_elements_for_input(self.importer_node)
        node = builder.AddFullyConnectedLayerNode(model, source, fc_layer)
        # Record the importer-node -> ELL-node association.
        lookup_table.add_imported_ell_node(self.importer_node, node)
class ConvertElementTimes(ConvertBase):
    """
    Converter for Element Times, which is equivalent to Scaling
    """

    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["scale"]
        self.required_attributes = []

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Build the ELL ScalingLayer from the node's scale weight vector.
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        scale_vector = self.get_ell_vector(self.importer_node.weights["scale"][0], conversion_parameters)
        return ell.neural.ScalingLayer(layer_parameters, scale_vector)

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Wrap the scaling layer in a ScalingLayerNode and insert it into
        the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]

        scaling_layer = self.convert(conversion_parameters)
        source = lookup_table.get_port_elements_for_input(self.importer_node)
        node = builder.AddScalingLayerNode(model, source, scaling_layer)
        # Record the importer-node -> ELL-node association.
        lookup_table.add_imported_ell_node(self.importer_node, node)
class ConvertGRU(ConvertBase):
    """
    Converter for Gated Recurrent Unit (GRU). If the GRU node has 2 inputs,
    the second input is used as the reset trigger, otherwise a constant node
    is inserted as the trigger.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["input_weights", "hidden_weights", "input_bias", "hidden_bias"]
        self.required_attributes = ["hidden_size", "activation", "recurrent_activation"]

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        There is no layer form of a GRU; only the node form exists.
        """
        raise Exception("No corresponding ELL layer for GRU. Use node instead.")

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Create constant nodes for the four weight/bias tensors, wire up the
        reset trigger, and insert a GRUNode into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Get the port elements from the input
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # Create constant nodes for the weights and biases
        input_weights = self.get_ell_tensor(
            self.importer_node.weights["input_weights"][0], conversion_parameters)
        hidden_weights = self.get_ell_tensor(
            self.importer_node.weights["hidden_weights"][0], conversion_parameters)
        input_bias = self.get_ell_tensor(
            self.importer_node.weights["input_bias"][0], conversion_parameters)
        hidden_bias = self.get_ell_tensor(
            self.importer_node.weights["hidden_bias"][0], conversion_parameters)
        input_weights_node = builder.AddConstantNode(model, input_weights.data, ell.nodes.PortType.smallReal)
        hidden_weights_node = builder.AddConstantNode(model, hidden_weights.data, ell.nodes.PortType.smallReal)
        input_bias_node = builder.AddConstantNode(model, input_bias.data, ell.nodes.PortType.smallReal)
        # Fixed: this node was previously assigned to "hidden_bias", clobbering
        # the tensor variable above and breaking the "*_node" naming convention
        # used by the other three constants.
        hidden_bias_node = builder.AddConstantNode(model, hidden_bias.data, ell.nodes.PortType.smallReal)
        hidden_size = self.importer_node.attributes["hidden_size"]
        activation = self.importer_node.attributes["activation"]
        recurrent_activation = self.importer_node.attributes["recurrent_activation"]
        # Get the port elements for the reset trigger
        if len(self.importer_node.inputs) > 1 and self.importer_node.inputs[1] != '':
            reset_port_elements, reset_memory_layout = lookup_table.get_port_elements_and_memory_layout_for_input(
                self.importer_node, 1)
        else:
            # Create a constant node as the trigger. The trigger fires on value change,
            # so will never fire in this case.
            reset_node = builder.AddConstantNode(model, [0], ell.nodes.PortType.integer)
            reset_port_elements = ell.nodes.PortElements(reset_node.GetOutputPort("output"))
        # Add the GRUNode to the model
        ell_node = builder.AddGRUNode(
            model, input_port_elements, reset_port_elements, hidden_size,
            ell.nodes.PortElements(input_weights_node.GetOutputPort("output")),
            ell.nodes.PortElements(hidden_weights_node.GetOutputPort("output")),
            ell.nodes.PortElements(input_bias_node.GetOutputPort("output")),
            ell.nodes.PortElements(hidden_bias_node.GetOutputPort("output")),
            activation, recurrent_activation)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertFastGRNN(ConvertBase):
    """
    Converter for the Fast Gated Recurrent Neural Network (FastGRNN) unit.
    If the node has 2 inputs, the second input is used as the reset trigger,
    otherwise a constant node is inserted as the trigger.
    """
    # Weight tensors in the order their constant nodes must be created.
    _WEIGHT_NAMES = ('W1', 'W2', 'U1', 'U2', 'bias_gate', 'bias_update', 'zeta', 'nu')

    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ['W1', 'W2', 'U1', 'U2', 'bias_gate', 'bias_update', 'zeta', 'nu']
        self.required_attributes = ["hidden_size", "gate_nonlinearity", "update_nonlinearity", "wRank", "uRank"]

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        There is no layer form of FastGRNN; only the node form exists.
        """
        raise Exception("No corresponding ELL layer for FastGRNN. Use node instead.")

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Create one constant node per FastGRNN weight tensor, wire up the
        reset trigger, and insert a FastGRNNNode into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        input_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # Build a constant node for every weight tensor, preserving order.
        weight_nodes = {}
        for name in self._WEIGHT_NAMES:
            tensor = self.get_ell_tensor(
                self.importer_node.weights[name][0], conversion_parameters)
            weight_nodes[name] = builder.AddConstantNode(
                model, tensor.data, ell.nodes.PortType.smallReal)
        attributes = self.importer_node.attributes
        hidden_size = attributes["hidden_size"]
        wRank = attributes["wRank"]
        uRank = attributes["uRank"]
        gate_nonlinearity = attributes["gate_nonlinearity"]
        update_nonlinearity = attributes["update_nonlinearity"]
        # Use the optional second input as the reset trigger when present.
        if len(self.importer_node.inputs) > 1 and self.importer_node.inputs[1] != '':
            reset_elements, _ = lookup_table.get_port_elements_and_memory_layout_for_input(
                self.importer_node, 1)
        else:
            # A constant trigger never changes value, so it never fires.
            constant_trigger = builder.AddConstantNode(model, [0], ell.nodes.PortType.integer)
            reset_elements = ell.nodes.PortElements(constant_trigger.GetOutputPort("output"))

        def output_of(name):
            # PortElements referencing the output port of a weight constant.
            return ell.nodes.PortElements(weight_nodes[name].GetOutputPort("output"))

        ell_node = builder.AddFastGRNNNode(
            model, input_elements, reset_elements, hidden_size, wRank, uRank,
            output_of('W1'), output_of('W2'), output_of('U1'), output_of('U2'),
            output_of('bias_gate'), output_of('bias_update'),
            output_of('zeta'), output_of('nu'),
            gate_nonlinearity, update_nonlinearity)
        # Register every constant node and the FastGRNN node itself.
        for name in self._WEIGHT_NAMES:
            lookup_table.add_imported_ell_node(self.importer_node, weight_nodes[name])
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertInput(ConvertBase):
    """
    Converter for Input. Creates the model's input node, optionally wrapped
    in clock/source nodes for steppable (timed) models, plus a reorder node
    when the consumer requires padded output.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        # Input has no weights and no required attributes.
        self.required_weights = []
        self.required_attributes = []
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return None: there is no layer form for Input.
        """
        # Skip processing the input. It is implicit when using
        # ELL Layers
        return None
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Insert the model input. In the steppable case (step_interval_msec set)
        the real input is a clock-tick value feeding a ClockNode and a
        SourceNode that pulls data via the "InputCallback" callback; otherwise
        a plain InputNode with the node's output shape is created.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        step_interval_msec = conversion_parameters["step_interval_msec"]
        lag_threshold_msec = conversion_parameters["lag_threshold_msec"]
        # Prefix for the generated callback function names (currently empty).
        function_prefix = ""
        # Add the InputNode to the model
        shape_entry = self.importer_node.output_shapes[0]
        ell_shape = self.get_ell_shape(shape_entry[0], shape_entry[1], 0)
        original_input_node = None
        if step_interval_msec is not None:
            # in the steppable case the input is a clock ticks (which is a double)
            input_node = builder.AddInputNode(
                model, ell.model.PortMemoryLayout([1]), ell.nodes.PortType.real)
            if lag_threshold_msec is None:
                # Default lag threshold: two step intervals.
                lag_threshold_msec = 2 * step_interval_msec
            clock_node = builder.AddClockNode(
                model, ell.nodes.PortElements(input_node.GetOutputPort("output")),
                float(step_interval_msec), float(lag_threshold_msec),
                "{}LagNotification".format(function_prefix))
            source_node = builder.AddSourceNode(
                model, ell.nodes.PortElements(clock_node.GetOutputPort("output")),
                ell.nodes.PortType.smallReal, ell.model.PortMemoryLayout(ell_shape),
                "{}InputCallback".format(function_prefix))
            # Downstream nodes consume the source node; the raw clock-tick
            # input node is kept separately as the model's true input.
            original_input_node = input_node
            input_node = source_node
        else:
            input_node = builder.AddInputNode(
                model, ell.model.PortMemoryLayout(ell_shape), ell.nodes.PortType.smallReal)
            original_input_node = input_node
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, input_node)
        if step_interval_msec is not None:
            # NOTE(review): in this branch input_node is source_node, so the
            # source node is registered twice — presumably intentional; confirm.
            lookup_table.add_imported_ell_node(self.importer_node, clock_node)
            lookup_table.add_imported_ell_node(self.importer_node, source_node)
        # Special case: If output requires padding e.g. Input is connected to a
        # Convolutional node that requires padding, add a ReorderData node to
        # ensure proper memory layout. This can be skipped once Input supports
        # different memory layouts of the output.
        padding = self.importer_node.output_padding["size"]
        if padding > 0:
            # Create the reorder node
            port_elements = lookup_table.get_output_port_elements_for_node(input_node)
            input_memory_layout = memory_shapes.get_ell_port_memory_layout(shape_entry[0], shape_entry[1], 0)
            output_memory_layout = memory_shapes.get_ell_port_memory_layout(shape_entry[0], shape_entry[1], padding)
            reorder_node = builder.AddReorderDataNode(model, port_elements, input_memory_layout, output_memory_layout,
                                                      [0, 1, 2])
            # Register the mapping
            lookup_table.add_imported_ell_node(self.importer_node, reorder_node)
        lookup_table.add_ell_input(original_input_node)
class ConvertTypeCast(ConvertBase):
    """
    Converter for an explicit type cast.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["cast_to"]

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        There is no layer form of a type cast; only the node form exists.
        """
        raise Exception("No corresponding ELL layer for TypeCast. Use node instead.")

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Insert a TypeCastNode that casts this node's input to the type
        named by the "cast_to" attribute.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        target_type = self.importer_node.attributes["cast_to"]
        source_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        cast_node = builder.AddTypeCastNode(model, source_elements, target_type)
        # Record the importer-node -> ELL-node mapping.
        lookup_table.add_imported_ell_node(self.importer_node, cast_node)
class ConvertLeakyReLU(ConvertActivation):
    """
    Converter for LeakyReLU, handled via the generic activation converter
    with the activation attribute forced to the leaky type.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = []
        # Force the leaky activation type on the importer node.
        self.importer_node.attributes["activation"] = ell.neural.ActivationType.leaky
class ConvertLSTM(ConvertBase):
    """
    Converter for Long Short-Term Memory (LSTM) unit. If the LSTM node has
    2 inputs, the second input is used as the reset trigger, otherwise a
    constant node is inserted as the trigger.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["input_weights", "hidden_weights", "input_bias", "hidden_bias"]
        self.required_attributes = ["hidden_size", "activation", "recurrent_activation"]

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        There is no layer form of an LSTM; only the node form exists.
        """
        raise Exception("No corresponding ELL layer for LSTM. Use node instead.")

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Create constant nodes for the four weight/bias tensors, wire up the
        reset trigger, and insert an LSTMNode into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Get the port elements from the input
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # Create constant nodes for the weights and biases
        input_weights = self.get_ell_tensor(
            self.importer_node.weights["input_weights"][0], conversion_parameters)
        hidden_weights = self.get_ell_tensor(
            self.importer_node.weights["hidden_weights"][0], conversion_parameters)
        input_bias = self.get_ell_tensor(
            self.importer_node.weights["input_bias"][0], conversion_parameters)
        hidden_bias = self.get_ell_tensor(
            self.importer_node.weights["hidden_bias"][0], conversion_parameters)
        input_weights_node = builder.AddConstantNode(model, input_weights.data, ell.nodes.PortType.smallReal)
        hidden_weights_node = builder.AddConstantNode(model, hidden_weights.data, ell.nodes.PortType.smallReal)
        input_bias_node = builder.AddConstantNode(model, input_bias.data, ell.nodes.PortType.smallReal)
        # Fixed: this node was previously assigned to "hidden_bias", clobbering
        # the tensor variable above and breaking the "*_node" naming convention
        # used by the other three constants.
        hidden_bias_node = builder.AddConstantNode(model, hidden_bias.data, ell.nodes.PortType.smallReal)
        hidden_size = self.importer_node.attributes["hidden_size"]
        activation = self.importer_node.attributes["activation"]
        recurrent_activation = self.importer_node.attributes["recurrent_activation"]
        # Get the port elements for the reset trigger
        if len(self.importer_node.inputs) > 1 and self.importer_node.inputs[1] != '':
            reset_port_elements, reset_memory_layout = lookup_table.get_port_elements_and_memory_layout_for_input(
                self.importer_node, 1)
        else:
            # Create a constant node as the trigger. The trigger fires on value change,
            # so will never fire in this case.
            reset_node = builder.AddConstantNode(model, [0], ell.nodes.PortType.integer)
            reset_port_elements = ell.nodes.PortElements(reset_node.GetOutputPort("output"))
        # Add the LSTMNode to the model
        ell_node = builder.AddLSTMNode(
            model, input_port_elements, reset_port_elements, hidden_size,
            ell.nodes.PortElements(input_weights_node.GetOutputPort("output")),
            ell.nodes.PortElements(hidden_weights_node.GetOutputPort("output")),
            ell.nodes.PortElements(input_bias_node.GetOutputPort("output")),
            ell.nodes.PortElements(hidden_bias_node.GetOutputPort("output")),
            activation, recurrent_activation)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertMaxPooling(ConvertBase):
    """
    Converter for Max Pooling.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["size", "stride"]

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return an ELL max-pooling layer built from the node's "size" and
        "stride" attributes.
        """
        parameters = self.get_layer_parameters(conversion_parameters)
        pooling_parameters = ell.neural.PoolingParameters(
            self.importer_node.attributes["size"],
            self.importer_node.attributes["stride"])
        return ell.neural.PoolingLayer(
            parameters, pooling_parameters, ell.neural.PoolingType.max)

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Wrap the max-pooling layer in a PoolingLayerNode and insert it
        into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        input_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        pooling_node = builder.AddPoolingLayerNode(
            model, input_elements, self.convert(conversion_parameters))
        # Record the importer-node -> ELL-node mapping.
        lookup_table.add_imported_ell_node(self.importer_node, pooling_node)
class ConvertMinus(ConvertBase):
    """
    Converter for Minus. ELL only has an additive bias layer, so the bias
    values are negated before being applied.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["bias"]
        self.required_attributes = []

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return an additive bias layer whose values are the negated bias
        weights of this node.
        """
        parameters = self.get_layer_parameters(conversion_parameters)
        values = self.get_vector(
            self.importer_node.weights["bias"][0], conversion_parameters)
        # Minus is a negative bias in ELL. Flip the sign so the additive
        # bias layer performs the subtraction.
        return ell.neural.BiasLayer(parameters, -1.0 * values)

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Wrap the negated-bias layer in a BiasLayerNode and insert it into
        the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        input_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        bias_node = builder.AddBiasLayerNode(
            model, input_elements, self.convert(conversion_parameters))
        # Record the importer-node -> ELL-node mapping.
        lookup_table.add_imported_ell_node(self.importer_node, bias_node)
class ConvertPassthrough(ConvertBase):
    """
    Converter for Passthrough, which has information on inputs and outputs but
    doesn't produce any ELL nodes/layers.
    Its sole purpose is to preserve connections between nodes during the
    conversion process.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return None: a passthrough produces no layer.
        """
        return None
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Produce no new ELL node; instead map this importer node to the ELL
        node that owns its (single) input so downstream lookups resolve
        through the passthrough.
        """
        lookup_table = conversion_parameters["lookup_table"]
        # Set owner of this output to be the Passthrough node's input node
        if len(self.importer_node.inputs) == 0:
            raise Exception("### Passthrough node {}({}) has no inputs".format(self.importer_node.operation_type,
                                                                               self.importer_node.id))
        input_owner = lookup_table.get_owning_node_for_output(self.importer_node.inputs[0])
        lookup_table.add_imported_ell_node(self.importer_node, input_owner, set_group_id=False)
class ConvertBinaryOperation(ConvertBase):
    """
    Base converter for binary operations; subclasses supply the operator.

    Inserts ReinterpretLayoutNodes as needed so both inputs (and the output)
    conform to the expected memory layouts before/after the operation.
    """
    def __init__(self, node: ImporterNode, op: ell.nodes.BinaryOperationType):
        super().__init__(node)
        self.operator = op
        self.required_weights = []
        self.required_attributes = []

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return None: binary operations have no layer form, only a node form.
        """
        return None

    def add_reinterpret_node(self, builder, model, input_elements, memory_layout):
        # Wrap the elements in a ReinterpretLayoutNode; return the resulting
        # port elements together with the node itself.
        node = builder.AddReinterpretLayoutNode(model, input_elements, memory_layout)
        return (ell.nodes.PortElements(node.GetOutputPort("output")), node)

    def reinterpret_input(self, builder, model, input_elements, memory_layout):
        """
        Return (elements, node) where elements are guaranteed to use the
        requested memory layout, adding a reinterpret node when necessary.
        The node is None when no reinterpretation was needed.
        """
        input_layout = input_elements.GetMemoryLayout()
        if not input_layout == memory_layout:
            # Only a pure layout change is supported: total element counts
            # must match, i.e. no broadcasting.
            # Fixed: np.product is deprecated and removed in NumPy 2.0;
            # np.prod is the supported equivalent.
            if np.prod(list(input_layout.size)) != np.prod(list(memory_layout.size)):
                raise Exception("Binary operation {} does not yet support broadcasting".format(self.operator))
            return self.add_reinterpret_node(builder, model, input_elements, memory_layout)
        return (input_elements, None)

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Insert a BinaryOperationNode (plus any reinterpret nodes needed to
        reconcile input/output memory layouts) into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Get the port elements and memory layout from the two inputs.
        # Since the 2 inputs and output could have different padding,
        # we need both the port elements and the memory layouts for each.
        input1_port_elements, input1_port_memory_layout = lookup_table.get_port_elements_and_memory_layout_for_input(
            self.importer_node, 0)
        input2_port_elements, input2_port_memory_layout = lookup_table.get_port_elements_and_memory_layout_for_input(
            self.importer_node, 1)
        output_shape_tuple = self.importer_node.output_shapes[0]
        output_port_memory_layout = memory_shapes.get_ell_port_memory_layout(
            output_shape_tuple[0],
            output_shape_tuple[1],
            self.importer_node.output_padding["size"])
        # Reconcile the input layouts before applying the operation.
        input1_port_elements, _ = self.reinterpret_input(builder, model, input1_port_elements,
                                                         input1_port_memory_layout)
        input2_port_elements, _ = self.reinterpret_input(builder, model, input2_port_elements,
                                                         input2_port_memory_layout)
        # Add the BinaryOperationNode to the model.
        ell_node = builder.AddBinaryOperationNode(
            model,
            input1_port_elements,
            input2_port_elements,
            self.operator)
        output_elements = ell.nodes.PortElements(ell_node.GetOutputPort("output"))
        # Reinterpret the output into the (possibly padded) output layout.
        output_port_elements, new_output_node = self.reinterpret_input(builder, model, output_elements,
                                                                       output_port_memory_layout)
        if new_output_node is not None:
            ell_node = new_output_node
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertPlus(ConvertBinaryOperation):
    """
    Converter for Plus: element-wise addition of two inputs.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.BinaryOperationType.add)
class ConvertSubtract(ConvertBinaryOperation):
    """
    Converter for Subtract: element-wise subtraction of one input from another.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.BinaryOperationType.subtract)
class ConvertCoordinatewiseMultiply(ConvertBinaryOperation):
    """
    Converter for CoordinatewiseMultiply: element-wise multiplication of two inputs.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.BinaryOperationType.multiply)
class ConvertCoordinatewiseDivide(ConvertBinaryOperation):
    """
    Converter for CoordinatewiseDivide: element-wise division of two inputs.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.BinaryOperationType.divide)
class ConvertPooling(ConvertBase):
    """
    Converter for generic Pooling, parameterized by the "poolingType"
    attribute.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["size", "stride", "poolingType"]

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return None: no layer is produced by this converter.
        """
        return None
class ConvertPReLU(ConvertBase):
    """
    Converter for PReLU, expressed as an ELL PReLU activation layer using
    the node's "alpha" tensor.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["alpha"]
        self.required_attributes = []

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return a PReLU activation layer built from the alpha tensor.
        """
        parameters = self.get_layer_parameters(conversion_parameters)
        alpha_tensor = self.get_ell_tensor(
            self.importer_node.weights["alpha"][0], conversion_parameters)
        return ell.neural.PReLUActivationLayer(parameters, alpha_tensor)

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Wrap the PReLU activation layer in an ActivationLayerNode and insert
        it into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        input_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        activation_node = builder.AddActivationLayerNode(
            model, input_elements, self.convert(conversion_parameters))
        # Record the importer-node -> ELL-node mapping.
        lookup_table.add_imported_ell_node(self.importer_node, activation_node)
class ConvertReLU(ConvertActivation):
    """
    Converter for ReLU, handled via the generic activation converter with
    the activation attribute forced to the ReLU type.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = []
        # Force the ReLU activation type on the importer node.
        self.importer_node.attributes["activation"] = ell.neural.ActivationType.relu
class ConvertRegion(ConvertBase):
    """
    Converter for the region detection layer (e.g. YOLO-style grid output).
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["width", "height", "numBoxesPerCell", "numClasses", "numAnchors", "applySoftmax"]

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return an ELL region detection layer built from the node's attributes.
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        attributes = self.importer_node.attributes
        region_detection_parameters = ell.neural.RegionDetectionParameters(
            attributes["width"],
            attributes["height"],
            attributes["numBoxesPerCell"],
            attributes["numClasses"],
            attributes["numAnchors"],
            attributes["applySoftmax"]
        )
        # Fixed: this previously constructed a FullyConnectedLayer from
        # RegionDetectionParameters, which does not match the
        # RegionDetectionLayerNode created in convert_node below.
        return ell.neural.RegionDetectionLayer(
            layer_parameters, region_detection_parameters)

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Wrap the region detection layer in a RegionDetectionLayerNode and
        insert it into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Create the region detection layer
        region_layer = self.convert(conversion_parameters)
        # Get the port elements from the input
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # Add the RegionDetectionLayerNode to the model
        ell_node = builder.AddRegionDetectionLayerNode(model, input_port_elements, region_layer)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertScaling(ConvertBase):
    """
    Converter for Scaling.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = ["scale"]
        self.required_attributes = []

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return an ELL scaling layer built from the node's "scale" vector.
        """
        parameters = self.get_layer_parameters(conversion_parameters)
        scale_vector = self.get_ell_vector(
            self.importer_node.weights["scale"][0], conversion_parameters)
        return ell.neural.ScalingLayer(parameters, scale_vector)

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Wrap the scaling layer in a ScalingLayerNode and insert it into
        the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        input_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        scaling_node = builder.AddScalingLayerNode(
            model, input_elements, self.convert(conversion_parameters))
        # Record the importer-node -> ELL-node mapping.
        lookup_table.add_imported_ell_node(self.importer_node, scaling_node)
class OptionalConvertScaling(ConvertScaling):
    """
    Scaling converter flagged as optional (sets self.optional = True).
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.optional = True
class ConvertSoftmax(ConvertBase):
    """
    Converter for Softmax.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        # Bug fix: this was misspelled as "required_atteamstributes", which
        # created a stray attribute instead of setting required_attributes.
        self.required_attributes = []

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return an ELL softmax layer for this node.
        """
        layer_parameters = self.get_layer_parameters(conversion_parameters)
        return ell.neural.SoftmaxLayer(layer_parameters)

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Wrap the softmax layer in a SoftmaxLayerNode and insert it into
        the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Create the softmax layer
        softmax_layer = self.convert(conversion_parameters)
        # Get the port elements from the input
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # Add the SoftmaxLayerNode to the model
        ell_node = builder.AddSoftmaxLayerNode(model, input_port_elements, softmax_layer)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertUnaryOperation(ConvertBase):
    """
    Base converter for unary operations; subclasses supply the operator.
    """
    def __init__(self, node: ImporterNode, op: ell.nodes.UnaryOperationType):
        super().__init__(node)
        self.operator = op
        self.required_weights = []
        self.required_attributes = []

    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return None: unary operations have no layer form, only a node form.
        """
        return None

    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Insert a UnaryOperationNode applying this converter's operator to
        the node's input.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        source_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        op_node = builder.AddUnaryOperationNode(model, source_elements, self.operator)
        # Record the importer-node -> ELL-node mapping.
        lookup_table.add_imported_ell_node(self.importer_node, op_node)
class ConvertSigmoid(ConvertUnaryOperation):
    """
    Converter for the element-wise sigmoid operation.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.sigmoid)
class ConvertSign(ConvertUnaryOperation):
    """
    Converter for the element-wise sign operation.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.sign)
class ConvertHardSigmoid(ConvertUnaryOperation):
    """
    Converter for HardSigmoid operation
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.hardSigmoid)
class ConvertTanh(ConvertUnaryOperation):
    """
    Converter for the element-wise tanh operation.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.tanh)
class ConvertHardTanh(ConvertUnaryOperation):
    """
    Converter for HardTanh operation
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.hardTanh)
class ConvertAbs(ConvertUnaryOperation):
    """
    Converter for the element-wise absolute-value operation.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.abs)
class ConvertSqrt(ConvertUnaryOperation):
    """
    Converter for the element-wise square-root operation.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.sqrt)
class ConvertSquare(ConvertUnaryOperation):
    """
    Converter for Square operation
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.square)
class ConvertSin(ConvertUnaryOperation):
    """
    Converter for Sin operation
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.sin)
class ConvertCos(ConvertUnaryOperation):
    """
    Converter for Cos operation
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.cos)
class ConvertExp(ConvertUnaryOperation):
    """
    Converter for Exp operation
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node, ell.nodes.UnaryOperationType.exp)
class ConvertLog(ConvertUnaryOperation):
"""
Converter for Sigmoid operation
"""
def __init__(self, node: ImporterNode):
super().__init__(node, ell.nodes.UnaryOperationType.log)
class ConvertSplice(ConvertBase):
    """
    Converter for Splice, which concatenates its inputs along the dimension
    named by the "dimension_to_stack" attribute. Implemented as one
    ReorderDataNode per input (to put data in stacking order), a SpliceNode,
    and a final ReorderDataNode back into row/column/channel order with the
    required output padding.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["dimension_to_stack"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL node. Splice has no single-layer
        equivalent, so nothing is returned here; see convert_node.
        """
        return None
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Convert the importer node to the reorder/splice/reorder ELL node
        sequence and insert it into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        last_in_block = conversion_parameters["last_in_block"]
        pre_order = [0, 1, 2]
        post_order = [0, 1, 2]
        dimension_to_stack = self.importer_node.attributes["dimension_to_stack"]
        if dimension_to_stack == "channel":
            # When output from nodes are concatenated together in the
            # order (channel, row, column), they effectively stack in the
            # channel dimension.
            pre_order = [2, 0, 1]
        elif dimension_to_stack == "row":
            # When output from nodes are concatenated together in the
            # order (row, column, channel), they effectively stack in the
            # row dimension.
            pre_order = [0, 1, 2]
        elif dimension_to_stack == "column":
            # When output from nodes are concatenated together in the
            # order (column, row, channel), they effectively stack in the
            # column dimension.
            pre_order = [1, 0, 2]
        else:
            # Bug fix: this previously indexed self.required_attributes (a
            # list) with a string key, which raised a TypeError instead of
            # this intended error message.
            raise Exception("Splice does not yet support stacking along dimension {}, just row, column or channel"
                            .format(dimension_to_stack))
        # NOTE: The ReorderDataNodes that are inserted can be removed by the
        # optimizer if they're redundant
        # Loop over all inputs and for each, insert a reorder node to
        # put into specified order.
        reorder_nodes = []
        for input_index in range(len(self.importer_node.inputs)):
            # Create the reorder node
            input_node = lookup_table.get_owning_node_for_output(self.importer_node.inputs[input_index])
            input_port_elements = lookup_table.get_output_port_elements_for_node(input_node)
            # Take the active region of inputs
            port_elements, input_port_memory_layout = lookup_table.get_port_elements_and_memory_layout_for_input(
                self.importer_node, input_index)
            reorder_node = builder.AddReorderDataNode(model, input_port_elements, input_port_memory_layout,
                                                      input_port_memory_layout, pre_order)
            reorder_nodes.append(reorder_node)
            # Register the mapping
            lookup_table.add_imported_ell_node(self.importer_node, reorder_node)
        # Splice together the reorder nodes
        output_shape, output_padding = self.get_output_parameters(last_in_block)
        reordered_output_shape = ell.math.TensorShape(output_shape.channels, output_shape.rows, output_shape.columns)
        input_port_elements_list = []
        for ell_node in reorder_nodes:
            portElements = lookup_table.get_output_port_elements_for_node(ell_node)
            input_port_elements_list.append(portElements)
        splice_node = builder.AddSpliceNode(model, ell.nodes.PortElementsList(input_port_elements_list))
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, splice_node)
        # Insert a reorder node to to be in row, column, channel order with appropriate padding.
        port_elements = lookup_table.get_output_port_elements_for_node(splice_node)
        padding_size = output_padding.paddingSize
        reorderedPortMemoryLayout = ell.model.PortMemoryLayout(
            [reordered_output_shape.rows, reordered_output_shape.columns, reordered_output_shape.channels],
            [reordered_output_shape.rows, reordered_output_shape.columns, reordered_output_shape.channels],
            [0, 0, 0], pre_order)
        outputPortMemoryLayout = ell.model.PortMemoryLayout(
            [output_shape.rows, output_shape.columns, output_shape.channels],
            [output_shape.rows - 2 * padding_size, output_shape.columns - 2 * padding_size, output_shape.channels],
            [padding_size, padding_size, 0], post_order)
        final_reorder_node = builder.AddReorderDataNode(model, port_elements, reorderedPortMemoryLayout,
                                                        outputPortMemoryLayout, post_order, 0)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, final_reorder_node)
class ConvertReshape(ConvertBase):
    """
    Converter for Reshape. No ELL node is emitted: the importer output is
    simply re-pointed at the node that produced the reshape's input.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = []
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL node. Reshape has no layer form, so
        nothing is returned here; see convert_node.
        """
        return None
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Convert the importer node by aliasing its output to the input's
        producer rather than inserting a new ELL node.
        """
        lookup_table = conversion_parameters["lookup_table"]
        # Quick workaround for unnecessary reshapes: Set owner of this output
        # to be the reshape's input node
        input_owner = lookup_table.get_owning_node_for_output(self.importer_node.inputs[0])
        # set_group_id=False: presumably keeps the input node's original
        # group id rather than adopting this node's — TODO confirm.
        lookup_table.add_imported_ell_node(self.importer_node, input_owner, set_group_id=False)
class ConvertReorder(ConvertBase):
    """
    Converter for Reorder, which permutes the axes of its input according
    to the node's "order" attribute. (Docstring previously mislabeled this
    class as a Reshape converter.)
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ["order"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL node. Reorder has no layer form, so
        nothing is returned here; see convert_node.
        """
        return None
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Convert the importer node into an ELL ReorderDataNode and insert it
        into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # Normalize the permutation to a list of integers. The builtin int
        # is what np.int aliased before its removal in NumPy 1.24, so this
        # is behavior-identical on all NumPy versions.
        order = list(np.array(self.importer_node.attributes["order"]).astype(int))
        # Create the reorder node
        reorder_node = builder.AddReorderDataNode(model, input_port_elements, order)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, reorder_node)
class ConvertConstant(ConvertBase):
    """
    Converter for Constant nodes: emits an ELL ConstantNode holding the
    node's flattened tensor.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        self.required_attributes = ['tensor']
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL node. Constants have no layer form, so
        nothing is returned here; see convert_node.
        """
        return None
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Convert the importer node into an ELL ConstantNode and insert it
        into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        tensor = self.importer_node.attributes["tensor"]
        # Pick the ELL port type matching the tensor's dtype. The builtin
        # int/bool are exactly what np.int/np.bool aliased before their
        # removal in NumPy 1.24, so these comparisons are unchanged in
        # behavior but work on modern NumPy.
        port_type = ell.nodes.PortType.real
        if tensor.dtype == np.float32:
            port_type = ell.nodes.PortType.smallReal
        elif tensor.dtype == int:
            port_type = ell.nodes.PortType.integer
        elif tensor.dtype == np.int64:
            port_type = ell.nodes.PortType.bigInt
        elif tensor.dtype == bool:
            port_type = ell.nodes.PortType.boolean
        # Values are always handed to ELL as float64 regardless of port type.
        ell_node = builder.AddConstantNode(model, tensor.ravel().astype(np.float64), port_type)
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
class ConvertVAD(ConvertBase):
    """
    Converter for Voice Activity Detector. Emits an ELL
    VoiceActivityDetectorNode configured from the importer node's
    attributes.
    """
    def __init__(self, node: ImporterNode):
        super().__init__(node)
        self.required_weights = []
        # Every VAD tuning parameter must be present on the importer node.
        self.required_attributes = ["sampleRate", "frameDuration", "tauUp", "tauDown", "largeInput", "gainAtt",
                                    "thresholdUp", "thresholdDown", "levelThreshold"]
    def convert(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Return the appropriate ELL layer. VAD has no layer form, so this
        always raises; use convert_node instead.
        """
        raise Exception("No corresponding ELL layer for Voice Actvitity Detector (VAD). Use node instead.")
    def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]):
        """
        Convert the importer node into an ELL VoiceActivityDetectorNode and
        insert it into the model.
        """
        model = conversion_parameters["model"]
        builder = conversion_parameters["builder"]
        lookup_table = conversion_parameters["lookup_table"]
        # Pull the detector's tuning parameters off the importer node.
        sample_rate = self.importer_node.attributes["sampleRate"]
        frame_duration = self.importer_node.attributes["frameDuration"]
        tau_up = self.importer_node.attributes["tauUp"]
        tau_down = self.importer_node.attributes["tauDown"]
        large_input = self.importer_node.attributes["largeInput"]
        gain_att = self.importer_node.attributes["gainAtt"]
        threshold_up = self.importer_node.attributes["thresholdUp"]
        threshold_down = self.importer_node.attributes["thresholdDown"]
        level_threshold = self.importer_node.attributes["levelThreshold"]
        input_port_elements = lookup_table.get_port_elements_for_input(self.importer_node)
        # Create the VAD node
        ell_node = builder.AddVoiceActivityDetectorNode(
            model, input_port_elements,
            sample_rate, frame_duration, tau_up, tau_down, large_input, gain_att,
            threshold_up, threshold_down, level_threshold)
        # Register the mapping
        lookup_table.add_imported_ell_node(self.importer_node, ell_node)
| en | 0.74958 | #################################################################################################### # # Project: Embedded Learning Library (ELL) # File: converters.py (importers) # Authors: <NAME> # # Requires: Python 3.x # #################################################################################################### Common class for intermediate representation of nodes in the importer. The core importer engine can convert ImporterNodes into ELL Nodes and insert them into an ELL Model. id: unique identifier for this node operation_type: string name of the operation type to be imported. This will get mapped to an ELL operation via the operation_map. inputs: array of strings representing where the input comes from. The string is the 'id' of another ImporterNode. outputs: array of strings representing the output tensors. The string is the 'id' of another ImporterNode. weights: dictionary of weight parameter labels to weight names e.g. a convolutional node may have {'weights': 'w123', 'bias': 'b832'}. Dictionary keys are specific to the ELL operation. The value is the id of a tensor in ImporterModel.tensors. attributes: dictionary of attribute names and values e.g. a convolutional node may have {'size': 3, 'step': 1, 'pad': 0 }. Dictionary keys are specific to the ELL operation. padding: dictionary of padding size and padding scheme e.g. {"size": 0, "scheme": ell.neural.PaddingScheme.zeros} [chris] why isn't this just a type of attribute? input_shapes: array of tuples representing input shapes and ordering e.g. ((3,64,64), "channel_row_column"). The ImporterEngine will take care of reshaping everything to match the order required by ELL. output_shapes: array of tuples representing output shapes and ordering e.g. ((32,8,8), "channel_row_column"). metadata: optional additional metadata to store in the ell_nodes. 
A helper class that stores the typing.Mappings between: - tensor id to a tuple containing (tensor value, tensor order) - ELL id to ELL node. These get created during the conversion process. - importer node id to ELL ids. These get created during the conversion process. Note that one ImporterNode could get converted to multiple ELL nodes. In addition, there are convenience methods for accessing the tenspors in appropriate ELL order. # Stores mapping of ELL Node id string to ELL Node # Stores mapping of importer node id string to ELL Node id # Stores mapping of ell node id string to Importer Node # Stores mapping of output id string to owning ELL Node id # Stores mapping of tensor ids to numpy tensor instance # Stores input nodes. When creating an ELL map from an ELL model, # map inputs must be identified. # Stores output nodes When creating an ELL map from an ELL model, # map inputs must be identified. Adds an ImporterNode and associated ELL node to the lookup. # Add to mapping of ELL Node id to ELL Node # Add ImporterNode id to ELL Node id mapping # Add output id to owner mapping. # Set the node's metadata to show where this node came from # Also use this as the node's friendly name (by default) # concatenate any importer_node metadata provided by importer # Add owning id mapping Adds an ELL node to the lookup. Return the id of the last ELL node associated with this importer node. Return the last ELL node associated with this importer node. Returns a numpy array in ELL order Returns a single dimensional numpy array containing the tensor weights. If the tensor is actually a scalar, expand it to be a vector of length 'size'. # Workaround: For some reason, np.full is not returning a type that SWIG can parse. # So just manually walk the array setting the scalar Returns a single dimensional numpy array containing the tensor weights. Returns a tuple containing (shape, order) for the tensor. Returns an ell.nodes.PortElements for the corresponding ImporterNode. 
# First check whether this importer node has any corresponding # ELL nodes yet: # - If it does, grab the output of the last ELL node which # is designated as the input to this node. # - If it doesn't, grab the output of the last ELL node which # the Importer's input is tied to. Returns an (ell.nodes.PortElements, ell.nodes.PortMemoryLayout) for the corresponding input of the ImporterNode. Returns an ell.nodes.PortElements for the corresponding ELL node's output port that corresponds to 'output_label'. Gets the ELL node that owns the output identified by output_id. Gets the originating ImporterNode for the output identified by output_id. Sets the mapping for the ELL node that owns the output identified by output_id. Base class for converting an ImporterNode into an ELL Node Derived classes should initialize their required_inputs, required_weights and required_attributes Verify that the node contains the necessary inputs, weights and attributes to convert. Nodes that cannot be converted due to missing weights or attributes are deemed optional and are skipped. See comments in operation_map for examples. Return the input shape and padding parameters as a tuple. first_in_block - indicates whether this will be the first ell node in a block. If it is, it will have its padding requirements set differently. input_index - indicates the index of the input shape requested. Return the output shape and padding parameters as a tuple. last_in_block - indicates whether this will be the last ell node in a block. If it is, it will have its output padding set differently. Return the ELL layer parameters for this node. Return the shape in ELL canonical order Returns a weight tensor as an ELL tensor Returns a weight tensor as a 1 dimensional numpy array. If the original tensor is a scalar, it will be expanded to a vector of size equal to the number of output channels. Returns a weight tensor as an ELL vector. 
If the original tensor is a scalar, it will be expanded to a vector of size equal to the number of output channels. Derived classes override to return the appropriate ELL node Converter for Activation Return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Create the activation layer # Get the port elements from the input # Add the ActivationLayerNode to the model # Register the mapping Optional converter for Activation Converter for Average Pooling Return the appropriate ELL node # Create the ELL pooling layer Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Create the pooling layer # Get the port elements from the input # Add the PoolingLayerNode to the model # Register the mapping Converter for BatchNormalization Derived classes override to return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Create the batch normalization layer # Get the port elements from the input # Add the BatchNormalizationLayerNode to the model # Register the mapping Converter for Bias Derived classes override to return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Create the bias layer # Get the port elements from the input # Add the BiasLayerNode to the model # Register the mapping Optional converter for Bias Converter for BinaryConvolution Derived classes override to return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Create the convolutional layer # Get the port elements from the input # If we require padding but the input doesn't provide it # (which can happen when a single node output is used as input to # multiple nodes), ensure correct padding with a ReorderDataNode. 
# Create the reorder node # Register the mapping # Add the ConvolutionalLayerNode to the model # Register the mapping Converter for Convolution Derived classes override to return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Create the convolutional layer # Get the port elements from the input # If we require padding but the input doesn't provide it # (which can happen when a single node output is used as input to # multiple nodes), ensure correct padding with a ReorderDataNode. # Create the reorder node # Register the mapping # Add the ConvolutionalLayerNode to the model # Register the mapping Converter for FullyConnected Derived classes override to return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Create the fully connected layer # Get the port elements from the input # Add the FullyConnectedLayerNode to the model # Register the mapping Converter for Element Times, which is equivalent to Scaling Return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Create the scaling layer # Get the port elements from the input # Add the ScalingLayerNode to the model # Register the mapping Converter for Gated Recurrent Unit (GRU). If the GRU node has 2 inputs, the second input is used as the trigger, otherwise a constant node is inserted as the trigger. Return the appropriate ELL layer Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Get the port elements from the input # create constant nodes for the weights # Get the port elements for the reset trigger # Create a constant node as the trigger. The trigger fires on value change, # so will never fire in this case. 
# Add the GRUNode to the model # Register the mapping Converter for Fast Gated Recurrent Neural Network (FastGRNN). If the FastGRNN node has 2 inputs, the second input is used as the trigger, otherwise a constant node is inserted as the trigger. Return the appropriate ELL layer Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Get the port elements from the input # create constant nodes for the weights # Get the port elements for the reset trigger # Create a constant node as the trigger. The trigger fires on value change, # so will never fire in this case. # Add the GRUNode to the model # Register the mappings Converter for Input Derived classes override to return the appropriate ELL node # Skip processing the input. It is implicit when using # ELL Layers Derived classes override to return the appropriate ELL node # Add the InputNode to the model # in the steppable case the input is a clock ticks (which is a double) # Register the mapping # Special case: If output requires padding e.g. Input is connected to a # Convolutional node that requires padding, add a ReorderData node to # ensure proper memory layout. This can be skipped once Input supports # different memory layouts of the output. # Create the reorder node # Register the mapping Converter for explicit TypeCast Return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Get the port elements from the input # Add the TypeCastNode to the model # Register the mapping Converter for LeakyReLU, which is equivalent to Activation Converter for Long Short-Term Memory (LSTM) unit. If the LSTM node has 2 inputs, the second input is used as the trigger, otherwise a constant node is inserted as the trigger. 
Return the appropriate ELL layer Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Get the port elements from the input # create constant nodes for the weights # Get the port elements for the reset trigger # Create a constant node as the trigger. The trigger fires on value change, # so will never fire in this case. # Add the LSTMNode to the model # Register the mapping Converter for Max Pooling Return the appropriate ELL node # Create the ELL pooling layer Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Create the pooling layer # Get the port elements from the input # Add the PoolingLayerNode to the model # Register the mapping Converter for Minus, which is equivalent to a negative Bias Return the appropriate ELL node # Minus is a negative bias in ELL. Negate the bias values so we # can use an additive bias layer. Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Create the bias layer # Get the port elements from the input # Add the BiasLayerNode to the model # Register the mapping Converter for Passthrough, which has information on inputs and outputs but doesn't produce typing.Any ELL nodes/layers. It's sole purpose is to preserve connections between nodes during the conversion process. Return nothing Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Set owner of this output to be the Passthrough node's input node ## Passthrough node {}({}) has no inputs".format(self.importer_node.operation_type, Converter for Binary Operations Return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Get the port elements and memory layout from the two inputs. 
# Since the 2 inputs and output could have different padding, # we need both the port elements and the memory layouts for each. # see if the shapes match # Add the BinaryOperationNode to the model. # Register the mapping Converter for Plus Converter for Subtract which is subtracting one output from another. Converter for CoordinatewiseMultiply which is doing element-wise multiplication of two inputs. Converter for CoordinatewiseDivide which is doing element-wise division of two inputs. Converter for Pooling Return the appropriate ELL node Converter for PReLU, which is equivalent to Activation Return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Create the activation layer # Get the port elements from the input # Add the ActivationLayerNode to the model # Register the mapping Converter for ReLU, which is equivalent to Activation Converter for region detection layer Return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Create the region detection layer # Get the port elements from the input # Add the RegionDetectionLayerNode to the model # Register the mapping Converter for Scaling Return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Create the scaling layer # Get the port elements from the input # Add the ScalingLayerNode to the model # Register the mapping Optional converter for Scaling Converter for Softmax Return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Create the softmax layer # Get the port elements from the input # Add the SoftmaxLayerNode to the model # Register the mapping Converter for Unary Operators Return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL 
node(s) and insert into the model # Add the UnaryOperationNode to the model. # Register the mapping Converter for Sigmoid operation Converter for Sign operation Converter for Sigmoid operation Converter for tanh operation Converter for Sigmoid operation Converter for Abs operation Converter for Sqrt operation Converter for Sqrt operation Converter for Sqrt operation Converter for Sqrt operation Converter for Sigmoid operation Converter for Sigmoid operation Converter for Splice, which for now is Output followed by Reshape Return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # When output from nodes are concatenated together in the # order (channel, row, column), they effectively stack in the # channel dimension. # When output from nodes are concatenated together in the # order (row, column, channel), they effectively stack in the # row dimension. # When output from nodes are concatenated together in the # order (column, row, channel), they effectively stack in the # column dimension. # NOTE: The ReorderDataNodes that are inserted can be removed by the # optimizer if they're redundant # Loop over all inputs and for each, insert a reorder node to # put into specified order. # Create the reorder node # Take the active region of inputs # Register the mapping # Splice together the reorder nodes # Register the mapping # Insert a reorder node to to be in row, column, channel order with appropriate padding. 
# Register the mapping Converter for Reshape Return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Quick workaround for unnecessary reshapes: Set owner of this output # to be the reshape's input node Converter for Reshape Return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Create the reorder node # Register the mapping Converter for Constant nodes Return the appropriate ELL node Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model Converter for Voice Activity Detector. Return the appropriate ELL layer Derived classes override to convert the importer node to appropriate ELL node(s) and insert into the model # Create the VAD node # Register the mapping | 2.528478 | 3 |
malaya_speech/model/__init__.py | techthiyanes/malaya-speech | 0 | 6625185 | <gh_stars>0
# Malaya-Speech, Speech-Toolkit library for bahasa Malaysia
#
# Copyright (C) 2019 Malaya Project
# Licensed under the MIT License
# Author: huseinzol05 <<EMAIL>>
# URL: <https://malaya-speech.readthedocs.io/>
# For license information, see https://github.com/huseinzol05/malaya-speech/blob/master/LICENSE
| # Malaya-Speech, Speech-Toolkit library for bahasa Malaysia
#
# Copyright (C) 2019 Malaya Project
# Licensed under the MIT License
# Author: huseinzol05 <<EMAIL>>
# URL: <https://malaya-speech.readthedocs.io/>
# For license information, see https://github.com/huseinzol05/malaya-speech/blob/master/LICENSE | en | 0.463462 | # Malaya-Speech, Speech-Toolkit library for bahasa Malaysia # # Copyright (C) 2019 Malaya Project # Licensed under the MIT License # Author: huseinzol05 <<EMAIL>> # URL: <https://malaya-speech.readthedocs.io/> # For license information, see https://github.com/huseinzol05/malaya-speech/blob/master/LICENSE | 0.513157 | 1 |
duoquest/query.py | umich-dbgroup/duoquest | 4 | 6625186 | from numbers import Number
from .proto.duoquest_pb2 import *
from .schema import JoinEdge
from .external.process_sql import AGG_OPS, WHERE_OPS
def to_str_tribool(proto_tribool):
    """Map a proto tri-state value onto None (UNKNOWN), True, or False."""
    if proto_tribool == UNKNOWN:
        return None
    # Any non-UNKNOWN value collapses to a plain bool.
    return proto_tribool == TRUE
def to_proto_tribool(boolval):
    """Map None/True/False onto the proto tri-state enum."""
    if boolval is None:
        return UNKNOWN
    return TRUE if boolval else FALSE
def to_proto_set_op(set_op):
    """Translate a set-operation keyword into its proto enum value."""
    set_op_map = {
        'none': NO_SET_OP,
        'intersect': INTERSECT,
        'except': EXCEPT,
        'union': UNION,
    }
    if set_op not in set_op_map:
        raise Exception('Unknown set_op: {}'.format(set_op))
    return set_op_map[set_op]
def to_proto_agg(agg):
    """Translate an aggregate keyword into its proto enum value."""
    agg_map = {
        'none': NO_AGG,
        'max': MAX,
        'min': MIN,
        'count': COUNT,
        'sum': SUM,
        'avg': AVG,
    }
    if agg not in agg_map:
        raise Exception('Unrecognized agg: {}'.format(agg))
    return agg_map[agg]
def to_str_agg(proto_agg):
    """Translate a proto aggregate enum value back into its SQL keyword."""
    # NO_AGG deliberately has no string form; it falls through to the error.
    agg_map = {
        MAX: 'max',
        MIN: 'min',
        COUNT: 'count',
        SUM: 'sum',
        AVG: 'avg',
    }
    if proto_agg not in agg_map:
        raise Exception('Unrecognized agg: {}'.format(proto_agg))
    return agg_map[proto_agg]
def to_proto_logical_op(logical_op):
    """Translate a logical-connective keyword into its proto enum value."""
    op_map = {'and': AND, 'or': OR}
    if logical_op not in op_map:
        raise Exception('Unknown logical_op: {}'.format(logical_op))
    return op_map[logical_op]
def to_str_logical_op(proto_logical_op):
    """Translate a proto logical-connective value back into its keyword."""
    op_map = {AND: 'and', OR: 'or'}
    if proto_logical_op not in op_map:
        raise Exception('Unknown logical_op: {}'.format(proto_logical_op))
    return op_map[proto_logical_op]
def to_proto_old_op(not_op, op):
    """Translate a legacy (not_op, op) pair from process_sql into a proto
    operator. Only 'in' depends on the NOT flag."""
    if op == 'in':
        return NOT_IN if not_op else IN
    simple_ops = {
        'between': BETWEEN,
        '=': EQUALS,
        '>': GT,
        '<': LT,
        '>=': GEQ,
        '<=': LEQ,
        '!=': NEQ,
        'like': LIKE,
    }
    if op not in simple_ops:
        raise Exception('Unrecognized op: {}'.format(op))
    return simple_ops[op]
def to_proto_op(op):
    """Translate an operator string into its proto enum value."""
    op_map = {
        '=': EQUALS,
        '>': GT,
        '<': LT,
        '>=': GEQ,
        '<=': LEQ,
        '!=': NEQ,
        'like': LIKE,
        'in': IN,
        'not in': NOT_IN,
        'between': BETWEEN,
    }
    if op not in op_map:
        raise Exception('Unrecognized op: {}'.format(op))
    return op_map[op]
def to_str_op(proto_op):
    """Translate a proto operator enum value back into its SQL string."""
    op_map = {
        EQUALS: '=',
        GT: '>',
        LT: '<',
        GEQ: '>=',
        LEQ: '<=',
        NEQ: '!=',
        LIKE: 'like',
        IN: 'in',
        NOT_IN: 'not in',
        BETWEEN: 'between',
    }
    if proto_op not in op_map:
        raise Exception('Unrecognized op: {}'.format(proto_op))
    return op_map[proto_op]
def to_proto_dir(dir):
    """Translate an ORDER BY direction string into its proto enum value."""
    dir_map = {'desc': DESC, 'asc': ASC}
    if dir not in dir_map:
        raise Exception('Unrecognized dir: {}'.format(dir))
    return dir_map[dir]
def to_str_dir(proto_dir):
    """Translate a proto direction enum value back into its SQL keyword."""
    dir_map = {DESC: 'desc', ASC: 'asc'}
    if proto_dir not in dir_map:
        raise Exception('Unrecognized dir: {}'.format(proto_dir))
    return dir_map[proto_dir]
def gen_alias(alias_idx, alias_prefix):
    """Build a table alias such as 't3', optionally namespaced by a prefix
    (e.g. 'w0t3' for subqueries). A falsy prefix yields the bare form."""
    prefix = alias_prefix if alias_prefix else ''
    return '{}t{}'.format(prefix, alias_idx)
def from_clause_str(pq, schema, alias_prefix):
    """Render the FROM clause for proto query `pq`.

    Returns a tuple (clause, aliases): `clause` is the SQL text and
    `aliases` maps each table's syn_name to its generated alias.
    `alias_prefix` namespaces aliases (used for subqueries).
    """
    aliases = {}
    join_exprs = ['FROM']
    tables = list(map(lambda x: schema.get_table(x),
        pq.from_clause.edge_map.keys()))
    # Start traversal at the alphabetically-first table so the output is
    # deterministic regardless of edge_map iteration order.
    tbl = min(tables, key=lambda x: x.syn_name)
    # single table case, no aliases
    if len(tables) == 1:
        join_exprs.append(u'{}'.format(tbl.syn_name))
        return u' '.join(join_exprs), aliases
    alias = gen_alias(len(aliases) + 1, alias_prefix)
    aliases[tbl.syn_name] = alias
    join_exprs.append(u'{} AS {}'.format(tbl.syn_name, alias))
    # DFS over the join graph: every table reached for the first time gets
    # a fresh alias and a JOIN ... ON clause linking it to the table we
    # arrived from.
    stack = [tbl]
    while stack:
        tbl = stack.pop()
        for edge in pq.from_clause.edge_map[tbl.id].edges:
            edge = JoinEdge(
                schema.get_col(edge.fk_col_id),
                schema.get_col(edge.pk_col_id)
            )
            other_tbl = edge.other(tbl)
            # Skip tables already joined in (prevents cycles/duplicates).
            if other_tbl.syn_name in aliases:
                continue
            alias = gen_alias(len(aliases) + 1, alias_prefix)
            aliases[other_tbl.syn_name] = alias
            join_exprs.append(
                u'JOIN {} AS {} ON {}.{} = {}.{}'.format(
                    other_tbl.syn_name, alias,
                    aliases[tbl.syn_name], edge.key(tbl).syn_name,
                    aliases[other_tbl.syn_name], edge.key(other_tbl).syn_name
                )
            )
            stack.append(other_tbl)
    return u' '.join(join_exprs), aliases
def select_clause_str(pq, schema, aliases, select_aliases=None):
    """Build the SELECT clause for the protoquery.

    Aggregated columns render as agg(col); COUNT over a non-'*' column
    renders as count(DISTINCT col). If select_aliases is given, each
    aggregated projection gets 'AS <alias>' (used by verify_sql_str).
    """
    projs = []
    for i, agg_col in enumerate(pq.select):
        if agg_col.has_agg == TRUE:
            if agg_col.agg == COUNT and \
                    schema.get_col(agg_col.col_id).syn_name != '*':
                proj_str = u'{}(DISTINCT {})'.format(
                    to_str_agg(agg_col.agg),
                    schema.get_aliased_col(aliases, agg_col.col_id)
                )
            else:
                proj_str = u'{}({})'.format(
                    to_str_agg(agg_col.agg),
                    schema.get_aliased_col(aliases, agg_col.col_id)
                )
            if select_aliases:
                proj_str = f'{proj_str} AS {select_aliases[i]}'
            projs.append(proj_str)
        else:
            projs.append(schema.get_aliased_col(aliases, agg_col.col_id))
    if pq.distinct:
        return u'SELECT DISTINCT ' + ', '.join(projs)
    else:
        return u'SELECT ' + ', '.join(projs)
def where_clause_str(pq, schema, aliases, verify=None):
    """Build the WHERE clause, optionally AND-ing in verification predicates.

    Regular predicates come from pq.where (joined by the clause's single
    logical op); `verify` is a list of (agg_col, tsq constraint) pairs from
    verify_sql_str, where a list constraint means a [lo, hi] range and a
    scalar means exact equality. When both kinds are present, each group is
    parenthesized and the groups are ANDed.
    """
    where_exprs = []
    predicates = []
    for i, pred in enumerate(pq.where.predicates):
        if i != 0:
            predicates.append(to_str_logical_op(pq.where.logical_op))
        col_type = schema.get_col(pred.col_id).type
        where_val = None
        if pred.has_subquery == TRUE:
            # alias prefix 'w{i}' keeps subquery aliases distinct from ours
            where_val = u'({})'.format(
                generate_sql_str(pred.subquery, schema,
                                 alias_prefix='w{}'.format(i))
            )
        else:
            if not pred.value:
                raise Exception('Value is empty when generating where clause.')
            if pred.op in (IN, NOT_IN):
                where_val = u"({})".format(
                    u','.join(
                        map(lambda x: format_literal(col_type, x),
                            pred.value)
                    ))
            elif pred.op == BETWEEN:
                where_val = u"{} AND {}".format(
                    format_literal(col_type, pred.value[0]),
                    format_literal(col_type, pred.value[1])
                )
            else:
                where_val = format_literal(col_type, pred.value[0])
        pred_str = u' '.join([
            schema.get_aliased_col(aliases, pred.col_id),
            to_str_op(pred.op),
            where_val
        ])
        predicates.append(pred_str)
    verify_preds = []
    if verify:
        for i, item in enumerate(verify):
            agg_col, tsq_const = item
            assert(agg_col.has_agg == FALSE)
            assert(tsq_const is not None)
            col_type = schema.get_col(agg_col.col_id).type
            if col_type == 'number':
                # cast so numeric comparison ignores the stored text form
                where_col = 'CAST({} AS FLOAT)'.format(
                    schema.get_aliased_col(aliases, agg_col.col_id)
                )
            else:
                where_col = schema.get_aliased_col(aliases, agg_col.col_id)
            if isinstance(tsq_const, list):  # range constraint
                verify_preds.append(
                    u' '.join([where_col, '>=', str(tsq_const[0])])
                )
                verify_preds.append(
                    u' '.join([where_col, '<=', str(tsq_const[1])])
                )
            else:  # exact constraint
                verify_preds.append(u' '.join([
                    where_col,
                    '=',
                    format_literal(col_type, tsq_const)
                ]))
    if predicates and verify_preds:
        where_exprs.append(u'({})'.format(u' '.join(predicates)))
        where_exprs.append(u'({})'.format(u' AND '.join(verify_preds)))
    else:
        if predicates:
            where_exprs.append(u'{}'.format(u' '.join(predicates)))
        if verify_preds:
            where_exprs.append(u'{}'.format(u' AND '.join(verify_preds)))
    return u'WHERE {}'.format(u' AND '.join(where_exprs))
def group_by_clause_str(pq, schema, aliases):
    """Render the GROUP BY clause for the given protoquery."""
    cols = u', '.join(
        schema.get_aliased_col(aliases, col_id) for col_id in pq.group_by
    )
    return u'GROUP BY {}'.format(cols)
def having_clause_str(pq, schema, aliases, verify=None):
    """Build the HAVING clause, optionally AND-ing in verification predicates.

    Regular predicates come from pq.having and must carry an aggregate;
    literal values in HAVING are always formatted as numbers. `verify` is a
    list of (agg_col, tsq constraint) pairs from verify_sql_str — a list
    constraint means a [lo, hi] range, a scalar means exact equality. When
    both groups are present, each is parenthesized and the groups ANDed.
    """
    having_exprs = []
    predicates = []
    for i, pred in enumerate(pq.having.predicates):
        if i != 0:
            predicates.append(to_str_logical_op(pq.having.logical_op))
        assert(pred.has_agg == TRUE)
        if pred.agg == COUNT and \
                schema.get_col(pred.col_id).syn_name != '*':
            having_col = u'{}(DISTINCT {})'.format(
                to_str_agg(pred.agg),
                schema.get_aliased_col(aliases, pred.col_id)
            )
        else:
            having_col = u'{}({})'.format(
                to_str_agg(pred.agg),
                schema.get_aliased_col(aliases, pred.col_id)
            )
        col_type = schema.get_col(pred.col_id).type
        having_val = None
        if pred.has_subquery == TRUE:
            # alias prefix 'h{i}' keeps subquery aliases distinct from ours
            having_val = '({})'.format(
                generate_sql_str(pred.subquery, schema,
                                 alias_prefix='h{}'.format(i))
            )
        elif pred.op in (IN, NOT_IN):
            having_val = u"({})".format(
                u','.join(
                    map(lambda x: format_literal('number', x),
                        pred.value)
                ))
        elif pred.op == BETWEEN:
            having_val = u"{} AND {}".format(
                format_literal('number', pred.value[0]),
                format_literal('number', pred.value[1])
            )
        else:
            having_val = format_literal('number', pred.value[0])
        pred_str = u' '.join([having_col, to_str_op(pred.op), having_val])
        predicates.append(pred_str)
    verify_preds = []
    if verify:
        for i, item in enumerate(verify):
            agg_col, tsq_const = item
            assert(agg_col.has_agg == TRUE)
            assert(tsq_const is not None)
            col_type = schema.get_col(agg_col.col_id).type
            if agg_col.col_id == 0:
                # col_id 0 is the '*' column — no DISTINCT/CAST applies
                having_col = u'{}({})'.format(
                    to_str_agg(agg_col.agg),
                    schema.get_aliased_col(aliases, agg_col.col_id)
                )
            elif agg_col.agg == COUNT:
                having_col = u'{}(DISTINCT {})'.format(
                    to_str_agg(agg_col.agg),
                    schema.get_aliased_col(aliases, agg_col.col_id)
                )
            else:
                having_col = u'{}(DISTINCT CAST({} AS FLOAT))'.format(
                    to_str_agg(agg_col.agg),
                    schema.get_aliased_col(aliases, agg_col.col_id)
                )
            if isinstance(tsq_const, list):  # range constraint
                verify_preds.append(
                    u' '.join([having_col, '>=', str(tsq_const[0])])
                )
                verify_preds.append(
                    u' '.join([having_col, '<=', str(tsq_const[1])])
                )
            else:  # exact constraint
                verify_preds.append(u' '.join([
                    having_col,
                    '=',
                    format_literal('number', tsq_const)
                ]))
    if predicates and verify_preds:
        having_exprs.append(u'({})'.format(u' '.join(predicates)))
        having_exprs.append(u'({})'.format(u' AND '.join(verify_preds)))
    else:
        if predicates:
            having_exprs.append(u'{}'.format(u' '.join(predicates)))
        if verify_preds:
            having_exprs.append(u'{}'.format(u' AND '.join(verify_preds)))
    return u'HAVING {}'.format(u' AND '.join(having_exprs))
def order_by_clause_str(pq, schema, aliases):
    """Render the ORDER BY clause, wrapping any aggregate around its column."""
    parts = []
    for oc in pq.order_by:
        col_str = schema.get_aliased_col(aliases, oc.agg_col.col_id)
        if oc.agg_col.has_agg == TRUE:
            col_str = '{}({})'.format(to_str_agg(oc.agg_col.agg), col_str)
        parts.append('{} {}'.format(col_str, to_str_dir(oc.dir)))
    return u'ORDER BY {}'.format(u', '.join(parts))
def limit_clause_str(pq):
    """Render the LIMIT clause; NOTE: mutates pq, defaulting limit to 1."""
    if pq.limit == 0: # if not set, default to 1
        pq.limit = 1
    return u'LIMIT {}'.format(pq.limit)
def format_literal(type, literal):
    """Render a literal for inclusion in a SQL string.

    Python numbers pass through via str(). String literals first have
    apostrophes escaped (SQL '' convention); for 'number' columns the
    string must parse as a float and is emitted unquoted, otherwise it is
    single-quoted.

    Raises:
        InvalidValueException: if a string literal for a 'number' column
            cannot be parsed as a float.
    """
    if isinstance(literal, Number):
        return str(literal)
    # escape apostrophes
    literal = literal.replace("'", "''")
    if type == 'number':
        try:
            float(literal)
        except ValueError:
            # Only a parse failure is an invalid value; the previous broad
            # `except Exception` would have masked unrelated bugs too.
            raise InvalidValueException() from None
        return literal
    else:
        return f"'{literal}'"
def verify_sql_str(pq, schema, tsq_row, strict=False):
    """Build a 'SELECT 1 ... LIMIT 1' probe checking pq against one TSQ row.

    tsq_row holds one constraint per SELECT column: None (unconstrained),
    a scalar (exact match) or a [lo, hi] list (range). Non-aggregate
    constraints go into WHERE; aggregate constraints go into HAVING.
    Returns None when every entry of tsq_row is None.

    NOTE(review): `strict` is never read in this function — confirm whether
    it is a leftover or reserved for callers.
    """
    verify_agg = [] # tuples: (agg_col, tsq constraint)
    verify_non_agg = [] # tuples: (agg_col, tsq constraint)
    for i, agg_col in enumerate(pq.select):
        if tsq_row[i] is None:
            continue
        if agg_col.has_agg == TRUE:
            verify_agg.append((agg_col, tsq_row[i]))
        elif agg_col.has_agg == FALSE:
            verify_non_agg.append((agg_col, tsq_row[i]))
        else:
            raise Exception('Cannot verify AggCol with has_agg UNKNOWN.')
    if not verify_agg and not verify_non_agg:
        return None # nothing to verify!
    from_clause, aliases = from_clause_str(pq, schema, None)
    if from_clause is None:
        raise Exception('FROM clause not generated.')
    # Special Case: all aggregates and no group by, because SQLite does not
    # permit HAVING clause without a GROUP BY
    if verify_agg and not verify_non_agg and pq.has_group_by == FALSE:
        # Wrap the whole query as a subquery with aliased projections
        # (s0, s1, ...) and constrain those aliases in an outer WHERE.
        select_aliases = []
        where_preds = []
        for i, agg_col in enumerate(pq.select):
            tsq_const = tsq_row[i]
            select_alias = f's{i}'
            select_aliases.append(select_alias)
            if tsq_const is None:
                continue
            col_type = schema.get_col(agg_col.col_id).type
            if isinstance(tsq_const, list): # range constraint
                where_preds.append(
                    u' '.join([select_alias, '>=', str(tsq_const[0])])
                )
                where_preds.append(
                    u' '.join([select_alias, '<=', str(tsq_const[1])])
                )
            else: # exact constraint
                where_preds.append(u' '.join([
                    select_alias,
                    '=',
                    format_literal(col_type, tsq_const)
                ]))
        return 'SELECT 1 FROM ({}) WHERE {}'.format(
            generate_sql_str(pq, schema, select_aliases=select_aliases,
                             no_order_by=True),
            u' AND '.join(where_preds)
        )
    else:
        clauses = []
        clauses.append('SELECT 1')
        clauses.append(from_clause)
        if (pq.has_where == TRUE and pq.where.predicates) or verify_non_agg:
            clauses.append(where_clause_str(pq, schema, aliases,
                                            verify=verify_non_agg))
        if pq.has_group_by == TRUE and pq.done_group_by:
            clauses.append(group_by_clause_str(pq, schema, aliases))
        if (pq.has_having == TRUE and pq.having.predicates) or verify_agg:
            clauses.append(having_clause_str(pq, schema, aliases,
                                             verify=verify_agg))
        clauses.append('LIMIT 1')
        return u' '.join(clauses)
def generate_sql_str(pq, schema, alias_prefix=None, select_aliases=None,
                     no_order_by=False):
    """Render a full SQL string for the protoquery.

    Set-op queries recurse on pq.left/pq.right and join with the operator
    keyword (the right side's aliases get the operator's first letter as a
    prefix to avoid collisions). Otherwise clauses are emitted in SQL order;
    ORDER BY and LIMIT are suppressed when no_order_by is set (used by
    verify_sql_str when wrapping the query as a subquery).
    """
    if pq.set_op != NO_SET_OP:
        set_op_str = None
        if pq.set_op == INTERSECT:
            set_op_str = 'INTERSECT'
        elif pq.set_op == UNION:
            set_op_str = 'UNION'
        elif pq.set_op == EXCEPT:
            set_op_str = 'EXCEPT'
        return u'{} {} {}'.format(
            generate_sql_str(pq.left, schema),
            set_op_str,
            generate_sql_str(pq.right, schema, alias_prefix=set_op_str[0])
        )
    from_clause, aliases = from_clause_str(pq, schema, alias_prefix)
    if from_clause is None:
        raise Exception('FROM clause not generated.')
    clauses = []
    clauses.append(select_clause_str(pq, schema, aliases,
                                     select_aliases=select_aliases))
    clauses.append(from_clause)
    if pq.has_where == TRUE and pq.where.predicates:
        clauses.append(where_clause_str(pq, schema, aliases))
    if pq.has_group_by == TRUE:
        clauses.append(group_by_clause_str(pq, schema, aliases))
    if pq.has_having == TRUE and pq.having.predicates:
        clauses.append(having_clause_str(pq, schema, aliases))
    if pq.has_order_by == TRUE and not no_order_by:
        clauses.append(order_by_clause_str(pq, schema, aliases))
    if pq.has_limit == TRUE and not no_order_by:
        clauses.append(limit_clause_str(pq))
    return u' '.join(clauses)
# Get all tables used in PQ. Does not consider subqueries.
def get_tables(schema, pq):
    """Collect the set of schema tables referenced anywhere in pq.

    Scans SELECT, WHERE, GROUP BY, HAVING and ORDER BY; subqueries are NOT
    descended into. Columns whose table is None (the '*' column) are
    skipped.
    """
    # assuming no duplicate tables, change to list() if allowing self-join
    tables = set()
    for agg_col in pq.select:
        tbl = schema.get_col(agg_col.col_id).table
        if tbl: # check in case tbl is None for '*' column case
            tables.add(tbl)
    if pq.has_where == TRUE:
        for pred in pq.where.predicates:
            tbl = schema.get_col(pred.col_id).table
            if tbl:
                tables.add(tbl)
    if pq.has_group_by == TRUE:
        for col_id in pq.group_by:
            tbl = schema.get_col(col_id).table
            if tbl:
                tables.add(tbl)
    if pq.has_having == TRUE:
        for pred in pq.having.predicates:
            tbl = schema.get_col(pred.col_id).table
            if tbl:
                tables.add(tbl)
    if pq.has_order_by == TRUE:
        for ordered_col in pq.order_by:
            tbl = schema.get_col(ordered_col.agg_col.col_id).table
            if tbl:
                tables.add(tbl)
    return tables
# Only considers whether join path for current localized pq needs updating.
# Does not consider for subqueries or set op children
# Returns:
# - True: if join path needs to be and can be updated
# - False: if join path needs no updating
def join_path_needs_update(schema, pq):
    """Return True iff the current FROM join path must be recomputed.

    True when inference has started (SELECT non-empty) but no join path
    exists yet, or when the join path's tables are not a superset of the
    tables the protoquery references.
    """
    tables_in_cur_jp = set(map(lambda x: schema.get_table(x),
                               pq.from_clause.edge_map.keys()))
    # if SELECT has a column (i.e. inference started) and there are no tables
    if pq.select and len(tables_in_cur_jp) == 0:
        return True
    # if the current join path doesn't account for all tables in protoquery
    tables = get_tables(schema, pq)
    if tables_in_cur_jp >= tables:
        return False
    else:
        return True
def with_updated_join_paths(schema, pq, minimal_join_paths=False):
    """Return copies of pq, one per candidate join path over its tables.

    minimal_join_paths is forced off when the query contains COUNT(*)
    (col_id 0), since extra joins change the row count being counted.
    """
    for agg_col in pq.select:
        if agg_col.agg == COUNT and agg_col.col_id == 0:
            minimal_join_paths = False
    jps = schema.get_join_paths(get_tables(schema, pq),
                                minimal_join_paths=minimal_join_paths)
    new_pqs = []
    for jp in jps:
        new_pq = ProtoQuery()
        new_pq.CopyFrom(pq)
        set_proto_from(new_pq, jp)
        new_pqs.append(new_pq)
    return new_pqs
def set_proto_from(pq, jp):
    """Overwrite pq's FROM clause with join path jp (edge list + edge map).

    Clears any existing edges first; sets pq.distinct when the join path
    requires it. Mutates pq in place.
    """
    # reset from clause
    del pq.from_clause.edge_list.edges[:]
    for key in pq.from_clause.edge_map.keys():
        del pq.from_clause.edge_map[key]
    if jp.distinct:
        pq.distinct = True
    for edge in jp.edges:
        proto_edge = ProtoJoinEdge()
        proto_edge.fk_col_id = edge.fk_col.id
        proto_edge.pk_col_id = edge.pk_col.id
        pq.from_clause.edge_list.edges.append(proto_edge)
    for tbl, edges in jp.edge_map.items():
        # initialize table in protobuf even if edges don't exist
        pq.from_clause.edge_map.get_or_create(tbl.id)
        for edge in edges:
            proto_edge = ProtoJoinEdge()
            proto_edge.fk_col_id = edge.fk_col.id
            proto_edge.pk_col_id = edge.pk_col.id
            pq.from_clause.edge_map[tbl.id].edges.append(proto_edge)
class ColumnBinaryOpException(Exception):
    """Raised when a Spider column expression uses a binary op (unsupported)."""
    pass
class FromSubqueryException(Exception):
    """Raised when a Spider FROM clause contains a subquery (unsupported)."""
    pass
class MultipleLogicalOpException(Exception):
    """Raised when one WHERE/HAVING clause mixes both 'and' and 'or'."""
    pass
class MultipleOrderByException(Exception):
    """Raised when ORDER BY has more than one column (unsupported)."""
    pass
class SetOpException(Exception):
    """Raised for INTERSECT/EXCEPT/UNION queries (currently unsupported)."""
    pass
class InvalidValueException(Exception):
    """Raised when a literal value is unusable (e.g. non-numeric string
    for a number column)."""
    pass
class InvalidGroupByException(Exception):
    """Raised when agg/non-agg projections are inconsistent with GROUP BY."""
    pass
class AggTypeMismatchException(Exception):
    """Raised when a HAVING predicate lacks the required aggregate."""
    pass
class OpTypeMismatchException(Exception):
    """Operator/type mismatch (not raised in this module's visible code)."""
    pass
class SubqueryException(Exception):
    """Subquery-related error (not raised in this module's visible code)."""
    pass
class EmptyResultException(Exception):
    """Empty-result error (not raised in this module's visible code)."""
    pass
class WildcardColumnException(Exception):
    """Wildcard-column error (not raised in this module's visible code)."""
    pass
class UnsupportedColumnTypeException(Exception):
    """Unsupported column type (not raised in this module's visible code)."""
    pass
class ForeignKeyException(Exception):
    """Foreign-key error (not raised in this module's visible code)."""
    pass
class InconsistentPredicateException(Exception):
    """Raised when two '=' predicates target the same column in one WHERE."""
    pass
def load_pq_from_spider(schema, spider_sql, set_op=None):
    """Build a completed ProtoQuery from a Spider-format SQL parse dict.

    Walks the Spider structure clause by clause, normalizing foreign-key
    columns to their referenced primary keys (col.fk_ref), collecting the
    referenced tables, and finally computing a Steiner-tree join path over
    them. All done_* flags are set on the result.

    Raises the module's *Exception classes for Spider features this loader
    does not support (set ops, binary column expressions, FROM subqueries,
    mixed and/or, multi-column ORDER BY, etc.).
    """
    pq = ProtoQuery()
    if set_op is None:
        # Set-op queries are rejected; the commented-out code below is the
        # stubbed recursive handling kept for reference.
        if 'intersect' in spider_sql and spider_sql['intersect']:
            raise SetOpException()
            # pq.set_op = INTERSECT
            # pq.left = load_pq_from_spider(schema, spider_sql,
            #     set_op='intersect')
            # pq.right = load_pq_from_spider(schema, spider_sql['intersect'],
            #     set_op='intersect')
            return pq
        elif 'except' in spider_sql and spider_sql['except']:
            raise SetOpException()
            # pq.set_op = EXCEPT
            # pq.left = load_pq_from_spider(schema, spider_sql, set_op='except')
            # pq.right = load_pq_from_spider(schema, spider_sql['except'],
            #     set_op='except')
            return pq
        elif 'union' in spider_sql and spider_sql['union']:
            raise SetOpException()
            # pq.set_op = UNION
            # pq.left = load_pq_from_spider(schema, spider_sql, set_op='union')
            # pq.right = load_pq_from_spider(schema, spider_sql['union'],
            #     set_op='union')
            return pq
    tables = set()
    # SELECT
    pq.distinct = spider_sql['select'][0]
    agg_projs = []
    non_agg_projs = []
    for agg, val_unit in spider_sql['select'][1]:
        if val_unit[0] != 0:
            raise ColumnBinaryOpException()
        proj = pq.select.add()
        col = schema.get_col(val_unit[1][1])
        if col.fk_ref:
            # normalize FK columns to the PK they reference
            proj.col_id = col.fk_ref
            tables.add(schema.get_col(col.fk_ref).table)
        else:
            proj.col_id = col.id
            if col.id != 0:
                tables.add(col.table)
        proj.agg = to_proto_agg(AGG_OPS[agg])
        if proj.agg != NO_AGG:
            proj.has_agg = TRUE
            agg_projs.append(proj)
        else:
            proj.has_agg = FALSE
            non_agg_projs.append(proj)
    pq.min_select_cols = len(pq.select)
    # WHERE
    equality_cols = set()
    if 'where' in spider_sql and spider_sql['where']:
        pq.has_where = TRUE
        logical_op_set = False
        for cond in spider_sql['where']:
            if cond in ('and', 'or'):
                if logical_op_set and \
                        to_proto_logical_op(cond) != pq.where.logical_op:
                    raise MultipleLogicalOpException()
                else:
                    pq.where.logical_op = to_proto_logical_op(cond)
                    logical_op_set = True
            else:
                if cond[2][0] != 0:
                    raise ColumnBinaryOpException()
                pred = pq.where.predicates.add()
                pred.has_agg = FALSE
                col = schema.get_col(cond[2][1][1])
                if col.fk_ref:
                    pred.col_id = col.fk_ref
                    tables.add(schema.get_col(col.fk_ref).table)
                else:
                    pred.col_id = col.id
                    tables.add(col.table)
                pred.op = to_proto_old_op(cond[0], WHERE_OPS[cond[1]])
                if pred.op == EQUALS:
                    # two '=' predicates on one column can never both hold
                    if pred.col_id in equality_cols:
                        raise InconsistentPredicateException()
                    equality_cols.add(pred.col_id)
                if isinstance(cond[3], dict):
                    pred.has_subquery = TRUE
                    pred.subquery.CopyFrom(load_pq_from_spider(schema, cond[3]))
                elif isinstance(cond[3], Number) or isinstance(cond[3], str):
                    pred.has_subquery = FALSE
                    val_str = str(cond[3]).replace('"', '')
                    pred.value.append(val_str)
                else:
                    raise InvalidValueException()
                if cond[4] is not None:
                    # second value (BETWEEN upper bound)
                    pred.value.append(str(cond[4]))
        pq.min_where_preds = len(pq.where.predicates)
    else:
        pq.has_where = FALSE
    # GROUP BY
    if 'groupBy' in spider_sql and spider_sql['groupBy']:
        pq.has_group_by = TRUE
        for col_unit in spider_sql['groupBy']:
            col = schema.get_col(col_unit[1])
            if col.fk_ref:
                pq.group_by.append(col.fk_ref)
                tables.add(schema.get_col(col.fk_ref).table)
            else:
                pq.group_by.append(col.id)
                tables.add(col.table)
        pq.min_group_by_cols = len(pq.group_by)
    else:
        pq.has_group_by = FALSE
    # HAVING (only meaningful when a GROUP BY exists)
    if pq.has_group_by == TRUE:
        if 'having' in spider_sql and spider_sql['having']:
            pq.has_having = TRUE
            logical_op_set = False
            for cond in spider_sql['having']:
                if cond in ('and', 'or'):
                    if logical_op_set and \
                            to_proto_logical_op(cond) != pq.having.logical_op:
                        raise MultipleLogicalOpException()
                    else:
                        pq.having.logical_op = to_proto_logical_op(cond)
                        logical_op_set = True
                else:
                    if cond[2][0] != 0:
                        raise ColumnBinaryOpException()
                    pred = pq.having.predicates.add()
                    pred.has_agg = TRUE
                    pred.agg = to_proto_agg(AGG_OPS[cond[2][1][0]])
                    if pred.agg == NO_AGG:
                        raise AggTypeMismatchException()
                    col = schema.get_col(cond[2][1][1])
                    if col.fk_ref:
                        pred.col_id = col.fk_ref
                        tables.add(schema.get_col(col.fk_ref).table)
                    else:
                        pred.col_id = col.id
                        tables.add(col.table)
                    pred.op = to_proto_old_op(cond[0], WHERE_OPS[cond[1]])
                    if isinstance(cond[3], dict):
                        pred.has_subquery = TRUE
                        pred.subquery.CopyFrom(load_pq_from_spider(schema,
                                                                   cond[3]))
                    elif isinstance(cond[3], Number) or isinstance(cond[3],
                                                                   str):
                        pred.has_subquery = FALSE
                        val_str = str(cond[3]).replace('"', '')
                        pred.value.append(val_str)
                    else:
                        raise InvalidValueException()
                    if cond[4] is not None:
                        pred.value.append(str(cond[4]))
            pq.min_having_preds = len(pq.having.predicates)
        else:
            pq.has_having = FALSE
    # ORDER BY
    if 'orderBy' in spider_sql and spider_sql['orderBy']:
        pq.has_order_by = TRUE
        if len(spider_sql['orderBy'][1]) != 1:
            raise MultipleOrderByException()
        if spider_sql['orderBy'][1][0][0] != 0:
            raise ColumnBinaryOpException()
        order_col = pq.order_by.add()
        order_col.dir = to_proto_dir(spider_sql['orderBy'][0])
        order_col.agg_col.agg = to_proto_agg(
            AGG_OPS[spider_sql['orderBy'][1][0][1][0]])
        if order_col.agg_col.agg != NO_AGG:
            order_col.agg_col.has_agg = TRUE
        else:
            order_col.agg_col.has_agg = FALSE
        col = schema.get_col(spider_sql['orderBy'][1][0][1][1])
        if col.fk_ref:
            order_col.agg_col.col_id = col.fk_ref
            tables.add(schema.get_col(col.fk_ref).table)
        else:
            order_col.agg_col.col_id = col.id
            tables.add(col.table)
        pq.min_order_by_cols = len(pq.order_by)
    else:
        pq.has_order_by = FALSE
    # LIMIT (only meaningful when an ORDER BY exists)
    if pq.has_order_by == TRUE:
        if 'limit' in spider_sql and spider_sql['limit']:
            pq.has_limit = TRUE
            pq.limit = spider_sql['limit']
        else:
            pq.has_limit = FALSE
    if len(agg_projs) > 0 and len(non_agg_projs) > 0:
        # GROUP BY must exist if both agg and non_agg exist
        if pq.has_group_by == FALSE:
            raise InvalidGroupByException()
    elif len(agg_projs) > 0:
        # if only agg exists and there is GROUP BY,
        # add GROUP BY columns to projection
        if pq.has_group_by == TRUE:
            for col_id in pq.group_by:
                proj = pq.select.add()
                proj.has_agg = FALSE
                proj.col_id = col_id
    else:
        # if only non-agg exists and there is GROUP BY,
        # add aggregated columns from elsewhere to projection
        if pq.has_group_by == TRUE:
            added = False
            for pred in pq.having.predicates:
                proj = pq.select.add()
                proj.has_agg = TRUE
                proj.col_id = pred.col_id
                proj.agg = pred.agg
                added = True
            for oc in pq.order_by:
                if oc.agg_col.has_agg == TRUE:
                    proj = pq.select.add()
                    proj.CopyFrom(oc.agg_col)
                    added = True
            if not added:
                raise InvalidGroupByException()
    # FROM
    self_join_check = set()  # NOTE(review): unused — presumably intended for
                             # self-join detection; confirm before removing
    for tbl_unit in spider_sql['from']['table_units']:
        if tbl_unit[0] != 'table_unit':
            raise FromSubqueryException()
        tables.add(schema.get_table(tbl_unit[1]))
    # compute a minimal join path (Steiner tree) over all referenced tables
    jp = schema.steiner(tables)
    set_proto_from(pq, jp)
    pq.done_select = True
    pq.done_where = True
    pq.done_group_by = True
    pq.done_having = True
    pq.done_order_by = True
    pq.done_limit = True
    pq.done_query = True
    return pq
class Query():
    """Thin wrapper pairing a ProtoQuery with its database schema."""
    def __init__(self, schema, protoquery=None):
        # schema is shared by reference; a fresh ProtoQuery is created
        # when none is supplied
        self.schema = schema
        if protoquery is None:
            protoquery = ProtoQuery()
        self.pq = protoquery
    def copy(self):
        """Return a copy sharing the schema but deep-copying the protoquery."""
        new_query = Query(self.schema)
        new_query.pq.CopyFrom(self.pq)
        return new_query
    @staticmethod
    def from_spider(schema, spider_sql):
        """Construct a Query from a Spider-format SQL parse dict."""
        new_query = Query(schema)
        new_query.pq = load_pq_from_spider(schema, spider_sql)
        return new_query
| from numbers import Number
from .proto.duoquest_pb2 import *
from .schema import JoinEdge
from .external.process_sql import AGG_OPS, WHERE_OPS
def to_str_tribool(proto_tribool):
    """Convert a protobuf tri-state value to Python None/True/False."""
    if proto_tribool == UNKNOWN:
        return None
    return proto_tribool == TRUE
def to_proto_tribool(boolval):
    """Convert Python None/True/False to the protobuf tri-state value."""
    if boolval is None:
        return UNKNOWN
    return TRUE if boolval else FALSE
def to_proto_set_op(set_op):
    """Map a set-operation name to its protobuf constant."""
    ops = {
        'none': NO_SET_OP,
        'intersect': INTERSECT,
        'except': EXCEPT,
        'union': UNION,
    }
    if set_op not in ops:
        raise Exception('Unknown set_op: {}'.format(set_op))
    return ops[set_op]
def to_proto_agg(agg):
    """Map an aggregate name ('none'/'max'/...) to its protobuf constant."""
    aggs = {
        'none': NO_AGG,
        'max': MAX,
        'min': MIN,
        'count': COUNT,
        'sum': SUM,
        'avg': AVG,
    }
    if agg not in aggs:
        raise Exception('Unrecognized agg: {}'.format(agg))
    return aggs[agg]
def to_str_agg(proto_agg):
    """Map a protobuf aggregate constant back to its SQL name.

    NO_AGG has no string form and is rejected like any unknown value.
    """
    agg_strs = {
        MAX: 'max',
        MIN: 'min',
        COUNT: 'count',
        SUM: 'sum',
        AVG: 'avg',
    }
    agg_str = agg_strs.get(proto_agg)
    if agg_str is None:
        raise Exception('Unrecognized agg: {}'.format(proto_agg))
    return agg_str
def to_proto_logical_op(logical_op):
    """Map 'and'/'or' to the corresponding protobuf constant."""
    ops = {'and': AND, 'or': OR}
    if logical_op not in ops:
        raise Exception('Unknown logical_op: {}'.format(logical_op))
    return ops[logical_op]
def to_str_logical_op(proto_logical_op):
    """Map a protobuf logical-op constant back to 'and'/'or'."""
    op_strs = {AND: 'and', OR: 'or'}
    if proto_logical_op not in op_strs:
        raise Exception('Unknown logical_op: {}'.format(proto_logical_op))
    return op_strs[proto_logical_op]
def to_proto_old_op(not_op, op):
    """Map a Spider (not_op, op) pair to a protobuf operator constant.

    NOTE(review): `not_op` is only honored for 'in' (yielding NOT_IN); it
    is silently ignored for every other operator — confirm this matches
    how Spider's parser uses not_op.
    """
    if op == 'between':
        return BETWEEN
    elif op == '=':
        return EQUALS
    elif op == '>':
        return GT
    elif op == '<':
        return LT
    elif op == '>=':
        return GEQ
    elif op == '<=':
        return LEQ
    elif op == '!=':
        return NEQ
    elif op == 'in' and not not_op:
        return IN
    elif op == 'in' and not_op:
        return NOT_IN
    elif op == 'like':
        return LIKE
    else:
        raise Exception('Unrecognized op: {}'.format(op))
def to_proto_op(op):
    """Map a SQL operator string to its protobuf constant."""
    ops = {
        '=': EQUALS,
        '>': GT,
        '<': LT,
        '>=': GEQ,
        '<=': LEQ,
        '!=': NEQ,
        'like': LIKE,
        'in': IN,
        'not in': NOT_IN,
        'between': BETWEEN,
    }
    if op not in ops:
        raise Exception('Unrecognized op: {}'.format(op))
    return ops[op]
def to_str_op(proto_op):
    """Return the SQL string form of a protobuf comparison operator."""
    op_strs = {
        EQUALS: '=',
        GT: '>',
        LT: '<',
        GEQ: '>=',
        LEQ: '<=',
        NEQ: '!=',
        LIKE: 'like',
        IN: 'in',
        NOT_IN: 'not in',
        BETWEEN: 'between',
    }
    op_str = op_strs.get(proto_op)
    if op_str is None:
        raise Exception('Unrecognized op: {}'.format(proto_op))
    return op_str
def to_proto_dir(dir):
    """Map an ORDER BY direction string ('desc'/'asc') to its proto constant."""
    dirs = {'desc': DESC, 'asc': ASC}
    if dir not in dirs:
        raise Exception('Unrecognized dir: {}'.format(dir))
    return dirs[dir]
def to_str_dir(proto_dir):
    """Map a protobuf direction constant back to 'desc'/'asc'."""
    dir_strs = {DESC: 'desc', ASC: 'asc'}
    if proto_dir not in dir_strs:
        raise Exception('Unrecognized dir: {}'.format(proto_dir))
    return dir_strs[proto_dir]
def gen_alias(alias_idx, alias_prefix):
    """Build a table alias like 't1', optionally namespaced by a prefix."""
    prefix = alias_prefix if alias_prefix else ''
    return '{}t{}'.format(prefix, alias_idx)
def from_clause_str(pq, schema, alias_prefix):
    """Build the FROM clause (with JOINs) from the protoquery's join graph.

    Walks pq.from_clause.edge_map depth-first starting from the table whose
    syn_name sorts first, assigning aliases in discovery order via gen_alias.

    Returns:
        (clause_str, aliases): the 'FROM ... JOIN ...' string, and a dict
        mapping table syn_name -> alias (empty in the single-table case,
        where no aliases are emitted).
    """
    aliases = {}
    join_exprs = ['FROM']
    tables = list(map(lambda x: schema.get_table(x),
                      pq.from_clause.edge_map.keys()))
    tbl = min(tables, key=lambda x: x.syn_name)
    # single table case, no aliases
    if len(tables) == 1:
        join_exprs.append(u'{}'.format(tbl.syn_name))
        return u' '.join(join_exprs), aliases
    alias = gen_alias(len(aliases) + 1, alias_prefix)
    aliases[tbl.syn_name] = alias
    join_exprs.append(u'{} AS {}'.format(tbl.syn_name, alias))
    stack = [tbl]
    while stack:
        tbl = stack.pop()
        for edge in pq.from_clause.edge_map[tbl.id].edges:
            # rehydrate the proto edge into a schema-level JoinEdge
            edge = JoinEdge(
                schema.get_col(edge.fk_col_id),
                schema.get_col(edge.pk_col_id)
            )
            other_tbl = edge.other(tbl)
            if other_tbl.syn_name in aliases:
                continue  # already joined in; avoid revisiting
            alias = gen_alias(len(aliases) + 1, alias_prefix)
            aliases[other_tbl.syn_name] = alias
            join_exprs.append(
                u'JOIN {} AS {} ON {}.{} = {}.{}'.format(
                    other_tbl.syn_name, alias,
                    aliases[tbl.syn_name], edge.key(tbl).syn_name,
                    aliases[other_tbl.syn_name], edge.key(other_tbl).syn_name
                )
            )
            stack.append(other_tbl)
    return u' '.join(join_exprs), aliases
def select_clause_str(pq, schema, aliases, select_aliases=None):
    """Build the SELECT clause for the protoquery.

    Aggregated columns render as agg(col); COUNT over a non-'*' column
    renders as count(DISTINCT col). If select_aliases is given, each
    aggregated projection gets 'AS <alias>' (used by verify_sql_str).
    """
    projs = []
    for i, agg_col in enumerate(pq.select):
        if agg_col.has_agg == TRUE:
            if agg_col.agg == COUNT and \
                    schema.get_col(agg_col.col_id).syn_name != '*':
                proj_str = u'{}(DISTINCT {})'.format(
                    to_str_agg(agg_col.agg),
                    schema.get_aliased_col(aliases, agg_col.col_id)
                )
            else:
                proj_str = u'{}({})'.format(
                    to_str_agg(agg_col.agg),
                    schema.get_aliased_col(aliases, agg_col.col_id)
                )
            if select_aliases:
                proj_str = f'{proj_str} AS {select_aliases[i]}'
            projs.append(proj_str)
        else:
            projs.append(schema.get_aliased_col(aliases, agg_col.col_id))
    if pq.distinct:
        return u'SELECT DISTINCT ' + ', '.join(projs)
    else:
        return u'SELECT ' + ', '.join(projs)
def where_clause_str(pq, schema, aliases, verify=None):
    """Build the WHERE clause, optionally AND-ing in verification predicates.

    Regular predicates come from pq.where (joined by the clause's single
    logical op); `verify` is a list of (agg_col, tsq constraint) pairs from
    verify_sql_str, where a list constraint means a [lo, hi] range and a
    scalar means exact equality. When both kinds are present, each group is
    parenthesized and the groups are ANDed.
    """
    where_exprs = []
    predicates = []
    for i, pred in enumerate(pq.where.predicates):
        if i != 0:
            predicates.append(to_str_logical_op(pq.where.logical_op))
        col_type = schema.get_col(pred.col_id).type
        where_val = None
        if pred.has_subquery == TRUE:
            # alias prefix 'w{i}' keeps subquery aliases distinct from ours
            where_val = u'({})'.format(
                generate_sql_str(pred.subquery, schema,
                                 alias_prefix='w{}'.format(i))
            )
        else:
            if not pred.value:
                raise Exception('Value is empty when generating where clause.')
            if pred.op in (IN, NOT_IN):
                where_val = u"({})".format(
                    u','.join(
                        map(lambda x: format_literal(col_type, x),
                            pred.value)
                    ))
            elif pred.op == BETWEEN:
                where_val = u"{} AND {}".format(
                    format_literal(col_type, pred.value[0]),
                    format_literal(col_type, pred.value[1])
                )
            else:
                where_val = format_literal(col_type, pred.value[0])
        pred_str = u' '.join([
            schema.get_aliased_col(aliases, pred.col_id),
            to_str_op(pred.op),
            where_val
        ])
        predicates.append(pred_str)
    verify_preds = []
    if verify:
        for i, item in enumerate(verify):
            agg_col, tsq_const = item
            assert(agg_col.has_agg == FALSE)
            assert(tsq_const is not None)
            col_type = schema.get_col(agg_col.col_id).type
            if col_type == 'number':
                # cast so numeric comparison ignores the stored text form
                where_col = 'CAST({} AS FLOAT)'.format(
                    schema.get_aliased_col(aliases, agg_col.col_id)
                )
            else:
                where_col = schema.get_aliased_col(aliases, agg_col.col_id)
            if isinstance(tsq_const, list):  # range constraint
                verify_preds.append(
                    u' '.join([where_col, '>=', str(tsq_const[0])])
                )
                verify_preds.append(
                    u' '.join([where_col, '<=', str(tsq_const[1])])
                )
            else:  # exact constraint
                verify_preds.append(u' '.join([
                    where_col,
                    '=',
                    format_literal(col_type, tsq_const)
                ]))
    if predicates and verify_preds:
        where_exprs.append(u'({})'.format(u' '.join(predicates)))
        where_exprs.append(u'({})'.format(u' AND '.join(verify_preds)))
    else:
        if predicates:
            where_exprs.append(u'{}'.format(u' '.join(predicates)))
        if verify_preds:
            where_exprs.append(u'{}'.format(u' AND '.join(verify_preds)))
    return u'WHERE {}'.format(u' AND '.join(where_exprs))
def group_by_clause_str(pq, schema, aliases):
    """Render the GROUP BY clause for the given protoquery."""
    cols = u', '.join(
        schema.get_aliased_col(aliases, col_id) for col_id in pq.group_by
    )
    return u'GROUP BY {}'.format(cols)
def having_clause_str(pq, schema, aliases, verify=None):
    """Build the HAVING clause, optionally AND-ing in verification predicates.

    Regular predicates come from pq.having and must carry an aggregate;
    literal values in HAVING are always formatted as numbers. `verify` is a
    list of (agg_col, tsq constraint) pairs from verify_sql_str — a list
    constraint means a [lo, hi] range, a scalar means exact equality. When
    both groups are present, each is parenthesized and the groups ANDed.
    """
    having_exprs = []
    predicates = []
    for i, pred in enumerate(pq.having.predicates):
        if i != 0:
            predicates.append(to_str_logical_op(pq.having.logical_op))
        assert(pred.has_agg == TRUE)
        if pred.agg == COUNT and \
                schema.get_col(pred.col_id).syn_name != '*':
            having_col = u'{}(DISTINCT {})'.format(
                to_str_agg(pred.agg),
                schema.get_aliased_col(aliases, pred.col_id)
            )
        else:
            having_col = u'{}({})'.format(
                to_str_agg(pred.agg),
                schema.get_aliased_col(aliases, pred.col_id)
            )
        col_type = schema.get_col(pred.col_id).type
        having_val = None
        if pred.has_subquery == TRUE:
            # alias prefix 'h{i}' keeps subquery aliases distinct from ours
            having_val = '({})'.format(
                generate_sql_str(pred.subquery, schema,
                                 alias_prefix='h{}'.format(i))
            )
        elif pred.op in (IN, NOT_IN):
            having_val = u"({})".format(
                u','.join(
                    map(lambda x: format_literal('number', x),
                        pred.value)
                ))
        elif pred.op == BETWEEN:
            having_val = u"{} AND {}".format(
                format_literal('number', pred.value[0]),
                format_literal('number', pred.value[1])
            )
        else:
            having_val = format_literal('number', pred.value[0])
        pred_str = u' '.join([having_col, to_str_op(pred.op), having_val])
        predicates.append(pred_str)
    verify_preds = []
    if verify:
        for i, item in enumerate(verify):
            agg_col, tsq_const = item
            assert(agg_col.has_agg == TRUE)
            assert(tsq_const is not None)
            col_type = schema.get_col(agg_col.col_id).type
            if agg_col.col_id == 0:
                # col_id 0 is the '*' column — no DISTINCT/CAST applies
                having_col = u'{}({})'.format(
                    to_str_agg(agg_col.agg),
                    schema.get_aliased_col(aliases, agg_col.col_id)
                )
            elif agg_col.agg == COUNT:
                having_col = u'{}(DISTINCT {})'.format(
                    to_str_agg(agg_col.agg),
                    schema.get_aliased_col(aliases, agg_col.col_id)
                )
            else:
                having_col = u'{}(DISTINCT CAST({} AS FLOAT))'.format(
                    to_str_agg(agg_col.agg),
                    schema.get_aliased_col(aliases, agg_col.col_id)
                )
            if isinstance(tsq_const, list):  # range constraint
                verify_preds.append(
                    u' '.join([having_col, '>=', str(tsq_const[0])])
                )
                verify_preds.append(
                    u' '.join([having_col, '<=', str(tsq_const[1])])
                )
            else:  # exact constraint
                verify_preds.append(u' '.join([
                    having_col,
                    '=',
                    format_literal('number', tsq_const)
                ]))
    if predicates and verify_preds:
        having_exprs.append(u'({})'.format(u' '.join(predicates)))
        having_exprs.append(u'({})'.format(u' AND '.join(verify_preds)))
    else:
        if predicates:
            having_exprs.append(u'{}'.format(u' '.join(predicates)))
        if verify_preds:
            having_exprs.append(u'{}'.format(u' AND '.join(verify_preds)))
    return u'HAVING {}'.format(u' AND '.join(having_exprs))
def order_by_clause_str(pq, schema, aliases):
    """Render the ORDER BY clause, wrapping any aggregate around its column."""
    parts = []
    for oc in pq.order_by:
        col_str = schema.get_aliased_col(aliases, oc.agg_col.col_id)
        if oc.agg_col.has_agg == TRUE:
            col_str = '{}({})'.format(to_str_agg(oc.agg_col.agg), col_str)
        parts.append('{} {}'.format(col_str, to_str_dir(oc.dir)))
    return u'ORDER BY {}'.format(u', '.join(parts))
def limit_clause_str(pq):
    """Render the LIMIT clause; NOTE: mutates pq, defaulting limit to 1."""
    if pq.limit == 0: # if not set, default to 1
        pq.limit = 1
    return u'LIMIT {}'.format(pq.limit)
def format_literal(type, literal):
    """Render a literal for inclusion in a SQL string.

    Python numbers pass through via str(). String literals first have
    apostrophes escaped (SQL '' convention); for 'number' columns the
    string must parse as a float and is emitted unquoted, otherwise it is
    single-quoted.

    Raises:
        InvalidValueException: if a string literal for a 'number' column
            cannot be parsed as a float.
    """
    if isinstance(literal, Number):
        return str(literal)
    # escape apostrophes
    literal = literal.replace("'", "''")
    if type == 'number':
        try:
            float(literal)
        except ValueError:
            # Only a parse failure is an invalid value; the previous broad
            # `except Exception` would have masked unrelated bugs too.
            raise InvalidValueException() from None
        return literal
    else:
        return f"'{literal}'"
def verify_sql_str(pq, schema, tsq_row, strict=False):
    """Build a 'SELECT 1 ... LIMIT 1' probe checking pq against one TSQ row.

    tsq_row holds one constraint per SELECT column: None (unconstrained),
    a scalar (exact match) or a [lo, hi] list (range). Non-aggregate
    constraints go into WHERE; aggregate constraints go into HAVING.
    Returns None when every entry of tsq_row is None.

    NOTE(review): `strict` is never read in this function — confirm whether
    it is a leftover or reserved for callers.
    """
    verify_agg = [] # tuples: (agg_col, tsq constraint)
    verify_non_agg = [] # tuples: (agg_col, tsq constraint)
    for i, agg_col in enumerate(pq.select):
        if tsq_row[i] is None:
            continue
        if agg_col.has_agg == TRUE:
            verify_agg.append((agg_col, tsq_row[i]))
        elif agg_col.has_agg == FALSE:
            verify_non_agg.append((agg_col, tsq_row[i]))
        else:
            raise Exception('Cannot verify AggCol with has_agg UNKNOWN.')
    if not verify_agg and not verify_non_agg:
        return None # nothing to verify!
    from_clause, aliases = from_clause_str(pq, schema, None)
    if from_clause is None:
        raise Exception('FROM clause not generated.')
    # Special Case: all aggregates and no group by, because SQLite does not
    # permit HAVING clause without a GROUP BY
    if verify_agg and not verify_non_agg and pq.has_group_by == FALSE:
        # Wrap the whole query as a subquery with aliased projections
        # (s0, s1, ...) and constrain those aliases in an outer WHERE.
        select_aliases = []
        where_preds = []
        for i, agg_col in enumerate(pq.select):
            tsq_const = tsq_row[i]
            select_alias = f's{i}'
            select_aliases.append(select_alias)
            if tsq_const is None:
                continue
            col_type = schema.get_col(agg_col.col_id).type
            if isinstance(tsq_const, list): # range constraint
                where_preds.append(
                    u' '.join([select_alias, '>=', str(tsq_const[0])])
                )
                where_preds.append(
                    u' '.join([select_alias, '<=', str(tsq_const[1])])
                )
            else: # exact constraint
                where_preds.append(u' '.join([
                    select_alias,
                    '=',
                    format_literal(col_type, tsq_const)
                ]))
        return 'SELECT 1 FROM ({}) WHERE {}'.format(
            generate_sql_str(pq, schema, select_aliases=select_aliases,
                             no_order_by=True),
            u' AND '.join(where_preds)
        )
    else:
        clauses = []
        clauses.append('SELECT 1')
        clauses.append(from_clause)
        if (pq.has_where == TRUE and pq.where.predicates) or verify_non_agg:
            clauses.append(where_clause_str(pq, schema, aliases,
                                            verify=verify_non_agg))
        if pq.has_group_by == TRUE and pq.done_group_by:
            clauses.append(group_by_clause_str(pq, schema, aliases))
        if (pq.has_having == TRUE and pq.having.predicates) or verify_agg:
            clauses.append(having_clause_str(pq, schema, aliases,
                                             verify=verify_agg))
        clauses.append('LIMIT 1')
        return u' '.join(clauses)
def generate_sql_str(pq, schema, alias_prefix=None, select_aliases=None,
    no_order_by=False):
    """Render the protoquery *pq* as a SQL string.

    Set operations recurse into both children; the right child gets an
    alias prefix derived from the operator name to keep aliases distinct.
    ``select_aliases``, when given, is forwarded to the SELECT clause
    renderer; ``no_order_by`` suppresses both ORDER BY and LIMIT (LIMIT
    without ORDER BY is not meaningful here).
    """
    if pq.set_op != NO_SET_OP:
        set_op_str = None
        if pq.set_op == INTERSECT:
            set_op_str = 'INTERSECT'
        elif pq.set_op == UNION:
            set_op_str = 'UNION'
        elif pq.set_op == EXCEPT:
            set_op_str = 'EXCEPT'
        return u'{} {} {}'.format(
            generate_sql_str(pq.left, schema),
            set_op_str,
            generate_sql_str(pq.right, schema, alias_prefix=set_op_str[0])
        )
    from_clause, aliases = from_clause_str(pq, schema, alias_prefix)
    if from_clause is None:
        raise Exception('FROM clause not generated.')
    clauses = []
    clauses.append(select_clause_str(pq, schema, aliases,
        select_aliases=select_aliases))
    clauses.append(from_clause)
    if pq.has_where == TRUE and pq.where.predicates:
        clauses.append(where_clause_str(pq, schema, aliases))
    if pq.has_group_by == TRUE:
        clauses.append(group_by_clause_str(pq, schema, aliases))
    if pq.has_having == TRUE and pq.having.predicates:
        clauses.append(having_clause_str(pq, schema, aliases))
    if pq.has_order_by == TRUE and not no_order_by:
        clauses.append(order_by_clause_str(pq, schema, aliases))
    if pq.has_limit == TRUE and not no_order_by:
        clauses.append(limit_clause_str(pq))
    return u' '.join(clauses)
# Get all tables used in PQ. Does not consider subqueries.
def get_tables(schema, pq):
    """Return the set of tables referenced by any clause of *pq*.

    Subqueries are not inspected. Assumes no duplicate tables; switch to a
    list if self-joins ever need to be supported.
    """
    tables = set()

    def _add_table_of(col_id):
        tbl = schema.get_col(col_id).table
        if tbl:  # tbl is None for the '*' column case
            tables.add(tbl)

    for agg_col in pq.select:
        _add_table_of(agg_col.col_id)
    if pq.has_where == TRUE:
        for pred in pq.where.predicates:
            _add_table_of(pred.col_id)
    if pq.has_group_by == TRUE:
        for col_id in pq.group_by:
            _add_table_of(col_id)
    if pq.has_having == TRUE:
        for pred in pq.having.predicates:
            _add_table_of(pred.col_id)
    if pq.has_order_by == TRUE:
        for ordered_col in pq.order_by:
            _add_table_of(ordered_col.agg_col.col_id)
    return tables
# Only considers whether join path for current localized pq needs updating.
# Does not consider for subqueries or set op children
def join_path_needs_update(schema, pq):
    """Return True when the current join path must be (re)computed.

    True means the join path needs to be and can be updated; False means
    it already covers every table referenced by the protoquery.
    """
    tables_in_cur_jp = set(map(lambda x: schema.get_table(x),
                               pq.from_clause.edge_map.keys()))
    # if SELECT has a column (i.e. inference started) and there are no tables
    if pq.select and not tables_in_cur_jp:
        return True
    # An update is needed iff the current join path does not already
    # account for all tables used by the protoquery.
    return not tables_in_cur_jp.issuperset(get_tables(schema, pq))
def with_updated_join_paths(schema, pq, minimal_join_paths=False):
    """Return one copy of *pq* per candidate join path covering its tables.

    Each returned ProtoQuery is a deep copy of *pq* with its FROM clause
    replaced by one of the join paths produced by ``schema.get_join_paths``.
    """
    for agg_col in pq.select:
        # COUNT(*) disables the minimal-join-path restriction -- presumably
        # because the row count depends on the join shape; TODO confirm.
        if agg_col.agg == COUNT and agg_col.col_id == 0:
            minimal_join_paths = False
    jps = schema.get_join_paths(get_tables(schema, pq),
        minimal_join_paths=minimal_join_paths)
    new_pqs = []
    for jp in jps:
        new_pq = ProtoQuery()
        new_pq.CopyFrom(pq)
        set_proto_from(new_pq, jp)
        new_pqs.append(new_pq)
    return new_pqs
def set_proto_from(pq, jp):
    """Overwrite the FROM clause of protoquery *pq* with join path *jp*.

    Clears any existing edges, then copies both the flat edge list and the
    per-table edge map from *jp* into the protobuf representation.
    """
    # reset from clause
    del pq.from_clause.edge_list.edges[:]
    # NOTE(review): deleting map entries while iterating .keys() -- works for
    # protobuf map fields in practice, but confirm the keys view is a snapshot.
    for key in pq.from_clause.edge_map.keys():
        del pq.from_clause.edge_map[key]
    if jp.distinct:
        pq.distinct = True
    for edge in jp.edges:
        proto_edge = ProtoJoinEdge()
        proto_edge.fk_col_id = edge.fk_col.id
        proto_edge.pk_col_id = edge.pk_col.id
        pq.from_clause.edge_list.edges.append(proto_edge)
    for tbl, edges in jp.edge_map.items():
        # initialize table in protobuf even if edges don't exist
        pq.from_clause.edge_map.get_or_create(tbl.id)
        for edge in edges:
            proto_edge = ProtoJoinEdge()
            proto_edge.fk_col_id = edge.fk_col.id
            proto_edge.pk_col_id = edge.pk_col.id
            pq.from_clause.edge_map[tbl.id].edges.append(proto_edge)
class ColumnBinaryOpException(Exception):
    """Raised when a column appears in a binary arithmetic expression (unsupported)."""
    pass
class FromSubqueryException(Exception):
    """Raised when the FROM clause contains a subquery (unsupported)."""
    pass
class MultipleLogicalOpException(Exception):
    """Raised when a WHERE/HAVING clause mixes different logical operators (AND/OR)."""
    pass
class MultipleOrderByException(Exception):
    """Raised when ORDER BY has more than one column (unsupported)."""
    pass
class SetOpException(Exception):
    """Raised when the query uses INTERSECT/UNION/EXCEPT (unsupported)."""
    pass
class InvalidValueException(Exception):
    """Raised when a predicate value is neither a number, a string, nor a subquery."""
    pass
class InvalidGroupByException(Exception):
    """Raised when the GROUP BY clause is inconsistent with the projection."""
    pass
class AggTypeMismatchException(Exception):
    """Raised when an aggregate is required but absent (e.g. a HAVING predicate with no aggregate)."""
    pass
class OpTypeMismatchException(Exception):
    """Operator/operand type mismatch; defined for use elsewhere in the pipeline (not raised in this module)."""
    pass
class SubqueryException(Exception):
    """Unsupported subquery construct; defined for use elsewhere in the pipeline (not raised in this module)."""
    pass
class EmptyResultException(Exception):
    """Query produced no result rows; defined for use elsewhere in the pipeline (not raised in this module)."""
    pass
class WildcardColumnException(Exception):
    """Unsupported '*' column usage; defined for use elsewhere in the pipeline (not raised in this module)."""
    pass
class UnsupportedColumnTypeException(Exception):
    """Column type not supported; defined for use elsewhere in the pipeline (not raised in this module)."""
    pass
class ForeignKeyException(Exception):
    """Foreign-key resolution problem; defined for use elsewhere in the pipeline (not raised in this module)."""
    pass
class InconsistentPredicateException(Exception):
    """Raised when two equality predicates constrain the same column (cannot both hold)."""
    pass
def load_pq_from_spider(schema, spider_sql, set_op=None):
    """Translate a Spider-format SQL dict into a fully-inferred ProtoQuery.

    Walks the Spider JSON clauses (select / where / groupBy / having /
    orderBy / limit / from), resolving columns through *schema* (foreign
    keys are mapped to the column they reference), and raises a specific
    exception for every construct this pipeline does not support.  The
    returned ProtoQuery has all of its ``done_*`` flags set.

    Args:
        schema: database schema used to resolve column/table ids and to
            compute the Steiner join path.
        spider_sql: parsed Spider SQL dictionary.
        set_op: reserved for set-operation recursion; currently unused
            because INTERSECT/UNION/EXCEPT raise SetOpException.

    Raises:
        SetOpException, ColumnBinaryOpException, FromSubqueryException,
        MultipleLogicalOpException, MultipleOrderByException,
        InvalidValueException, InvalidGroupByException,
        AggTypeMismatchException, InconsistentPredicateException
    """
    pq = ProtoQuery()
    if set_op is None:
        # TODO: set operations are not supported yet. The intended
        # implementation would set pq.set_op and recurse into
        # load_pq_from_spider for pq.left / pq.right.
        if 'intersect' in spider_sql and spider_sql['intersect']:
            raise SetOpException()
        elif 'except' in spider_sql and spider_sql['except']:
            raise SetOpException()
        elif 'union' in spider_sql and spider_sql['union']:
            raise SetOpException()

    tables = set()

    # SELECT
    pq.distinct = spider_sql['select'][0]
    agg_projs = []
    non_agg_projs = []
    for agg, val_unit in spider_sql['select'][1]:
        if val_unit[0] != 0:  # non-trivial unit_op, e.g. col1 - col2
            raise ColumnBinaryOpException()
        proj = pq.select.add()
        col = schema.get_col(val_unit[1][1])
        if col.fk_ref:
            # resolve foreign keys to the referenced column
            proj.col_id = col.fk_ref
            tables.add(schema.get_col(col.fk_ref).table)
        else:
            proj.col_id = col.id
            if col.id != 0:  # column 0 is '*', which belongs to no table
                tables.add(col.table)
        proj.agg = to_proto_agg(AGG_OPS[agg])
        if proj.agg != NO_AGG:
            proj.has_agg = TRUE
            agg_projs.append(proj)
        else:
            proj.has_agg = FALSE
            non_agg_projs.append(proj)
    pq.min_select_cols = len(pq.select)

    # WHERE
    equality_cols = set()  # columns already constrained by '='
    if 'where' in spider_sql and spider_sql['where']:
        pq.has_where = TRUE
        logical_op_set = False
        for cond in spider_sql['where']:
            if cond in ('and', 'or'):
                if logical_op_set and \
                        to_proto_logical_op(cond) != pq.where.logical_op:
                    raise MultipleLogicalOpException()
                else:
                    pq.where.logical_op = to_proto_logical_op(cond)
                    logical_op_set = True
            else:
                if cond[2][0] != 0:
                    raise ColumnBinaryOpException()
                pred = pq.where.predicates.add()
                pred.has_agg = FALSE
                col = schema.get_col(cond[2][1][1])
                if col.fk_ref:
                    pred.col_id = col.fk_ref
                    tables.add(schema.get_col(col.fk_ref).table)
                else:
                    pred.col_id = col.id
                    tables.add(col.table)
                pred.op = to_proto_old_op(cond[0], WHERE_OPS[cond[1]])
                if pred.op == EQUALS:
                    # two '=' predicates on the same column can never both hold
                    if pred.col_id in equality_cols:
                        raise InconsistentPredicateException()
                    equality_cols.add(pred.col_id)
                if isinstance(cond[3], dict):
                    pred.has_subquery = TRUE
                    pred.subquery.CopyFrom(load_pq_from_spider(schema, cond[3]))
                elif isinstance(cond[3], Number) or isinstance(cond[3], str):
                    pred.has_subquery = FALSE
                    val_str = str(cond[3]).replace('"', '')
                    pred.value.append(val_str)
                else:
                    raise InvalidValueException()
                if cond[4] is not None:  # second operand (e.g. BETWEEN)
                    pred.value.append(str(cond[4]))
        pq.min_where_preds = len(pq.where.predicates)
    else:
        pq.has_where = FALSE

    # GROUP BY
    if 'groupBy' in spider_sql and spider_sql['groupBy']:
        pq.has_group_by = TRUE
        for col_unit in spider_sql['groupBy']:
            col = schema.get_col(col_unit[1])
            if col.fk_ref:
                pq.group_by.append(col.fk_ref)
                tables.add(schema.get_col(col.fk_ref).table)
            else:
                pq.group_by.append(col.id)
                tables.add(col.table)
        pq.min_group_by_cols = len(pq.group_by)
    else:
        pq.has_group_by = FALSE

    # HAVING (only meaningful when a GROUP BY exists)
    if pq.has_group_by == TRUE:
        if 'having' in spider_sql and spider_sql['having']:
            pq.has_having = TRUE
            logical_op_set = False
            for cond in spider_sql['having']:
                if cond in ('and', 'or'):
                    if logical_op_set and \
                            to_proto_logical_op(cond) != pq.having.logical_op:
                        raise MultipleLogicalOpException()
                    else:
                        pq.having.logical_op = to_proto_logical_op(cond)
                        logical_op_set = True
                else:
                    if cond[2][0] != 0:
                        raise ColumnBinaryOpException()
                    pred = pq.having.predicates.add()
                    pred.has_agg = TRUE
                    pred.agg = to_proto_agg(AGG_OPS[cond[2][1][0]])
                    if pred.agg == NO_AGG:
                        # HAVING predicates must aggregate
                        raise AggTypeMismatchException()
                    col = schema.get_col(cond[2][1][1])
                    if col.fk_ref:
                        pred.col_id = col.fk_ref
                        tables.add(schema.get_col(col.fk_ref).table)
                    else:
                        pred.col_id = col.id
                        tables.add(col.table)
                    pred.op = to_proto_old_op(cond[0], WHERE_OPS[cond[1]])
                    if isinstance(cond[3], dict):
                        pred.has_subquery = TRUE
                        pred.subquery.CopyFrom(
                            load_pq_from_spider(schema, cond[3]))
                    elif isinstance(cond[3], Number) or \
                            isinstance(cond[3], str):
                        pred.has_subquery = FALSE
                        val_str = str(cond[3]).replace('"', '')
                        pred.value.append(val_str)
                    else:
                        raise InvalidValueException()
                    if cond[4] is not None:
                        pred.value.append(str(cond[4]))
            pq.min_having_preds = len(pq.having.predicates)
        else:
            pq.has_having = FALSE

    # ORDER BY (single column only)
    if 'orderBy' in spider_sql and spider_sql['orderBy']:
        pq.has_order_by = TRUE
        if len(spider_sql['orderBy'][1]) != 1:
            raise MultipleOrderByException()
        if spider_sql['orderBy'][1][0][0] != 0:
            raise ColumnBinaryOpException()
        order_col = pq.order_by.add()
        order_col.dir = to_proto_dir(spider_sql['orderBy'][0])
        order_col.agg_col.agg = to_proto_agg(
            AGG_OPS[spider_sql['orderBy'][1][0][1][0]])
        if order_col.agg_col.agg != NO_AGG:
            order_col.agg_col.has_agg = TRUE
        else:
            order_col.agg_col.has_agg = FALSE
        col = schema.get_col(spider_sql['orderBy'][1][0][1][1])
        if col.fk_ref:
            order_col.agg_col.col_id = col.fk_ref
            tables.add(schema.get_col(col.fk_ref).table)
        else:
            order_col.agg_col.col_id = col.id
            tables.add(col.table)
        pq.min_order_by_cols = len(pq.order_by)
    else:
        pq.has_order_by = FALSE

    # LIMIT (only meaningful when an ORDER BY exists)
    if pq.has_order_by == TRUE:
        if 'limit' in spider_sql and spider_sql['limit']:
            pq.has_limit = TRUE
            pq.limit = spider_sql['limit']
        else:
            pq.has_limit = FALSE

    # Reconcile the projection with GROUP BY.
    if len(agg_projs) > 0 and len(non_agg_projs) > 0:
        # GROUP BY must exist if both agg and non_agg exist
        if pq.has_group_by == FALSE:
            raise InvalidGroupByException()
    elif len(agg_projs) > 0:
        # if only agg exists and there is GROUP BY,
        # add GROUP BY columns to projection
        if pq.has_group_by == TRUE:
            for col_id in pq.group_by:
                proj = pq.select.add()
                proj.has_agg = FALSE
                proj.col_id = col_id
    else:
        # if only non-agg exists and there is GROUP BY,
        # add aggregated columns from elsewhere to projection
        if pq.has_group_by == TRUE:
            added = False
            for pred in pq.having.predicates:
                proj = pq.select.add()
                proj.has_agg = TRUE
                proj.col_id = pred.col_id
                proj.agg = pred.agg
                added = True
            for oc in pq.order_by:
                if oc.agg_col.has_agg == TRUE:
                    proj = pq.select.add()
                    proj.CopyFrom(oc.agg_col)
                    added = True
            if not added:
                raise InvalidGroupByException()

    # FROM: collect explicit table units, then connect every referenced
    # table with a Steiner-tree join path.
    for tbl_unit in spider_sql['from']['table_units']:
        if tbl_unit[0] != 'table_unit':
            raise FromSubqueryException()
        tables.add(schema.get_table(tbl_unit[1]))
    jp = schema.steiner(tables)
    set_proto_from(pq, jp)

    # Mark every inference stage complete.
    pq.done_select = True
    pq.done_where = True
    pq.done_group_by = True
    pq.done_having = True
    pq.done_order_by = True
    pq.done_limit = True
    pq.done_query = True
    return pq
class Query():
    """Thin wrapper pairing a database schema with a ProtoQuery."""
    def __init__(self, schema, protoquery=None):
        # schema: used to resolve the ids stored inside self.pq
        self.schema = schema
        if protoquery is None:
            protoquery = ProtoQuery()
        self.pq = protoquery
    def copy(self):
        """Return a copy sharing the schema but deep-copying the protoquery."""
        new_query = Query(self.schema)
        new_query.pq.CopyFrom(self.pq)
        return new_query
    @staticmethod
    def from_spider(schema, spider_sql):
        """Build a Query from a Spider-format SQL dict."""
        new_query = Query(schema)
        new_query.pq = load_pq_from_spider(schema, spider_sql)
        return new_query
| en | 0.624019 | # single table case, no aliases # range constraint # exact constraint # range constraint # exact constraint # if not set, default to 1 # escape apostrophes # tuples: (agg_col, tsq constraint) # tuples: (agg_col, tsq constraint) # nothing to verify! # Special Case: all aggregates and no group by, because SQLite does not # permit HAVING clause without a GROUP BY # range constraint # exact constraint # Get all tables used in PQ. Does not consider subqueries. # assuming no duplicate tables, change to list() if allowing self-join # check in case tbl is None for '*' column case # Only considers whether join path for current localized pq needs updating. # Does not consider for subqueries or set op children # Returns: # - True: if join path needs to be and can be updated # - False: if join path needs no updating # if SELECT has a column (i.e. inference started) and there are no tables # if the current join path doesn't account for all tables in protoquery # reset from clause # initialize table in protobuf even if edges don't exist # pq.set_op = INTERSECT # pq.left = load_pq_from_spider(schema, spider_sql, # set_op='intersect') # pq.right = load_pq_from_spider(schema, spider_sql['intersect'], # set_op='intersect') # pq.set_op = EXCEPT # pq.left = load_pq_from_spider(schema, spider_sql, set_op='except') # pq.right = load_pq_from_spider(schema, spider_sql['except'], # set_op='except') # pq.set_op = UNION # pq.left = load_pq_from_spider(schema, spider_sql, set_op='union') # pq.right = load_pq_from_spider(schema, spider_sql['union'], # set_op='union') # SELECT # WHERE # GROUP BY # HAVING # ORDER BY # LIMIT # GROUP BY must exist if both agg and non_agg exist # if only agg exists and there is GROUP BY, # add GROUP BY columns to projection # if only non-agg exists and there is GROUP BY, # add aggregated columns from elsewhere to projection # FROM | 2.422475 | 2 |
astra/writers.py | hhslepicka/lume-astra | 0 | 6625187 | import numpy as np
from numbers import Number
import os
def namelist_lines(namelist_dict, name):
    """
    Converts namelist dict to output lines, for writing to file.

    Produces ``&name``, one ``key = value`` line per entry, and a closing
    ``/``. Only scalars, strings, and flat lists/arrays are allowed; any
    other value type raises ValueError.
    """
    lines = ['&' + name]
    for key, value in namelist_dict.items():
        if isinstance(value, Number):  # numbers (bool included: bool is a Number)
            line = key + ' = ' + str(value)
        elif isinstance(value, (list, np.ndarray)):  # lists or np arrays
            # space-separated items; a trailing space is harmless in namelists
            line = key + ' = ' + ''.join(str(item) + ' ' for item in value)
        elif isinstance(value, str):
            # input may already carry apostrophes; normalize to one pair
            line = key + ' = ' + "'" + value.strip("''") + "'"
        elif bool(value) == value:
            # bool-like objects that are not Numbers (e.g. np.bool_)
            line = key + ' = ' + str(value)
        else:
            raise ValueError(f'Problem writing input key: {key}, value: {value}, type: {type(value)}')
        lines.append(line)
    lines.append('/')
    return lines
def make_namelist_symlinks(namelist, path, prefixes=('file_', 'distribution'), verbose=False):
    """
    Looks for keys that start with prefixes.
    If the value is a path that exists, a symlink will be made.
    Old symlinks will be replaced.
    A replacement dict is returned, mapping key -> bare filename for every
    key whose file is (or already was) present in *path*.
    """
    replacements = {}
    for key in namelist:
        if not any(key.startswith(prefix) for prefix in prefixes):
            continue
        src = namelist[key]
        if os.path.exists(os.path.join(path, src)) and not os.path.isabs(src):
            if verbose:
                # bugfix: this message was an unprinted f-string expression
                print(f'File {src} already in path, skipping.')
            continue
        if not os.path.exists(src):
            if verbose:
                print('Path does not exist for symlink:', src)
            continue
        _, file = os.path.split(src)
        dest = os.path.join(path, file)
        replacements[key] = file
        # Replace old symlinks.
        if os.path.islink(dest):
            os.unlink(dest)
        elif os.path.exists(dest):
            # dest is a real file: keep it, but still report the replacement
            if verbose:
                print(dest, 'exists, will not symlink')
            continue
        # Note that the following will raise an error if the dest is an actual file that exists
        os.symlink(src, dest)
        if verbose:
            print('Linked', src, 'to', dest)
    return replacements
def write_namelists(namelists, filePath, make_symlinks=False, prefixes=('file_', 'distribution'), verbose=False):
    """
    Simple function to write namelist lines to a file.

    If make_symlinks, keys starting with *prefixes* will be searched for
    paths and the appropriate links will be made (and the written values
    replaced by the bare filenames).
    For Windows, make_symlinks is ignored and it is always False. See note at
    https://docs.python.org/3/library/os.html#os.symlink .
    """
    # With Windows 10, users need Administrator Privileges or run on Developer
    # mode in order to be able to create symlinks.
    # More info: https://docs.python.org/3/library/os.html#os.symlink
    if os.name == 'nt':
        make_symlinks = False
    with open(filePath, 'w') as f:
        for key in namelists:
            namelist = namelists[key]
            if make_symlinks:
                # Work on a copy so the caller's dict is not mutated
                namelist = namelist.copy()
                path, _ = os.path.split(filePath)
                replacements = make_namelist_symlinks(namelist, path,
                                                      prefixes=prefixes,
                                                      verbose=verbose)
                namelist.update(replacements)
            for line in namelist_lines(namelist, key):
                f.write(line + '\n')
def fstr(s):
    """
    Makes a fixed (bytes) string for h5 files.

    ``np.bytes_`` is the surviving name for the old ``np.string_`` alias,
    which was removed in NumPy 2.0; both refer to the same type.
    """
    return np.bytes_(s)
def opmd_init(h5, basePath='/screen/%T/', particlesPath='/' ):
    """
    Root attribute initialization for an openPMD file.
    h5 should be the root of the file; every attribute is written as a
    fixed bytes string via fstr().
    """
    root_attrs = {
        'basePath': basePath,
        'dataType': 'openPMD',
        'openPMD': '2.0.0',
        'openPMDextension': 'BeamPhysics;SpeciesType',
        'particlesPath': particlesPath,
    }
    for attr_name, attr_value in root_attrs.items():
        h5.attrs[attr_name] = fstr(attr_value)
def write_astra_particles_h5(h5, name, astra_data, species='electron'):
    """
    Write particle data at a screen in openPMD BeamPhysics format
    https://github.com/DavidSagan/openPMD-standard/blob/EXT_BeamPhysics/EXT_BeamPhysics.md

    Creates group *name* under *h5* and writes positions, momenta, time,
    weights and status from the astra_data dict (keys: x, y, z_rel, z_ref,
    px, py, pz_rel, pz_ref, t_rel, t_ref, qmacro, status).
    """
    g = h5.create_group(name)
    n_particle = len(astra_data['x'])
    # Indices of good particles (ASTRA status 5 = standard active particle)
    good = np.where(astra_data['status'] == 5)
    #-----------
    # Attributes
    g.attrs['speciesType'] = fstr(species)
    g.attrs['numParticles'] = n_particle
    g.attrs['chargeLive'] = abs(np.sum(astra_data['qmacro'][good])) # Make positive
    g.attrs['chargeUnitSI'] = 1
    #g.attrs['chargeUnitDimension']=(0., 0., 1, 1., 0., 0., 0.) # Amp*s = Coulomb
    g.attrs['totalCharge'] = abs(np.sum(astra_data['qmacro']))
    #---------
    # Datasets
    # Position
    g['position/x']=astra_data['x'] # in meters
    g['position/y']=astra_data['y']
    g['position/z']=astra_data['z_rel']
    for component in ['position/x', 'position/y', 'position/z', 'position']: # Add units to all components
        g[component].attrs['unitSI'] = 1.0
        g[component].attrs['unitDimension']=(1., 0., 0., 0., 0., 0., 0.) # m
    # positionOffset (Constant record)
    # Just z
    g2 = g.create_group('positionOffset/z')
    g2.attrs['value'] = astra_data['z_ref']
    # NOTE(review): (n_particle) is a plain int, not a tuple -- confirm
    # whether openPMD readers expect shape to be written as (n_particle,)
    g2.attrs['shape'] = (n_particle)
    g2.attrs['unitSI'] = g['position'].attrs['unitSI']
    g2.attrs['unitDimension'] = g['position'].attrs['unitDimension']
    # momenta
    g['momentum/x']=astra_data['px'] # m*c*gamma*beta_x in eV/c
    g['momentum/y']=astra_data['py']
    g['momentum/z']=astra_data['pz_rel']
    for component in ['momentum/x', 'momentum/y', 'momentum/z', 'momentum']:
        g[component].attrs['unitSI']= 5.34428594864784788094e-28 # eV/c in J/(m/s) = kg*m / s
        g[component].attrs['unitDimension']=(1., 1., -1., 0., 0., 0., 0.) # kg*m / s
    # momentumOffset (Constant record)
    # Just pz
    g2 = g.create_group('momentumOffset/z')
    g2.attrs['value'] = astra_data['pz_ref']
    g2.attrs['shape'] = (n_particle)
    g2.attrs['unitSI'] = g['momentum'].attrs['unitSI']
    g2.attrs['unitDimension'] = g['momentum'].attrs['unitDimension']
    # Time
    g['time'] = astra_data['t_rel']
    g['time'].attrs['unitSI'] = 1.0 # s
    g['time'].attrs['unitDimension'] = (0., 0., 1., 0., 0., 0., 0.) # s
    # Time offset (Constant record)
    g2 = g.create_group('timeOffset')
    g2.attrs['value'] = astra_data['t_ref']
    g2.attrs['shape'] = (n_particle)
    g2.attrs['unitSI'] = g['time'].attrs['unitSI']
    g2.attrs['unitDimension'] = g['time'].attrs['unitDimension']
    # Weights (macro-particle charge)
    g['weight'] = astra_data['qmacro']
    g['weight'].attrs['unitSI'] = 1.0
    g['weight'].attrs['unitDimension']=(0., 0., 1, 1., 0., 0., 0.) # Amp*s = Coulomb
    # Status
    # The standard defines 1 as a live particle, but astra uses 1 as a 'passive' particle
    # and 5 as a 'standard' particle. 2 is not used.
    # To preserve this information, make 1->2 and then 5->1
    status = astra_data['status'].copy()
    where_1 = np.where(status==1)
    where_5 = good # was defined above
    status[where_1] = 2
    status[where_5] = 1
    g['particleStatus'] = status
    g['particleStatus'].attrs['unitSI'] = 1.0
    g['particleStatus'].attrs['unitDimension']=(0., 0., 0, 0., 0., 0., 0.) # Dimensionless
def write_screens_h5(h5, astra_screens, name='screen'):
    """
    Write all screens to file, simply named by their index.

    Creates group *name* under *h5*, initializes the openPMD root
    attributes, then writes one particle group per screen ('0', '1', ...).
    """
    g = h5.create_group(name)
    # Set base attributes
    opmd_init(h5, basePath='/'+name+'/%T/', particlesPath='/' )
    # Loop over screens; enumerate instead of range(len(...)), and avoid
    # shadowing the `name` parameter as the original did.
    for i, screen in enumerate(astra_screens):
        write_astra_particles_h5(g, str(i), screen)
| import numpy as np
from numbers import Number
import os
def namelist_lines(namelist_dict, name):
"""
Converts namelist dict to output lines, for writing to file.
Only allow scalars or lists.
Do not allow np arrays or any other types from simplicity.
"""
lines = []
lines.append('&'+name)
# parse
for key, value in namelist_dict.items():
#if type(value) == type(1) or type(value) == type(1.): # numbers
if isinstance(value, Number): # numbers
line= key + ' = ' + str(value)
elif type(value) == type([]) or isinstance(value, np.ndarray): # lists or np arrays
liststr = ''
for item in value:
liststr += str(item) + ' '
line = key + ' = ' + liststr
elif type(value) == type('a'): # strings
line = key + ' = ' + "'" + value.strip("''") + "'" # input may need apostrophes
elif bool(value) == value:
line= key + ' = ' + str(value)
else:
#print 'skipped: key, value = ', key, value
raise ValueError(f'Problem writing input key: {key}, value: {value}, type: {type(value)}')
lines.append(line)
lines.append('/')
return lines
def make_namelist_symlinks(namelist, path, prefixes=['file_', 'distribution'], verbose=False):
"""
Looks for keys that start with prefixes.
If the value is a path that exists, a symlink will be made.
Old symlinks will be replaced.
A replacement dict is returned
"""
replacements = {}
for key in namelist:
if any([key.startswith(prefix) for prefix in prefixes]):
src = namelist[key]
if os.path.exists(os.path.join(path, src)) and not os.path.isabs(src):
if verbose:
f'File {src} already in path, skipping.'
continue
if not os.path.exists(src):
if verbose:
print('Path does not exist for symlink:', src)
continue
_, file = os.path.split(src)
dest = os.path.join(path, file)
replacements[key] = file
# Replace old symlinks.
if os.path.islink(dest):
os.unlink(dest)
elif os.path.exists(dest):
if verbose:
print(dest, 'exists, will not symlink')
continue
# Note that the following will raise an error if the dest is an actual file that exists
os.symlink(src, dest)
if verbose:
print('Linked', src, 'to', dest)
return replacements
def write_namelists(namelists, filePath, make_symlinks=False, prefixes=['file_', 'distribution'], verbose=False):
"""
Simple function to write namelist lines to a file
If make_symlinks, prefixes will be searched for paths and the appropriate links will be made.
For Windows, make_symlinks is ignored and it is always False.See note at https://docs.python.org/3/library/os.html#os.symlink .
"""
# With Windows 10, users need Administator Privileges or run on Developer mode
# in order to be able to create symlinks.
# More info: https://docs.python.org/3/library/os.html#os.symlink
if os.name == 'nt':
make_symlinks = False
with open(filePath, 'w') as f:
for key in namelists:
namelist = namelists[key]
if make_symlinks:
# Work on a copy
namelist = namelist.copy()
path, _ = os.path.split(filePath)
replacements = make_namelist_symlinks(namelist, path, prefixes=prefixes, verbose=verbose)
namelist.update(replacements)
lines = namelist_lines(namelist, key)
for l in lines:
f.write(l+'\n')
def fstr(s):
"""
Makes a fixed string for h5 files
"""
return np.string_(s)
def opmd_init(h5, basePath='/screen/%T/', particlesPath='/' ):
"""
Root attribute initialization.
h5 should be the root of the file.
"""
d = {
'basePath':basePath,
'dataType':'openPMD',
'openPMD':'2.0.0',
'openPMDextension':'BeamPhysics;SpeciesType',
'particlesPath':particlesPath
}
for k,v in d.items():
h5.attrs[k] = fstr(v)
def write_astra_particles_h5(h5, name, astra_data, species='electron'):
"""
Write particle data at a screen in openPMD BeamPhysics format
https://github.com/DavidSagan/openPMD-standard/blob/EXT_BeamPhysics/EXT_BeamPhysics.md
"""
g = h5.create_group(name)
n_particle = len(astra_data['x'])
# Indices of good particles
good = np.where(astra_data['status'] == 5)
#-----------
# Attributes
g.attrs['speciesType'] = fstr(species)
g.attrs['numParticles'] = n_particle
g.attrs['chargeLive'] = abs(np.sum(astra_data['qmacro'][good])) # Make positive
g.attrs['chargeUnitSI'] = 1
#g.attrs['chargeUnitDimension']=(0., 0., 1, 1., 0., 0., 0.) # Amp*s = Coulomb
g.attrs['totalCharge'] = abs(np.sum(astra_data['qmacro']))
#---------
# Datasets
# Position
g['position/x']=astra_data['x'] # in meters
g['position/y']=astra_data['y']
g['position/z']=astra_data['z_rel']
for component in ['position/x', 'position/y', 'position/z', 'position']: # Add units to all components
g[component].attrs['unitSI'] = 1.0
g[component].attrs['unitDimension']=(1., 0., 0., 0., 0., 0., 0.) # m
# positionOffset (Constant record)
# Just z
g2 = g.create_group('positionOffset/z')
g2.attrs['value'] = astra_data['z_ref']
g2.attrs['shape'] = (n_particle)
g2.attrs['unitSI'] = g['position'].attrs['unitSI']
g2.attrs['unitDimension'] = g['position'].attrs['unitDimension']
# momenta
g['momentum/x']=astra_data['px'] # m*c*gamma*beta_x in eV/c
g['momentum/y']=astra_data['py']
g['momentum/z']=astra_data['pz_rel']
for component in ['momentum/x', 'momentum/y', 'momentum/z', 'momentum']:
g[component].attrs['unitSI']= 5.34428594864784788094e-28 # eV/c in J/(m/s) = kg*m / s
g[component].attrs['unitDimension']=(1., 1., -1., 0., 0., 0., 0.) # kg*m / s
# momentumOffset (Constant record)
# Just pz
g2 = g.create_group('momentumOffset/z')
g2.attrs['value'] = astra_data['pz_ref']
g2.attrs['shape'] = (n_particle)
g2.attrs['unitSI'] = g['momentum'].attrs['unitSI']
g2.attrs['unitDimension'] = g['momentum'].attrs['unitDimension']
# Time
g['time'] = astra_data['t_rel']
g['time'].attrs['unitSI'] = 1.0 # s
g['time'].attrs['unitDimension'] = (0., 0., 1., 0., 0., 0., 0.) # s
# Time offset (Constant record)
g2 = g.create_group('timeOffset')
g2.attrs['value'] = astra_data['t_ref']
g2.attrs['shape'] = (n_particle)
g2.attrs['unitSI'] = g['time'].attrs['unitSI']
g2.attrs['unitDimension'] = g['time'].attrs['unitDimension']
# Weights
g['weight'] = astra_data['qmacro']
g['weight'].attrs['unitSI'] = 1.0
g['weight'].attrs['unitDimension']=(0., 0., 1, 1., 0., 0., 0.) # Amp*s = Coulomb
# Status
# The standard defines 1 as a live particle, but astra uses 1 as a 'passive' particle
# and 5 as a 'standard' particle. 2 is not used.
# To preserve this information, make 1->2 and then 5->1
status = astra_data['status'].copy()
where_1 = np.where(status==1)
where_5 = good # was defined above
status[where_1] = 2
status[where_5] = 1
g['particleStatus'] = status
g['particleStatus'].attrs['unitSI'] = 1.0
g['particleStatus'].attrs['unitDimension']=(0., 0., 0, 0., 0., 0., 0.) # Dimensionless
def write_screens_h5(h5, astra_screens, name='screen'):
"""
Write all screens to file, simply named by their index
"""
g = h5.create_group(name)
# Set base attributes
opmd_init(h5, basePath='/'+name+'/%T/', particlesPath='/' )
# Loop over screens
for i in range(len(astra_screens)):
name = str(i)
write_astra_particles_h5(g, name, astra_screens[i])
| en | 0.717049 | Converts namelist dict to output lines, for writing to file. Only allow scalars or lists. Do not allow np arrays or any other types from simplicity. # parse #if type(value) == type(1) or type(value) == type(1.): # numbers # numbers # lists or np arrays # strings # input may need apostrophes #print 'skipped: key, value = ', key, value Looks for keys that start with prefixes. If the value is a path that exists, a symlink will be made. Old symlinks will be replaced. A replacement dict is returned # Replace old symlinks. # Note that the following will raise an error if the dest is an actual file that exists Simple function to write namelist lines to a file If make_symlinks, prefixes will be searched for paths and the appropriate links will be made. For Windows, make_symlinks is ignored and it is always False.See note at https://docs.python.org/3/library/os.html#os.symlink . # With Windows 10, users need Administator Privileges or run on Developer mode # in order to be able to create symlinks. # More info: https://docs.python.org/3/library/os.html#os.symlink # Work on a copy Makes a fixed string for h5 files Root attribute initialization. h5 should be the root of the file. Write particle data at a screen in openPMD BeamPhysics format https://github.com/DavidSagan/openPMD-standard/blob/EXT_BeamPhysics/EXT_BeamPhysics.md # Indices of good particles #----------- # Attributes # Make positive #g.attrs['chargeUnitDimension']=(0., 0., 1, 1., 0., 0., 0.) # Amp*s = Coulomb #--------- # Datasets # Position # in meters # Add units to all components # m # positionOffset (Constant record) # Just z # momenta # m*c*gamma*beta_x in eV/c # eV/c in J/(m/s) = kg*m / s # kg*m / s # momentumOffset (Constant record) # Just pz # Time # s # s # Time offset (Constant record) # Weights # Amp*s = Coulomb # Status # The standard defines 1 as a live particle, but astra uses 1 as a 'passive' particle # and 5 as a 'standard' particle. 2 is not used. 
# To preserve this information, make 1->2 and then 5->1 # was defined above # Dimensionless Write all screens to file, simply named by their index # Set base attributes # Loop over screens | 3.134382 | 3 |
michelanglo_protein/generate/__init__.py | matteoferla/protein-module-for-VENUS | 1 | 6625188 | <reponame>matteoferla/protein-module-for-VENUS
from ._protein_gatherer import ProteinGatherer
from ._proteome_gatherer import ProteomeGatherer
# | from ._protein_gatherer import ProteinGatherer
from ._proteome_gatherer import ProteomeGatherer
# | none | 1 | 1.034849 | 1 | |
ecomm/addresses/forms.py | aruntnp/MYPROJECTS | 0 | 6625189 | <filename>ecomm/addresses/forms.py
from django import forms
from .models import Address
class AddressForm(forms.ModelForm):
    """User-facing form for creating/editing an Address.

    ``billing_profile`` and ``address_type`` are deliberately excluded
    from the visible fields: they are filled in by view logic, not by
    the user.
    """
    class Meta:
        model = Address
        fields = [
            # 'billing_profile', # It should NOT display to user
            # 'address_type', #This also come with logic
            'address_line_1',
            'address_line_2',
            'city',
            'country',
            'state',
            'postal_code',
            ]
| <filename>ecomm/addresses/forms.py
from django import forms
from .models import Address
class AddressForm(forms.ModelForm):
class Meta:
model = Address
fields = [
# 'billing_profile', # It should NOT display to user
# 'address_type', #This also come with logic
'address_line_1',
'address_line_2',
'city',
'country',
'state',
'postal_code',
]
| en | 0.79389 | # 'billing_profile', # It should NOT display to user # 'address_type', #This also come with logic | 2.27596 | 2 |
slack_bolt/middleware/message_listener_matches/async_message_listener_matches.py | Exhorder6/bolt-python | 0 | 6625190 | import re
from typing import Callable, Awaitable, Union, Pattern
from slack_bolt.request.async_request import AsyncBoltRequest
from slack_bolt.response import BoltResponse
from slack_bolt.middleware.async_middleware import AsyncMiddleware
class AsyncMessageListenerMatches(AsyncMiddleware):
    def __init__(self, keyword: Union[str, Pattern]):
        """Captures matched keywords and saves the values in context.

        Args:
            keyword: a string or compiled pattern matched against the
                incoming message text with ``re.findall``.
        """
        self.keyword = keyword

    async def async_process(
        self,
        *,
        req: AsyncBoltRequest,
        resp: BoltResponse,
        next: Callable[[], Awaitable[BoltResponse]],
    ) -> BoltResponse:
        """Run the rest of the chain only when the message text matches."""
        text = req.body.get("event", {}).get("text", "")
        if text:
            # re.findall always returns a list (possibly empty), never None,
            # so a plain truthiness check is sufficient.
            m = re.findall(self.keyword, text)
            if m:
                if isinstance(m[0], tuple):
                    # multiple capture groups: expose the first match's groups
                    m = m[0]
                else:
                    # zero/one capture group: expose all matches
                    m = tuple(m)
                req.context["matches"] = m  # tuple of captured strings
                return await next()
        # As the text doesn't match, skip running the listener
        return resp
| import re
from typing import Callable, Awaitable, Union, Pattern
from slack_bolt.request.async_request import AsyncBoltRequest
from slack_bolt.response import BoltResponse
from slack_bolt.middleware.async_middleware import AsyncMiddleware
class AsyncMessageListenerMatches(AsyncMiddleware):
    """Middleware that runs a message listener only when its keyword matches.

    The keyword (plain string or compiled pattern) is matched against the
    incoming message event's text with ``re.findall``.  Captured values are
    exposed to the listener as a tuple in ``context["matches"]``.
    """

    def __init__(self, keyword: Union[str, Pattern]):
        """Captures matched keywords and saves the values in context."""
        self.keyword = keyword

    async def async_process(
        self,
        *,
        req: AsyncBoltRequest,
        resp: BoltResponse,
        next: Callable[[], Awaitable[BoltResponse]],
    ) -> BoltResponse:
        text = req.body.get("event", {}).get("text", "")
        if text:
            # re.findall always returns a list (never None), so truthiness
            # alone replaces the original `m is not None and m != []` check.
            matches = re.findall(self.keyword, text)
            if matches:
                if isinstance(matches[0], tuple):
                    # Two or more capturing groups: findall yields tuples of
                    # groups; keep only the first match's groups.
                    matches = matches[0]
                else:
                    # Zero or one group: findall yields strings; expose all.
                    matches = tuple(matches)
                req.context["matches"] = matches  # tuple of captured strings
                return await next()
        # As the text doesn't match, skip running the listener
        return resp
| en | 0.744257 | Captures matched keywords and saves the values in context. # tuple or list # As the text doesn't match, skip running the listener | 2.37782 | 2 |
alipay/aop/api/domain/AlipayUserAgreementAuthApplyModel.py | snowxmas/alipay-sdk-python-all | 213 | 6625191 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserAgreementAuthApplyModel(object):
    """Request model for alipay.user.agreement.auth.apply.

    Holds three optional fields (agreement_no, auth_confirm_type,
    auth_scene) and follows the Alipay SDK model protocol via
    to_alipay_dict / from_alipay_dict.
    """

    # Serializable fields of this model, in protocol order.
    _FIELD_NAMES = ('agreement_no', 'auth_confirm_type', 'auth_scene')

    def __init__(self):
        self._agreement_no = None
        self._auth_confirm_type = None
        self._auth_scene = None

    @property
    def agreement_no(self):
        return self._agreement_no

    @agreement_no.setter
    def agreement_no(self, value):
        self._agreement_no = value

    @property
    def auth_confirm_type(self):
        return self._auth_confirm_type

    @auth_confirm_type.setter
    def auth_confirm_type(self, value):
        self._auth_confirm_type = value

    @property
    def auth_scene(self):
        return self._auth_scene

    @auth_scene.setter
    def auth_scene(self, value):
        self._auth_scene = value

    def to_alipay_dict(self):
        """Serialize the truthy fields into a plain dict.

        Unset/empty fields are omitted; nested SDK objects are converted
        via their own to_alipay_dict, matching the generated SDK code.
        """
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipayUserAgreementAuthApplyModel()
        for name in AlipayUserAgreementAuthApplyModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserAgreementAuthApplyModel(object):
    """Request model for alipay.user.agreement.auth.apply.

    Holds three optional fields (agreement_no, auth_confirm_type,
    auth_scene) and follows the Alipay SDK model protocol via
    to_alipay_dict / from_alipay_dict.
    """

    # Serializable fields of this model, in protocol order.
    _FIELD_NAMES = ('agreement_no', 'auth_confirm_type', 'auth_scene')

    def __init__(self):
        self._agreement_no = None
        self._auth_confirm_type = None
        self._auth_scene = None

    @property
    def agreement_no(self):
        return self._agreement_no

    @agreement_no.setter
    def agreement_no(self, value):
        self._agreement_no = value

    @property
    def auth_confirm_type(self):
        return self._auth_confirm_type

    @auth_confirm_type.setter
    def auth_confirm_type(self, value):
        self._auth_confirm_type = value

    @property
    def auth_scene(self):
        return self._auth_scene

    @auth_scene.setter
    def auth_scene(self, value):
        self._auth_scene = value

    def to_alipay_dict(self):
        """Serialize the truthy fields into a plain dict.

        Unset/empty fields are omitted; nested SDK objects are converted
        via their own to_alipay_dict, matching the generated SDK code.
        """
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipayUserAgreementAuthApplyModel()
        for name in AlipayUserAgreementAuthApplyModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 2.040171 | 2 |
makahiki/apps/widgets/home/tests.py | justinslee/Wai-Not-Makahiki | 1 | 6625192 | <reponame>justinslee/Wai-Not-Makahiki
"""
home page tests
"""
import json
import datetime
from django.test import TransactionTestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from apps.managers.challenge_mgr import challenge_mgr
from apps.managers.challenge_mgr.models import RoundSetting
from apps.managers.player_mgr.models import Profile
from apps.utils import test_utils
from apps.widgets.help.models import HelpTopic
from apps.widgets.smartgrid import SETUP_WIZARD_ACTIVITY
from apps.widgets.smartgrid.models import Activity
class HomeFunctionalTestCase(TransactionTestCase):
    """Functional test for the home page."""

    def testIndex(self):
        """Check that we can load the index."""
        test_utils.set_competition_round()
        User.objects.create_user("user", "<EMAIL>", password="<PASSWORD>")
        self.client.login(username="user", password="<PASSWORD>")
        # The home widget must be registered before the page can render.
        challenge_mgr.register_page_widget("home", "home")
        response = self.client.get(reverse("home_index"))
        self.failUnlessEqual(response.status_code, 200)
class CompetitionMiddlewareTestCase(TransactionTestCase):
    """Tests that the middleware restricts access outside the competition."""

    def setUp(self):
        User.objects.create_user("user", "<EMAIL>", password="<PASSWORD>")
        self.client.login(username="user", password="<PASSWORD>")

    def testBeforeCompetition(self):
        """
        Check that the user is redirected before the competition starts.
        """
        # Round window lies entirely in the future.
        start = datetime.datetime.today() + datetime.timedelta(days=1)
        end = start + datetime.timedelta(days=7)
        RoundSetting.objects.create(name="Round 1", start=start, end=end)
        response = self.client.get(reverse("home_index"), follow=True)
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response,
            "widgets/home/templates/restricted.html")
        self.assertContains(response, "The competition starts at")

    def testAfterCompetition(self):
        """
        Check that the user is redirected after the competition ends.
        """
        # Round window lies in the past.  NOTE(review): `end` is computed
        # 7 days *before* `start` here — looks intentional for "round over",
        # but confirm the inverted window is what the fixture intends.
        start = datetime.datetime.today() - datetime.timedelta(days=8)
        end = start - datetime.timedelta(days=7)
        RoundSetting.objects.create(name="Round 1", start=start, end=end)
        response = self.client.get(reverse("home_index"), follow=True)
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response,
            "widgets/home/templates/restricted.html")
        self.assertContains(response, "The Kukui Cup is now over")
class SetupWizardFunctionalTestCase(TransactionTestCase):
    """Setup wizard test cases: each test walks one step of the first-login flow."""

    def setUp(self):
        """Create a logged-in user plus the fixtures every wizard step needs."""
        test_utils.set_competition_round()
        self.user = User.objects.create_user("user", "<EMAIL>", password="<PASSWORD>")
        # create the term help-topic
        HelpTopic.objects.create(title="", slug="terms-and-conditions", category="faq", contents="")
        # create the setup activity
        Activity.objects.create(slug=SETUP_WIZARD_ACTIVITY, name="", title="", duration=5)
        challenge_mgr.register_page_widget("home", "home")
        self.client.login(username="user", password="<PASSWORD>")

    def testDisplaySetupWizard(self):
        """Check that the setup wizard is shown for new users."""
        response = self.client.get(reverse("home_index"))
        self.failUnlessEqual(response.status_code, 200)
        self.assertContains(response, "Welcome to the Kukui Cup")

    def testSetupTerms(self):
        """Check that we can access the terms page of the setup wizard."""
        # Wizard steps are fetched via AJAX and must return valid JSON.
        response = self.client.get(reverse("setup_terms"), {},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertTemplateUsed(response, "first-login/terms.html")
        try:
            json.loads(response.content)
        except ValueError:
            self.fail("Response JSON could not be decoded.")

    def testReferralStep(self):
        """
        Test that we can record referral emails from the setup page.
        """
        user2 = User.objects.create_user("user2", "<EMAIL>")
        # Test we can get the referral page.
        response = self.client.get(reverse('setup_referral'), {},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.failUnlessEqual(response.status_code, 200)
        try:
            json.loads(response.content)
        except ValueError:
            self.fail("Response JSON could not be decoded.")
        # Test referring using their own email
        response = self.client.post(reverse('setup_referral'), {
            'referrer_email': self.user.email,
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/referral.html")
        self.assertEqual(len(response.context['form'].errors), 1,
            "Using their own email as referrer should raise an error.")
        # Test referring using the email of a user who is not in the system.
        response = self.client.post(reverse('setup_referral'), {
            'referrer_email': '<EMAIL>',
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/referral.html")
        self.assertEqual(len(response.context['form'].errors), 1,
            'Using external email as referrer should raise an error.')
        # Test bad email.
        response = self.client.post(reverse('setup_referral'), {
            'referrer_email': 'foo',
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.failUnlessEqual(response.status_code, 200)
        self.assertEqual(len(response.context['form'].errors), 1,
            'Using a bad email should insert an error.')
        self.assertTemplateUsed(response, "first-login/referral.html")
        # Staff user should not be able to be referred.
        user2.is_staff = True
        user2.save()
        response = self.client.post(reverse('setup_referral'), {
            'referrer_email': user2.email,
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.failUnlessEqual(response.status_code, 200)
        self.assertEqual(len(response.context['form'].errors), 1,
            'Using an admin as a referrer should raise an error.')
        self.assertTemplateUsed(response, "first-login/referral.html")
        user2.is_staff = False
        user2.save()
        # Test no referrer.
        response = self.client.post(reverse('setup_referral'), {
            'referrer_email': '',
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/profile.html")
        # Test successful referrer
        response = self.client.post(reverse('setup_referral'), {
            'referrer_email': user2.email,
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/profile.html")
        profile = Profile.objects.get(user=self.user)
        self.assertEqual(profile.referring_user,
            user2,
            'User 1 should be referred by user 2.')
        # Test getting the referral page now has user2's email.
        response = self.client.get(reverse('setup_referral'), {},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.failUnlessEqual(response.status_code, 200)
        self.assertContains(response,
            user2.email,
            msg_prefix="Going back to referral page should " \
            "have second user's email.")

    def testSetupProfile(self):
        """Check that we can access the profile page of the setup wizard."""
        profile = self.user.get_profile()
        profile.name = "Test User"
        profile.save()
        response = self.client.get(reverse("setup_profile"), {},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertTemplateUsed(response, "first-login/profile.html")
        self.assertContains(response, profile.name)
        try:
            json.loads(response.content)
        except ValueError:
            self.fail("Response JSON could not be decoded.")

    def testSetupProfileUpdate(self):
        """Check that we can update the profile of the user in the setup
        wizard."""
        profile = self.user.get_profile()
        points = profile.points()
        response = self.client.post(reverse("setup_profile"), {
            "display_name": "<NAME>",
        }, follow=True)
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/activity.html")
        # Completing this step awards a fixed 5-point bonus.
        user = User.objects.get(username="user")
        self.assertEqual(points + 5, user.get_profile().points(),
            "Check that the user has been awarded points.")
        self.assertTrue(user.get_profile().setup_profile,
            "Check that the user has now set up their profile.")
        # Check that updating again does not award more points.
        response = self.client.post(reverse("setup_profile"), {
            "display_name": "<NAME>",
        }, follow=True)
        user = User.objects.get(username="user")
        self.assertEqual(points + 5, user.get_profile().points(),
            "Check that the user was not awarded any more points.")
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/activity.html")

    def testSetupProfileWithoutName(self):
        """Test that there is an error when the user does not supply a
        username."""
        _ = self.user.get_profile()
        response = self.client.post(reverse("setup_profile"), {
            "display_name": "",
        })
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/profile.html")

    def testSetupProfileWithDupName(self):
        """Test that there is an error when the user uses a duplicate display
        name."""
        _ = self.user.get_profile()
        user2 = User.objects.create_user("user2", "<EMAIL>")
        profile2 = user2.get_profile()
        profile2.name = "<NAME>."
        profile2.save()
        response = self.client.post(reverse("setup_profile"), {
            "display_name": "<NAME>.",
        }, follow=True)
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/profile.html")
        self.assertContains(response, "Please use another name.",
            msg_prefix="Duplicate name should raise an error.")
        # Duplicate check should also catch surrounding whitespace.
        response = self.client.post(reverse("setup_profile"), {
            "display_name": " <NAME>. ",
        }, follow=True)
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/profile.html")
        self.assertContains(response, "Please use another name.",
            msg_prefix="Duplicate name with whitespace should raise an error.")
        response = self.client.post(reverse("setup_profile"), {
            "display_name": "<NAME>.",
        }, follow=True)
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/profile.html")
        self.assertContains(response, "Please use another name.",
            msg_prefix="Duplicate name with whitespace should raise an error.")

    def testSetupActivity(self):
        """Check that we can access the activity page of the setup wizard."""
        response = self.client.get(reverse("setup_activity"), {},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertTemplateUsed(response, "first-login/activity.html")
        try:
            json.loads(response.content)
        except ValueError:
            self.fail("Response JSON could not be decoded.")

    def testSetupQuestion(self):
        """Check that we can access the question page of the setup wizard."""
        response = self.client.get(reverse("setup_question"), {},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertTemplateUsed(response, "first-login/question.html")
        try:
            json.loads(response.content)
        except ValueError:
            self.fail("Response JSON could not be decoded.")

    def testSetupComplete(self):
        """
        Check that we can access the complete page of the setup wizard.
        """
        # Test a normal GET request (answer was incorrect).
        response = self.client.get(reverse("setup_complete"), {},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertTemplateUsed(response, "first-login/complete.html")
        try:
            json.loads(response.content)
        except ValueError:
            self.fail("Response JSON could not be decoded.")
        user = User.objects.get(username="user")
        self.assertTrue(user.get_profile().setup_complete,
            "Check that the user has completed the profile setup.")
        # Test a normal POST request (answer was correct).
        profile = user.get_profile()
        profile.setup_complete = False
        profile.save()
        response = self.client.post(reverse("setup_complete"), {},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertTemplateUsed(response, "first-login/complete.html")
        try:
            json.loads(response.content)
        except ValueError:
            self.fail("Response JSON could not be decoded.")
        user = User.objects.get(username="user")
        self.assertTrue(user.get_profile().setup_complete,
            "Check that the user has completed the profile setup.")
| """
home page tests
"""
import json
import datetime
from django.test import TransactionTestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from apps.managers.challenge_mgr import challenge_mgr
from apps.managers.challenge_mgr.models import RoundSetting
from apps.managers.player_mgr.models import Profile
from apps.utils import test_utils
from apps.widgets.help.models import HelpTopic
from apps.widgets.smartgrid import SETUP_WIZARD_ACTIVITY
from apps.widgets.smartgrid.models import Activity
class HomeFunctionalTestCase(TransactionTestCase):
    """Functional test for the home page."""

    def testIndex(self):
        """Check that we can load the index."""
        test_utils.set_competition_round()
        User.objects.create_user("user", "<EMAIL>", password="<PASSWORD>")
        self.client.login(username="user", password="<PASSWORD>")
        # The home widget must be registered before the page can render.
        challenge_mgr.register_page_widget("home", "home")
        response = self.client.get(reverse("home_index"))
        self.failUnlessEqual(response.status_code, 200)
class CompetitionMiddlewareTestCase(TransactionTestCase):
    """Tests that the middleware restricts access outside the competition."""

    def setUp(self):
        User.objects.create_user("user", "<EMAIL>", password="<PASSWORD>")
        self.client.login(username="user", password="<PASSWORD>")

    def testBeforeCompetition(self):
        """
        Check that the user is redirected before the competition starts.
        """
        # Round window lies entirely in the future.
        start = datetime.datetime.today() + datetime.timedelta(days=1)
        end = start + datetime.timedelta(days=7)
        RoundSetting.objects.create(name="Round 1", start=start, end=end)
        response = self.client.get(reverse("home_index"), follow=True)
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response,
            "widgets/home/templates/restricted.html")
        self.assertContains(response, "The competition starts at")

    def testAfterCompetition(self):
        """
        Check that the user is redirected after the competition ends.
        """
        # Round window lies in the past.  NOTE(review): `end` is computed
        # 7 days *before* `start` here — looks intentional for "round over",
        # but confirm the inverted window is what the fixture intends.
        start = datetime.datetime.today() - datetime.timedelta(days=8)
        end = start - datetime.timedelta(days=7)
        RoundSetting.objects.create(name="Round 1", start=start, end=end)
        response = self.client.get(reverse("home_index"), follow=True)
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response,
            "widgets/home/templates/restricted.html")
        self.assertContains(response, "The Kukui Cup is now over")
class SetupWizardFunctionalTestCase(TransactionTestCase):
    """Setup wizard test cases: each test walks one step of the first-login flow."""

    def setUp(self):
        """Create a logged-in user plus the fixtures every wizard step needs."""
        test_utils.set_competition_round()
        self.user = User.objects.create_user("user", "<EMAIL>", password="<PASSWORD>")
        # create the term help-topic
        HelpTopic.objects.create(title="", slug="terms-and-conditions", category="faq", contents="")
        # create the setup activity
        Activity.objects.create(slug=SETUP_WIZARD_ACTIVITY, name="", title="", duration=5)
        challenge_mgr.register_page_widget("home", "home")
        self.client.login(username="user", password="<PASSWORD>")

    def testDisplaySetupWizard(self):
        """Check that the setup wizard is shown for new users."""
        response = self.client.get(reverse("home_index"))
        self.failUnlessEqual(response.status_code, 200)
        self.assertContains(response, "Welcome to the Kukui Cup")

    def testSetupTerms(self):
        """Check that we can access the terms page of the setup wizard."""
        # Wizard steps are fetched via AJAX and must return valid JSON.
        response = self.client.get(reverse("setup_terms"), {},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertTemplateUsed(response, "first-login/terms.html")
        try:
            json.loads(response.content)
        except ValueError:
            self.fail("Response JSON could not be decoded.")

    def testReferralStep(self):
        """
        Test that we can record referral emails from the setup page.
        """
        user2 = User.objects.create_user("user2", "<EMAIL>")
        # Test we can get the referral page.
        response = self.client.get(reverse('setup_referral'), {},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.failUnlessEqual(response.status_code, 200)
        try:
            json.loads(response.content)
        except ValueError:
            self.fail("Response JSON could not be decoded.")
        # Test referring using their own email
        response = self.client.post(reverse('setup_referral'), {
            'referrer_email': self.user.email,
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/referral.html")
        self.assertEqual(len(response.context['form'].errors), 1,
            "Using their own email as referrer should raise an error.")
        # Test referring using the email of a user who is not in the system.
        response = self.client.post(reverse('setup_referral'), {
            'referrer_email': '<EMAIL>',
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/referral.html")
        self.assertEqual(len(response.context['form'].errors), 1,
            'Using external email as referrer should raise an error.')
        # Test bad email.
        response = self.client.post(reverse('setup_referral'), {
            'referrer_email': 'foo',
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.failUnlessEqual(response.status_code, 200)
        self.assertEqual(len(response.context['form'].errors), 1,
            'Using a bad email should insert an error.')
        self.assertTemplateUsed(response, "first-login/referral.html")
        # Staff user should not be able to be referred.
        user2.is_staff = True
        user2.save()
        response = self.client.post(reverse('setup_referral'), {
            'referrer_email': user2.email,
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.failUnlessEqual(response.status_code, 200)
        self.assertEqual(len(response.context['form'].errors), 1,
            'Using an admin as a referrer should raise an error.')
        self.assertTemplateUsed(response, "first-login/referral.html")
        user2.is_staff = False
        user2.save()
        # Test no referrer.
        response = self.client.post(reverse('setup_referral'), {
            'referrer_email': '',
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/profile.html")
        # Test successful referrer
        response = self.client.post(reverse('setup_referral'), {
            'referrer_email': user2.email,
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/profile.html")
        profile = Profile.objects.get(user=self.user)
        self.assertEqual(profile.referring_user,
            user2,
            'User 1 should be referred by user 2.')
        # Test getting the referral page now has user2's email.
        response = self.client.get(reverse('setup_referral'), {},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.failUnlessEqual(response.status_code, 200)
        self.assertContains(response,
            user2.email,
            msg_prefix="Going back to referral page should " \
            "have second user's email.")

    def testSetupProfile(self):
        """Check that we can access the profile page of the setup wizard."""
        profile = self.user.get_profile()
        profile.name = "Test User"
        profile.save()
        response = self.client.get(reverse("setup_profile"), {},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertTemplateUsed(response, "first-login/profile.html")
        self.assertContains(response, profile.name)
        try:
            json.loads(response.content)
        except ValueError:
            self.fail("Response JSON could not be decoded.")

    def testSetupProfileUpdate(self):
        """Check that we can update the profile of the user in the setup
        wizard."""
        profile = self.user.get_profile()
        points = profile.points()
        response = self.client.post(reverse("setup_profile"), {
            "display_name": "<NAME>",
        }, follow=True)
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/activity.html")
        # Completing this step awards a fixed 5-point bonus.
        user = User.objects.get(username="user")
        self.assertEqual(points + 5, user.get_profile().points(),
            "Check that the user has been awarded points.")
        self.assertTrue(user.get_profile().setup_profile,
            "Check that the user has now set up their profile.")
        # Check that updating again does not award more points.
        response = self.client.post(reverse("setup_profile"), {
            "display_name": "<NAME>",
        }, follow=True)
        user = User.objects.get(username="user")
        self.assertEqual(points + 5, user.get_profile().points(),
            "Check that the user was not awarded any more points.")
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/activity.html")

    def testSetupProfileWithoutName(self):
        """Test that there is an error when the user does not supply a
        username."""
        _ = self.user.get_profile()
        response = self.client.post(reverse("setup_profile"), {
            "display_name": "",
        })
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/profile.html")

    def testSetupProfileWithDupName(self):
        """Test that there is an error when the user uses a duplicate display
        name."""
        _ = self.user.get_profile()
        user2 = User.objects.create_user("user2", "<EMAIL>")
        profile2 = user2.get_profile()
        profile2.name = "<NAME>."
        profile2.save()
        response = self.client.post(reverse("setup_profile"), {
            "display_name": "<NAME>.",
        }, follow=True)
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/profile.html")
        self.assertContains(response, "Please use another name.",
            msg_prefix="Duplicate name should raise an error.")
        # Duplicate check should also catch surrounding whitespace.
        response = self.client.post(reverse("setup_profile"), {
            "display_name": " <NAME>. ",
        }, follow=True)
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/profile.html")
        self.assertContains(response, "Please use another name.",
            msg_prefix="Duplicate name with whitespace should raise an error.")
        response = self.client.post(reverse("setup_profile"), {
            "display_name": "<NAME>.",
        }, follow=True)
        self.failUnlessEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "first-login/profile.html")
        self.assertContains(response, "Please use another name.",
            msg_prefix="Duplicate name with whitespace should raise an error.")

    def testSetupActivity(self):
        """Check that we can access the activity page of the setup wizard."""
        response = self.client.get(reverse("setup_activity"), {},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertTemplateUsed(response, "first-login/activity.html")
        try:
            json.loads(response.content)
        except ValueError:
            self.fail("Response JSON could not be decoded.")

    def testSetupQuestion(self):
        """Check that we can access the question page of the setup wizard."""
        response = self.client.get(reverse("setup_question"), {},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertTemplateUsed(response, "first-login/question.html")
        try:
            json.loads(response.content)
        except ValueError:
            self.fail("Response JSON could not be decoded.")

    def testSetupComplete(self):
        """
        Check that we can access the complete page of the setup wizard.
        """
        # Test a normal GET request (answer was incorrect).
        response = self.client.get(reverse("setup_complete"), {},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertTemplateUsed(response, "first-login/complete.html")
        try:
            json.loads(response.content)
        except ValueError:
            self.fail("Response JSON could not be decoded.")
        user = User.objects.get(username="user")
        self.assertTrue(user.get_profile().setup_complete,
            "Check that the user has completed the profile setup.")
        # Test a normal POST request (answer was correct).
        profile = user.get_profile()
        profile.setup_complete = False
        profile.save()
        response = self.client.post(reverse("setup_complete"), {},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertTemplateUsed(response, "first-login/complete.html")
        try:
            json.loads(response.content)
        except ValueError:
            self.fail("Response JSON could not be decoded.")
        user = User.objects.get(username="user")
        self.assertTrue(user.get_profile().setup_complete,
            "Check that the user has completed the profile setup.")
loop-example.py | eltechno/python_course | 4 | 6625193 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 11 11:03:16 2019
@author: techno
"""
def sum_user_numbers(count=4, read=input):
    """Prompt for `count` integers and return their sum.

    `read` is injectable (defaults to builtins.input) so the function can
    be exercised without a console.  Raises ValueError on non-integer input.
    """
    total = 0
    # A for-loop over range() replaces the original manual while/counter.
    for _ in range(count):
        total += int(read("Please type a number then i will add :"))
    return total


if __name__ == "__main__":
    # The original script computed the sum but never used it; show the result.
    # The __main__ guard also stops the prompts from firing on import.
    print(sum_user_numbers())
# =============================================================================
# loop while
# =============================================================================
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 11 11:03:16 2019
@author: techno
"""
def sum_user_numbers(count=4, read=input):
    """Prompt for `count` integers and return their sum.

    `read` is injectable (defaults to builtins.input) so the function can
    be exercised without a console.  Raises ValueError on non-integer input.
    """
    total = 0
    # A for-loop over range() replaces the original manual while/counter.
    for _ in range(count):
        total += int(read("Please type a number then i will add :"))
    return total


if __name__ == "__main__":
    # The original script computed the sum but never used it; show the result.
    # The __main__ guard also stops the prompts from firing on import.
    print(sum_user_numbers())
# =============================================================================
# loop while
# =============================================================================
| en | 0.473555 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Mon Feb 11 11:03:16 2019 @author: techno # ============================================================================= # loop while # ============================================================================= | 4.126171 | 4 |
djangocms_personlist/admin.py | kohout/djangocms-getaweb-personlist | 0 | 6625194 | from django.contrib import admin
from django.utils.translation import ugettext as _
from mptt.admin import MPTTModelAdmin
from .models import Team, Person, Membership, PersonImage
from easy_thumbnails.exceptions import InvalidImageFormatError
from adminsortable.admin import SortableInlineAdminMixin
class PreviewMixin(object):
    """Admin mixin that renders a thumbnail <img> tag for an object's image."""

    def render_preview(self, o):
        """Return an <img> tag for o.image's preview, or '' if unavailable."""
        if not o.image:
            return u''
        try:
            url = o.image['preview'].url
        except InvalidImageFormatError:
            # Source file is corrupt/unreadable; show nothing instead of 500.
            return u''
        return u'<img src="%s">' % url if url else u''

    render_preview.allow_tags = True
    render_preview.short_description = _(u'Preview')
class PersonImageInline(SortableInlineAdminMixin, admin.TabularInline):
    """Sortable inline for a person's gallery images with a thumbnail column."""

    fields = ('render_preview', 'image', 'title', 'alt', 'ordering', )
    readonly_fields = ('render_preview', )
    model = PersonImage
    extra = 0
    sortable_field_name = 'ordering'

    def render_preview(self, person_image):
        """Return an <img> tag for the preview thumbnail, or '' if unavailable.

        Guards against a missing image field and an unreadable source file
        (InvalidImageFormatError), mirroring PreviewMixin.render_preview so a
        broken upload does not crash the admin change form.
        """
        if not person_image.image:
            return u''
        try:
            url = person_image.image['preview'].url
        except InvalidImageFormatError:
            return u''
        if url:
            return u'<img src="%s">' % url
        else:
            return u''

    render_preview.allow_tags = True
    render_preview.short_description = _(u'Preview')
class MembershipInline(SortableInlineAdminMixin, admin.TabularInline):
    """Sortable inline editing of Person/Team membership rows."""
    model = Membership
    extra = 0
class PersonAdmin(PreviewMixin, admin.ModelAdmin):
    """Admin for Person: thumbnail preview, site overview, membership/images inlines."""

    # NOTE: 'get_sites' is a method, which Django does not accept in
    # search_fields (field names / related lookups only); search the
    # related sites' names instead.
    search_fields = ('first_name', 'last_name', 'position', 'sites__name', )
    list_display = ('render_preview', 'first_name', 'last_name', 'position',
                    'get_sites', 'active')
    list_display_links = ('render_preview', 'first_name', 'last_name', )
    fields = (
        ('active', ),
        ('first_name', 'last_name', ),
        ('alias', 'gender', ),
        ('position', 'image', ),
        ('hobbies', ),
        ('abstract', ),
        ('phone', 'email', ),
        ('sites', ),
    )
    inlines = [MembershipInline, PersonImageInline]

    def get_sites(self, obj):
        """Newline-joined names of the sites this person belongs to."""
        return "\n".join([s.name for s in obj.sites.all()])
    get_sites.short_description = _(u'Sites')
class TeamAdmin(PreviewMixin, MPTTModelAdmin):
    """Tree-aware admin for Team with a thumbnail preview and member inline."""

    list_display = ('is_active', 'render_preview', 'name', )
    list_display_links = ('render_preview', 'name', )
    readonly_fields = ('render_preview', )
    inlines = [MembershipInline]
    fields = (
        ('is_active', ),
        ('name', 'parent', ),  # 'parent' places the team in the MPTT tree
        ('image', ),
        ('description', ),
    )
# Expose the person-list models in the Django admin site.
admin.site.register(Team, TeamAdmin)
admin.site.register(Person, PersonAdmin)
| from django.contrib import admin
from django.utils.translation import ugettext as _
from mptt.admin import MPTTModelAdmin
from .models import Team, Person, Membership, PersonImage
from easy_thumbnails.exceptions import InvalidImageFormatError
from adminsortable.admin import SortableInlineAdminMixin
class PreviewMixin(object):
def render_preview(self, o):
if not o.image:
return u''
try:
url = o.image['preview'].url
except InvalidImageFormatError:
return u''
if url:
return u'<img src="%s">' % url
else:
return u''
render_preview.allow_tags = True
render_preview.short_description = _(u'Preview')
class PersonImageInline(SortableInlineAdminMixin, admin.TabularInline):
fields = ('render_preview', 'image', 'title', 'alt', 'ordering', )
readonly_fields = ('render_preview', )
model = PersonImage
extra = 0
sortable_field_name = 'ordering'
def render_preview(self, person_image):
url = person_image.image['preview'].url
if url:
return u'<img src="%s">' % url
else:
return u''
render_preview.allow_tags = True
render_preview.short_description = _(u'Preview')
class MembershipInline(SortableInlineAdminMixin, admin.TabularInline):
model = Membership
extra = 0
class PersonAdmin(PreviewMixin, admin.ModelAdmin):
search_fields = ('first_name', 'last_name', 'position', 'get_sites', )
list_display = ('render_preview', 'first_name', 'last_name', 'position', 'get_sites', 'active')
list_display_links = ('render_preview', 'first_name', 'last_name', )
fields = (
('active', ),
('first_name', 'last_name', ),
('alias', 'gender', ),
('position', 'image', ),
('hobbies', ),
('abstract', ),
('phone', 'email', ),
('sites', ),
)
inlines = [MembershipInline, PersonImageInline]
def get_sites(self, obj):
return "\n".join([s.name for s in obj.sites.all()])
class TeamAdmin(PreviewMixin, MPTTModelAdmin):
list_display = ('is_active', 'render_preview', 'name', )
list_display_links = ('render_preview', 'name', )
readonly_fields = ('render_preview', )
inlines = [MembershipInline]
fields = (
('is_active', ),
('name', 'parent', ),
('image', ),
('description', ),
)
admin.site.register(Team, TeamAdmin)
admin.site.register(Person, PersonAdmin)
| none | 1 | 1.929153 | 2 | |
plurkenv.py | chickenzord/plurk-cli | 6 | 6625195 | import os
from plurk_oauth.PlurkAPI import PlurkAPI
from os.path import join, dirname
from dotenv import load_dotenv
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
CONSUMER_KEY = os.environ.get("CONSUMER_KEY")
CONSUMER_SECRET = os.environ.get("CONSUMER_SECRET")
APP_TOKEN = os.environ.get("APP_TOKEN")
APP_SECRET = os.environ.get("APP_SECRET")
def init(consumer_key = CONSUMER_KEY, consumer_secret = CONSUMER_SECRET, app_token = APP_TOKEN, app_secret = APP_SECRET):
return PlurkAPI(consumer_key, consumer_secret, app_token, app_secret)
| import os
from plurk_oauth.PlurkAPI import PlurkAPI
from os.path import join, dirname
from dotenv import load_dotenv
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
CONSUMER_KEY = os.environ.get("CONSUMER_KEY")
CONSUMER_SECRET = os.environ.get("CONSUMER_SECRET")
APP_TOKEN = os.environ.get("APP_TOKEN")
APP_SECRET = os.environ.get("APP_SECRET")
def init(consumer_key = CONSUMER_KEY, consumer_secret = CONSUMER_SECRET, app_token = APP_TOKEN, app_secret = APP_SECRET):
return PlurkAPI(consumer_key, consumer_secret, app_token, app_secret)
| none | 1 | 2.111176 | 2 | |
deepstream/app/utils/misc.py | ml6team/deepstream-python | 21 | 6625196 | <reponame>ml6team/deepstream-python<filename>deepstream/app/utils/misc.py
import ctypes
import sys
sys.path.append('/opt/nvidia/deepstream/deepstream/lib')
def long_to_int(long):
value = ctypes.c_int(long & 0xffffffff).value
return value
| import ctypes
import sys
sys.path.append('/opt/nvidia/deepstream/deepstream/lib')
def long_to_int(long):
value = ctypes.c_int(long & 0xffffffff).value
return value | none | 1 | 2.007619 | 2 | |
deploy_nltk.py | wolfsinem/product-tagging | 0 | 6625197 | <filename>deploy_nltk.py
"""
This file is only for the deployment of the tags generator based on the input
text given by the user. We will use the NLTK library for this http://www.nltk.org/howto/
"""
from collections import Counter
from nltk.corpus import stopwords
import nltk
# First we import the tokenize_string function we made in the tags_generator.py
# file and use this to split the given input string into substrings using regular
# expression using RegexpTokenizer. Additionally it counts the occurence of each
# word and returns the top x words which can be used as tags
# The second function we use is the tokenized_list() function.
# This is almost the same as the original one in our tags_generator.py file
# but since we only take in the user text input rather than a CSV file its slightly
# different.
def tokenize_user_text_input(sentence, size_tags):
"""This function splits a string into substrings using a regular expression
using RegexpTokenizer. Additionally it counts the occurence of each word
and returns the top x words which can be used as tags
:param sentence: Text description of a product
:type sentence: string
"""
tokenizer = nltk.RegexpTokenizer(r"\w+")
new_words = tokenizer.tokenize(str(sentence))
new_words = [token.lower() for token in new_words]
stop_words = set(stopwords.words('english'))
filter_tokens = [w for w in new_words if not w in stop_words]
count_terms = Counter(filter_tokens).most_common(size_tags)
count_terms = [item[0] for item in count_terms]
token_lists = []
for i in count_terms:
token_lists.append(i)
token_lists = [item for item in token_lists if not item.isdigit()]
return token_lists
if __name__ == "__main__":
# user_input = """The legend continues to live in the Nike Air Force 1 '07 - Men's, a
# modern version of the iconic AF1, combining classic style and modern
# details. The low design offers optimum soil adhesion and a classic
# look. This version of the Nike Air Force 1 features rippled leather
# edges for a cleaner, slimmer line and more refined details. The
# leather and fabric upper features external layers positioned at
# strategic points for a lifetime durability and support. The
# perforated inserts favor the breathability to keep the foot always
# fresh and dry.")"""
user_input = input("Enter a (product) description here: \n")
print("\n")
N = 10
generator = tokenize_user_text_input(user_input,N)
print("The generated set of tags are: \n")
for tag in generator:
print(tag)
print("\n") | <filename>deploy_nltk.py
"""
This file is only for the deployment of the tags generator based on the input
text given by the user. We will use the NLTK library for this http://www.nltk.org/howto/
"""
from collections import Counter
from nltk.corpus import stopwords
import nltk
# First we import the tokenize_string function we made in the tags_generator.py
# file and use this to split the given input string into substrings using regular
# expression using RegexpTokenizer. Additionally it counts the occurence of each
# word and returns the top x words which can be used as tags
# The second function we use is the tokenized_list() function.
# This is almost the same as the original one in our tags_generator.py file
# but since we only take in the user text input rather than a CSV file its slightly
# different.
def tokenize_user_text_input(sentence, size_tags):
"""This function splits a string into substrings using a regular expression
using RegexpTokenizer. Additionally it counts the occurence of each word
and returns the top x words which can be used as tags
:param sentence: Text description of a product
:type sentence: string
"""
tokenizer = nltk.RegexpTokenizer(r"\w+")
new_words = tokenizer.tokenize(str(sentence))
new_words = [token.lower() for token in new_words]
stop_words = set(stopwords.words('english'))
filter_tokens = [w for w in new_words if not w in stop_words]
count_terms = Counter(filter_tokens).most_common(size_tags)
count_terms = [item[0] for item in count_terms]
token_lists = []
for i in count_terms:
token_lists.append(i)
token_lists = [item for item in token_lists if not item.isdigit()]
return token_lists
if __name__ == "__main__":
# user_input = """The legend continues to live in the Nike Air Force 1 '07 - Men's, a
# modern version of the iconic AF1, combining classic style and modern
# details. The low design offers optimum soil adhesion and a classic
# look. This version of the Nike Air Force 1 features rippled leather
# edges for a cleaner, slimmer line and more refined details. The
# leather and fabric upper features external layers positioned at
# strategic points for a lifetime durability and support. The
# perforated inserts favor the breathability to keep the foot always
# fresh and dry.")"""
user_input = input("Enter a (product) description here: \n")
print("\n")
N = 10
generator = tokenize_user_text_input(user_input,N)
print("The generated set of tags are: \n")
for tag in generator:
print(tag)
print("\n") | en | 0.84611 | This file is only for the deployment of the tags generator based on the input text given by the user. We will use the NLTK library for this http://www.nltk.org/howto/ # First we import the tokenize_string function we made in the tags_generator.py # file and use this to split the given input string into substrings using regular # expression using RegexpTokenizer. Additionally it counts the occurence of each # word and returns the top x words which can be used as tags # The second function we use is the tokenized_list() function. # This is almost the same as the original one in our tags_generator.py file # but since we only take in the user text input rather than a CSV file its slightly # different. This function splits a string into substrings using a regular expression using RegexpTokenizer. Additionally it counts the occurence of each word and returns the top x words which can be used as tags :param sentence: Text description of a product :type sentence: string # user_input = """The legend continues to live in the Nike Air Force 1 '07 - Men's, a # modern version of the iconic AF1, combining classic style and modern # details. The low design offers optimum soil adhesion and a classic # look. This version of the Nike Air Force 1 features rippled leather # edges for a cleaner, slimmer line and more refined details. The # leather and fabric upper features external layers positioned at # strategic points for a lifetime durability and support. The # perforated inserts favor the breathability to keep the foot always # fresh and dry.")""" | 3.635355 | 4 |
bootstrapeg.py | lessen/src | 0 | 6625198 | from eg import eg
from random import random as r
from bootstrap import bootstrap as bst
from time import process_time as now
import random
def base0(n):
return [r() for _ in range(n)]
@eg
def _b0(n=30,div=100, boo=bst,same=None,f=base0):
print(boo.__name__)
base = f(n)
same0 = None
t0 = None
other=None
for conf in [90,95,99]:
print("")
for b in [32,64,128,256,512,1024]:
report=[]
t=0
for n in range(0,10,2):
other = [x+ (r()*n/div) for x in base]
t1 = now()
same = boo(base, other,b=b,conf=conf)
same0 = same if same0 == None else same0
t2 = now()
t += t2 - t1
report += ["=" if same else "."]
t0 = t if t0 == None else t0
print(''.join(report),dict(conf=conf,b=b,time=round(t/t0,2)))
print("list first:",[round(x,2) for x in sorted(base)[::2]])
print("list last:",[round(x,2) for x in sorted(other)[::2]])
assert same0,"first must be the same"
assert not same,"last should be different"
@eg
def _b1():
for k in [0.5,1,2,4]:
print("")
print(dict(shape=k))
f = lambda n: [random.weibullvariate(1,k)
for _ in range(n)]
_b0(f=f)
if __name__ == "__main__": eg()
| from eg import eg
from random import random as r
from bootstrap import bootstrap as bst
from time import process_time as now
import random
def base0(n):
return [r() for _ in range(n)]
@eg
def _b0(n=30,div=100, boo=bst,same=None,f=base0):
print(boo.__name__)
base = f(n)
same0 = None
t0 = None
other=None
for conf in [90,95,99]:
print("")
for b in [32,64,128,256,512,1024]:
report=[]
t=0
for n in range(0,10,2):
other = [x+ (r()*n/div) for x in base]
t1 = now()
same = boo(base, other,b=b,conf=conf)
same0 = same if same0 == None else same0
t2 = now()
t += t2 - t1
report += ["=" if same else "."]
t0 = t if t0 == None else t0
print(''.join(report),dict(conf=conf,b=b,time=round(t/t0,2)))
print("list first:",[round(x,2) for x in sorted(base)[::2]])
print("list last:",[round(x,2) for x in sorted(other)[::2]])
assert same0,"first must be the same"
assert not same,"last should be different"
@eg
def _b1():
for k in [0.5,1,2,4]:
print("")
print(dict(shape=k))
f = lambda n: [random.weibullvariate(1,k)
for _ in range(n)]
_b0(f=f)
if __name__ == "__main__": eg()
| none | 1 | 2.517369 | 3 | |
net/migrations/0012_auto_20170701_1535.py | dehu4ka/lna | 0 | 6625199 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-01 10:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('net', '0011_credentials_equipment'),
]
operations = [
migrations.AlterField(
model_name='equipment',
name='ne_ip',
field=models.GenericIPAddressField(db_index=True, protocol='IPv4', unique=True),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-01 10:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('net', '0011_credentials_equipment'),
]
operations = [
migrations.AlterField(
model_name='equipment',
name='ne_ip',
field=models.GenericIPAddressField(db_index=True, protocol='IPv4', unique=True),
),
]
| en | 0.727278 | # -*- coding: utf-8 -*- # Generated by Django 1.11.2 on 2017-07-01 10:35 | 1.533723 | 2 |
fragbuilder/bio_pdb/Model.py | larsbratholm/fragbuilder | 0 | 6625200 | <filename>fragbuilder/bio_pdb/Model.py
# Copyright (C) 2002, <NAME> (<EMAIL>)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Model class, used in Structure objects."""
from .Entity import Entity
class Model(Entity):
"""
The object representing a model in a structure. In a structure
derived from an X-ray crystallography experiment, only a single
model will be present (with some exceptions). NMR structures
normally contain many different models.
"""
def __init__(self, id, serial_num = None):
"""
Arguments:
o id - int
o serial_num - int
"""
self.level="M"
if serial_num is None:
self.serial_num=id
else:
self.serial_num=serial_num
Entity.__init__(self, id)
# Private methods
def _sort(self, c1, c2):
"""Sort the Chains instances in the Model instance.
Chain instances are sorted alphabetically according to
their chain id. Blank chains come last, as they often consist
of waters.
Arguments:
o c1, c2 - Chain objects
"""
id1=c1.get_id()
id2= c2.get_id()
# make sure blank chains come last (often waters)
if id1==" " and not id2==" ":
return 1
elif id2==" " and not id1==" ":
return -1
return cmp(id1, id2)
# Special methods
def __repr__(self):
return "<Model id=%s>" % self.get_id()
# Public
def get_residues(self):
for c in self:
for r in c:
yield r
def get_atoms(self):
for r in self.get_residues():
for a in r:
yield a
| <filename>fragbuilder/bio_pdb/Model.py
# Copyright (C) 2002, <NAME> (<EMAIL>)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Model class, used in Structure objects."""
from .Entity import Entity
class Model(Entity):
"""
The object representing a model in a structure. In a structure
derived from an X-ray crystallography experiment, only a single
model will be present (with some exceptions). NMR structures
normally contain many different models.
"""
def __init__(self, id, serial_num = None):
"""
Arguments:
o id - int
o serial_num - int
"""
self.level="M"
if serial_num is None:
self.serial_num=id
else:
self.serial_num=serial_num
Entity.__init__(self, id)
# Private methods
def _sort(self, c1, c2):
"""Sort the Chains instances in the Model instance.
Chain instances are sorted alphabetically according to
their chain id. Blank chains come last, as they often consist
of waters.
Arguments:
o c1, c2 - Chain objects
"""
id1=c1.get_id()
id2= c2.get_id()
# make sure blank chains come last (often waters)
if id1==" " and not id2==" ":
return 1
elif id2==" " and not id1==" ":
return -1
return cmp(id1, id2)
# Special methods
def __repr__(self):
return "<Model id=%s>" % self.get_id()
# Public
def get_residues(self):
for c in self:
for r in c:
yield r
def get_atoms(self):
for r in self.get_residues():
for a in r:
yield a
| en | 0.93612 | # Copyright (C) 2002, <NAME> (<EMAIL>) # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. Model class, used in Structure objects. The object representing a model in a structure. In a structure derived from an X-ray crystallography experiment, only a single model will be present (with some exceptions). NMR structures normally contain many different models. Arguments: o id - int o serial_num - int # Private methods Sort the Chains instances in the Model instance. Chain instances are sorted alphabetically according to their chain id. Blank chains come last, as they often consist of waters. Arguments: o c1, c2 - Chain objects # make sure blank chains come last (often waters) # Special methods # Public | 3.035362 | 3 |
src/BehaviorTaskMaster/emotionTasks/emotionStim/emotionstimtask.py | FongAnthonyM/BehaviorTaskMaster | 0 | 6625201 | <reponame>FongAnthonyM/BehaviorTaskMaster
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" emotionstimtask.py
Description:
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, <NAME>"
__credits__ = ["<NAME>"]
__license__ = ""
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = ""
__status__ = "Prototype"
# Default Libraries #
import sys
import pathlib
import copy
import datetime
# Downloaded Libraries #
from PySide2 import QtGui, QtWidgets, QtMultimedia
from PySide2.QtCore import QDir
from PySide2.QtGui import QKeySequence
from PySide2.QtWidgets import QWidget, QAction, QFileDialog, QAbstractItemView, QStyle
# Local Libraries #
from ...utility.iotriggers import AudioTrigger
from ...utility.eventlogger import SubjectEventLogger
from ...QtUtility.utilitywidgets import WidgetContainer, WidgetContainerSequencer
from ...QtUtility.taskwidgets import TaskWindow
from ..emotionwidgets import EmotionInstructions, EmotionWashout, EmotionFinish, EmotionVideoPlayer, EmotionQuestionnaireImage
from ..emotionCategorization.UI.emotionparameters import Ui_EmotionParameters
from ..UI.emotioncontrol import Ui_EmotionControl
# Definitions #
# Constants #
START_DIR = ""
# Classes #
class EmotionStimTask:
EXPERIMENT_NAME = "Emotion Stimulation Control"
def __init__(self, parent=None, stack=None, r_widget=None):
self.parent = parent
self.widget_stack = stack
self.return_widget = r_widget
self.trigger = AudioTrigger()
self.trigger.audio_device.device = 3
self.trigger.add_square_wave('square_wave', amplitude=5, samples=22000)
self.trigger.current_waveform = 'square_wave'
self.task_window = TaskWindow()
self.events = SubjectEventLogger(io_trigger=self.trigger)
self.sequencer = WidgetContainerSequencer()
self.task_window.sequencer = self.sequencer
self.parameters = EmotionParameters()
self.control = EmotionControl(events=self.events, x_name=self.EXPERIMENT_NAME)
self.instructions = EmotionInstructions(path=pathlib.Path(__file__).parent.joinpath('instructions.txt'),
events=self.events)
self.video_player = EmotionVideoPlayer(events=self.events)
self.questionnaire = EmotionQuestionnaireImage(events=self.events)
self.washout = EmotionWashout(events=self.events)
self.finished = EmotionFinish(events=self.events)
self.block_widgets = {'instructions': self.instructions, 'video_player': self.video_player,
'questionnaire': self.questionnaire, 'washout': self.washout, 'finish': self.finished}
self.sequence_order = ['instructions', '*block*', 'washout', 'finish']
self.block_order = ['questionnaire']
def load_task(self, stack=None):
if stack is not None:
self.widget_stack = stack
if self.return_widget is None:
_, self.return_widget, _ = self.widget_stack.current()
self.widget_stack.load(self.parameters)
self.widget_stack.load(self.control)
self.task_window.load(self.instructions)
self.task_window.load(self.washout)
self.task_window.load(self.video_player)
self.task_window.load(self.questionnaire)
self.task_window.load(self.finished)
self.control.task_window = self.task_window
self.control.sequencer = self.sequencer
self.control.sequence_order = self.sequence_order
self.control.parameters = self.parameters.parameters
self.control.block_widgets = self.block_widgets
self.control.player = self.video_player
def unload_task(self, back=True, clear_widget=False):
if back:
self.widget_stack.set(self.return_widget)
self.widget_stack.unload(self.parameters, back=False, clear_widget=clear_widget)
self.widget_stack.unload(self.control, back=False, clear_widget=clear_widget)
self.task_window.close()
self.task_window.unload(self.instructions, back=False, clear_widget=clear_widget)
self.task_window.unload(self.washout, back=False, clear_widget=clear_widget)
self.task_window.unload(self.video_player, back=False, clear_widget=clear_widget)
self.task_window.unload(self.questionnaire, back=False, clear_widget=clear_widget)
def setup_task(self):
self.parameters.run(self.control_task, self.unload_task)
def control_task(self):
self.control.run(self.parameters.run)
class EmotionParameters(WidgetContainer):
def __init__(self, name="EmotionParameters", init=False):
WidgetContainer.__init__(self, name, init)
self.ok_action = None
self.back_action = self.remove_from_stack
self._parameters = None
@property
def parameters(self):
try:
out = self.widget.parameters
self._parameters = out
except:
out = self._parameters
return out
@property
def loops(self):
return self.widget.loops
@property
def randomize(self):
return self.widget.randomize
def construct_widget(self):
self.widget = ParametersWidget()
def run(self, ok_action=None, back_action=None):
if ok_action is not None:
self.ok_action = ok_action
if back_action is not None:
self.back_action = back_action
self.widget.ok_action = self.ok_action
self.widget.back_action = self.back_action
super().run()
class ParametersWidget(QWidget):
header = ('Video', 'Questions', 'Video Path', 'Question Path')
v_types = ('*.avi', '*.mp4', '*.ogg', '*.qt', '*.wmv', '*.yuv')
q_types = ('*.toml',)
def __init__(self):
super(ParametersWidget, self).__init__()
self.ok_action = self.default_ok
self.back_action = self.default_back
self._parameters = {}
self.subject = []
self.session = []
self.blocks = []
self.ui = Ui_EmotionParameters()
self.ui.setupUi(self)
self.list_model = None
self._construct_video_list()
self.deleteAction = None
self._construct_deleteAction()
self.okAction = None
self._construct_okAction()
self._construct_backAction()
@property
def parameters(self):
self._parameters['subject'] = self.subject
self._parameters['session'] = self.session
self._parameters['blocks'] = self.blocks
return self._parameters
@property
def static_parameters(self):
self._parameters['blocks'] = self.blocks
self._parameters['loops'] = self.loops
self._parameters['randomize'] = self.randomize
return copy.deepcopy(self._parameters)
def _construct_video_list(self):
self.list_model = QtGui.QStandardItemModel(0, 4)
self.list_model.setHorizontalHeaderLabels(self.header)
self.ui.videoList.setModel(self.list_model)
self.ui.videoList.setDragDropMode(QAbstractItemView.InternalMove)
self.ui.videoList.setSelectionMode(QAbstractItemView.MultiSelection)
self.ui.videoList.setColumnWidth(0, 200)
self.ui.videoList.setColumnWidth(1, 200)
self.ui.videoList.setColumnWidth(2, 100)
self.ui.videoList.setColumnWidth(3, 100)
self.ui.videoList.doubleClicked.connect(self.double_click)
self.ui.addVideoButton.clicked.connect(self.add_videos)
self.ui.addQuestionsButton.clicked.connect(self.add_questions)
self.ui.videoDirectory.clicked.connect(self.video_directory)
self.ui.questionDirectory.clicked.connect(self.question_directory)
self.ui.deleteLastButton.clicked.connect(self.delete_last)
self.ui.clearAll.clicked.connect(self.clear_all)
def _construct_deleteAction(self):
self.deleteAction = QAction("delete", self)
self.deleteAction.setShortcut(QKeySequence.Delete)
self.deleteAction.triggered.connect(self.delete_key)
self.addAction(self.deleteAction)
def _construct_okAction(self):
self.okAction = QAction("OK", self)
self.okAction.setShortcut(QKeySequence("Shift+Return"))
self.okAction.triggered.connect(self.ok_action)
self.addAction(self.okAction)
self.ui.okButton.clicked.connect(self.ok)
def _construct_backAction(self):
self.ui.backButton.clicked.connect(self.back)
def double_click(self, index):
if index.column() in (0, 2):
self.change_video(index.row())
elif index.column() in (1, 3):
self.change_question(index.row())
def delete_key(self):
fw = self.focusWidget()
if fw is self.ui.videoList:
self.delete_video()
def find_last_row(self, item=''):
end = self.list_model.rowCount()
index = -1
for i in reversed(range(0, end)):
video = self.list_model.item(i, 0).text()
question = self.list_model.item(i, 1).text()
if item == 'video':
text = video
elif item == 'question':
text = question
elif item == 'video&question':
text = video + question
else:
break
if text == '':
index = i
else:
break
return index
def add_item(self, video='', question='', index=-1):
# Make Row Objects
video_name = QtGui.QStandardItem(pathlib.Path(video).name)
questions_name = QtGui.QStandardItem(pathlib.Path(question).name)
videos = QtGui.QStandardItem(video)
questions = QtGui.QStandardItem(question)
# Row Settings
video_name.setEditable(False)
video_name.setDragEnabled(True)
video_name.setDropEnabled(False)
questions_name.setEditable(False)
questions_name.setDropEnabled(False)
videos.setEditable(False)
videos.setDropEnabled(False)
questions.setEditable(False)
if index == -1:
index = self.list_model.rowCount()
self.list_model.appendRow(video_name)
else:
self.list_model.insertRow(index, video_name)
self.list_model.setItem(index, 1, questions_name)
self.list_model.setItem(index, 2, videos)
self.list_model.setItem(index, 3, questions)
def edit_item(self, index=None, video='', question=''):
if index is None:
item = ''
if video != '' and question != '':
item = 'video&question'
elif video != '':
item = 'video'
elif question != '':
item = 'question'
index = self.find_last_row(item=item)
videos_name = self.list_model.item(index, 0)
questions_name = self.list_model.item(index, 1)
videos = self.list_model.item(index, 2)
questions = self.list_model.item(index, 3)
if video != '':
videos_name.setText(pathlib.Path(video).name)
videos.setText(video)
if question != '':
questions_name.setText(pathlib.Path(question).name)
questions.setText(question)
def change_video(self, row):
start_dir = pathlib.Path.home()
other = start_dir.joinpath(START_DIR)
if other.is_dir():
start_dir = other
dialog = QFileDialog(self, caption="Open Video", directory=start_dir.as_posix())
dialog.setFileMode(QFileDialog.ExistingFile)
dialog.setViewMode(QFileDialog.Detail)
if dialog.exec_():
video_name = self.list_model.item(row, 0)
videos = self.list_model.item(row, 2)
v = dialog.selectedFiles()[0]
video_name.setText(pathlib.Path(v).name)
videos.setText(v)
def change_question(self, row):
start_dir = pathlib.Path.home()
other = start_dir.joinpath(START_DIR)
if other.is_dir():
start_dir = other
dialog = QFileDialog(self, caption="Open Question", directory=start_dir.as_posix())
dialog.setFileMode(QFileDialog.ExistingFile)
dialog.setViewMode(QFileDialog.Detail)
if dialog.exec_():
questions_name = self.list_model.item(row, 1)
questions = self.list_model.item(row, 3)
q = dialog.selectedFiles()[0]
questions_name.setText(pathlib.Path(q).name)
questions.setText(q)
def add_videos(self):
start_dir = pathlib.Path.home()
other = start_dir.joinpath(START_DIR)
if other.is_dir():
start_dir = other
dialog = QFileDialog(self, caption="Open Video", directory=start_dir.as_posix())
dialog.setFileMode(QFileDialog.ExistingFiles)
dialog.setViewMode(QFileDialog.Detail)
if dialog.exec_():
video_names = dialog.selectedFiles()
for video in video_names:
last = self.find_last_row('video')
if last == -1:
self.add_item(video=video)
else:
self.edit_item(index=last, video=video)
def add_questions(self):
start_dir = pathlib.Path.home()
other = start_dir.joinpath(START_DIR)
if other.is_dir():
start_dir = other
dialog = QFileDialog(self, caption="Open Questions", directory=start_dir.as_posix())
dialog.setFileMode(QFileDialog.ExistingFiles)
dialog.setViewMode(QFileDialog.Detail)
if dialog.exec_():
question_names = dialog.selectedFiles()
for question in question_names:
last = self.find_last_row('question')
if last == -1:
self.add_item(question=question)
else:
self.edit_item(index=last, question=question)
def video_directory(self):
start_dir = pathlib.Path.home()
other = start_dir.joinpath(START_DIR)
if other.is_dir():
start_dir = other
dialog = QFileDialog(self, caption="Open Video Directory", directory=start_dir.as_posix())
dialog.setFileMode(QFileDialog.Directory)
dialog.setViewMode(QFileDialog.Detail)
if dialog.exec_():
dir_names = dialog.selectedFiles()
dir_path = pathlib.Path(dir_names[0])
files = []
for ext in self.v_types:
files.extend(dir_path.glob(ext))
for video in files:
last = self.find_last_row('video')
if last == -1:
self.add_item(video=str(video))
else:
self.edit_item(index=last, video=str(video))
def question_directory(self):
start_dir = pathlib.Path.home()
other = start_dir.joinpath(START_DIR)
if other.is_dir():
start_dir = other
dialog = QFileDialog(self, caption="Open Questions Directory", directory=start_dir.as_posix())
dialog.setFileMode(QFileDialog.Directory)
dialog.setViewMode(QFileDialog.Detail)
if dialog.exec_():
dir_names = dialog.selectedFiles()
dir_path = pathlib.Path(dir_names[0])
files = []
if len(self.q_types) < 1 or '*' in self.q_types:
files = dir_path.iterdir()
else:
for ext in self.q_types:
files.extend(dir_path.glob(ext))
for question in files:
last = self.find_last_row('question')
if last == -1:
self.add_item(question=str(question))
else:
self.edit_item(index=last, question=str(question))
def delete_last(self):
last = self.list_model.rowCount() - 1
self.list_model.removeRow(last)
def delete_video(self):
items = self.ui.videoList.selectedIndexes()
indices = []
for i in items:
indices.append(i.row())
indices.sort(reverse=True)
for i in indices:
self.list_model.removeRow(i)
def clear_all(self):
self.list_model.clear()
self.list_model.setHorizontalHeaderLabels(self.header)
self.ui.videoList.setColumnWidth(0, 200)
self.ui.videoList.setColumnWidth(1, 200)
self.ui.videoList.setColumnWidth(2, 100)
self.ui.videoList.setColumnWidth(3, 100)
def evaluate(self):
self.subject.clear()
self.session.clear()
self.blocks.clear()
self.subject.append(self.ui.subjectIDEdit.text())
self.session.append(self.ui.blockEdit.text())
for i in range(0, self.list_model.rowCount()):
video = pathlib.Path(self.list_model.item(i, 2).text())
question = pathlib.Path(self.list_model.item(i, 3).text())
washout = self.ui.washoutBox.value()
self.blocks.append({'video': video, 'questions': question, 'washout': washout})
def ok(self):
self.evaluate()
self.ok_action()
def default_ok(self):
print("Not Connected")
def back(self):
self.back_action()
def default_back(self):
sys.exit()
class EmotionControl(WidgetContainer):
    """Widget container wrapping the experiment control panel.

    Thin delegation layer around ``ControlWidget``: the task window,
    sequencer, block widgets, sequence order, player, parameters, and
    event logger are all forwarded to the underlying widget, which is
    built lazily by ``construct_widget``.
    """

    def __init__(self, name="EmotionControl", x_name="", events=None, init=False):
        WidgetContainer.__init__(self, name, init)
        self.back_action = self.remove_from_stack
        self.experiment_name = x_name
        # Cached until the widget exists; see the ``events`` property.
        self._events = events

    @property
    def task_window(self):
        """Window the subject-facing widgets are shown in (delegated)."""
        return self.widget.task_window

    @task_window.setter
    def task_window(self, value):
        self.widget.task_window = value

    @property
    def sequencer(self):
        """Widget sequencer that drives the task flow (delegated)."""
        return self.widget.sequencer

    @sequencer.setter
    def sequencer(self, value):
        self.widget.sequencer = value

    @property
    def block_widgets(self):
        """Mapping of stage name to widget container (delegated)."""
        return self.widget.block_widgets

    @block_widgets.setter
    def block_widgets(self, value):
        self.widget.block_widgets = value

    @property
    def sequence_order(self):
        """Ordered stage names, with ``'*block*'`` marking the block loop."""
        return self.widget.sequence_order

    @sequence_order.setter
    def sequence_order(self, value):
        self.widget.sequence_order = value

    @property
    def player(self):
        """Video player widget (delegated)."""
        return self.widget.player

    @player.setter
    def player(self, value):
        self.widget.player = value

    @property
    def parameters(self):
        # BUG FIX: this getter previously returned ``self.widget.paremeters``
        # (typo), raising AttributeError on every read; the setter below
        # shows the intended attribute name.
        return self.widget.parameters

    @parameters.setter
    def parameters(self, value):
        self.widget.parameters = value

    @property
    def events(self):
        """Event logger: the widget's copy once built, else the cached one."""
        try:
            out = self.widget.events
        except AttributeError:
            out = self._events
        return out

    @events.setter
    def events(self, value):
        self._events = value
        if self.widget is not None:
            self.widget.events = value

    def construct_widget(self):
        """Build the ControlWidget and hand it the cached logger and name."""
        self.widget = ControlWidget()
        self.widget.events = self._events
        self.widget.experiment_name = self.experiment_name

    def run(self, back_action=None):
        """Finish wiring the widget (callbacks, blocks) and display it."""
        if back_action is not None:
            self.back_action = back_action
        self.widget.back_action = self.back_action
        self.widget.construct()
        self.widget.construct_blocks()
        super().run()
class ControlWidget(QWidget):
    """Operator-facing control panel for the emotion stimulation task.

    Manages the queued/playing/completed block views, the media-player
    transport controls, and the widget sequencer that presents the
    instructions, videos, questionnaires, and washouts to the subject.
    """

    # Column headers shared by the queued/playing/completed block views.
    header = ('Video', 'Questions', 'Washout', '')

    def __init__(self, player=None, init=False, **kwargs):
        super().__init__(**kwargs)
        # Callbacks that the owning container may replace after construction.
        self.back_action = self.default_back
        self.start_action = self.default_start
        self.ui = Ui_EmotionControl()
        self.ui.setupUi(self)
        # Standard Qt transport icons.
        self.play_icon = self.style().standardIcon(QStyle.SP_MediaPlay)
        self.pause_icon = self.style().standardIcon(QStyle.SP_MediaPause)
        self.stop_icon = self.style().standardIcon(QStyle.SP_MediaStop)
        self.skip_icon = self.style().standardIcon(QStyle.SP_MediaSkipForward)
        self.volume_icon = self.style().standardIcon(QStyle.SP_MediaVolume)
        self.mute_icon = self.style().standardIcon(QStyle.SP_MediaVolumeMuted)
        self._path = None
        self.subject = None
        self.session = None
        self.experiment_name = None
        self.events = None           # SubjectEventLogger supplied by the container
        self.m_duration = 0          # media duration in (possibly fractional) seconds
        self.mute = False
        self.task_window = None
        self.sequencer = None
        self._player = None
        self.media_player = None
        self.player = player         # property setter also caches media_player
        self.parameters = None       # dict set by the container before construct()
        self.block_widgets = None
        self.block_sequence = -1
        self.sequence_order = []
        self.running = False
        self.blocks = None
        if init:
            self.construct()

    @property
    def path(self):
        """Event-log output path; always a ``pathlib.Path`` or ``None``."""
        return self._path

    @path.setter
    def path(self, value):
        if isinstance(value, pathlib.Path) or value is None:
            self._path = value
        else:
            self._path = pathlib.Path(value)

    @property
    def player(self):
        """Video-player widget; assigning one also caches its media player."""
        return self._player

    @player.setter
    def player(self, value):
        self._player = value
        if value is not None:
            self.media_player = value.media_player

    def construct(self):
        """Wire all UI actions once parameters and player are in place."""
        self.subject = self.parameters['subject'][0]
        self.session = self.parameters['session'][0]
        self._construct_startAction()
        self._construct_backAction()
        self._construct_showAction()
        self._construct_fullScreenAction()
        self._construct_player_controls()
        self._construct_volume_controls()
        self.update_buttons(self.media_player.state())

    def construct_path(self):
        """Build a unique event-log path: ``subject_session_timestamp.h5``."""
        # ':' is not a legal filename character on Windows, hence the '~'.
        now = datetime.datetime.now().isoformat('_', 'seconds').replace(':', '~')
        file_name = self.parameters['subject'][0] + '_' + self.parameters['session'][0] + '_' + now + '.h5'
        return pathlib.Path(__file__).parent.joinpath(file_name)

    def construct_blocks(self):
        """Create the playing/completed models and populate the queue."""
        self.blocks = self.parameters['blocks']
        self._construct_queue()
        self.playing_model = QtGui.QStandardItemModel(0, 4)
        self.playing_model.setHorizontalHeaderLabels(self.header)
        self.ui.playingBlock.setModel(self.playing_model)
        self.ui.playingBlock.setColumnWidth(2, 75)
        self.ui.playingBlock.setColumnWidth(3, 25)
        self.complete_model = QtGui.QStandardItemModel(0, 4)
        self.complete_model.setHorizontalHeaderLabels(self.header)
        self.ui.completedBlocks.setModel(self.complete_model)
        self.ui.completedBlocks.setColumnWidth(2, 75)
        self.ui.completedBlocks.setColumnWidth(3, 25)

    def _construct_queue(self):
        """Create the queued-blocks model and fill it from ``blocks``."""
        self.queue_model = QtGui.QStandardItemModel(0, 4)
        self.queue_model.setHorizontalHeaderLabels(self.header)
        self.ui.quequedBlocks.setModel(self.queue_model)
        self.ui.quequedBlocks.setColumnWidth(2, 75)
        self.ui.quequedBlocks.setColumnWidth(3, 25)
        for i, block in enumerate(self.blocks):
            self.add_item(self.queue_model, _id=i, video=block['video'], question=block['questions'],
                          washout=block['washout'])

    @staticmethod
    def add_item(model, _id=0, video=pathlib.Path, question=pathlib.Path, washout=0, index=-1):
        """Append (or insert at *index*) one block row into *model*.

        NOTE(review): the ``video``/``question`` defaults are the ``Path``
        class itself rather than instances; all callers pass real paths.
        """
        id_number = QtGui.QStandardItem(str(_id))
        video_name = QtGui.QStandardItem(video.name)
        questions_name = QtGui.QStandardItem(question.name)
        washout_name = QtGui.QStandardItem(str(washout) + "s")
        # Rows are read-only; only the video cell may be dragged.
        video_name.setEditable(False)
        video_name.setDragEnabled(True)
        video_name.setDropEnabled(False)
        questions_name.setEditable(False)
        questions_name.setDropEnabled(False)
        washout_name.setEditable(False)
        washout_name.setDropEnabled(False)
        id_number.setEnabled(False)
        id_number.setDropEnabled(False)
        if index == -1:
            index = model.rowCount()
            model.appendRow(video_name)
        else:
            model.insertRow(index, video_name)
        model.setItem(index, 1, questions_name)
        model.setItem(index, 2, washout_name)
        model.setItem(index, 3, id_number)

    def _construct_startAction(self):
        self.ui.startButton.clicked.connect(self.start)

    def _construct_backAction(self):
        self.ui.backButton.clicked.connect(self.back)

    def _construct_showAction(self):
        self.ui.showButton.clicked.connect(self.task_window.show)

    def _construct_fullScreenAction(self):
        self.ui.fullscreenButton.clicked.connect(self.task_window.fullscreen_action)

    def _construct_player_controls(self):
        """Hook the media player's signals and the transport buttons up."""
        self.media_player.durationChanged.connect(self.duration_change)
        self.media_player.positionChanged.connect(self.position_change)
        self.media_player.stateChanged.connect(self.update_buttons)
        self.ui.playButton.setIcon(self.play_icon)
        self.ui.stopButton.setIcon(self.stop_icon)
        self.ui.stopButton.clicked.connect(self.media_player.stop)
        self.ui.skipButton.setIcon(self.skip_icon)
        self.ui.skipButton.clicked.connect(self.skip_action)

    def _construct_volume_controls(self):
        """Hook the mute button and volume slider up.

        BUG FIX: this method used to connect ``stateChanged`` to
        ``update_buttons`` a second time (it is already connected in
        ``_construct_player_controls``), doubling every state update.
        """
        self.ui.muteButton.setIcon(self.volume_icon)
        self.ui.muteButton.clicked.connect(self.mute_action)
        self.mute = False
        self.ui.volumeSlider.setValue(self.media_player.volume())
        self.ui.volumeSlider.valueChanged.connect(self.media_player.setVolume)

    def update_buttons(self, state):
        """Refresh the transport buttons to match the media-player *state*."""
        self.ui.stopButton.setEnabled(state != QtMultimedia.QMediaPlayer.StoppedState)
        # BUG FIX: the play button was re-connected on every state change
        # without ever disconnecting, so each click eventually fired its
        # slot many times.  Drop stale connections first.
        try:
            self.ui.playButton.clicked.disconnect()
        except RuntimeError:
            pass  # nothing connected yet
        if state == QtMultimedia.QMediaPlayer.PlayingState:
            self.ui.playButton.clicked.connect(self.media_player.pause)
            self.ui.playButton.setIcon(self.pause_icon)
        else:
            self.ui.playButton.clicked.connect(self.media_player.play)
            self.ui.playButton.setIcon(self.play_icon)

    def duration_change(self, dur):
        """Track a new media duration (*dur* arrives in milliseconds)."""
        self.m_duration = dur / 1000
        # Qt slider ranges are integers; the stored value stays fractional.
        self.ui.durationSlider.setMaximum(int(self.m_duration))

    def position_change(self, progress):
        """Follow playback position (*progress* in ms) unless the user is seeking."""
        if not self.ui.durationSlider.isSliderDown():
            self.ui.durationSlider.setValue(int(progress / 1000))
        self.set_duration_label(progress / 1000)

    def set_duration_label(self, progress):
        """Render ``position / total`` as ``m:ss`` in the duration label.

        BUG FIX: seconds were previously formatted as raw floats
        (e.g. ``1:2.345``); truncate to whole, zero-padded seconds.
        """
        pos = '{}:{:02d}'.format(int(progress // 60), int(progress % 60))
        total_dur = '{}:{:02d}'.format(int(self.m_duration // 60), int(self.m_duration % 60))
        self.ui.durationLabel.setText(pos + ' / ' + total_dur)

    def mute_action(self):
        """Toggle mute and swap the speaker icon accordingly."""
        self.mute = not self.mute
        self.ui.muteButton.setIcon(self.mute_icon if self.mute else self.volume_icon)
        self.media_player.setMuted(self.mute)

    def skip_action(self):
        """Abort the current video and jump straight to the next block."""
        self.media_player.stop()
        video = self.block_widgets['video_player'].video
        if isinstance(video, pathlib.Path):
            video = video.name
        event = {'type_': 'Skip', 'Video': video}
        # Fast-forward the sequencer to the start of the next block.
        while self.sequencer.next_index() != 0:
            self.sequencer.skip()
        self.advance_block(event=event)

    def start_sequence(self):
        """Queue the pre-block widgets (everything before ``'*block*'``)."""
        self.sequencer.clear()
        block_sequence = self.sequence_order.index('*block*')
        sequence_order = self.sequence_order[:block_sequence]
        if len(sequence_order) > 1:
            # Only the very first widget gets a back action hiding the window.
            first = sequence_order.pop(0)
            self.sequencer.insert(self.block_widgets[first], ok_action=self.advance, back_action=self.task_window.hide)
        # The widget just before the first block advances into the block loop.
        last = sequence_order.pop()
        for item in sequence_order:
            self.sequencer.insert(self.block_widgets[item], ok_action=self.advance)
        self.sequencer.insert(self.block_widgets[last], ok_action=self.advance_block)

    def end_sequence(self):
        """Queue the final washout and finish screens after the last block."""
        # (Removed unused locals that sliced sequence_order without using it.)
        block = self.blocks[-1]
        self.sequencer.insert(self.block_widgets['washout'], milliseconds=block['washout'] * 1000,
                              timer_action=self.advance)
        self.sequencer.insert(self.block_widgets['finish'])

    def next_queue(self):
        """Advance the queued/playing views by one block.

        Returns ``True`` while a block could be moved into the playing
        view, ``False`` once the queue is empty.

        NOTE(review): when a block is already playing this resets all
        three views and refills the queue with *every* block, so nothing
        accumulates in the completed view — confirm this is intended.
        """
        if self.playing_model.rowCount() > 0:
            self.complete_model.clear()
            self.queue_model.clear()
            self.playing_model.clear()
            self.complete_model.setHorizontalHeaderLabels(self.header)
            self.queue_model.setHorizontalHeaderLabels(self.header)
            self.playing_model.setHorizontalHeaderLabels(self.header)
            for i, block in enumerate(self.blocks):
                self.add_item(self.queue_model, _id=i, video=block['video'], question=block['questions'],
                              washout=block['washout'])
        self.playing_model.clear()
        self.playing_model.setHorizontalHeaderLabels(self.header)
        if self.queue_model.rowCount() > 0:
            play_index = int(self.queue_model.item(0, 3).text())
            block = self.blocks[play_index]
            self.add_item(self.playing_model, _id=play_index, video=block['video'], question=block['questions'],
                          washout=block['washout'])
            self.queue_model.removeRow(0)
            flag = True
        else:
            flag = False
        return flag

    def next_block(self):
        """Queue the questionnaire for the block now in the playing view."""
        play_index = int(self.playing_model.item(0, 3).text())
        block = self.blocks[play_index]
        self.sequencer.insert(self.block_widgets['questionnaire'], path=block['questions'],
                              finish_action=self.advance_block)

    def advance(self, event=None, caller=None):
        """Log *event* (if any) and move the sequencer to the next widget."""
        if event is not None:
            # BUG FIX: ``self.events.append(**event)`` raised TypeError
            # (``** on None``) when a timer fired this callback with no
            # event payload.
            self.events.append(**event)
        next(self.sequencer)

    def advance_trigger(self, event=None, caller=None):
        """Emit a hardware 'VideoEnd' trigger, then advance the sequencer."""
        event = {'SubType': 'VideoEnd'}
        self.events.trigger_event(**event)
        next(self.sequencer)

    def advance_block(self, event=None, caller=None):
        """Move on to the next block, or queue the closing screens when done."""
        more_blocks = self.next_queue()
        if more_blocks:
            self.next_block()
        else:
            self.end_sequence()
        self.advance(event=event, caller=caller)

    def start(self):
        """Start-button handler: first press starts, later presses delegate."""
        if self.running:
            self.running_action(caller=self)
        else:
            self.running = True
            self.start_action(caller=self)

    def default_start(self, caller=None):
        """Open the event log, build the sequence, and show the task window."""
        self.events.path = self.construct_path()
        self.events.construct()
        self.events.Subject = self.subject
        self.events.Task = self.experiment_name
        self.events.Block = self.session
        self.events.open()
        self.events.set_time()
        self.start_sequence()
        self.ui.startButton.setEnabled(False)
        self.ui.backButton.setText(QtWidgets.QApplication.translate("EmotionControl", 'Stop', None, -1))
        self.sequencer.start()
        self.task_window.show()

    def running_action(self, caller=None):
        """Hook for Start presses while already running; default is a no-op."""
        pass

    def back(self):
        """Back-button handler: stops a running task, otherwise navigates back."""
        if self.running:
            self.stop()
        else:
            self.back_action()

    def default_back(self, caller=None):
        """Placeholder back callback: terminate the application."""
        sys.exit()

    def stop(self):
        """Abort the running task, log a manual stop, and reset the UI."""
        if self.running:
            self.media_player.stop()
            self.sequencer.clear()
            event = {'type_': 'ManualStop'}
            self.events.append(**event)
            self.running = False
            self.reset()
            self.ui.startButton.setEnabled(True)
            self.ui.backButton.setText(QtWidgets.QApplication.translate("EmotionControl", 'Back', None, -1))

    def reset(self):
        """Clear events and all views, then refill the queue from ``blocks``."""
        if not self.running:
            self.events.clear()
            self.sequencer.clear()
            self.complete_model.clear()
            self.queue_model.clear()
            self.playing_model.clear()
            self.complete_model.setHorizontalHeaderLabels(self.header)
            self.queue_model.setHorizontalHeaderLabels(self.header)
            self.playing_model.setHorizontalHeaderLabels(self.header)
            for i, block in enumerate(self.blocks):
                self.add_item(self.queue_model, _id=i, video=block['video'], question=block['questions'],
                              washout=block['washout'])
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" emotionstimtask.py
Description:
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, <NAME>"
__credits__ = ["<NAME>"]
__license__ = ""
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = ""
__status__ = "Prototype"
# Default Libraries #
import sys
import pathlib
import copy
import datetime
# Downloaded Libraries #
from PySide2 import QtGui, QtWidgets, QtMultimedia
from PySide2.QtCore import QDir
from PySide2.QtGui import QKeySequence
from PySide2.QtWidgets import QWidget, QAction, QFileDialog, QAbstractItemView, QStyle
# Local Libraries #
from ...utility.iotriggers import AudioTrigger
from ...utility.eventlogger import SubjectEventLogger
from ...QtUtility.utilitywidgets import WidgetContainer, WidgetContainerSequencer
from ...QtUtility.taskwidgets import TaskWindow
from ..emotionwidgets import EmotionInstructions, EmotionWashout, EmotionFinish, EmotionVideoPlayer, EmotionQuestionnaireImage
from ..emotionCategorization.UI.emotionparameters import Ui_EmotionParameters
from ..UI.emotioncontrol import Ui_EmotionControl
# Definitions #
# Constants #
START_DIR = ""
# Classes #
class EmotionStimTask:
    """Top-level wiring for the emotion stimulation experiment.

    Owns the audio trigger, event logger, task window, widget sequencer,
    and all per-block widgets, and moves the UI between the parameter
    page and the control page.
    """

    EXPERIMENT_NAME = "Emotion Stimulation Control"

    def __init__(self, parent=None, stack=None, r_widget=None):
        self.parent = parent
        self.widget_stack = stack
        self.return_widget = r_widget

        # Hardware trigger: 5 V square wave on audio output device 3.
        self.trigger = AudioTrigger()
        self.trigger.audio_device.device = 3
        self.trigger.add_square_wave('square_wave', amplitude=5, samples=22000)
        self.trigger.current_waveform = 'square_wave'

        self.task_window = TaskWindow()
        self.events = SubjectEventLogger(io_trigger=self.trigger)

        self.sequencer = WidgetContainerSequencer()
        self.task_window.sequencer = self.sequencer

        # Operator-facing pages and subject-facing block widgets.
        self.parameters = EmotionParameters()
        self.control = EmotionControl(events=self.events, x_name=self.EXPERIMENT_NAME)
        self.instructions = EmotionInstructions(path=pathlib.Path(__file__).parent.joinpath('instructions.txt'),
                                                events=self.events)
        self.video_player = EmotionVideoPlayer(events=self.events)
        self.questionnaire = EmotionQuestionnaireImage(events=self.events)
        self.washout = EmotionWashout(events=self.events)
        self.finished = EmotionFinish(events=self.events)

        self.block_widgets = {'instructions': self.instructions, 'video_player': self.video_player,
                              'questionnaire': self.questionnaire, 'washout': self.washout, 'finish': self.finished}
        # '*block*' marks where the per-block loop sits in the stage order.
        self.sequence_order = ['instructions', '*block*', 'washout', 'finish']
        self.block_order = ['questionnaire']

    def load_task(self, stack=None):
        """Push the parameter/control pages and the task-window widgets."""
        if stack is not None:
            self.widget_stack = stack
        if self.return_widget is None:
            _, self.return_widget, _ = self.widget_stack.current()
        self.widget_stack.load(self.parameters)
        self.widget_stack.load(self.control)
        self.task_window.load(self.instructions)
        self.task_window.load(self.washout)
        self.task_window.load(self.video_player)
        self.task_window.load(self.questionnaire)
        self.task_window.load(self.finished)
        self.control.task_window = self.task_window
        self.control.sequencer = self.sequencer
        self.control.sequence_order = self.sequence_order
        self.control.parameters = self.parameters.parameters
        self.control.block_widgets = self.block_widgets
        self.control.player = self.video_player

    def unload_task(self, back=True, clear_widget=False):
        """Tear down everything ``load_task`` set up."""
        if back:
            self.widget_stack.set(self.return_widget)
        self.widget_stack.unload(self.parameters, back=False, clear_widget=clear_widget)
        self.widget_stack.unload(self.control, back=False, clear_widget=clear_widget)
        self.task_window.close()
        self.task_window.unload(self.instructions, back=False, clear_widget=clear_widget)
        self.task_window.unload(self.washout, back=False, clear_widget=clear_widget)
        self.task_window.unload(self.video_player, back=False, clear_widget=clear_widget)
        self.task_window.unload(self.questionnaire, back=False, clear_widget=clear_widget)
        # BUG FIX: the finish screen was loaded in load_task but never
        # unloaded, leaking it across task reloads.
        self.task_window.unload(self.finished, back=False, clear_widget=clear_widget)

    def setup_task(self):
        """Show the parameter page; OK proceeds to control, back unloads."""
        self.parameters.run(self.control_task, self.unload_task)

    def control_task(self):
        """Show the control page; its back action returns to the parameters."""
        self.control.run(self.parameters.run)
class EmotionParameters(WidgetContainer):
    """Container for the parameter-entry page of the task."""

    def __init__(self, name="EmotionParameters", init=False):
        WidgetContainer.__init__(self, name, init)
        self.ok_action = None
        self.back_action = self.remove_from_stack
        # Cached copy of the last parameters read from the widget.
        self._parameters = None

    @property
    def parameters(self):
        """Latest parameters from the widget, falling back to the cache."""
        try:
            out = self.widget.parameters
            self._parameters = out
        except AttributeError:
            # BUG FIX: was a bare ``except`` that swallowed *every* error;
            # only the missing-widget/attribute case should fall back.
            out = self._parameters
        return out

    @property
    def loops(self):
        # NOTE(review): ParametersWidget never defines ``loops``; reading
        # this likely raises AttributeError — confirm whether it is dead code.
        return self.widget.loops

    @property
    def randomize(self):
        # NOTE(review): same concern as ``loops``.
        return self.widget.randomize

    def construct_widget(self):
        self.widget = ParametersWidget()

    def run(self, ok_action=None, back_action=None):
        """Wire the OK/back callbacks into the widget, then show it."""
        if ok_action is not None:
            self.ok_action = ok_action
        if back_action is not None:
            self.back_action = back_action
        self.widget.ok_action = self.ok_action
        self.widget.back_action = self.back_action
        super().run()
class ParametersWidget(QWidget):
    """Form for entering subject/session info and building the block list."""

    header = ('Video', 'Questions', 'Video Path', 'Question Path')
    # File patterns accepted by the directory scanners.
    v_types = ('*.avi', '*.mp4', '*.ogg', '*.qt', '*.wmv', '*.yuv')
    q_types = ('*.toml',)

    def __init__(self):
        super(ParametersWidget, self).__init__()
        # Callbacks replaced by the owning container after construction.
        self.ok_action = self.default_ok
        self.back_action = self.default_back
        self._parameters = {}
        self.subject = []
        self.session = []
        self.blocks = []
        self.ui = Ui_EmotionParameters()
        self.ui.setupUi(self)
        self.list_model = None
        self._construct_video_list()
        self.deleteAction = None
        self._construct_deleteAction()
        self.okAction = None
        self._construct_okAction()
        self._construct_backAction()

    @property
    def parameters(self):
        """Dict of the current subject, session, and block definitions."""
        self._parameters['subject'] = self.subject
        self._parameters['session'] = self.session
        self._parameters['blocks'] = self.blocks
        return self._parameters

    @property
    def static_parameters(self):
        """Deep-copied snapshot of the parameters.

        NOTE(review): ``self.loops`` and ``self.randomize`` are never
        defined on this widget, so reading this property raises
        AttributeError — confirm whether it is dead code.
        """
        self._parameters['blocks'] = self.blocks
        self._parameters['loops'] = self.loops
        self._parameters['randomize'] = self.randomize
        return copy.deepcopy(self._parameters)

    def _construct_video_list(self):
        """Build the 4-column, drag-reorderable video/question list view."""
        self.list_model = QtGui.QStandardItemModel(0, 4)
        self.list_model.setHorizontalHeaderLabels(self.header)
        self.ui.videoList.setModel(self.list_model)
        self.ui.videoList.setDragDropMode(QAbstractItemView.InternalMove)
        self.ui.videoList.setSelectionMode(QAbstractItemView.MultiSelection)
        self.ui.videoList.setColumnWidth(0, 200)
        self.ui.videoList.setColumnWidth(1, 200)
        self.ui.videoList.setColumnWidth(2, 100)
        self.ui.videoList.setColumnWidth(3, 100)
        self.ui.videoList.doubleClicked.connect(self.double_click)
        self.ui.addVideoButton.clicked.connect(self.add_videos)
        self.ui.addQuestionsButton.clicked.connect(self.add_questions)
        self.ui.videoDirectory.clicked.connect(self.video_directory)
        self.ui.questionDirectory.clicked.connect(self.question_directory)
        self.ui.deleteLastButton.clicked.connect(self.delete_last)
        self.ui.clearAll.clicked.connect(self.clear_all)

    def _construct_deleteAction(self):
        """Bind the platform Delete shortcut to row deletion."""
        self.deleteAction = QAction("delete", self)
        self.deleteAction.setShortcut(QKeySequence.Delete)
        self.deleteAction.triggered.connect(self.delete_key)
        self.addAction(self.deleteAction)

    def _construct_okAction(self):
        """Bind Shift+Return and the OK button to the commit path.

        BUG FIX: the shortcut previously connected straight to the
        ``ok_action`` callback captured at construction time, so callbacks
        assigned later were never invoked and ``evaluate()`` was skipped.
        Route it through ``self.ok`` exactly like the OK button.
        """
        self.okAction = QAction("OK", self)
        self.okAction.setShortcut(QKeySequence("Shift+Return"))
        self.okAction.triggered.connect(self.ok)
        self.addAction(self.okAction)
        self.ui.okButton.clicked.connect(self.ok)

    def _construct_backAction(self):
        self.ui.backButton.clicked.connect(self.back)

    def double_click(self, index):
        """Edit the video (cols 0/2) or question (cols 1/3) of the clicked row."""
        if index.column() in (0, 2):
            self.change_video(index.row())
        elif index.column() in (1, 3):
            self.change_question(index.row())

    def delete_key(self):
        """Delete shortcut: remove selected rows when the list has focus."""
        fw = self.focusWidget()
        if fw is self.ui.videoList:
            self.delete_video()

    def find_last_row(self, item=''):
        """Return the topmost trailing row whose *item* cell(s) are still
        empty, or ``-1`` when there is no such row."""
        end = self.list_model.rowCount()
        index = -1
        for i in reversed(range(0, end)):
            video = self.list_model.item(i, 0).text()
            question = self.list_model.item(i, 1).text()
            if item == 'video':
                text = video
            elif item == 'question':
                text = question
            elif item == 'video&question':
                text = video + question
            else:
                break
            if text == '':
                index = i
            else:
                break
        return index

    def add_item(self, video='', question='', index=-1):
        """Append (or insert at *index*) a row holding a video/question pair."""
        video_name = QtGui.QStandardItem(pathlib.Path(video).name)
        questions_name = QtGui.QStandardItem(pathlib.Path(question).name)
        videos = QtGui.QStandardItem(video)
        questions = QtGui.QStandardItem(question)
        # Cells are read-only; only the video-name cell may be dragged.
        video_name.setEditable(False)
        video_name.setDragEnabled(True)
        video_name.setDropEnabled(False)
        questions_name.setEditable(False)
        questions_name.setDropEnabled(False)
        videos.setEditable(False)
        videos.setDropEnabled(False)
        questions.setEditable(False)
        # Consistency fix: this was the only cell left accepting drops.
        questions.setDropEnabled(False)
        if index == -1:
            index = self.list_model.rowCount()
            self.list_model.appendRow(video_name)
        else:
            self.list_model.insertRow(index, video_name)
        self.list_model.setItem(index, 1, questions_name)
        self.list_model.setItem(index, 2, videos)
        self.list_model.setItem(index, 3, questions)

    def edit_item(self, index=None, video='', question=''):
        """Fill the given (or last empty) row with *video* / *question*."""
        if index is None:
            item = ''
            if video != '' and question != '':
                item = 'video&question'
            elif video != '':
                item = 'video'
            elif question != '':
                item = 'question'
            index = self.find_last_row(item=item)
        videos_name = self.list_model.item(index, 0)
        questions_name = self.list_model.item(index, 1)
        videos = self.list_model.item(index, 2)
        questions = self.list_model.item(index, 3)
        if video != '':
            videos_name.setText(pathlib.Path(video).name)
            videos.setText(video)
        if question != '':
            questions_name.setText(pathlib.Path(question).name)
            questions.setText(question)

    def _start_directory(self):
        """Best starting directory for file dialogs (HOME, or HOME/START_DIR)."""
        start_dir = pathlib.Path.home()
        other = start_dir.joinpath(START_DIR)
        return other if other.is_dir() else start_dir

    def _place_video(self, video):
        """Put *video* into the last empty video cell, or append a new row."""
        last = self.find_last_row('video')
        if last == -1:
            self.add_item(video=video)
        else:
            self.edit_item(index=last, video=video)

    def _place_question(self, question):
        """Put *question* into the last empty question cell, or append a row."""
        last = self.find_last_row('question')
        if last == -1:
            self.add_item(question=question)
        else:
            self.edit_item(index=last, question=question)

    def change_video(self, row):
        """Dialog to replace the video of *row*."""
        dialog = QFileDialog(self, caption="Open Video", directory=self._start_directory().as_posix())
        dialog.setFileMode(QFileDialog.ExistingFile)
        dialog.setViewMode(QFileDialog.Detail)
        if dialog.exec_():
            video_name = self.list_model.item(row, 0)
            videos = self.list_model.item(row, 2)
            v = dialog.selectedFiles()[0]
            video_name.setText(pathlib.Path(v).name)
            videos.setText(v)

    def change_question(self, row):
        """Dialog to replace the question file of *row*."""
        dialog = QFileDialog(self, caption="Open Question", directory=self._start_directory().as_posix())
        dialog.setFileMode(QFileDialog.ExistingFile)
        dialog.setViewMode(QFileDialog.Detail)
        if dialog.exec_():
            questions_name = self.list_model.item(row, 1)
            questions = self.list_model.item(row, 3)
            q = dialog.selectedFiles()[0]
            questions_name.setText(pathlib.Path(q).name)
            questions.setText(q)

    def add_videos(self):
        """Dialog to append one or more videos, filling empty rows first."""
        dialog = QFileDialog(self, caption="Open Video", directory=self._start_directory().as_posix())
        dialog.setFileMode(QFileDialog.ExistingFiles)
        dialog.setViewMode(QFileDialog.Detail)
        if dialog.exec_():
            for video in dialog.selectedFiles():
                self._place_video(video)

    def add_questions(self):
        """Dialog to append one or more question files."""
        dialog = QFileDialog(self, caption="Open Questions", directory=self._start_directory().as_posix())
        dialog.setFileMode(QFileDialog.ExistingFiles)
        dialog.setViewMode(QFileDialog.Detail)
        if dialog.exec_():
            for question in dialog.selectedFiles():
                self._place_question(question)

    def video_directory(self):
        """Dialog to add every recognized video inside a chosen directory."""
        dialog = QFileDialog(self, caption="Open Video Directory", directory=self._start_directory().as_posix())
        dialog.setFileMode(QFileDialog.Directory)
        dialog.setViewMode(QFileDialog.Detail)
        if dialog.exec_():
            dir_path = pathlib.Path(dialog.selectedFiles()[0])
            files = []
            for ext in self.v_types:
                files.extend(dir_path.glob(ext))
            for video in files:
                self._place_video(str(video))

    def question_directory(self):
        """Dialog to add every recognized question file inside a directory."""
        dialog = QFileDialog(self, caption="Open Questions Directory", directory=self._start_directory().as_posix())
        dialog.setFileMode(QFileDialog.Directory)
        dialog.setViewMode(QFileDialog.Detail)
        if dialog.exec_():
            dir_path = pathlib.Path(dialog.selectedFiles()[0])
            files = []
            if len(self.q_types) < 1 or '*' in self.q_types:
                # No specific patterns configured: take everything.
                files = dir_path.iterdir()
            else:
                for ext in self.q_types:
                    files.extend(dir_path.glob(ext))
            for question in files:
                self._place_question(str(question))

    def delete_last(self):
        """Remove the bottom row of the list."""
        self.list_model.removeRow(self.list_model.rowCount() - 1)

    def delete_video(self):
        """Delete the rows of all currently selected cells.

        BUG FIX: ``selectedIndexes()`` yields one index per selected
        *cell*, so a fully selected row used to be removed once per
        column, deleting neighbouring rows as well.
        """
        rows = sorted({index.row() for index in self.ui.videoList.selectedIndexes()}, reverse=True)
        for row in rows:
            self.list_model.removeRow(row)

    def clear_all(self):
        """Empty the list and restore the header and column layout."""
        self.list_model.clear()
        self.list_model.setHorizontalHeaderLabels(self.header)
        for column, width in enumerate((200, 200, 100, 100)):
            self.ui.videoList.setColumnWidth(column, width)

    def evaluate(self):
        """Rebuild ``subject``/``session``/``blocks`` in place from the form
        (in place, so references held by other widgets stay valid)."""
        self.subject.clear()
        self.session.clear()
        self.blocks.clear()
        self.subject.append(self.ui.subjectIDEdit.text())
        self.session.append(self.ui.blockEdit.text())
        washout = self.ui.washoutBox.value()
        for row in range(self.list_model.rowCount()):
            self.blocks.append({
                'video': pathlib.Path(self.list_model.item(row, 2).text()),
                'questions': pathlib.Path(self.list_model.item(row, 3).text()),
                'washout': washout,
            })

    def ok(self):
        """Commit the form values, then invoke the configured OK callback."""
        self.evaluate()
        self.ok_action()

    def default_ok(self):
        """Placeholder OK callback used until a real action is wired in."""
        print("Not Connected")

    def back(self):
        """Invoke the configured back callback."""
        self.back_action()

    def default_back(self):
        """Placeholder back callback: terminate the application."""
        sys.exit()
class EmotionControl(WidgetContainer):
    """Widget container wrapping the experiment control panel.

    Thin delegation layer around ``ControlWidget``: the task window,
    sequencer, block widgets, sequence order, player, parameters, and
    event logger are all forwarded to the underlying widget, which is
    built lazily by ``construct_widget``.
    """

    def __init__(self, name="EmotionControl", x_name="", events=None, init=False):
        WidgetContainer.__init__(self, name, init)
        self.back_action = self.remove_from_stack
        self.experiment_name = x_name
        # Cached until the widget exists; see the ``events`` property.
        self._events = events

    @property
    def task_window(self):
        """Window the subject-facing widgets are shown in (delegated)."""
        return self.widget.task_window

    @task_window.setter
    def task_window(self, value):
        self.widget.task_window = value

    @property
    def sequencer(self):
        """Widget sequencer that drives the task flow (delegated)."""
        return self.widget.sequencer

    @sequencer.setter
    def sequencer(self, value):
        self.widget.sequencer = value

    @property
    def block_widgets(self):
        """Mapping of stage name to widget container (delegated)."""
        return self.widget.block_widgets

    @block_widgets.setter
    def block_widgets(self, value):
        self.widget.block_widgets = value

    @property
    def sequence_order(self):
        """Ordered stage names, with ``'*block*'`` marking the block loop."""
        return self.widget.sequence_order

    @sequence_order.setter
    def sequence_order(self, value):
        self.widget.sequence_order = value

    @property
    def player(self):
        """Video player widget (delegated)."""
        return self.widget.player

    @player.setter
    def player(self, value):
        self.widget.player = value

    @property
    def parameters(self):
        # BUG FIX: this getter previously returned ``self.widget.paremeters``
        # (typo), raising AttributeError on every read; the setter below
        # shows the intended attribute name.
        return self.widget.parameters

    @parameters.setter
    def parameters(self, value):
        self.widget.parameters = value

    @property
    def events(self):
        """Event logger: the widget's copy once built, else the cached one."""
        try:
            out = self.widget.events
        except AttributeError:
            out = self._events
        return out

    @events.setter
    def events(self, value):
        self._events = value
        if self.widget is not None:
            self.widget.events = value

    def construct_widget(self):
        """Build the ControlWidget and hand it the cached logger and name."""
        self.widget = ControlWidget()
        self.widget.events = self._events
        self.widget.experiment_name = self.experiment_name

    def run(self, back_action=None):
        """Finish wiring the widget (callbacks, blocks) and display it."""
        if back_action is not None:
            self.back_action = back_action
        self.widget.back_action = self.back_action
        self.widget.construct()
        self.widget.construct_blocks()
        super().run()
class ControlWidget(QWidget):
header = ('Video', 'Questions', 'Washout', '')
def __init__(self, player=None, init=False, **kwargs):
super().__init__(**kwargs)
self.back_action = self.default_back
self.start_action = self.default_start
self.ui = Ui_EmotionControl()
self.ui.setupUi(self)
self.play_icon = self.style().standardIcon(QStyle.SP_MediaPlay)
self.pause_icon = self.style().standardIcon(QStyle.SP_MediaPause)
self.stop_icon = self.style().standardIcon(QStyle.SP_MediaStop)
self.skip_icon = self.style().standardIcon(QStyle.SP_MediaSkipForward)
self.volume_icon = self.style().standardIcon(QStyle.SP_MediaVolume)
self.mute_icon = self.style().standardIcon(QStyle.SP_MediaVolumeMuted)
self._path = None
self.subject = None
self.session = None
self.experiment_name = None
self.events = None
self.m_duration = 0
self.mute = False
self.task_window = None
self.sequencer = None
self._player = None
self.media_player = None
self.player = player
self.parameters = None
self.block_widgets = None
self.block_sequence = -1
self.sequence_order = []
self.running = False
self.blocks = None
if init:
self.construct()
@property
def path(self):
return self._path
@path.setter
def path(self, value):
if isinstance(value, pathlib.Path) or value is None:
self._path = value
else:
self._path = pathlib.Path(value)
@property
def player(self):
return self._player
@player.setter
def player(self, value):
self._player = value
if value is not None:
self.media_player = value.media_player
def construct(self):
self.subject = self.parameters['subject'][0]
self.session = self.parameters['session'][0]
self._construct_startAction()
self._construct_backAction()
self._construct_showAction()
self._construct_fullScreenAction()
self._construct_player_controls()
self._construct_volume_controls()
self.update_buttons(self.media_player.state())
def construct_path(self):
now = datetime.datetime.now().isoformat('_', 'seconds').replace(':', '~')
file_name = self.parameters['subject'][0] + '_' + self.parameters['session'][0] + '_' + now + '.h5'
return pathlib.Path(__file__).parent.joinpath(file_name)
def construct_blocks(self):
self.blocks = self.parameters['blocks']
self._construct_queue()
self.playing_model = QtGui.QStandardItemModel(0, 4)
self.playing_model.setHorizontalHeaderLabels(self.header)
self.ui.playingBlock.setModel(self.playing_model)
self.ui.playingBlock.setColumnWidth(2, 75)
self.ui.playingBlock.setColumnWidth(3, 25)
self.complete_model = QtGui.QStandardItemModel(0, 4)
self.complete_model.setHorizontalHeaderLabels(self.header)
self.ui.completedBlocks.setModel(self.complete_model)
# self.ui.completedBlocks.setDragDropMode(QAbstractItemView.InternalMove)
# self.ui.completedBlocks.setSelectionMode(QAbstractItemView.MultiSelection)
self.ui.completedBlocks.setColumnWidth(2, 75)
self.ui.completedBlocks.setColumnWidth(3, 25)
def _construct_queue(self):
self.queue_model = QtGui.QStandardItemModel(0, 4)
self.queue_model.setHorizontalHeaderLabels(self.header)
self.ui.quequedBlocks.setModel(self.queue_model)
# self.ui.quequedBlocks.setDragDropMode(QAbstractItemView.InternalMove)
# self.ui.quequedBlocks.setSelectionMode(QAbstractItemView.MultiSelection)
self.ui.quequedBlocks.setColumnWidth(2, 75)
self.ui.quequedBlocks.setColumnWidth(3, 25)
for i, block in enumerate(self.blocks):
self.add_item(self.queue_model, _id=i, video=block['video'], question=block['questions'],
washout=block['washout'])
@staticmethod
def add_item(model, _id=0, video=pathlib.Path, question=pathlib.Path, washout=0, index=-1):
# Make Row Objects
id_number = QtGui.QStandardItem(str(_id))
video_name = QtGui.QStandardItem(video.name)
questions_name = QtGui.QStandardItem(question.name)
washout_name = QtGui.QStandardItem(str(washout) + "s")
# Row Settings
video_name.setEditable(False)
video_name.setDragEnabled(True)
video_name.setDropEnabled(False)
questions_name.setEditable(False)
questions_name.setDropEnabled(False)
washout_name.setEditable(False)
washout_name.setDropEnabled(False)
id_number.setEnabled(False)
id_number.setDropEnabled(False)
if index == -1:
index = model.rowCount()
model.appendRow(video_name)
else:
model.insertRow(index, video_name)
model.setItem(index, 1, questions_name)
model.setItem(index, 2, washout_name)
model.setItem(index, 3, id_number)
def _construct_startAction(self):
self.ui.startButton.clicked.connect(self.start)
def _construct_backAction(self):
self.ui.backButton.clicked.connect(self.back)
def _construct_showAction(self):
self.ui.showButton.clicked.connect(self.task_window.show)
def _construct_fullScreenAction(self):
self.ui.fullscreenButton.clicked.connect(self.task_window.fullscreen_action)
def _construct_player_controls(self):
self.media_player.durationChanged.connect(self.duration_change)
self.media_player.positionChanged.connect(self.position_change)
self.media_player.stateChanged.connect(self.update_buttons)
self.ui.playButton.setIcon(self.play_icon)
self.ui.stopButton.setIcon(self.stop_icon)
self.ui.stopButton.clicked.connect(self.media_player.stop)
self.ui.skipButton.setIcon(self.skip_icon)
self.ui.skipButton.clicked.connect(self.skip_action)
def _construct_volume_controls(self):
self.media_player.stateChanged.connect(self.update_buttons)
self.ui.muteButton.setIcon(self.volume_icon)
self.ui.muteButton.clicked.connect(self.mute_action)
self.mute = False
self.ui.volumeSlider.setValue(self.media_player.volume())
self.ui.volumeSlider.valueChanged.connect(self.media_player.setVolume)
def update_buttons(self, state):
self.ui.stopButton.setEnabled(state != QtMultimedia.QMediaPlayer.StoppedState)
if state == QtMultimedia.QMediaPlayer.PlayingState:
self.ui.playButton.clicked.connect(self.media_player.pause)
self.ui.playButton.setIcon(self.pause_icon)
elif state != QtMultimedia.QMediaPlayer.PlayingState:
self.ui.playButton.clicked.connect(self.media_player.play)
self.ui.playButton.setIcon(self.play_icon)
def duration_change(self, dur):
self.m_duration = dur / 1000
self.ui.durationSlider.setMaximum(self.m_duration)
def position_change(self, progress):
if not self.ui.durationSlider.isSliderDown():
self.ui.durationSlider.setValue(progress / 1000)
self.set_duration_label(progress / 1000)
def set_duration_label(self, progress):
pos = str(int(progress // 60)) + ':' + str(progress % 60)
total_dur = str(int(self.m_duration // 60)) + ':' + str(self.m_duration % 60)
self.ui.durationLabel.setText(pos + ' / ' + total_dur)
def mute_action(self):
if self.mute:
self.mute = False
self.ui.muteButton.setIcon(self.volume_icon)
else:
self.mute = True
self.ui.muteButton.setIcon(self.mute_icon)
self.media_player.setMuted(self.mute)
def skip_action(self):
self.media_player.stop()
video = self.block_widgets['video_player'].video
if isinstance(video, pathlib.Path):
video = video.name
event = {'type_': 'Skip', 'Video': video}
while self.sequencer.next_index() != 0:
self.sequencer.skip()
self.advance_block(event=event)
def start_sequence(self):
self.sequencer.clear()
block_sequence = self.sequence_order.index('*block*')
sequence_order = self.sequence_order[:block_sequence]
if len(sequence_order) > 1:
first = sequence_order.pop(0)
self.sequencer.insert(self.block_widgets[first], ok_action=self.advance, back_action=self.task_window.hide)
last = sequence_order.pop()
for item in sequence_order:
self.sequencer.insert(self.block_widgets[item], ok_action=self.advance)
self.sequencer.insert(self.block_widgets[last], ok_action=self.advance_block)
def end_sequence(self):
block = self.blocks[-1]
block_sequence = self.sequence_order.index('*block*')
sequence_order = self.sequence_order[block_sequence + 1:]
self.sequencer.insert(self.block_widgets['washout'], milliseconds=block['washout'] * 1000,
timer_action=self.advance)
self.sequencer.insert(self.block_widgets['finish'])
def next_queue(self):
if self.playing_model.rowCount() > 0:
self.complete_model.clear()
self.queue_model.clear()
self.playing_model.clear()
self.complete_model.setHorizontalHeaderLabels(self.header)
self.queue_model.setHorizontalHeaderLabels(self.header)
self.playing_model.setHorizontalHeaderLabels(self.header)
for i, block in enumerate(self.blocks):
self.add_item(self.queue_model, _id=i, video=block['video'], question=block['questions'],
washout=block['washout'])
self.playing_model.clear()
self.playing_model.setHorizontalHeaderLabels(self.header)
if self.queue_model.rowCount() > 0:
play_index = int(self.queue_model.item(0, 3).text())
block = self.blocks[play_index]
self.add_item(self.playing_model, _id=play_index, video=block['video'], question=block['questions'],
washout=block['washout'])
self.queue_model.removeRow(0)
flag = True
else:
flag = False
return flag
def next_block(self):
play_index = int(self.playing_model.item(0, 3).text())
block = self.blocks[play_index]
self.sequencer.insert(self.block_widgets['questionnaire'], path=block['questions'],
finish_action=self.advance_block)
def advance(self, event=None, caller=None):
self.events.append(**event)
next(self.sequencer)
def advance_trigger(self, event=None, caller=None):
event = {'SubType': 'VideoEnd'}
self.events.trigger_event(**event)
next(self.sequencer)
def advance_block(self, event=None, caller=None):
more_blocks = self.next_queue()
if more_blocks:
self.next_block()
else:
self.end_sequence()
self.advance(event=event, caller=caller)
def start(self):
if self.running:
self.running_action(caller=self)
else:
self.running = True
self.start_action(caller=self)
def default_start(self, caller=None):
self.events.path = self.construct_path()
self.events.construct()
self.events.Subject = self.subject
self.events.Task = self.experiment_name
self.events.Block = self.session
self.events.open()
self.events.set_time()
self.start_sequence()
self.ui.startButton.setEnabled(False)
self.ui.backButton.setText(QtWidgets.QApplication.translate("EmotionControl", 'Stop', None, -1))
self.sequencer.start()
self.task_window.show()
def running_action(self, caller=None):
pass
def back(self):
if self.running:
self.stop()
else:
self.back_action()
def default_back(self, caller=None):
sys.exit()
def stop(self):
if self.running:
self.media_player.stop()
self.sequencer.clear()
event = {'type_': 'ManualStop'}
self.events.append(**event)
self.running = False
self.reset()
self.ui.startButton.setEnabled(True)
self.ui.backButton.setText(QtWidgets.QApplication.translate("EmotionControl", 'Back', None, -1))
def reset(self):
if not self.running:
self.events.clear()
self.sequencer.clear()
self.complete_model.clear()
self.queue_model.clear()
self.playing_model.clear()
self.complete_model.setHorizontalHeaderLabels(self.header)
self.queue_model.setHorizontalHeaderLabels(self.header)
self.playing_model.setHorizontalHeaderLabels(self.header)
for i, block in enumerate(self.blocks):
self.add_item(self.queue_model, _id=i, video=block['video'], question=block['questions'],
washout=block['washout']) | en | 0.319973 | #!/usr/bin/env python # -*- coding: utf-8 -*- emotionstimtask.py Description: # Default Libraries # # Downloaded Libraries # # Local Libraries # # Definitions # # Constants # # Classes # # Make Row Objects # Row Settings # self.ui.completedBlocks.setDragDropMode(QAbstractItemView.InternalMove) # self.ui.completedBlocks.setSelectionMode(QAbstractItemView.MultiSelection) # self.ui.quequedBlocks.setDragDropMode(QAbstractItemView.InternalMove) # self.ui.quequedBlocks.setSelectionMode(QAbstractItemView.MultiSelection) # Make Row Objects # Row Settings | 1.902498 | 2 |
application/workprogramsapp/files_export/views.py | ValeriyaArt/analytics_backend | 1 | 6625202 | <filename>application/workprogramsapp/files_export/views.py
import datetime
from docxtpl import DocxTemplate
from django.http import HttpResponse
from collections import OrderedDict
from rest_framework import generics
from rest_framework.permissions import IsAuthenticated
import html2text
from ..models import AcademicPlan, Zun, WorkProgramInFieldOfStudy, FieldOfStudy, WorkProgram
from ..serializers import WorkProgramSerializer
"""Скачивание рпд в формате docx/pdf"""
def render_context(context, **kwargs):
""" Функция, которая возвращает context с параметрами для шаблона """
fs_obj = FieldOfStudy.objects.get(pk=kwargs['field_of_study_id'])
ap_obj = AcademicPlan.objects.get(pk=kwargs['academic_plan_id'])
try:
for wpcb in context['work_program_in_change_block']:
if wpcb['discipline_block_module']['descipline_block']['academic_plan'][
'educational_profile'] == ap_obj.educational_profile:
wpcb_pk = wpcb['id']
semester = [{'s': i, 'c': wpcb['credit_units'][i]} for i in range(len(wpcb['credit_units'])) if
wpcb['credit_units'] if wpcb['credit_units'][i] != 0]
except:
semester = [{'s': '-', 'c': '-', 'h': '-', 'e': '-'}]
wpcb_pk = context['work_program_in_change_block'][0]['id']
wp_in_fs = WorkProgramInFieldOfStudy.objects.get(work_program_change_in_discipline_block_module__id=wpcb_pk,
work_program__id=context['id'])
zun_obj = Zun.objects.filter(wp_in_fs=wp_in_fs)
tbl_competence = []
for z in zun_obj:
outcomes = [o.item.name for o in z.items.all()]
tbl_competence.append(
{'competence': str(z.indicator_in_zun.competence.number) + ' ' + str(z.indicator_in_zun.competence.name),
'indicator': str(z.indicator_in_zun.number) + ' ' + str(z.indicator_in_zun.name),
'outcomes': ', '.join(map(str, set(outcomes)))})
contact_work, lecture_classes, laboratory, practical_lessons, SRO, total_hours = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
online_sections, url_online_course, evaluation_tools = [], [], []
for i in context['discipline_sections']:
online_names, topics_list = [], []
if i['contact_work'] is None:
i['contact_work'] = ''
else:
contact_work += float(i['contact_work'])
if i['lecture_classes'] is None:
i['lecture_classes'] = ''
else:
lecture_classes += float(i['lecture_classes'])
if i['laboratory'] is None:
i['laboratory'] = ''
else:
laboratory += float(i['laboratory'])
if i['practical_lessons'] is None:
i['practical_lessons'] = ''
else:
practical_lessons += float(i['practical_lessons'])
if i['SRO'] is None:
i['SRO'] = ''
else:
SRO += float(i['SRO'])
total_hours += 0.0 if i['total_hours'] is None else float(i['total_hours'])
evaluation_tools.extend(i['evaluation_tools'])
for j in i['topics']:
topics_list.append(j['description'])
if j['url_online_course'] is None:
pass
else:
online_sections.append(i['ordinal_number'])
online_names.append(j['url_online_course']['title'])
if j['url_online_course'] not in url_online_course:
url_online_course.append(j['url_online_course'])
i['online_list'] = ', '.join(map(str, set(online_names)))
i['topics_list'] = ', '.join(map(str, set(topics_list)))
template_context = OrderedDict()
template_context['title'] = context['title']
template_context['field_of_study_code'] = fs_obj.number
template_context['field_of_study'] = fs_obj.title
if context['qualification'] == 'bachelor':
template_context['QUALIFICATION'] = 'БАКАЛАВР'
elif context['qualification'] == 'master':
template_context['QUALIFICATION'] = 'МАГИСТР'
else:
template_context['QUALIFICATION'] = 'ИНЖЕНЕР'
template_context['academic_plan'] = ap_obj.educational_profile
template_context['semester'] = semester
template_context['total_hours_1'] = [contact_work, lecture_classes, laboratory, practical_lessons, SRO]
template_context['year'] = kwargs['year']
if context['authors'] is None:
template_context['author'] = ''
template_context['authors'] = ''
else:
template_context['author'] = context['authors']
template_context['authors'] = context['authors'].split(', ')
template_context['tbl_competence'] = tbl_competence
template_context['total_hours'] = [contact_work, lecture_classes, laboratory, practical_lessons, SRO, total_hours]
template_context['is_no_online'] = True if online_sections == 0 else False
template_context['is_online'] = False if online_sections == 0 else True
template_context['X'] = 'X'
template_context['sections_online'] = ', '.join(map(str, set(online_sections)))
template_context['sections_replaced_onl'] = ''
template_context['bibliographic_reference'] = context['bibliographic_reference']
template_context['online_course'] = url_online_course
template_context['evaluation_tools'] = evaluation_tools
filename = str(fs_obj.number) + '_' + str(context['discipline_code']) + '_' + str(
context['qualification']) + '_' + str(kwargs['year']) + '_' + datetime.datetime.today().strftime(
"%Y-%m-%d-%H.%M.%S") + '.docx'
"""Данные для таблицы планирования результатов обучения по дисциплине (БаРС)"""
outcomes_evaluation_tool = []
current_evaluation_tool = []
items_max = []
items_min = []
for item in context['discipline_sections']:
for i in item['evaluation_tools']:
i['description'] = html2text.html2text(i['description'])
template_context['discipline_section'] = context['discipline_sections']
for item in context['outcomes']:
try:
for i in item['evaluation_tool']:
i['description'] = html2text.html2text(i['description'])
current_evaluation_tool.append(i)
if i['check_point']:
outcomes_evaluation_tool.append(i)
items_max.append(i['max'])
items_min.append(i['min'])
except:
continue
template_context['outcomes_evaluation_tool'] = outcomes_evaluation_tool
template_context['current_evaluation_tool'] = current_evaluation_tool
certification_evaluation_tools = []
for item in context['certification_evaluation_tools']:
try:
if item['max'] is not None:
items_max.append(item['max'])
if item['min'] is not None:
items_min.append(item['min'])
item['description'] = html2text.html2text(item['description'])
if item['type'] == '1':
item['type'] = 'Exam'
elif item['type'] == '2':
item['type'] = 'Differentiated credit'
elif item['type'] == '3':
item['type'] = 'Offset'
elif item['type'] == '4':
item['type'] = 'Coursework'
certification_evaluation_tools.append(item)
except:
continue
template_context['certification_evaluation_tools'] = certification_evaluation_tools
template_context['outcomes_max_all'] = sum(items_max) + int(context['extra_points'])
template_context['outcomes_min_all'] = sum(items_min)
template_context['extra_points'] = context['extra_points']
return template_context, filename
"""Контроллер для выгрузки docx-файла РПД"""
class DocxFileExportView(generics.ListAPIView):
"""
Возвращает РПД в формате docx в браузере
"""
queryset = WorkProgram.objects.all()
serializer = WorkProgramSerializer
permission_classes = [IsAuthenticated]
def get(self, request, *args, **kwargs):
tpl = DocxTemplate('/application/static-backend/export_template/RPD_shablon_2020_new.docx')
queryset = WorkProgram.objects.get(pk=kwargs['pk'])
serializer = WorkProgramSerializer(queryset)
data = dict(serializer.data)
context, filename = render_context(data, field_of_study_id=kwargs['fs_id'],
academic_plan_id=kwargs['ap_id'], year=kwargs['year'])
tpl.render(context)
# tpl.save('/application/'+str(filename)) #-- сохранение в папку локально (нужно указать актуальный путь!)
response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
response['Content-Disposition'] = 'inline; filename="%s"' % filename
tpl.save(response)
return response
def render_context_syllabus(context, **kwargs):
""" Функция, которая возвращает context с параметрами для шаблона """
fs_obj = FieldOfStudy.objects.get(pk=kwargs['field_of_study_id'])
ap_obj = AcademicPlan.objects.get(pk=kwargs['academic_plan_id'])
try:
for wpcb in context['work_program_in_change_block']:
if wpcb['discipline_block_module']['descipline_block']['academic_plan'][
'educational_profile'] != ap_obj.educational_profile:
continue
semester = [(i, wpcb['credit_units'][i], wpcb['change_type']) for i in range(len(wpcb['credit_units']))
if wpcb['credit_units'] if wpcb['credit_units'][i] != 0]
except:
semester = [('-', '-', ' ')]
template_context = OrderedDict()
if context['qualification'] == 'bachelor':
template_context['Qualification'] = 'Бакалавриат'
elif context['qualification'] == 'master':
template_context['Qualification'] = 'Магистратура'
else:
template_context['Qualification'] = 'Специалитет'
template_context['Name'] = context['title']
# template_context['status'] = context['work_program_in_change_block']['change_type']
template_context['fs_code'] = str(fs_obj.number) + ' ' + str(fs_obj.title)
template_context['academic_plan'] = ap_obj.educational_profile
template_context['semester'] = semester[0][0]
template_context['credit'] = semester[0][1]
template_context['author'] = context['authors']
template_context['description'] = context['description']
template_context['prerequisites'] = ', '.join(map(str, [i['item']['name'] for i in context['prerequisites']]))
template_context['outcomes'] = ', '.join(map(str, [i['item']['name'] for i in context['outcomes']]))
template_context['concurent'] = '-'
template_context['discipline_section'] = context['discipline_sections']
evaluation_tools, temp = [], []
for i in context['discipline_sections']:
for tool in i['evaluation_tools']:
if tool['type'] not in evaluation_tools:
evaluation_tools.append(tool['type'])
i['topics_list'] = '. '.join(map(str, set([j['description'] for j in i['topics']])))
template_context['evaluation_tools'] = evaluation_tools
template_context['bibliographic_reference'] = context['bibliographic_reference']
filename = 'Syllabus_' + str(context['title']) + str(kwargs['year']) + '.docx'
return template_context, filename
class SyllabusExportView(generics.ListAPIView):
"""Возвращает РПД в формате docx в браузере"""
queryset = WorkProgram.objects.all()
serializer = WorkProgramSerializer
permission_classes = [IsAuthenticated, ]
def get(self, request, *args, **kwargs):
tpl = DocxTemplate('/application/static-backend/export_template/Syllabus_shablon_2020_new.docx')
queryset = WorkProgram.objects.get(pk=kwargs['pk'])
serializer = WorkProgramSerializer(queryset)
data = dict(serializer.data)
context, filename = render_context_syllabus(data, field_of_study_id=kwargs['fs_id'],
academic_plan_id=kwargs['ap_id'], year=kwargs['year'])
tpl.render(context)
# tpl.save('/application/upload/'+filename) #-- сохранение в папку локально (нужно указать актуальный путь!)
response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
response['Content-Disposition'] = 'inline; filename="%s"' % str(filename)
tpl.save(response)
return response
| <filename>application/workprogramsapp/files_export/views.py
import datetime
from docxtpl import DocxTemplate
from django.http import HttpResponse
from collections import OrderedDict
from rest_framework import generics
from rest_framework.permissions import IsAuthenticated
import html2text
from ..models import AcademicPlan, Zun, WorkProgramInFieldOfStudy, FieldOfStudy, WorkProgram
from ..serializers import WorkProgramSerializer
"""Скачивание рпд в формате docx/pdf"""
def render_context(context, **kwargs):
""" Функция, которая возвращает context с параметрами для шаблона """
fs_obj = FieldOfStudy.objects.get(pk=kwargs['field_of_study_id'])
ap_obj = AcademicPlan.objects.get(pk=kwargs['academic_plan_id'])
try:
for wpcb in context['work_program_in_change_block']:
if wpcb['discipline_block_module']['descipline_block']['academic_plan'][
'educational_profile'] == ap_obj.educational_profile:
wpcb_pk = wpcb['id']
semester = [{'s': i, 'c': wpcb['credit_units'][i]} for i in range(len(wpcb['credit_units'])) if
wpcb['credit_units'] if wpcb['credit_units'][i] != 0]
except:
semester = [{'s': '-', 'c': '-', 'h': '-', 'e': '-'}]
wpcb_pk = context['work_program_in_change_block'][0]['id']
wp_in_fs = WorkProgramInFieldOfStudy.objects.get(work_program_change_in_discipline_block_module__id=wpcb_pk,
work_program__id=context['id'])
zun_obj = Zun.objects.filter(wp_in_fs=wp_in_fs)
tbl_competence = []
for z in zun_obj:
outcomes = [o.item.name for o in z.items.all()]
tbl_competence.append(
{'competence': str(z.indicator_in_zun.competence.number) + ' ' + str(z.indicator_in_zun.competence.name),
'indicator': str(z.indicator_in_zun.number) + ' ' + str(z.indicator_in_zun.name),
'outcomes': ', '.join(map(str, set(outcomes)))})
contact_work, lecture_classes, laboratory, practical_lessons, SRO, total_hours = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
online_sections, url_online_course, evaluation_tools = [], [], []
for i in context['discipline_sections']:
online_names, topics_list = [], []
if i['contact_work'] is None:
i['contact_work'] = ''
else:
contact_work += float(i['contact_work'])
if i['lecture_classes'] is None:
i['lecture_classes'] = ''
else:
lecture_classes += float(i['lecture_classes'])
if i['laboratory'] is None:
i['laboratory'] = ''
else:
laboratory += float(i['laboratory'])
if i['practical_lessons'] is None:
i['practical_lessons'] = ''
else:
practical_lessons += float(i['practical_lessons'])
if i['SRO'] is None:
i['SRO'] = ''
else:
SRO += float(i['SRO'])
total_hours += 0.0 if i['total_hours'] is None else float(i['total_hours'])
evaluation_tools.extend(i['evaluation_tools'])
for j in i['topics']:
topics_list.append(j['description'])
if j['url_online_course'] is None:
pass
else:
online_sections.append(i['ordinal_number'])
online_names.append(j['url_online_course']['title'])
if j['url_online_course'] not in url_online_course:
url_online_course.append(j['url_online_course'])
i['online_list'] = ', '.join(map(str, set(online_names)))
i['topics_list'] = ', '.join(map(str, set(topics_list)))
template_context = OrderedDict()
template_context['title'] = context['title']
template_context['field_of_study_code'] = fs_obj.number
template_context['field_of_study'] = fs_obj.title
if context['qualification'] == 'bachelor':
template_context['QUALIFICATION'] = 'БАКАЛАВР'
elif context['qualification'] == 'master':
template_context['QUALIFICATION'] = 'МАГИСТР'
else:
template_context['QUALIFICATION'] = 'ИНЖЕНЕР'
template_context['academic_plan'] = ap_obj.educational_profile
template_context['semester'] = semester
template_context['total_hours_1'] = [contact_work, lecture_classes, laboratory, practical_lessons, SRO]
template_context['year'] = kwargs['year']
if context['authors'] is None:
template_context['author'] = ''
template_context['authors'] = ''
else:
template_context['author'] = context['authors']
template_context['authors'] = context['authors'].split(', ')
template_context['tbl_competence'] = tbl_competence
template_context['total_hours'] = [contact_work, lecture_classes, laboratory, practical_lessons, SRO, total_hours]
template_context['is_no_online'] = True if online_sections == 0 else False
template_context['is_online'] = False if online_sections == 0 else True
template_context['X'] = 'X'
template_context['sections_online'] = ', '.join(map(str, set(online_sections)))
template_context['sections_replaced_onl'] = ''
template_context['bibliographic_reference'] = context['bibliographic_reference']
template_context['online_course'] = url_online_course
template_context['evaluation_tools'] = evaluation_tools
filename = str(fs_obj.number) + '_' + str(context['discipline_code']) + '_' + str(
context['qualification']) + '_' + str(kwargs['year']) + '_' + datetime.datetime.today().strftime(
"%Y-%m-%d-%H.%M.%S") + '.docx'
"""Данные для таблицы планирования результатов обучения по дисциплине (БаРС)"""
outcomes_evaluation_tool = []
current_evaluation_tool = []
items_max = []
items_min = []
for item in context['discipline_sections']:
for i in item['evaluation_tools']:
i['description'] = html2text.html2text(i['description'])
template_context['discipline_section'] = context['discipline_sections']
for item in context['outcomes']:
try:
for i in item['evaluation_tool']:
i['description'] = html2text.html2text(i['description'])
current_evaluation_tool.append(i)
if i['check_point']:
outcomes_evaluation_tool.append(i)
items_max.append(i['max'])
items_min.append(i['min'])
except:
continue
template_context['outcomes_evaluation_tool'] = outcomes_evaluation_tool
template_context['current_evaluation_tool'] = current_evaluation_tool
certification_evaluation_tools = []
for item in context['certification_evaluation_tools']:
try:
if item['max'] is not None:
items_max.append(item['max'])
if item['min'] is not None:
items_min.append(item['min'])
item['description'] = html2text.html2text(item['description'])
if item['type'] == '1':
item['type'] = 'Exam'
elif item['type'] == '2':
item['type'] = 'Differentiated credit'
elif item['type'] == '3':
item['type'] = 'Offset'
elif item['type'] == '4':
item['type'] = 'Coursework'
certification_evaluation_tools.append(item)
except:
continue
template_context['certification_evaluation_tools'] = certification_evaluation_tools
template_context['outcomes_max_all'] = sum(items_max) + int(context['extra_points'])
template_context['outcomes_min_all'] = sum(items_min)
template_context['extra_points'] = context['extra_points']
return template_context, filename
"""Контроллер для выгрузки docx-файла РПД"""
class DocxFileExportView(generics.ListAPIView):
"""
Возвращает РПД в формате docx в браузере
"""
queryset = WorkProgram.objects.all()
serializer = WorkProgramSerializer
permission_classes = [IsAuthenticated]
def get(self, request, *args, **kwargs):
tpl = DocxTemplate('/application/static-backend/export_template/RPD_shablon_2020_new.docx')
queryset = WorkProgram.objects.get(pk=kwargs['pk'])
serializer = WorkProgramSerializer(queryset)
data = dict(serializer.data)
context, filename = render_context(data, field_of_study_id=kwargs['fs_id'],
academic_plan_id=kwargs['ap_id'], year=kwargs['year'])
tpl.render(context)
# tpl.save('/application/'+str(filename)) #-- сохранение в папку локально (нужно указать актуальный путь!)
response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
response['Content-Disposition'] = 'inline; filename="%s"' % filename
tpl.save(response)
return response
def render_context_syllabus(context, **kwargs):
""" Функция, которая возвращает context с параметрами для шаблона """
fs_obj = FieldOfStudy.objects.get(pk=kwargs['field_of_study_id'])
ap_obj = AcademicPlan.objects.get(pk=kwargs['academic_plan_id'])
try:
for wpcb in context['work_program_in_change_block']:
if wpcb['discipline_block_module']['descipline_block']['academic_plan'][
'educational_profile'] != ap_obj.educational_profile:
continue
semester = [(i, wpcb['credit_units'][i], wpcb['change_type']) for i in range(len(wpcb['credit_units']))
if wpcb['credit_units'] if wpcb['credit_units'][i] != 0]
except:
semester = [('-', '-', ' ')]
template_context = OrderedDict()
if context['qualification'] == 'bachelor':
template_context['Qualification'] = 'Бакалавриат'
elif context['qualification'] == 'master':
template_context['Qualification'] = 'Магистратура'
else:
template_context['Qualification'] = 'Специалитет'
template_context['Name'] = context['title']
# template_context['status'] = context['work_program_in_change_block']['change_type']
template_context['fs_code'] = str(fs_obj.number) + ' ' + str(fs_obj.title)
template_context['academic_plan'] = ap_obj.educational_profile
template_context['semester'] = semester[0][0]
template_context['credit'] = semester[0][1]
template_context['author'] = context['authors']
template_context['description'] = context['description']
template_context['prerequisites'] = ', '.join(map(str, [i['item']['name'] for i in context['prerequisites']]))
template_context['outcomes'] = ', '.join(map(str, [i['item']['name'] for i in context['outcomes']]))
template_context['concurent'] = '-'
template_context['discipline_section'] = context['discipline_sections']
evaluation_tools, temp = [], []
for i in context['discipline_sections']:
for tool in i['evaluation_tools']:
if tool['type'] not in evaluation_tools:
evaluation_tools.append(tool['type'])
i['topics_list'] = '. '.join(map(str, set([j['description'] for j in i['topics']])))
template_context['evaluation_tools'] = evaluation_tools
template_context['bibliographic_reference'] = context['bibliographic_reference']
filename = 'Syllabus_' + str(context['title']) + str(kwargs['year']) + '.docx'
return template_context, filename
class SyllabusExportView(generics.ListAPIView):
"""Возвращает РПД в формате docx в браузере"""
queryset = WorkProgram.objects.all()
serializer = WorkProgramSerializer
permission_classes = [IsAuthenticated, ]
def get(self, request, *args, **kwargs):
tpl = DocxTemplate('/application/static-backend/export_template/Syllabus_shablon_2020_new.docx')
queryset = WorkProgram.objects.get(pk=kwargs['pk'])
serializer = WorkProgramSerializer(queryset)
data = dict(serializer.data)
context, filename = render_context_syllabus(data, field_of_study_id=kwargs['fs_id'],
academic_plan_id=kwargs['ap_id'], year=kwargs['year'])
tpl.render(context)
# tpl.save('/application/upload/'+filename) #-- сохранение в папку локально (нужно указать актуальный путь!)
response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
response['Content-Disposition'] = 'inline; filename="%s"' % str(filename)
tpl.save(response)
return response
| ru | 0.949792 | Скачивание рпд в формате docx/pdf Функция, которая возвращает context с параметрами для шаблона Данные для таблицы планирования результатов обучения по дисциплине (БаРС) Контроллер для выгрузки docx-файла РПД Возвращает РПД в формате docx в браузере # tpl.save('/application/'+str(filename)) #-- сохранение в папку локально (нужно указать актуальный путь!) Функция, которая возвращает context с параметрами для шаблона # template_context['status'] = context['work_program_in_change_block']['change_type'] Возвращает РПД в формате docx в браузере # tpl.save('/application/upload/'+filename) #-- сохранение в папку локально (нужно указать актуальный путь!) | 2.188554 | 2 |
4-3-2.py | MasazI/python-r-stan-bayesian-model | 2 | 6625203 | ###############
#
# Transform R to Python Copyright (c) 2019 <NAME> Released under the MIT license
#
###############
import os
import numpy as np
import pystan
import pandas
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
fish_num_climate_4 = pandas.read_csv('4-3-1-fish-num-4.csv')
print(fish_num_climate_4.head())
print(fish_num_climate_4.describe())
sns.scatterplot(
x='temperature',
y='fish_num',
hue='human',
data=fish_num_climate_4
)
plt.show()
fish_num_climate_4_d = pandas.get_dummies(fish_num_climate_4, columns=["human"])
print(fish_num_climate_4_d.head())
fish_num = fish_num_climate_4_d['fish_num']
sample_num = len(fish_num)
temperature = fish_num_climate_4_d['temperature']
# creating teamID
le = LabelEncoder()
le = le.fit(fish_num_climate_4['human'])
fish_num_climate_4['human'] = le.transform(fish_num_climate_4['human'])
sns.scatterplot(
x='temperature',
y='fish_num',
hue='human',
legend="full",
data=fish_num_climate_4
)
plt.show()
human_id = fish_num_climate_4['human'].values
human_id = human_id + 1
human_num = len(np.unique(human_id))
stan_data = {
'N': sample_num,
'fish_num': fish_num,
'temp': temperature,
'human_id': human_id,
'N_human': human_num
}
if os.path.exists('4-3-2-poisson-glmm.pkl'):
sm = pickle.load(open('4-3-2-poisson-glmm.pkl', 'rb'))
# sm = pystan.StanModel(file='4-3-1-poisson-glmm.stan')
else:
# a model using prior for mu and sigma.
sm = pystan.StanModel(file='4-3-2-poisson-glmm.stan')
control = {
'adapt_delta': 0.9999,
'max_treedepth': 16
}
mcmc_result = sm.sampling(
data=stan_data,
seed=1,
chains=4,
iter=2000,
warmup=1000,
control=control,
thin=1
)
print(mcmc_result)
mcmc_result.plot()
plt.show()
# saving compiled model
if not os.path.exists('4-3-2-poisson-glmm.pkl'):
with open('4-3-2-poisson-glmm.pkl', 'wb') as f:
pickle.dump(sm, f)
mcmc_sample = mcmc_result.extract()
print(mcmc_sample)
# visualization
label_temp = np.arange(10,20)
df = pandas.DataFrame(mcmc_sample, columns=['Intercept', 'b_temp'])
print(df.head())
r = mcmc_sample['r']
df_r = pandas.DataFrame(r)
print(df_r.head())
t = mcmc_sample['t']
df_t = pandas.DataFrame(t)
print(df_t.head())
for h in np.arange(human_num):
y = []
for i in label_temp:
y.append(np.exp(df['Intercept'].mean() +
(df['b_temp'].mean() + df_t[h].mean()) * i +
df_r[h].mean()))
plt.plot(label_temp, np.array(y), 'red', label='%d' % (h + 1))
plt.scatter(fish_num_climate_4.query('human == %d' % h)["temperature"],
fish_num_climate_4.query('human == %d' % h)["fish_num"],
c='r')
plt.legend()
plt.show()
| ###############
#
# Transform R to Python Copyright (c) 2019 <NAME> Released under the MIT license
#
###############
import os
import numpy as np
import pystan
import pandas
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
# --- Data load & exploration -------------------------------------------------
fish_num_climate_4 = pandas.read_csv('4-3-1-fish-num-4.csv')
print(fish_num_climate_4.head())
print(fish_num_climate_4.describe())
# Raw counts vs. temperature, colored by observer ('human').
sns.scatterplot(
    x='temperature',
    y='fish_num',
    hue='human',
    data=fish_num_climate_4
)
plt.show()
fish_num_climate_4_d = pandas.get_dummies(fish_num_climate_4, columns=["human"])
print(fish_num_climate_4_d.head())
fish_num = fish_num_climate_4_d['fish_num']
sample_num = len(fish_num)
temperature = fish_num_climate_4_d['temperature']
# creating teamID: encode the observer labels as integer codes.
le = LabelEncoder()
le = le.fit(fish_num_climate_4['human'])
fish_num_climate_4['human'] = le.transform(fish_num_climate_4['human'])
sns.scatterplot(
    x='temperature',
    y='fish_num',
    hue='human',
    legend="full",
    data=fish_num_climate_4
)
plt.show()
# Stan expects 1-based group indices, hence the +1 shift.
human_id = fish_num_climate_4['human'].values
human_id = human_id + 1
human_num = len(np.unique(human_id))
stan_data = {
    'N': sample_num,
    'fish_num': fish_num,
    'temp': temperature,
    'human_id': human_id,
    'N_human': human_num
}
if os.path.exists('4-3-2-poisson-glmm.pkl'):
    # Reuse the previously compiled model.  ``with`` closes the file handle;
    # the original ``pickle.load(open(...))`` leaked it.
    with open('4-3-2-poisson-glmm.pkl', 'rb') as f:
        sm = pickle.load(f)
    # sm = pystan.StanModel(file='4-3-1-poisson-glmm.stan')
else:
    # a model using prior for mu and sigma.
    sm = pystan.StanModel(file='4-3-2-poisson-glmm.stan')
# Stricter sampler settings (high adapt_delta / deep trees) to reduce
# divergent transitions.
control = {
    'adapt_delta': 0.9999,
    'max_treedepth': 16
}
mcmc_result = sm.sampling(
    data=stan_data,
    seed=1,
    chains=4,
    iter=2000,
    warmup=1000,
    control=control,
    thin=1
)
print(mcmc_result)
mcmc_result.plot()
plt.show()
# saving compiled model (only when it was compiled in this run)
if not os.path.exists('4-3-2-poisson-glmm.pkl'):
    with open('4-3-2-poisson-glmm.pkl', 'wb') as f:
        pickle.dump(sm, f)
mcmc_sample = mcmc_result.extract()
print(mcmc_sample)
# visualization: posterior-mean regression curve per observer.
label_temp = np.arange(10, 20)
df = pandas.DataFrame(mcmc_sample, columns=['Intercept', 'b_temp'])
print(df.head())
r = mcmc_sample['r']  # presumably per-observer random intercepts -- confirm against the .stan file
df_r = pandas.DataFrame(r)
print(df_r.head())
t = mcmc_sample['t']  # presumably per-observer random slopes -- confirm against the .stan file
df_t = pandas.DataFrame(t)
print(df_t.head())
for h in np.arange(human_num):
    y = []
    for i in label_temp:
        # log-link Poisson mean: exp(Intercept + (b_temp + t_h) * temp + r_h)
        y.append(np.exp(df['Intercept'].mean() +
                        (df['b_temp'].mean() + df_t[h].mean()) * i +
                        df_r[h].mean()))
    plt.plot(label_temp, np.array(y), 'red', label='%d' % (h + 1))
    plt.scatter(fish_num_climate_4.query('human == %d' % h)["temperature"],
                fish_num_climate_4.query('human == %d' % h)["fish_num"],
                c='r')
plt.legend()
plt.show()
| en | 0.580876 | ############### # # Transform R to Python Copyright (c) 2019 <NAME> Released under the MIT license # ############### # creating teamID # sm = pystan.StanModel(file='4-3-1-poisson-glmm.stan') # a model using prior for mu and sigma. # saving compiled model # visualization | 3.099433 | 3 |
app/migrations/versions/097d6eedce34.py | UWA-CITS3200-18-2021/ReSQ | 1 | 6625204 | """empty message
Revision ID: 097d6eedce34
Revises:
Create Date: 2021-09-18 07:55:41.307362
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '097d6eed<PASSWORD>4'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``queue`` and ``user`` tables plus the username index."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Queue entries: one row per student waiting for / receiving help.
    op.create_table('queue',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('studentName', sa.String(length=64), nullable=False),
    sa.Column('studentNumber', sa.Integer(), nullable=False),
    sa.Column('unitCode', sa.String(length=8), nullable=False),
    # NOTE(review): 'Grammer' and 'Resaerch Proposal' are misspelled enum
    # values; they are part of the deployed schema, so fixing them requires
    # a follow-up migration rather than an edit to this one.
    sa.Column('enquiry', sa.Enum('Essay', 'Grammer', 'Lab Report', 'Assignment', 'Literature Research', 'Resaerch Proposal', 'Thesis/Paper', 'IELTS', 'Oral Presentation', 'Referencing', 'Finding Sources', 'Endnote', 'Other', name='enquiryType'), nullable=False),
    sa.Column('queue', sa.Enum('STUDYSmarter', 'Librarian', name='queueType'), nullable=False),
    sa.Column('status', sa.Enum('Ended', 'In Queue', 'In Session', 'Completed', name='statusEnum'), nullable=False),
    sa.Column('enterQueueTime', sa.DateTime(), nullable=False),
    sa.Column('changeSessionTime', sa.DateTime(), nullable=True),
    sa.Column('exitSessionTime', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # Application accounts used to manage the queue.
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=64), nullable=False),
    sa.Column('password_hash', sa.String(length=128), nullable=False),
    sa.Column('role', sa.Enum('Admin', 'Student Helper', name='roleEnum'), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # Usernames must be unique; indexed for login lookups.
    op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
    # ### end Alembic commands ###
def downgrade():
    """Revert :func:`upgrade`: drop the index, then both tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_user_username'), table_name='user')
    op.drop_table('user')
    op.drop_table('queue')
# ### end Alembic commands ### | """empty message
Revision ID: 097d6eedce34
Revises:
Create Date: 2021-09-18 07:55:41.307362
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '097d6eed<PASSWORD>4'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``queue`` and ``user`` tables plus the username index."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Queue entries: one row per student waiting for / receiving help.
    op.create_table('queue',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('studentName', sa.String(length=64), nullable=False),
    sa.Column('studentNumber', sa.Integer(), nullable=False),
    sa.Column('unitCode', sa.String(length=8), nullable=False),
    # NOTE(review): 'Grammer' and 'Resaerch Proposal' are misspelled enum
    # values; they are part of the deployed schema, so fixing them requires
    # a follow-up migration rather than an edit to this one.
    sa.Column('enquiry', sa.Enum('Essay', 'Grammer', 'Lab Report', 'Assignment', 'Literature Research', 'Resaerch Proposal', 'Thesis/Paper', 'IELTS', 'Oral Presentation', 'Referencing', 'Finding Sources', 'Endnote', 'Other', name='enquiryType'), nullable=False),
    sa.Column('queue', sa.Enum('STUDYSmarter', 'Librarian', name='queueType'), nullable=False),
    sa.Column('status', sa.Enum('Ended', 'In Queue', 'In Session', 'Completed', name='statusEnum'), nullable=False),
    sa.Column('enterQueueTime', sa.DateTime(), nullable=False),
    sa.Column('changeSessionTime', sa.DateTime(), nullable=True),
    sa.Column('exitSessionTime', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # Application accounts used to manage the queue.
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=64), nullable=False),
    sa.Column('password_hash', sa.String(length=128), nullable=False),
    sa.Column('role', sa.Enum('Admin', 'Student Helper', name='roleEnum'), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # Usernames must be unique; indexed for login lookups.
    op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
    # ### end Alembic commands ###
def downgrade():
    """Revert :func:`upgrade`: drop the index, then both tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_user_username'), table_name='user')
    op.drop_table('user')
    op.drop_table('queue')
# ### end Alembic commands ### | en | 0.480807 | empty message Revision ID: 097d6eedce34 Revises: Create Date: 2021-09-18 07:55:41.307362 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.85467 | 2 |
johnny/transaction.py | bennylope/johnny-cache | 124 | 6625205 | <filename>johnny/transaction.py
from django.db import transaction, connection, DEFAULT_DB_ALIAS
from johnny import settings as johnny_settings
from johnny.compat import is_managed
from johnny.decorators import wraps, available_attrs
class TransactionManager(object):
    """
    TransactionManager is a wrapper around a cache_backend that is
    transaction aware.

    If we are in a transaction, it will return the locally cached version.

    * On rollback, it will flush all local caches
    * On commit, it will push them up to the real shared cache backend
      (ex. memcached).
    """
    # True once django.db.transaction has been monkey-patched by patch().
    _patched_var = False

    def __init__(self, cache_backend, keygen):
        # Imported lazily to avoid a circular import with johnny.cache.
        from johnny import cache, settings
        self.timeout = settings.MIDDLEWARE_SECONDS
        self.prefix = settings.MIDDLEWARE_KEY_PREFIX
        self.cache_backend = cache_backend
        self.local = cache.local
        self.keygen = keygen(self.prefix)
        self._originals = {}
        self._dirty_backup = {}
        self.local['trans_sids'] = {}

    def _get_sid(self, using=None):
        """Return (creating if needed) the savepoint-id stack for *using*."""
        if 'trans_sids' not in self.local:
            self.local['trans_sids'] = {}
        d = self.local['trans_sids']
        if using is None:
            using = DEFAULT_DB_ALIAS
        if using not in d:
            d[using] = []
        return d[using]

    def _clear_sid_stack(self, using=None):
        """Drop the savepoint bookkeeping once a transaction ends."""
        if using is None:
            using = DEFAULT_DB_ALIAS
        # NOTE(review): this removes the sid stacks for *all* aliases, not
        # just ``using`` -- confirm this is intended for multi-db setups.
        if using in self.local.get('trans_sids', {}):
            del self.local['trans_sids']

    def is_managed(self, using=None):
        return is_managed(using=using)

    def get(self, key, default=None, using=None):
        """Read ``key``: local store first while inside a managed, patched
        transaction, then any pending savepoints, then the real backend."""
        if self.is_managed(using) and self._patched_var:
            val = self.local.get(key, None)
            if val:
                return val
            if self._uses_savepoints():
                val = self._get_from_savepoints(key, using)
                if val:
                    return val
        return self.cache_backend.get(key, default)

    def _get_from_savepoints(self, key, using=None):
        # Search the savepoint stack from the most recent one backwards.
        sids = self._get_sid(using)
        cp = list(sids)
        cp.reverse()
        for sid in cp:
            if key in self.local[sid]:
                return self.local[sid][key]

    def _trunc_using(self, using):
        """Return a cache-key-safe segment for the db alias, hashing the
        tail when the mapped name would make keys too long."""
        if using is None:
            using = DEFAULT_DB_ALIAS
        using = johnny_settings.DB_CACHE_KEYS[using]
        if len(using) > 100:
            using = using[0:68] + self.keygen.gen_key(using[68:])
        return using

    def set(self, key, val, timeout=None, using=None):
        """
        Set will be using the generational key, so if another thread
        bumps this key, the localstore version will still be invalid.
        If the key is bumped during a transaction it will be new
        to the global cache on commit, so it will still be a bump.
        """
        if timeout is None:
            timeout = self.timeout
        if self.is_managed(using=using) and self._patched_var:
            self.local[key] = val
        else:
            self.cache_backend.set(key, val, timeout)

    def _clear(self, using=None):
        # Wipe every locally-buffered key belonging to this db alias.
        self.local.clear('%s_%s_*' %
                         (self.prefix, self._trunc_using(using)))

    def _flush(self, commit=True, using=None):
        """
        Flushes the internal cache, either to the memcache or rolls back
        """
        if commit:
            # XXX: multi-set?
            if self._uses_savepoints():
                self._commit_all_savepoints(using)
            c = self.local.mget('%s_%s_*' %
                                (self.prefix, self._trunc_using(using)))
            for key, value in c.items():
                self.cache_backend.set(key, value, self.timeout)
        else:
            if self._uses_savepoints():
                self._rollback_all_savepoints(using)
        self._clear(using)
        self._clear_sid_stack(using)

    def _patched(self, original, commit=True, unless_managed=False):
        """Wrap a commit/rollback function so the local buffer is flushed
        (commit) or discarded (rollback) at the transaction boundary."""
        @wraps(original, assigned=available_attrs(original))
        def newfun(using=None):
            original(using=using)
            # copying behavior of original func
            # if it is an 'unless_managed' version we should do nothing if
            # the transaction is managed
            if not unless_managed or not self.is_managed(using=using):
                self._flush(commit=commit, using=using)
        return newfun

    def _uses_savepoints(self):
        return connection.features.uses_savepoints

    def _sid_key(self, sid, using=None):
        """Normalize a savepoint id into the localstore key used for it."""
        if using is not None:
            prefix = 'trans_savepoint_%s' % using
        else:
            prefix = 'trans_savepoint'
        if sid is not None and sid.startswith(prefix):
            return sid
        return '%s_%s' % (prefix, sid)

    def _create_savepoint(self, sid, using=None):
        key = self._sid_key(sid, using)
        # get all local dirty items
        c = self.local.mget('%s_%s_*' %
                            (self.prefix, self._trunc_using(using)))
        # store them to a dictionary in the localstore
        if key not in self.local:
            self.local[key] = {}
        for k, v in c.items():
            self.local[key][k] = v
        # clear the dirty
        self._clear(using)
        # append the key to the savepoint stack
        sids = self._get_sid(using)
        if key not in sids:
            sids.append(key)

    def _rollback_savepoint(self, sid, using=None):
        """Discard every savepoint from the top of the stack down to and
        including ``sid``, plus the current dirty keys."""
        sids = self._get_sid(using)
        key = self._sid_key(sid, using)
        stack = []
        try:
            popped = None
            while popped != key:
                popped = sids.pop()
                stack.insert(0, popped)
            # delete items from localstore
            for i in stack:
                del self.local[i]
            # clear dirty
            self._clear(using)
        except IndexError:
            # key not found, don't delete from localstore, restore sid stack
            for i in stack:
                sids.insert(0, i)

    def _commit_savepoint(self, sid, using=None):
        # commit is not a commit but is in reality just a clear back to that
        # savepoint and adds the items back to the dirty transaction.
        key = self._sid_key(sid, using)
        sids = self._get_sid(using)
        stack = []
        try:
            popped = None
            while popped != key:
                popped = sids.pop()
                stack.insert(0, popped)
            self._store_dirty(using)
            for i in stack:
                for k, v in self.local.get(i, {}).items():
                    self.local[k] = v
                del self.local[i]
            self._restore_dirty(using)
        except IndexError:
            # ``sid`` was not on the stack; undo the pops.
            for i in stack:
                sids.insert(0, i)

    def _commit_all_savepoints(self, using=None):
        sids = self._get_sid(using)
        if sids:
            self._commit_savepoint(sids[0], using)

    def _rollback_all_savepoints(self, using=None):
        sids = self._get_sid(using)
        if sids:
            self._rollback_savepoint(sids[0], using)

    def _store_dirty(self, using=None):
        """Stash the current dirty keys aside (see _commit_savepoint)."""
        c = self.local.mget('%s_%s_*' %
                            (self.prefix, self._trunc_using(using)))
        backup = 'trans_dirty_store_%s' % self._trunc_using(using)
        self.local[backup] = {}
        for k, v in c.items():
            self.local[backup][k] = v
        self._clear(using)

    def _restore_dirty(self, using=None):
        """Merge the stashed dirty keys back and drop the stash."""
        backup = 'trans_dirty_store_%s' % self._trunc_using(using)
        for k, v in self.local.get(backup, {}).items():
            self.local[k] = v
        del self.local[backup]

    def _savepoint(self, original):
        @wraps(original, assigned=available_attrs(original))
        def newfun(using=None):
            if using is not None:
                sid = original(using=using)
            else:
                sid = original()
            if self._uses_savepoints():
                self._create_savepoint(sid, using)
            return sid
        return newfun

    def _savepoint_rollback(self, original):
        @wraps(original, assigned=available_attrs(original))
        def newfun(sid, *args, **kwargs):
            original(sid, *args, **kwargs)
            if self._uses_savepoints():
                # ``using`` may arrive positionally (first arg after sid) or
                # as a keyword.  The previous check (``len(args) == 2`` /
                # ``args[1]``) silently dropped a positionally supplied
                # alias; mirror _savepoint_commit's handling instead.
                if len(args) == 1:
                    using = args[0]
                else:
                    using = kwargs.get('using', None)
                self._rollback_savepoint(sid, using)
        return newfun

    def _savepoint_commit(self, original):
        @wraps(original, assigned=available_attrs(original))
        def newfun(sid, *args, **kwargs):
            original(sid, *args, **kwargs)
            if self._uses_savepoints():
                if len(args) == 1:
                    using = args[0]
                else:
                    using = kwargs.get('using', None)
                self._commit_savepoint(sid, using)
        return newfun

    def _getreal(self, name):
        # Prefer a saved 'real_<name>' attribute if something else already
        # patched the transaction module.
        return getattr(transaction, 'real_%s' % name,
                       getattr(transaction, name))

    def patch(self):
        """
        This function monkey patches commit and rollback

        writes to the cache should not happen until commit (unless our state
        isn't managed). It does not yet support savepoints.
        """
        if not self._patched_var:
            self._originals['rollback'] = self._getreal('rollback')
            self._originals['rollback_unless_managed'] = self._getreal('rollback_unless_managed')
            self._originals['commit'] = self._getreal('commit')
            self._originals['commit_unless_managed'] = self._getreal('commit_unless_managed')
            self._originals['savepoint'] = self._getreal('savepoint')
            self._originals['savepoint_rollback'] = self._getreal('savepoint_rollback')
            self._originals['savepoint_commit'] = self._getreal('savepoint_commit')
            transaction.rollback = self._patched(transaction.rollback, False)
            transaction.rollback_unless_managed = self._patched(
                transaction.rollback_unless_managed, False, unless_managed=True)
            transaction.commit = self._patched(transaction.commit, True)
            transaction.commit_unless_managed = self._patched(
                transaction.commit_unless_managed, True, unless_managed=True)
            transaction.savepoint = self._savepoint(transaction.savepoint)
            transaction.savepoint_rollback = self._savepoint_rollback(
                transaction.savepoint_rollback)
            transaction.savepoint_commit = self._savepoint_commit(
                transaction.savepoint_commit)
            self._patched_var = True

    def unpatch(self):
        """Restore the original transaction functions saved by patch()."""
        for fun in self._originals:
            setattr(transaction, fun, self._originals[fun])
        self._patched_var = False
| <filename>johnny/transaction.py
from django.db import transaction, connection, DEFAULT_DB_ALIAS
from johnny import settings as johnny_settings
from johnny.compat import is_managed
from johnny.decorators import wraps, available_attrs
class TransactionManager(object):
    """
    TransactionManager is a wrapper around a cache_backend that is
    transaction aware.

    If we are in a transaction, it will return the locally cached version.

    * On rollback, it will flush all local caches
    * On commit, it will push them up to the real shared cache backend
      (ex. memcached).
    """
    # True once django.db.transaction has been monkey-patched by patch().
    _patched_var = False

    def __init__(self, cache_backend, keygen):
        # Imported lazily to avoid a circular import with johnny.cache.
        from johnny import cache, settings
        self.timeout = settings.MIDDLEWARE_SECONDS
        self.prefix = settings.MIDDLEWARE_KEY_PREFIX
        self.cache_backend = cache_backend
        self.local = cache.local
        self.keygen = keygen(self.prefix)
        self._originals = {}
        self._dirty_backup = {}
        self.local['trans_sids'] = {}

    def _get_sid(self, using=None):
        """Return (creating if needed) the savepoint-id stack for *using*."""
        if 'trans_sids' not in self.local:
            self.local['trans_sids'] = {}
        d = self.local['trans_sids']
        if using is None:
            using = DEFAULT_DB_ALIAS
        if using not in d:
            d[using] = []
        return d[using]

    def _clear_sid_stack(self, using=None):
        """Drop the savepoint bookkeeping once a transaction ends."""
        if using is None:
            using = DEFAULT_DB_ALIAS
        # NOTE(review): this removes the sid stacks for *all* aliases, not
        # just ``using`` -- confirm this is intended for multi-db setups.
        if using in self.local.get('trans_sids', {}):
            del self.local['trans_sids']

    def is_managed(self, using=None):
        return is_managed(using=using)

    def get(self, key, default=None, using=None):
        """Read ``key``: local store first while inside a managed, patched
        transaction, then any pending savepoints, then the real backend."""
        if self.is_managed(using) and self._patched_var:
            val = self.local.get(key, None)
            if val:
                return val
            if self._uses_savepoints():
                val = self._get_from_savepoints(key, using)
                if val:
                    return val
        return self.cache_backend.get(key, default)

    def _get_from_savepoints(self, key, using=None):
        # Search the savepoint stack from the most recent one backwards.
        sids = self._get_sid(using)
        cp = list(sids)
        cp.reverse()
        for sid in cp:
            if key in self.local[sid]:
                return self.local[sid][key]

    def _trunc_using(self, using):
        """Return a cache-key-safe segment for the db alias, hashing the
        tail when the mapped name would make keys too long."""
        if using is None:
            using = DEFAULT_DB_ALIAS
        using = johnny_settings.DB_CACHE_KEYS[using]
        if len(using) > 100:
            using = using[0:68] + self.keygen.gen_key(using[68:])
        return using

    def set(self, key, val, timeout=None, using=None):
        """
        Set will be using the generational key, so if another thread
        bumps this key, the localstore version will still be invalid.
        If the key is bumped during a transaction it will be new
        to the global cache on commit, so it will still be a bump.
        """
        if timeout is None:
            timeout = self.timeout
        if self.is_managed(using=using) and self._patched_var:
            self.local[key] = val
        else:
            self.cache_backend.set(key, val, timeout)

    def _clear(self, using=None):
        # Wipe every locally-buffered key belonging to this db alias.
        self.local.clear('%s_%s_*' %
                         (self.prefix, self._trunc_using(using)))

    def _flush(self, commit=True, using=None):
        """
        Flushes the internal cache, either to the memcache or rolls back
        """
        if commit:
            # XXX: multi-set?
            if self._uses_savepoints():
                self._commit_all_savepoints(using)
            c = self.local.mget('%s_%s_*' %
                                (self.prefix, self._trunc_using(using)))
            for key, value in c.items():
                self.cache_backend.set(key, value, self.timeout)
        else:
            if self._uses_savepoints():
                self._rollback_all_savepoints(using)
        self._clear(using)
        self._clear_sid_stack(using)

    def _patched(self, original, commit=True, unless_managed=False):
        """Wrap a commit/rollback function so the local buffer is flushed
        (commit) or discarded (rollback) at the transaction boundary."""
        @wraps(original, assigned=available_attrs(original))
        def newfun(using=None):
            original(using=using)
            # copying behavior of original func
            # if it is an 'unless_managed' version we should do nothing if
            # the transaction is managed
            if not unless_managed or not self.is_managed(using=using):
                self._flush(commit=commit, using=using)
        return newfun

    def _uses_savepoints(self):
        return connection.features.uses_savepoints

    def _sid_key(self, sid, using=None):
        """Normalize a savepoint id into the localstore key used for it."""
        if using is not None:
            prefix = 'trans_savepoint_%s' % using
        else:
            prefix = 'trans_savepoint'
        if sid is not None and sid.startswith(prefix):
            return sid
        return '%s_%s' % (prefix, sid)

    def _create_savepoint(self, sid, using=None):
        key = self._sid_key(sid, using)
        # get all local dirty items
        c = self.local.mget('%s_%s_*' %
                            (self.prefix, self._trunc_using(using)))
        # store them to a dictionary in the localstore
        if key not in self.local:
            self.local[key] = {}
        for k, v in c.items():
            self.local[key][k] = v
        # clear the dirty
        self._clear(using)
        # append the key to the savepoint stack
        sids = self._get_sid(using)
        if key not in sids:
            sids.append(key)

    def _rollback_savepoint(self, sid, using=None):
        """Discard every savepoint from the top of the stack down to and
        including ``sid``, plus the current dirty keys."""
        sids = self._get_sid(using)
        key = self._sid_key(sid, using)
        stack = []
        try:
            popped = None
            while popped != key:
                popped = sids.pop()
                stack.insert(0, popped)
            # delete items from localstore
            for i in stack:
                del self.local[i]
            # clear dirty
            self._clear(using)
        except IndexError:
            # key not found, don't delete from localstore, restore sid stack
            for i in stack:
                sids.insert(0, i)

    def _commit_savepoint(self, sid, using=None):
        # commit is not a commit but is in reality just a clear back to that
        # savepoint and adds the items back to the dirty transaction.
        key = self._sid_key(sid, using)
        sids = self._get_sid(using)
        stack = []
        try:
            popped = None
            while popped != key:
                popped = sids.pop()
                stack.insert(0, popped)
            self._store_dirty(using)
            for i in stack:
                for k, v in self.local.get(i, {}).items():
                    self.local[k] = v
                del self.local[i]
            self._restore_dirty(using)
        except IndexError:
            # ``sid`` was not on the stack; undo the pops.
            for i in stack:
                sids.insert(0, i)

    def _commit_all_savepoints(self, using=None):
        sids = self._get_sid(using)
        if sids:
            self._commit_savepoint(sids[0], using)

    def _rollback_all_savepoints(self, using=None):
        sids = self._get_sid(using)
        if sids:
            self._rollback_savepoint(sids[0], using)

    def _store_dirty(self, using=None):
        """Stash the current dirty keys aside (see _commit_savepoint)."""
        c = self.local.mget('%s_%s_*' %
                            (self.prefix, self._trunc_using(using)))
        backup = 'trans_dirty_store_%s' % self._trunc_using(using)
        self.local[backup] = {}
        for k, v in c.items():
            self.local[backup][k] = v
        self._clear(using)

    def _restore_dirty(self, using=None):
        """Merge the stashed dirty keys back and drop the stash."""
        backup = 'trans_dirty_store_%s' % self._trunc_using(using)
        for k, v in self.local.get(backup, {}).items():
            self.local[k] = v
        del self.local[backup]

    def _savepoint(self, original):
        @wraps(original, assigned=available_attrs(original))
        def newfun(using=None):
            if using is not None:
                sid = original(using=using)
            else:
                sid = original()
            if self._uses_savepoints():
                self._create_savepoint(sid, using)
            return sid
        return newfun

    def _savepoint_rollback(self, original):
        @wraps(original, assigned=available_attrs(original))
        def newfun(sid, *args, **kwargs):
            original(sid, *args, **kwargs)
            if self._uses_savepoints():
                # ``using`` may arrive positionally (first arg after sid) or
                # as a keyword.  The previous check (``len(args) == 2`` /
                # ``args[1]``) silently dropped a positionally supplied
                # alias; mirror _savepoint_commit's handling instead.
                if len(args) == 1:
                    using = args[0]
                else:
                    using = kwargs.get('using', None)
                self._rollback_savepoint(sid, using)
        return newfun

    def _savepoint_commit(self, original):
        @wraps(original, assigned=available_attrs(original))
        def newfun(sid, *args, **kwargs):
            original(sid, *args, **kwargs)
            if self._uses_savepoints():
                if len(args) == 1:
                    using = args[0]
                else:
                    using = kwargs.get('using', None)
                self._commit_savepoint(sid, using)
        return newfun

    def _getreal(self, name):
        # Prefer a saved 'real_<name>' attribute if something else already
        # patched the transaction module.
        return getattr(transaction, 'real_%s' % name,
                       getattr(transaction, name))

    def patch(self):
        """
        This function monkey patches commit and rollback

        writes to the cache should not happen until commit (unless our state
        isn't managed). It does not yet support savepoints.
        """
        if not self._patched_var:
            self._originals['rollback'] = self._getreal('rollback')
            self._originals['rollback_unless_managed'] = self._getreal('rollback_unless_managed')
            self._originals['commit'] = self._getreal('commit')
            self._originals['commit_unless_managed'] = self._getreal('commit_unless_managed')
            self._originals['savepoint'] = self._getreal('savepoint')
            self._originals['savepoint_rollback'] = self._getreal('savepoint_rollback')
            self._originals['savepoint_commit'] = self._getreal('savepoint_commit')
            transaction.rollback = self._patched(transaction.rollback, False)
            transaction.rollback_unless_managed = self._patched(
                transaction.rollback_unless_managed, False, unless_managed=True)
            transaction.commit = self._patched(transaction.commit, True)
            transaction.commit_unless_managed = self._patched(
                transaction.commit_unless_managed, True, unless_managed=True)
            transaction.savepoint = self._savepoint(transaction.savepoint)
            transaction.savepoint_rollback = self._savepoint_rollback(
                transaction.savepoint_rollback)
            transaction.savepoint_commit = self._savepoint_commit(
                transaction.savepoint_commit)
            self._patched_var = True

    def unpatch(self):
        """Restore the original transaction functions saved by patch()."""
        for fun in self._originals:
            setattr(transaction, fun, self._originals[fun])
        self._patched_var = False
| en | 0.872179 | TransactionManager is a wrapper around a cache_backend that is transaction aware. If we are in a transaction, it will return the locally cached version. * On rollback, it will flush all local caches * On commit, it will push them up to the real shared cache backend (ex. memcached). Set will be using the generational key, so if another thread bumps this key, the localstore version will still be invalid. If the key is bumped during a transaction it will be new to the global cache on commit, so it will still be a bump. Flushes the internal cache, either to the memcache or rolls back # XXX: multi-set? # copying behavior of original func # if it is an 'unless_managed' version we should do nothing if transaction is managed #get all local dirty items #store them to a dictionary in the localstore #clear the dirty #append the key to the savepoint stack #delete items from localstore #clear dirty #key not found, don't delete from localstore, restore sid stack # commit is not a commit but is in reality just a clear back to that # savepoint and adds the items back to the dirty transaction. This function monkey patches commit and rollback writes to the cache should not happen until commit (unless our state isn't managed). It does not yet support savepoints. | 2.314808 | 2 |
pyelliptic/openssl.py | sharpbitmessage/PyBitmessage | 1 | 6625206 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 <NAME> <<EMAIL>>
# See LICENSE for details.
#
# Software slightly changed by <NAME> <bitmessage at-symbol jonwarren.org>
import sys
import ctypes
OpenSSL = None
class CipherName:
    """Descriptor for an OpenSSL EVP cipher: a display name, a zero-argument
    factory returning the EVP_CIPHER pointer, and the cipher block size."""

    def __init__(self, name, pointer, blocksize):
        self._name = name
        self._pointer = pointer
        self._blocksize = blocksize

    def __str__(self):
        # Same rendering as before, via a single format expression.
        return "Cipher : %s | Blocksize : %s | Fonction pointer : %s" % (
            self._name, str(self._blocksize), str(self._pointer))

    def get_pointer(self):
        # Invoke the stored factory to obtain the raw cipher pointer.
        return self._pointer()

    def get_name(self):
        return self._name

    def get_blocksize(self):
        return self._blocksize
class _OpenSSL:
"""
Wrapper for OpenSSL using ctypes
"""
def __init__(self, library):
"""
Build the wrapper
"""
self._lib = ctypes.CDLL(library)
self.pointer = ctypes.pointer
self.c_int = ctypes.c_int
self.byref = ctypes.byref
self.create_string_buffer = ctypes.create_string_buffer
self.BN_new = self._lib.BN_new
self.BN_new.restype = ctypes.c_void_p
self.BN_new.argtypes = []
self.BN_free = self._lib.BN_free
self.BN_free.restype = None
self.BN_free.argtypes = [ctypes.c_void_p]
self.BN_num_bits = self._lib.BN_num_bits
self.BN_num_bits.restype = ctypes.c_int
self.BN_num_bits.argtypes = [ctypes.c_void_p]
self.BN_bn2bin = self._lib.BN_bn2bin
self.BN_bn2bin.restype = ctypes.c_int
self.BN_bn2bin.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.BN_bin2bn = self._lib.BN_bin2bn
self.BN_bin2bn.restype = ctypes.c_void_p
self.BN_bin2bn.argtypes = [ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p]
self.EC_KEY_free = self._lib.EC_KEY_free
self.EC_KEY_free.restype = None
self.EC_KEY_free.argtypes = [ctypes.c_void_p]
self.EC_KEY_new_by_curve_name = self._lib.EC_KEY_new_by_curve_name
self.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
self.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
self.EC_KEY_generate_key = self._lib.EC_KEY_generate_key
self.EC_KEY_generate_key.restype = ctypes.c_int
self.EC_KEY_generate_key.argtypes = [ctypes.c_void_p]
self.EC_KEY_check_key = self._lib.EC_KEY_check_key
self.EC_KEY_check_key.restype = ctypes.c_int
self.EC_KEY_check_key.argtypes = [ctypes.c_void_p]
self.EC_KEY_get0_private_key = self._lib.EC_KEY_get0_private_key
self.EC_KEY_get0_private_key.restype = ctypes.c_void_p
self.EC_KEY_get0_private_key.argtypes = [ctypes.c_void_p]
self.EC_KEY_get0_public_key = self._lib.EC_KEY_get0_public_key
self.EC_KEY_get0_public_key.restype = ctypes.c_void_p
self.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
self.EC_KEY_get0_group = self._lib.EC_KEY_get0_group
self.EC_KEY_get0_group.restype = ctypes.c_void_p
self.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
self.EC_POINT_get_affine_coordinates_GFp = self._lib.EC_POINT_get_affine_coordinates_GFp
self.EC_POINT_get_affine_coordinates_GFp.restype = ctypes.c_int
self.EC_POINT_get_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.EC_KEY_set_private_key = self._lib.EC_KEY_set_private_key
self.EC_KEY_set_private_key.restype = ctypes.c_int
self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p,
ctypes.c_void_p]
self.EC_KEY_set_public_key = self._lib.EC_KEY_set_public_key
self.EC_KEY_set_public_key.restype = ctypes.c_int
self.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p,
ctypes.c_void_p]
self.EC_KEY_set_group = self._lib.EC_KEY_set_group
self.EC_KEY_set_group.restype = ctypes.c_int
self.EC_KEY_set_group.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.EC_POINT_set_affine_coordinates_GFp = self._lib.EC_POINT_set_affine_coordinates_GFp
self.EC_POINT_set_affine_coordinates_GFp.restype = ctypes.c_int
self.EC_POINT_set_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.EC_POINT_new = self._lib.EC_POINT_new
self.EC_POINT_new.restype = ctypes.c_void_p
self.EC_POINT_new.argtypes = [ctypes.c_void_p]
self.EC_POINT_free = self._lib.EC_POINT_free
self.EC_POINT_free.restype = None
self.EC_POINT_free.argtypes = [ctypes.c_void_p]
self.BN_CTX_free = self._lib.BN_CTX_free
self.BN_CTX_free.restype = None
self.BN_CTX_free.argtypes = [ctypes.c_void_p]
self.EC_POINT_mul = self._lib.EC_POINT_mul
self.EC_POINT_mul.restype = None
self.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.EC_KEY_set_private_key = self._lib.EC_KEY_set_private_key
self.EC_KEY_set_private_key.restype = ctypes.c_int
self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p,
ctypes.c_void_p]
self.ECDH_OpenSSL = self._lib.ECDH_OpenSSL
self._lib.ECDH_OpenSSL.restype = ctypes.c_void_p
self._lib.ECDH_OpenSSL.argtypes = []
self.BN_CTX_new = self._lib.BN_CTX_new
self._lib.BN_CTX_new.restype = ctypes.c_void_p
self._lib.BN_CTX_new.argtypes = []
self.ECDH_set_method = self._lib.ECDH_set_method
self._lib.ECDH_set_method.restype = ctypes.c_int
self._lib.ECDH_set_method.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.ECDH_compute_key = self._lib.ECDH_compute_key
self.ECDH_compute_key.restype = ctypes.c_int
self.ECDH_compute_key.argtypes = [ctypes.c_void_p,
ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
self.EVP_CipherInit_ex = self._lib.EVP_CipherInit_ex
self.EVP_CipherInit_ex.restype = ctypes.c_int
self.EVP_CipherInit_ex.argtypes = [ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_void_p]
self.EVP_CIPHER_CTX_new = self._lib.EVP_CIPHER_CTX_new
self.EVP_CIPHER_CTX_new.restype = ctypes.c_void_p
self.EVP_CIPHER_CTX_new.argtypes = []
# Cipher
self.EVP_aes_128_cfb128 = self._lib.EVP_aes_128_cfb128
self.EVP_aes_128_cfb128.restype = ctypes.c_void_p
self.EVP_aes_128_cfb128.argtypes = []
self.EVP_aes_256_cfb128 = self._lib.EVP_aes_256_cfb128
self.EVP_aes_256_cfb128.restype = ctypes.c_void_p
self.EVP_aes_256_cfb128.argtypes = []
self.EVP_aes_128_cbc = self._lib.EVP_aes_128_cbc
self.EVP_aes_128_cbc.restype = ctypes.c_void_p
self.EVP_aes_128_cbc.argtypes = []
self.EVP_aes_256_cbc = self._lib.EVP_aes_256_cbc
self.EVP_aes_256_cbc.restype = ctypes.c_void_p
self.EVP_aes_256_cbc.argtypes = []
#self.EVP_aes_128_ctr = self._lib.EVP_aes_128_ctr
#self.EVP_aes_128_ctr.restype = ctypes.c_void_p
#self.EVP_aes_128_ctr.argtypes = []
#self.EVP_aes_256_ctr = self._lib.EVP_aes_256_ctr
#self.EVP_aes_256_ctr.restype = ctypes.c_void_p
#self.EVP_aes_256_ctr.argtypes = []
self.EVP_aes_128_ofb = self._lib.EVP_aes_128_ofb
self.EVP_aes_128_ofb.restype = ctypes.c_void_p
self.EVP_aes_128_ofb.argtypes = []
self.EVP_aes_256_ofb = self._lib.EVP_aes_256_ofb
self.EVP_aes_256_ofb.restype = ctypes.c_void_p
self.EVP_aes_256_ofb.argtypes = []
self.EVP_bf_cbc = self._lib.EVP_bf_cbc
self.EVP_bf_cbc.restype = ctypes.c_void_p
self.EVP_bf_cbc.argtypes = []
self.EVP_bf_cfb64 = self._lib.EVP_bf_cfb64
self.EVP_bf_cfb64.restype = ctypes.c_void_p
self.EVP_bf_cfb64.argtypes = []
self.EVP_rc4 = self._lib.EVP_rc4
self.EVP_rc4.restype = ctypes.c_void_p
self.EVP_rc4.argtypes = []
self.EVP_CIPHER_CTX_cleanup = self._lib.EVP_CIPHER_CTX_cleanup
self.EVP_CIPHER_CTX_cleanup.restype = ctypes.c_int
self.EVP_CIPHER_CTX_cleanup.argtypes = [ctypes.c_void_p]
self.EVP_CIPHER_CTX_free = self._lib.EVP_CIPHER_CTX_free
self.EVP_CIPHER_CTX_free.restype = None
self.EVP_CIPHER_CTX_free.argtypes = [ctypes.c_void_p]
self.EVP_CipherUpdate = self._lib.EVP_CipherUpdate
self.EVP_CipherUpdate.restype = ctypes.c_int
self.EVP_CipherUpdate.argtypes = [ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
self.EVP_CipherFinal_ex = self._lib.EVP_CipherFinal_ex
self.EVP_CipherFinal_ex.restype = ctypes.c_int
self.EVP_CipherFinal_ex.argtypes = [ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_void_p]
self.EVP_DigestInit = self._lib.EVP_DigestInit
self.EVP_DigestInit.restype = ctypes.c_int
self._lib.EVP_DigestInit.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.EVP_DigestUpdate = self._lib.EVP_DigestUpdate
self.EVP_DigestUpdate.restype = ctypes.c_int
self.EVP_DigestUpdate.argtypes = [ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_int]
self.EVP_DigestFinal = self._lib.EVP_DigestFinal
self.EVP_DigestFinal.restype = ctypes.c_int
self.EVP_DigestFinal.argtypes = [ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_void_p]
self.EVP_ecdsa = self._lib.EVP_ecdsa
self._lib.EVP_ecdsa.restype = ctypes.c_void_p
self._lib.EVP_ecdsa.argtypes = []
self.ECDSA_sign = self._lib.ECDSA_sign
self.ECDSA_sign.restype = ctypes.c_int
self.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.ECDSA_verify = self._lib.ECDSA_verify
self.ECDSA_verify.restype = ctypes.c_int
self.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
self.EVP_MD_CTX_create = self._lib.EVP_MD_CTX_create
self.EVP_MD_CTX_create.restype = ctypes.c_void_p
self.EVP_MD_CTX_create.argtypes = []
self.EVP_MD_CTX_init = self._lib.EVP_MD_CTX_init
self.EVP_MD_CTX_init.restype = None
self.EVP_MD_CTX_init.argtypes = [ctypes.c_void_p]
self.EVP_MD_CTX_destroy = self._lib.EVP_MD_CTX_destroy
self.EVP_MD_CTX_destroy.restype = None
self.EVP_MD_CTX_destroy.argtypes = [ctypes.c_void_p]
self.RAND_bytes = self._lib.RAND_bytes
self.RAND_bytes.restype = None
self.RAND_bytes.argtypes = [ctypes.c_void_p, ctypes.c_int]
self.EVP_sha256 = self._lib.EVP_sha256
self.EVP_sha256.restype = ctypes.c_void_p
self.EVP_sha256.argtypes = []
self.i2o_ECPublicKey = self._lib.i2o_ECPublicKey
self.i2o_ECPublicKey.restype = ctypes.c_void_p
self.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.EVP_sha512 = self._lib.EVP_sha512
self.EVP_sha512.restype = ctypes.c_void_p
self.EVP_sha512.argtypes = []
self.HMAC = self._lib.HMAC
self.HMAC.restype = ctypes.c_void_p
self.HMAC.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
self.PKCS5_PBKDF2_HMAC = self._lib.PKCS5_PBKDF2_HMAC
self.PKCS5_PBKDF2_HMAC.restype = ctypes.c_int
self.PKCS5_PBKDF2_HMAC.argtypes = [ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_int,
ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_void_p]
self._set_ciphers()
self._set_curves()
def _set_ciphers(self):
self.cipher_algo = {
'aes-128-cbc': CipherName('aes-128-cbc', self.EVP_aes_128_cbc, 16),
'aes-256-cbc': CipherName('aes-256-cbc', self.EVP_aes_256_cbc, 16),
'aes-128-cfb': CipherName('aes-128-cfb', self.EVP_aes_128_cfb128, 16),
'aes-256-cfb': CipherName('aes-256-cfb', self.EVP_aes_256_cfb128, 16),
'aes-128-ofb': CipherName('aes-128-ofb', self._lib.EVP_aes_128_ofb, 16),
'aes-256-ofb': CipherName('aes-256-ofb', self._lib.EVP_aes_256_ofb, 16),
#'aes-128-ctr': CipherName('aes-128-ctr', self._lib.EVP_aes_128_ctr, 16),
#'aes-256-ctr': CipherName('aes-256-ctr', self._lib.EVP_aes_256_ctr, 16),
'bf-cfb': CipherName('bf-cfb', self.EVP_bf_cfb64, 8),
'bf-cbc': CipherName('bf-cbc', self.EVP_bf_cbc, 8),
'rc4': CipherName('rc4', self.EVP_rc4, 128), # 128 is the initialisation size not block size
}
def _set_curves(self):
self.curves = {
'secp112r1': 704,
'secp112r2': 705,
'secp128r1': 706,
'secp128r2': 707,
'secp160k1': 708,
'secp160r1': 709,
'secp160r2': 710,
'secp192k1': 711,
'secp224k1': 712,
'secp224r1': 713,
'secp256k1': 714,
'secp384r1': 715,
'secp521r1': 716,
'sect113r1': 717,
'sect113r2': 718,
'sect131r1': 719,
'sect131r2': 720,
'sect163k1': 721,
'sect163r1': 722,
'sect163r2': 723,
'sect193r1': 724,
'sect193r2': 725,
'sect233k1': 726,
'sect233r1': 727,
'sect239k1': 728,
'sect283k1': 729,
'sect283r1': 730,
'sect409k1': 731,
'sect409r1': 732,
'sect571k1': 733,
'sect571r1': 734,
}
def BN_num_bytes(self, x):
"""
returns the length of a BN (OpenSSl API)
"""
return int((self.BN_num_bits(x) + 7) / 8)
def get_cipher(self, name):
"""
returns the OpenSSL cipher instance
"""
if name not in self.cipher_algo:
raise Exception("Unknown cipher")
return self.cipher_algo[name]
def get_curve(self, name):
"""
returns the id of a elliptic curve
"""
if name not in self.curves:
raise Exception("Unknown curve")
return self.curves[name]
def get_curve_by_id(self, id):
"""
returns the name of a elliptic curve with his id
"""
res = None
for i in self.curves:
if self.curves[i] == id:
res = i
break
if res is None:
raise Exception("Unknown curve")
return res
    def rand(self, size):
        """
        OpenSSL random function

        Allocates a fresh *size*-byte ctypes buffer, fills it via
        RAND_bytes and returns the raw bytes.
        """
        # malloc(0, size) returns a zero-initialised buffer of exactly
        # *size* bytes (the 0 means "no initial contents").
        buffer = self.malloc(0, size)
        self.RAND_bytes(buffer, size)
        return buffer.raw
def malloc(self, data, size):
"""
returns a create_string_buffer (ctypes)
"""
buffer = None
if data != 0:
if sys.version_info.major == 3 and isinstance(data, type('')):
data = data.encode()
buffer = self.create_string_buffer(data, size)
else:
buffer = self.create_string_buffer(size)
return buffer
# Locate and load the OpenSSL shared library, trying the usual names for
# Linux (libcrypto.so), Windows (libeay32.dll) and macOS (libcrypto.dylib),
# then a PyInstaller bundle path, and finally ctypes.util.find_library.
#
# Fixes vs. original: the Python-2-only ``except Exception, err:`` syntax
# (a SyntaxError on Python 3) is replaced with the portable form; bare
# ``except:`` clauses are narrowed to ``except Exception`` so that
# KeyboardInterrupt/SystemExit are not swallowed; and the final handler
# re-raises the original failure instead of pointlessly retrying the same
# call, so the promised "exception information" (the traceback) is shown.
try:
    OpenSSL = _OpenSSL('libcrypto.so')
except Exception:
    try:
        OpenSSL = _OpenSSL('libeay32.dll')
    except Exception:
        try:
            OpenSSL = _OpenSSL('libcrypto.dylib')
        except Exception:
            try:
                # PyInstaller one-file bundles unpack DLLs under sys._MEIPASS.
                from os import path
                lib_path = path.join(sys._MEIPASS, "libeay32.dll")
                OpenSSL = _OpenSSL(lib_path)
            except Exception:
                if 'linux' in sys.platform or 'darwin' in sys.platform:
                    try:
                        from ctypes.util import find_library
                        OpenSSL = _OpenSSL(find_library('ssl'))
                    except Exception:
                        sys.stderr.write('(On Linux) Couldn\'t find and load the OpenSSL library. You must install it. If you believe that you already have it installed, this exception information might be of use:\n')
                        raise
                else:
                    raise Exception("Couldn't find and load the OpenSSL library. You must install it.")
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 <NAME> <<EMAIL>>
# See LICENSE for details.
#
# Software slightly changed by <NAME> <bitmessage at-symbol jonwarren.org>
import sys
import ctypes
OpenSSL = None
class CipherName:
    """Descriptor for one symmetric cipher: its name, the OpenSSL
    ``EVP_*`` selector function, and its block size in bytes."""

    def __init__(self, name, pointer, blocksize):
        self._name = name
        self._pointer = pointer
        self._blocksize = blocksize

    def __str__(self):
        return "Cipher : {0} | Blocksize : {1} | Fonction pointer : {2}".format(
            self._name, self._blocksize, self._pointer)

    def get_pointer(self):
        """Call the stored EVP selector and return the cipher pointer."""
        return self._pointer()

    def get_name(self):
        """Return the cipher's name, e.g. 'aes-256-cbc'."""
        return self._name

    def get_blocksize(self):
        """Return the cipher's block size in bytes."""
        return self._blocksize
class _OpenSSL:
"""
Wrapper for OpenSSL using ctypes
"""
def __init__(self, library):
"""
Build the wrapper
"""
self._lib = ctypes.CDLL(library)
self.pointer = ctypes.pointer
self.c_int = ctypes.c_int
self.byref = ctypes.byref
self.create_string_buffer = ctypes.create_string_buffer
self.BN_new = self._lib.BN_new
self.BN_new.restype = ctypes.c_void_p
self.BN_new.argtypes = []
self.BN_free = self._lib.BN_free
self.BN_free.restype = None
self.BN_free.argtypes = [ctypes.c_void_p]
self.BN_num_bits = self._lib.BN_num_bits
self.BN_num_bits.restype = ctypes.c_int
self.BN_num_bits.argtypes = [ctypes.c_void_p]
self.BN_bn2bin = self._lib.BN_bn2bin
self.BN_bn2bin.restype = ctypes.c_int
self.BN_bn2bin.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.BN_bin2bn = self._lib.BN_bin2bn
self.BN_bin2bn.restype = ctypes.c_void_p
self.BN_bin2bn.argtypes = [ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p]
self.EC_KEY_free = self._lib.EC_KEY_free
self.EC_KEY_free.restype = None
self.EC_KEY_free.argtypes = [ctypes.c_void_p]
self.EC_KEY_new_by_curve_name = self._lib.EC_KEY_new_by_curve_name
self.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
self.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
self.EC_KEY_generate_key = self._lib.EC_KEY_generate_key
self.EC_KEY_generate_key.restype = ctypes.c_int
self.EC_KEY_generate_key.argtypes = [ctypes.c_void_p]
self.EC_KEY_check_key = self._lib.EC_KEY_check_key
self.EC_KEY_check_key.restype = ctypes.c_int
self.EC_KEY_check_key.argtypes = [ctypes.c_void_p]
self.EC_KEY_get0_private_key = self._lib.EC_KEY_get0_private_key
self.EC_KEY_get0_private_key.restype = ctypes.c_void_p
self.EC_KEY_get0_private_key.argtypes = [ctypes.c_void_p]
self.EC_KEY_get0_public_key = self._lib.EC_KEY_get0_public_key
self.EC_KEY_get0_public_key.restype = ctypes.c_void_p
self.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
self.EC_KEY_get0_group = self._lib.EC_KEY_get0_group
self.EC_KEY_get0_group.restype = ctypes.c_void_p
self.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
self.EC_POINT_get_affine_coordinates_GFp = self._lib.EC_POINT_get_affine_coordinates_GFp
self.EC_POINT_get_affine_coordinates_GFp.restype = ctypes.c_int
self.EC_POINT_get_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.EC_KEY_set_private_key = self._lib.EC_KEY_set_private_key
self.EC_KEY_set_private_key.restype = ctypes.c_int
self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p,
ctypes.c_void_p]
self.EC_KEY_set_public_key = self._lib.EC_KEY_set_public_key
self.EC_KEY_set_public_key.restype = ctypes.c_int
self.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p,
ctypes.c_void_p]
self.EC_KEY_set_group = self._lib.EC_KEY_set_group
self.EC_KEY_set_group.restype = ctypes.c_int
self.EC_KEY_set_group.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.EC_POINT_set_affine_coordinates_GFp = self._lib.EC_POINT_set_affine_coordinates_GFp
self.EC_POINT_set_affine_coordinates_GFp.restype = ctypes.c_int
self.EC_POINT_set_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.EC_POINT_new = self._lib.EC_POINT_new
self.EC_POINT_new.restype = ctypes.c_void_p
self.EC_POINT_new.argtypes = [ctypes.c_void_p]
self.EC_POINT_free = self._lib.EC_POINT_free
self.EC_POINT_free.restype = None
self.EC_POINT_free.argtypes = [ctypes.c_void_p]
self.BN_CTX_free = self._lib.BN_CTX_free
self.BN_CTX_free.restype = None
self.BN_CTX_free.argtypes = [ctypes.c_void_p]
self.EC_POINT_mul = self._lib.EC_POINT_mul
self.EC_POINT_mul.restype = None
self.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.EC_KEY_set_private_key = self._lib.EC_KEY_set_private_key
self.EC_KEY_set_private_key.restype = ctypes.c_int
self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p,
ctypes.c_void_p]
self.ECDH_OpenSSL = self._lib.ECDH_OpenSSL
self._lib.ECDH_OpenSSL.restype = ctypes.c_void_p
self._lib.ECDH_OpenSSL.argtypes = []
self.BN_CTX_new = self._lib.BN_CTX_new
self._lib.BN_CTX_new.restype = ctypes.c_void_p
self._lib.BN_CTX_new.argtypes = []
self.ECDH_set_method = self._lib.ECDH_set_method
self._lib.ECDH_set_method.restype = ctypes.c_int
self._lib.ECDH_set_method.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.ECDH_compute_key = self._lib.ECDH_compute_key
self.ECDH_compute_key.restype = ctypes.c_int
self.ECDH_compute_key.argtypes = [ctypes.c_void_p,
ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
self.EVP_CipherInit_ex = self._lib.EVP_CipherInit_ex
self.EVP_CipherInit_ex.restype = ctypes.c_int
self.EVP_CipherInit_ex.argtypes = [ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_void_p]
self.EVP_CIPHER_CTX_new = self._lib.EVP_CIPHER_CTX_new
self.EVP_CIPHER_CTX_new.restype = ctypes.c_void_p
self.EVP_CIPHER_CTX_new.argtypes = []
# Cipher
self.EVP_aes_128_cfb128 = self._lib.EVP_aes_128_cfb128
self.EVP_aes_128_cfb128.restype = ctypes.c_void_p
self.EVP_aes_128_cfb128.argtypes = []
self.EVP_aes_256_cfb128 = self._lib.EVP_aes_256_cfb128
self.EVP_aes_256_cfb128.restype = ctypes.c_void_p
self.EVP_aes_256_cfb128.argtypes = []
self.EVP_aes_128_cbc = self._lib.EVP_aes_128_cbc
self.EVP_aes_128_cbc.restype = ctypes.c_void_p
self.EVP_aes_128_cbc.argtypes = []
self.EVP_aes_256_cbc = self._lib.EVP_aes_256_cbc
self.EVP_aes_256_cbc.restype = ctypes.c_void_p
self.EVP_aes_256_cbc.argtypes = []
#self.EVP_aes_128_ctr = self._lib.EVP_aes_128_ctr
#self.EVP_aes_128_ctr.restype = ctypes.c_void_p
#self.EVP_aes_128_ctr.argtypes = []
#self.EVP_aes_256_ctr = self._lib.EVP_aes_256_ctr
#self.EVP_aes_256_ctr.restype = ctypes.c_void_p
#self.EVP_aes_256_ctr.argtypes = []
self.EVP_aes_128_ofb = self._lib.EVP_aes_128_ofb
self.EVP_aes_128_ofb.restype = ctypes.c_void_p
self.EVP_aes_128_ofb.argtypes = []
self.EVP_aes_256_ofb = self._lib.EVP_aes_256_ofb
self.EVP_aes_256_ofb.restype = ctypes.c_void_p
self.EVP_aes_256_ofb.argtypes = []
self.EVP_bf_cbc = self._lib.EVP_bf_cbc
self.EVP_bf_cbc.restype = ctypes.c_void_p
self.EVP_bf_cbc.argtypes = []
self.EVP_bf_cfb64 = self._lib.EVP_bf_cfb64
self.EVP_bf_cfb64.restype = ctypes.c_void_p
self.EVP_bf_cfb64.argtypes = []
self.EVP_rc4 = self._lib.EVP_rc4
self.EVP_rc4.restype = ctypes.c_void_p
self.EVP_rc4.argtypes = []
self.EVP_CIPHER_CTX_cleanup = self._lib.EVP_CIPHER_CTX_cleanup
self.EVP_CIPHER_CTX_cleanup.restype = ctypes.c_int
self.EVP_CIPHER_CTX_cleanup.argtypes = [ctypes.c_void_p]
self.EVP_CIPHER_CTX_free = self._lib.EVP_CIPHER_CTX_free
self.EVP_CIPHER_CTX_free.restype = None
self.EVP_CIPHER_CTX_free.argtypes = [ctypes.c_void_p]
self.EVP_CipherUpdate = self._lib.EVP_CipherUpdate
self.EVP_CipherUpdate.restype = ctypes.c_int
self.EVP_CipherUpdate.argtypes = [ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
self.EVP_CipherFinal_ex = self._lib.EVP_CipherFinal_ex
self.EVP_CipherFinal_ex.restype = ctypes.c_int
self.EVP_CipherFinal_ex.argtypes = [ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_void_p]
self.EVP_DigestInit = self._lib.EVP_DigestInit
self.EVP_DigestInit.restype = ctypes.c_int
self._lib.EVP_DigestInit.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.EVP_DigestUpdate = self._lib.EVP_DigestUpdate
self.EVP_DigestUpdate.restype = ctypes.c_int
self.EVP_DigestUpdate.argtypes = [ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_int]
self.EVP_DigestFinal = self._lib.EVP_DigestFinal
self.EVP_DigestFinal.restype = ctypes.c_int
self.EVP_DigestFinal.argtypes = [ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_void_p]
self.EVP_ecdsa = self._lib.EVP_ecdsa
self._lib.EVP_ecdsa.restype = ctypes.c_void_p
self._lib.EVP_ecdsa.argtypes = []
self.ECDSA_sign = self._lib.ECDSA_sign
self.ECDSA_sign.restype = ctypes.c_int
self.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.ECDSA_verify = self._lib.ECDSA_verify
self.ECDSA_verify.restype = ctypes.c_int
self.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
self.EVP_MD_CTX_create = self._lib.EVP_MD_CTX_create
self.EVP_MD_CTX_create.restype = ctypes.c_void_p
self.EVP_MD_CTX_create.argtypes = []
self.EVP_MD_CTX_init = self._lib.EVP_MD_CTX_init
self.EVP_MD_CTX_init.restype = None
self.EVP_MD_CTX_init.argtypes = [ctypes.c_void_p]
self.EVP_MD_CTX_destroy = self._lib.EVP_MD_CTX_destroy
self.EVP_MD_CTX_destroy.restype = None
self.EVP_MD_CTX_destroy.argtypes = [ctypes.c_void_p]
self.RAND_bytes = self._lib.RAND_bytes
self.RAND_bytes.restype = None
self.RAND_bytes.argtypes = [ctypes.c_void_p, ctypes.c_int]
self.EVP_sha256 = self._lib.EVP_sha256
self.EVP_sha256.restype = ctypes.c_void_p
self.EVP_sha256.argtypes = []
self.i2o_ECPublicKey = self._lib.i2o_ECPublicKey
self.i2o_ECPublicKey.restype = ctypes.c_void_p
self.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.EVP_sha512 = self._lib.EVP_sha512
self.EVP_sha512.restype = ctypes.c_void_p
self.EVP_sha512.argtypes = []
self.HMAC = self._lib.HMAC
self.HMAC.restype = ctypes.c_void_p
self.HMAC.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
self.PKCS5_PBKDF2_HMAC = self._lib.PKCS5_PBKDF2_HMAC
self.PKCS5_PBKDF2_HMAC.restype = ctypes.c_int
self.PKCS5_PBKDF2_HMAC.argtypes = [ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_int,
ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_void_p]
self._set_ciphers()
self._set_curves()
def _set_ciphers(self):
self.cipher_algo = {
'aes-128-cbc': CipherName('aes-128-cbc', self.EVP_aes_128_cbc, 16),
'aes-256-cbc': CipherName('aes-256-cbc', self.EVP_aes_256_cbc, 16),
'aes-128-cfb': CipherName('aes-128-cfb', self.EVP_aes_128_cfb128, 16),
'aes-256-cfb': CipherName('aes-256-cfb', self.EVP_aes_256_cfb128, 16),
'aes-128-ofb': CipherName('aes-128-ofb', self._lib.EVP_aes_128_ofb, 16),
'aes-256-ofb': CipherName('aes-256-ofb', self._lib.EVP_aes_256_ofb, 16),
#'aes-128-ctr': CipherName('aes-128-ctr', self._lib.EVP_aes_128_ctr, 16),
#'aes-256-ctr': CipherName('aes-256-ctr', self._lib.EVP_aes_256_ctr, 16),
'bf-cfb': CipherName('bf-cfb', self.EVP_bf_cfb64, 8),
'bf-cbc': CipherName('bf-cbc', self.EVP_bf_cbc, 8),
'rc4': CipherName('rc4', self.EVP_rc4, 128), # 128 is the initialisation size not block size
}
def _set_curves(self):
self.curves = {
'secp112r1': 704,
'secp112r2': 705,
'secp128r1': 706,
'secp128r2': 707,
'secp160k1': 708,
'secp160r1': 709,
'secp160r2': 710,
'secp192k1': 711,
'secp224k1': 712,
'secp224r1': 713,
'secp256k1': 714,
'secp384r1': 715,
'secp521r1': 716,
'sect113r1': 717,
'sect113r2': 718,
'sect131r1': 719,
'sect131r2': 720,
'sect163k1': 721,
'sect163r1': 722,
'sect163r2': 723,
'sect193r1': 724,
'sect193r2': 725,
'sect233k1': 726,
'sect233r1': 727,
'sect239k1': 728,
'sect283k1': 729,
'sect283r1': 730,
'sect409k1': 731,
'sect409r1': 732,
'sect571k1': 733,
'sect571r1': 734,
}
def BN_num_bytes(self, x):
"""
returns the length of a BN (OpenSSl API)
"""
return int((self.BN_num_bits(x) + 7) / 8)
def get_cipher(self, name):
"""
returns the OpenSSL cipher instance
"""
if name not in self.cipher_algo:
raise Exception("Unknown cipher")
return self.cipher_algo[name]
def get_curve(self, name):
"""
returns the id of a elliptic curve
"""
if name not in self.curves:
raise Exception("Unknown curve")
return self.curves[name]
def get_curve_by_id(self, id):
"""
returns the name of a elliptic curve with his id
"""
res = None
for i in self.curves:
if self.curves[i] == id:
res = i
break
if res is None:
raise Exception("Unknown curve")
return res
def rand(self, size):
"""
OpenSSL random function
"""
buffer = self.malloc(0, size)
self.RAND_bytes(buffer, size)
return buffer.raw
def malloc(self, data, size):
"""
returns a create_string_buffer (ctypes)
"""
buffer = None
if data != 0:
if sys.version_info.major == 3 and isinstance(data, type('')):
data = data.encode()
buffer = self.create_string_buffer(data, size)
else:
buffer = self.create_string_buffer(size)
return buffer
# Locate and load the OpenSSL shared library, trying the usual names for
# Linux (libcrypto.so), Windows (libeay32.dll) and macOS (libcrypto.dylib),
# then a PyInstaller bundle path, and finally ctypes.util.find_library.
#
# Fixes vs. original: the Python-2-only ``except Exception, err:`` syntax
# (a SyntaxError on Python 3) is replaced with the portable form; bare
# ``except:`` clauses are narrowed to ``except Exception`` so that
# KeyboardInterrupt/SystemExit are not swallowed; and the final handler
# re-raises the original failure instead of pointlessly retrying the same
# call, so the promised "exception information" (the traceback) is shown.
try:
    OpenSSL = _OpenSSL('libcrypto.so')
except Exception:
    try:
        OpenSSL = _OpenSSL('libeay32.dll')
    except Exception:
        try:
            OpenSSL = _OpenSSL('libcrypto.dylib')
        except Exception:
            try:
                # PyInstaller one-file bundles unpack DLLs under sys._MEIPASS.
                from os import path
                lib_path = path.join(sys._MEIPASS, "libeay32.dll")
                OpenSSL = _OpenSSL(lib_path)
            except Exception:
                if 'linux' in sys.platform or 'darwin' in sys.platform:
                    try:
                        from ctypes.util import find_library
                        OpenSSL = _OpenSSL(find_library('ssl'))
                    except Exception:
                        sys.stderr.write('(On Linux) Couldn\'t find and load the OpenSSL library. You must install it. If you believe that you already have it installed, this exception information might be of use:\n')
                        raise
                else:
                    raise Exception("Couldn't find and load the OpenSSL library. You must install it.")
| en | 0.571502 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2011 <NAME> <<EMAIL>> # See LICENSE for details. # # Software slightly changed by <NAME> <bitmessage at-symbol jonwarren.org> Wrapper for OpenSSL using ctypes Build the wrapper # Cipher #self.EVP_aes_128_ctr = self._lib.EVP_aes_128_ctr #self.EVP_aes_128_ctr.restype = ctypes.c_void_p #self.EVP_aes_128_ctr.argtypes = [] #self.EVP_aes_256_ctr = self._lib.EVP_aes_256_ctr #self.EVP_aes_256_ctr.restype = ctypes.c_void_p #self.EVP_aes_256_ctr.argtypes = [] #'aes-128-ctr': CipherName('aes-128-ctr', self._lib.EVP_aes_128_ctr, 16), #'aes-256-ctr': CipherName('aes-256-ctr', self._lib.EVP_aes_256_ctr, 16), # 128 is the initialisation size not block size returns the length of a BN (OpenSSl API) returns the OpenSSL cipher instance returns the id of a elliptic curve returns the name of a elliptic curve with his id OpenSSL random function returns a create_string_buffer (ctypes) | 2.812258 | 3 |
python/input2.py | pawankakani/coding | 0 | 6625207 | <gh_stars>0
# Read two values from the user: input() always returns a string, so the
# results are converted to int and float respectively.  A non-numeric
# entry raises ValueError.
age = int(input("Enter your age :")) ## <1>
temperature = float(input("Enter today's temperature :")) ## <2>
# Echo both values back; print() separates its arguments with a space.
print("Your age is :", age)
print("Today's temperature is :", temperature)
| age = int(input("Enter your age :")) ## <1>
temperature = float(input("Enter today's temperature :")) ## <2>
print("Your age is :", age)
print("Today's temperature is :", temperature) | eu | 0.164461 | ## <1> ## <2> | 4.284155 | 4 |
braindecode/datasets/sensor_positions.py | gemeinl/braindecode | 3 | 6625208 | <gh_stars>1-10
import numpy as np
import math
CHANNEL_10_20_APPROX = ('angle',
('Fpz',(0.000, 4.000)),
('Fp1',(-3.500, 3.500)),
('Fp2',(3.500, 3.500)),
('AFp3h',(-1.000, 3.500)),
('AFp4h',(1.000, 3.500)),
('AF7',(-4.000, 3.000)),
('AF3',(-2.000, 3.000)),
('AFz',(0.000, 3.000)),
('AF4',(2.000, 3.000)),
('AF8',(4.000, 3.000)),
('AFF5h',(-2.500, 2.500)),
('AFF1',(-0.500, 2.500)),
('AFF2',(0.500, 2.500)),
('AFF6h',(2.500, 2.500)),
('F7',(-4.000, 2.000)),
('F5',(-3.000, 2.000)),
('F3',(-2.000, 2.000)),
('F1',(-1.000, 2.000)),
('Fz',(0.000, 2.000)),
('F2',(1.000, 2.000)),
('F4',(2.000, 2.000)),
('F6',(3.000, 2.000)),
('F8',(4.000, 2.000)),
('FFT7h',(-3.500, 1.500)),
('FFC5h',(-2.500, 1.500)),
('FFC3h',(-1.500, 1.500)),
('FFC1h',(-0.500, 1.500)),
('FFC2h',(0.500, 1.500)),
('FFC4h',(1.500, 1.500)),
('FFC6h',(2.500, 1.500)),
('FFT8h',(3.500, 1.500)),
('FT9',(-5.000, 1.000)),
('FT7',(-4.000, 1.000)),
('FC5',(-3.000, 1.000)),
('FC3',(-2.000, 1.000)),
('FC1',(-1.000, 1.000)),
('FCz',(0.000, 1.000)),
('FC2',(1.000, 1.000)),
('FC4',(2.000, 1.000)),
('FC6',(3.000, 1.000)),
('FT8',(4.000, 1.000)),
('FT10',(5.000, 1.000)),
('FTT9h',(-4.500, 0.500)),
('FTT7h',(-3.500, 0.500)),
('FCC5h',(-2.500, 0.500)),
('FCC3h',(-1.500, 0.500)),
('FCC1h',(-0.500, 0.500)),
('FCC2h',(0.500, 0.500)),
('FCC4h',(1.500, 0.500)),
('FCC6h',(2.500, 0.500)),
('FTT8h',(3.500, 0.500)),
('FTT10h',(4.500, 0.500)),
('M1',(-5.000, 0.000)),
# notsure if correct:
('T9', (-4.500, 0.000)),
('T7',(-4.000, 0.000)),
('C5',(-3.000, 0.000)),
('C3',(-2.000, 0.000)),
('C1',(-1.000, 0.000)),
('Cz',(0.000, 0.000)),
('C2',(1.000, 0.000)),
('C4',(2.000, 0.000)),
('C6',(3.000, 0.000)),
('T8',(4.000, 0.000)),
('T10', (4.500, 0.000)),
('M2',(5.000, 0.000)),
('TTP7h',(-3.500, -0.500)),
('CCP5h',(-2.500, -0.500)),
('CCP3h',(-1.500, -0.500)),
('CCP1h',(-0.500, -0.500)),
('CCP2h',(0.500, -0.500)),
('CCP4h',(1.500, -0.500)),
('CCP6h',(2.500, -0.500)),
('TTP8h',(3.500, -0.500)),
('TP7',(-4.000, -1.000)),
('CP5',(-3.000, -1.000)),
('CP3',(-2.000, -1.000)),
('CP1',(-1.000, -1.000)),
('CPz',(0.000, -1.000)),
('CP2',(1.000, -1.000)),
('CP4',(2.000, -1.000)),
('CP6',(3.000, -1.000)),
('TP8',(4.000, -1.000)),
('TPP9h',(-4.500, -1.500)),
('TPP7h',(-3.500, -1.500)),
('CPP5h',(-2.500, -1.500)),
('CPP3h',(-1.500, -1.500)),
('CPP1h',(-0.500, -1.500)),
('CPP2h',(0.500, -1.500)),
('CPP4h',(1.500, -1.500)),
('CPP6h',(2.500, -1.500)),
('TPP8h',(3.500, -1.500)),
('TPP10h',(4.500, -1.500)),
('P9',(-5.000, -2.000)),
('P7',(-4.000, -2.000)),
('P5',(-3.000, -2.000)),
('P3',(-2.000, -2.000)),
('P1',(-1.000, -2.000)),
('Pz',(0.000, -2.000)),
('P2',(1.000, -2.000)),
('P4',(2.000, -2.000)),
('P6',(3.000, -2.000)),
('P8',(4.000, -2.000)),
('P10',(5.000, -2.000)),
('PPO9h',(-4.500, -2.500)),
('PPO5h',(-3.000, -2.500)),
('PPO1',(-0.650, -2.500)),
('PPO2',(0.650, -2.500)),
('PPO6h',(3.000, -2.500)),
('PPO10h',(4.500, -2.500)),
('PO9',(-5.000, -3.000)),
('PO7',(-4.000, -3.000)),
('PO5',(-3.000, -3.000)),
('PO3',(-2.000, -3.000)),
('PO1',(-1.000, -3.000)),
('POz',(0.000, -3.000)),
('PO2',(1.000, -3.000)),
('PO4',(2.000, -3.000)),
('PO6',(3.000, -3.000)),
('PO8',(4.000, -3.000)),
('PO10',(5.000, -3.000)),
('POO9h',(-4.500, -3.250)),
('POO3h',(-2.000, -3.250)),
('POO4h',(2.000, -3.250)),
('POO10h',(4.500, -3.250)),
('O1',(-2.500, -3.750)),
('Oz',(0.000, -3.750)),
('O2',(2.500, -3.750)),
('OI1h',(1.500, -4.250)),
('OI2h',(-1.500, -4.250)),
('I1',(1.000, -4.500)),
('Iz',(0.000, -4.500)),
('I2',(-1.000, -4.500)),
)
def get_channelpos(channame, chan_pos_list):
    """Look up a channel's 2D position in *chan_pos_list*.

    The first element of *chan_pos_list* names the coordinate system:
    ``'angle'`` entries are converted through the stereographic
    projection, ``'cartesian'`` entries are returned as stored.  The
    name lookup is case insensitive; unknown channels yield ``None``.
    """
    system = chan_pos_list[0]
    if system == 'angle':
        return get_channelpos_from_angle(channame, chan_pos_list[1:])
    if system == 'cartesian':
        wanted = channame.lower()
        for name, coords in chan_pos_list[1:]:
            if name.lower() == wanted:
                return coords[0], coords[1]
        return None
    raise ValueError("Unknown first element "
                     "{:s} (should be type of positions)".format(system))
def get_channelpos_from_angle(channame, chan_pos_list=CHANNEL_10_20_APPROX):
    """Return the x/y position of a channel.

    Calculates the stereographic projection of a channel from the
    angular grid in ``CHANNEL_10_20_APPROX`` (or a caller-supplied
    list of ``(name, (a, b))`` entries), suitable for a scalp plot.

    Parameters
    ----------
    channame : str
        Name of the channel; the search is case insensitive.
    chan_pos_list : sequence of (str, (float, float))
        Channel names with their angular grid positions.

    Returns
    -------
    x, y : float or None
        The projected point on the plane if the channel is known,
        otherwise ``None``.

    Examples
    --------
    >>> get_channelpos_from_angle('C2')
    (0.1720792096741632, 0.0)
    >>> get_channelpos_from_angle('c2')  # case insensitive
    (0.1720792096741632, 0.0)
    >>> get_channelpos_from_angle('foo')  # unknown channel -> None
    """
    wanted = channame.lower()
    for entry in chan_pos_list:
        if entry[0].lower() == wanted:
            # Convert the 90/4-degree grid position to projected x/y.
            return _convert_2d_angle_to_2d_coord(*entry[1])
    return None
def _convert_2d_angle_to_2d_coord(a,b):
# convert the 90/4th angular position into x, y, z
ea, eb = a * (90 / 4), b * (90 / 4)
ea = ea * math.pi / 180
eb = eb * math.pi / 180
x = math.sin(ea) * math.cos(eb)
y = math.sin(eb)
z = math.cos(ea) * math.cos(eb)
# Calculate the stereographic projection.
# Given a unit sphere with radius ``r = 1`` and center at
# the origin. Project the point ``p = (x, y, z)`` from the
# sphere's South pole (0, 0, -1) on a plane on the sphere's
# North pole (0, 0, 1).
#
# The formula is:
#
# P' = P * (2r / (r + z))
#
# We changed the values to move the point of projection
# further below the south pole
mu = 1 / (1.3 + z)
x *= mu
y *= mu
return x, y
| import numpy as np
import math
CHANNEL_10_20_APPROX = ('angle',
('Fpz',(0.000, 4.000)),
('Fp1',(-3.500, 3.500)),
('Fp2',(3.500, 3.500)),
('AFp3h',(-1.000, 3.500)),
('AFp4h',(1.000, 3.500)),
('AF7',(-4.000, 3.000)),
('AF3',(-2.000, 3.000)),
('AFz',(0.000, 3.000)),
('AF4',(2.000, 3.000)),
('AF8',(4.000, 3.000)),
('AFF5h',(-2.500, 2.500)),
('AFF1',(-0.500, 2.500)),
('AFF2',(0.500, 2.500)),
('AFF6h',(2.500, 2.500)),
('F7',(-4.000, 2.000)),
('F5',(-3.000, 2.000)),
('F3',(-2.000, 2.000)),
('F1',(-1.000, 2.000)),
('Fz',(0.000, 2.000)),
('F2',(1.000, 2.000)),
('F4',(2.000, 2.000)),
('F6',(3.000, 2.000)),
('F8',(4.000, 2.000)),
('FFT7h',(-3.500, 1.500)),
('FFC5h',(-2.500, 1.500)),
('FFC3h',(-1.500, 1.500)),
('FFC1h',(-0.500, 1.500)),
('FFC2h',(0.500, 1.500)),
('FFC4h',(1.500, 1.500)),
('FFC6h',(2.500, 1.500)),
('FFT8h',(3.500, 1.500)),
('FT9',(-5.000, 1.000)),
('FT7',(-4.000, 1.000)),
('FC5',(-3.000, 1.000)),
('FC3',(-2.000, 1.000)),
('FC1',(-1.000, 1.000)),
('FCz',(0.000, 1.000)),
('FC2',(1.000, 1.000)),
('FC4',(2.000, 1.000)),
('FC6',(3.000, 1.000)),
('FT8',(4.000, 1.000)),
('FT10',(5.000, 1.000)),
('FTT9h',(-4.500, 0.500)),
('FTT7h',(-3.500, 0.500)),
('FCC5h',(-2.500, 0.500)),
('FCC3h',(-1.500, 0.500)),
('FCC1h',(-0.500, 0.500)),
('FCC2h',(0.500, 0.500)),
('FCC4h',(1.500, 0.500)),
('FCC6h',(2.500, 0.500)),
('FTT8h',(3.500, 0.500)),
('FTT10h',(4.500, 0.500)),
('M1',(-5.000, 0.000)),
# notsure if correct:
('T9', (-4.500, 0.000)),
('T7',(-4.000, 0.000)),
('C5',(-3.000, 0.000)),
('C3',(-2.000, 0.000)),
('C1',(-1.000, 0.000)),
('Cz',(0.000, 0.000)),
('C2',(1.000, 0.000)),
('C4',(2.000, 0.000)),
('C6',(3.000, 0.000)),
('T8',(4.000, 0.000)),
('T10', (4.500, 0.000)),
('M2',(5.000, 0.000)),
('TTP7h',(-3.500, -0.500)),
('CCP5h',(-2.500, -0.500)),
('CCP3h',(-1.500, -0.500)),
('CCP1h',(-0.500, -0.500)),
('CCP2h',(0.500, -0.500)),
('CCP4h',(1.500, -0.500)),
('CCP6h',(2.500, -0.500)),
('TTP8h',(3.500, -0.500)),
('TP7',(-4.000, -1.000)),
('CP5',(-3.000, -1.000)),
('CP3',(-2.000, -1.000)),
('CP1',(-1.000, -1.000)),
('CPz',(0.000, -1.000)),
('CP2',(1.000, -1.000)),
('CP4',(2.000, -1.000)),
('CP6',(3.000, -1.000)),
('TP8',(4.000, -1.000)),
('TPP9h',(-4.500, -1.500)),
('TPP7h',(-3.500, -1.500)),
('CPP5h',(-2.500, -1.500)),
('CPP3h',(-1.500, -1.500)),
('CPP1h',(-0.500, -1.500)),
('CPP2h',(0.500, -1.500)),
('CPP4h',(1.500, -1.500)),
('CPP6h',(2.500, -1.500)),
('TPP8h',(3.500, -1.500)),
('TPP10h',(4.500, -1.500)),
('P9',(-5.000, -2.000)),
('P7',(-4.000, -2.000)),
('P5',(-3.000, -2.000)),
('P3',(-2.000, -2.000)),
('P1',(-1.000, -2.000)),
('Pz',(0.000, -2.000)),
('P2',(1.000, -2.000)),
('P4',(2.000, -2.000)),
('P6',(3.000, -2.000)),
('P8',(4.000, -2.000)),
('P10',(5.000, -2.000)),
('PPO9h',(-4.500, -2.500)),
('PPO5h',(-3.000, -2.500)),
('PPO1',(-0.650, -2.500)),
('PPO2',(0.650, -2.500)),
('PPO6h',(3.000, -2.500)),
('PPO10h',(4.500, -2.500)),
('PO9',(-5.000, -3.000)),
('PO7',(-4.000, -3.000)),
('PO5',(-3.000, -3.000)),
('PO3',(-2.000, -3.000)),
('PO1',(-1.000, -3.000)),
('POz',(0.000, -3.000)),
('PO2',(1.000, -3.000)),
('PO4',(2.000, -3.000)),
('PO6',(3.000, -3.000)),
('PO8',(4.000, -3.000)),
('PO10',(5.000, -3.000)),
('POO9h',(-4.500, -3.250)),
('POO3h',(-2.000, -3.250)),
('POO4h',(2.000, -3.250)),
('POO10h',(4.500, -3.250)),
('O1',(-2.500, -3.750)),
('Oz',(0.000, -3.750)),
('O2',(2.500, -3.750)),
('OI1h',(1.500, -4.250)),
('OI2h',(-1.500, -4.250)),
('I1',(1.000, -4.500)),
('Iz',(0.000, -4.500)),
('I2',(-1.000, -4.500)),
)
def get_channelpos(channame, chan_pos_list):
    """Look up a channel's 2D position in *chan_pos_list*.

    The first element of *chan_pos_list* names the coordinate system:
    ``'angle'`` entries are converted through the stereographic
    projection, ``'cartesian'`` entries are returned as stored.  The
    name lookup is case insensitive; unknown channels yield ``None``.
    """
    system = chan_pos_list[0]
    if system == 'angle':
        return get_channelpos_from_angle(channame, chan_pos_list[1:])
    if system == 'cartesian':
        wanted = channame.lower()
        for name, coords in chan_pos_list[1:]:
            if name.lower() == wanted:
                return coords[0], coords[1]
        return None
    raise ValueError("Unknown first element "
                     "{:s} (should be type of positions)".format(system))
def get_channelpos_from_angle(channame, chan_pos_list=CHANNEL_10_20_APPROX):
    """Return the x/y position of a channel.

    Calculates the stereographic projection of a channel from the
    angular grid in ``CHANNEL_10_20_APPROX`` (or a caller-supplied
    list of ``(name, (a, b))`` entries), suitable for a scalp plot.

    Parameters
    ----------
    channame : str
        Name of the channel; the search is case insensitive.
    chan_pos_list : sequence of (str, (float, float))
        Channel names with their angular grid positions.

    Returns
    -------
    x, y : float or None
        The projected point on the plane if the channel is known,
        otherwise ``None``.

    Examples
    --------
    >>> get_channelpos_from_angle('C2')
    (0.1720792096741632, 0.0)
    >>> get_channelpos_from_angle('c2')  # case insensitive
    (0.1720792096741632, 0.0)
    >>> get_channelpos_from_angle('foo')  # unknown channel -> None
    """
    wanted = channame.lower()
    for entry in chan_pos_list:
        if entry[0].lower() == wanted:
            # Convert the 90/4-degree grid position to projected x/y.
            return _convert_2d_angle_to_2d_coord(*entry[1])
    return None
def _convert_2d_angle_to_2d_coord(a,b):
# convert the 90/4th angular position into x, y, z
ea, eb = a * (90 / 4), b * (90 / 4)
ea = ea * math.pi / 180
eb = eb * math.pi / 180
x = math.sin(ea) * math.cos(eb)
y = math.sin(eb)
z = math.cos(ea) * math.cos(eb)
# Calculate the stereographic projection.
# Given a unit sphere with radius ``r = 1`` and center at
# the origin. Project the point ``p = (x, y, z)`` from the
# sphere's South pole (0, 0, -1) on a plane on the sphere's
# North pole (0, 0, 1).
#
# The formula is:
#
# P' = P * (2r / (r + z))
#
# We changed the values to move the point of projection
# further below the south pole
mu = 1 / (1.3 + z)
x *= mu
y *= mu
return x, y | en | 0.647482 | # notsure if correct: Return the x/y position of a channel. This method calculates the stereographic projection of a channel from ``CHANNEL_10_20``, suitable for a scalp plot. Parameters ---------- channame : str Name of the channel, the search is case insensitive. chan_pos_list=CHANNEL_10_20_APPROX, interpolation='bilinear' Returns ------- x, y : float or None The projected point on the plane if the point is known, otherwise ``None`` Examples -------- >>> plot.get_channelpos_from_angle('C2') (0.1720792096741632, 0.0) >>> # the channels are case insensitive >>> plot.get_channelpos_from_angle('c2') (0.1720792096741632, 0.0) >>> # lookup for an invalid channel >>> plot.get_channelpos_from_angle('foo') None # convert the 90/4th angular position into x, y, z # convert the 90/4th angular position into x, y, z # Calculate the stereographic projection. # Given a unit sphere with radius ``r = 1`` and center at # the origin. Project the point ``p = (x, y, z)`` from the # sphere's South pole (0, 0, -1) on a plane on the sphere's # North pole (0, 0, 1). # # The formula is: # # P' = P * (2r / (r + z)) # # We changed the values to move the point of projection # further below the south pole | 1.721162 | 2 |
setup.py | daanknoope/pgmpy | 0 | 6625209 | <filename>setup.py
#!/usr/bin/env python3
# Packaging configuration for pgmpy.  Build/install with
# ``pip install .`` or ``python setup.py sdist bdist_wheel``.
from setuptools import setup, find_packages
setup(
    name="pgmpy",
    version="0.1.7",
    description="A library for Probabilistic Graphical Models",
    # Ship every package in the source tree except the test suite.
    packages=find_packages(exclude=['tests']),
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/pgmpy/pgmpy",
    license="MIT",
    classifiers=[
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 2.7",
        "Intended Audience :: Developers",
        "Operating System :: Unix",
        "Operating System :: POSIX",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: MacOS",
        "Topic :: Scientific/Engineering"
    ],
    long_description="https://github.com/pgmpy/pgmpy/blob/dev/README.md",
    # Runtime dependencies; versions are intentionally unpinned.
    install_requires=[
        "networkx",
        "scipy",
        "numpy",
    ],
)
| <filename>setup.py
#!/usr/bin/env python3
from setuptools import setup, find_packages
setup(
name="pgmpy",
version="0.1.7",
description="A library for Probabilistic Graphical Models",
packages=find_packages(exclude=['tests']),
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/pgmpy/pgmpy",
license="MIT",
classifiers=[
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 2.7",
"Intended Audience :: Developers",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Topic :: Scientific/Engineering"
],
long_description="https://github.com/pgmpy/pgmpy/blob/dev/README.md",
install_requires=[
"networkx",
"scipy",
"numpy",
],
)
| fr | 0.221828 | #!/usr/bin/env python3 | 1.473954 | 1 |
src/ggrc_basic_permissions/migrations/versions/20130920154201_5b33357784a_assign_user_role_to_.py | Killswitchz/ggrc-core | 1 | 6625210 | <reponame>Killswitchz/ggrc-core<gh_stars>1-10
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Assign User role to all existing users.
Revision ID: 5b33357784a
Revises: <KEY>
Create Date: 2013-09-20 15:42:01.558543
"""
# revision identifiers, used by Alembic.
revision = '5b33357784a'
down_revision = '<KEY>'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select
# Lightweight table stubs: only the columns this migration touches are
# declared, which is the usual Alembic pattern for data migrations (the
# real ORM models are not imported into migration scripts).
person_table = table('people',
    column('id', sa.Integer),
)
role_table = table('roles',
    column('id', sa.Integer),
    column('name', sa.String),
)
user_roles_table = table('user_roles',
    column('id', sa.Integer),
    column('role_id', sa.Integer),
    column('person_id', sa.Integer),
    column('context_id', sa.Integer),
    column('modified_by_id', sa.Integer),
    column('created_at', sa.DateTime),
    column('updated_at', sa.DateTime),
)
def upgrade():
    """Grant the ObjectEditor and ProgramCreator roles to every person."""
    users = select([person_table.c.id])
    object_editor = select([role_table.c.id])\
        .where(role_table.c.name == 'ObjectEditor')\
        .limit(1)
    program_creator = select([role_table.c.id])\
        .where(role_table.c.name == 'ProgramCreator')\
        .limit(1)
    # FIXME this could be done better in a more recent version of sqlalchemy
    # once 0.8.3 is released:
    #   op.execute(user_roles_table.insert()
    #       .from_select(['user_id'], users)
    #       .from_select(['role_id'], role)
    #       .values(context_id=None))
    # FIXME workaround until we can do the proper static generation of the
    # sql statement: fetch the rows client-side and insert one at a time.
    connection = op.get_bind()
    users = connection.execute(users).fetchall()
    object_editor = connection.execute(object_editor).fetchone()
    program_creator = connection.execute(program_creator).fetchone()
    current_datetime = datetime.now()
    for user in users:
        # Two inserts per user, one per granted role.  context_id=None
        # makes the grant global rather than scoped to an object context.
        op.execute(user_roles_table.insert().values(
            person_id=user['id'],
            role_id=object_editor['id'],
            context_id=None,
            created_at=current_datetime,
            updated_at=current_datetime,
        ))
        op.execute(user_roles_table.insert().values(
            person_id=user['id'],
            role_id=program_creator['id'],
            context_id=None,
            created_at=current_datetime,
            updated_at=current_datetime,
        ))
def downgrade():
    """Intentionally a no-op.

    Role assignments created by :func:`upgrade` cannot be distinguished
    from assignments made later through the application, so nothing is
    removed on downgrade.
    """
| # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Assign User role to all existing users.
Revision ID: 5b33357784a
Revises: <KEY>
Create Date: 2013-09-20 15:42:01.558543
"""
# revision identifiers, used by Alembic.
revision = '5b33357784a'
down_revision = '<KEY>'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select
person_table = table('people',
column('id', sa.Integer),
)
role_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
)
user_roles_table = table('user_roles',
column('id', sa.Integer),
column('role_id', sa.Integer),
column('person_id', sa.Integer),
column('context_id', sa.Integer),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
)
def upgrade():
users = select([person_table.c.id])
object_editor = select([role_table.c.id])\
.where(role_table.c.name == 'ObjectEditor')\
.limit(1)
program_creator = select([role_table.c.id])\
.where(role_table.c.name == 'ProgramCreator')\
.limit(1)
#FIXME this could be done better in a more recent version of sqlalchemy
#once 0.8.3 is released
#op.execute(user_roles_table.insert()\
#.from_select(['user_id'], users)\
#.from_select(['role_id'], role)\
#.values(context_id=None,))
#FIXME workaround until we can do the proper static generation of the sql
#statement
connection = op.get_bind()
users = connection.execute(users).fetchall()
object_editor = connection.execute(object_editor).fetchone()
program_creator = connection.execute(program_creator).fetchone()
current_datetime = datetime.now()
for user in users:
op.execute(user_roles_table.insert().values(
person_id=user['id'],
role_id=object_editor['id'],
context_id=None,
created_at=current_datetime,
updated_at=current_datetime,
))
op.execute(user_roles_table.insert().values(
person_id=user['id'],
role_id=program_creator['id'],
context_id=None,
created_at=current_datetime,
updated_at=current_datetime,
))
def downgrade():
'''Intentionally does nothing as we can't distinguish between migration
added assignments and not.
'''
pass | en | 0.703553 | # Copyright (C) 2017 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> Assign User role to all existing users. Revision ID: 5b33357784a Revises: <KEY> Create Date: 2013-09-20 15:42:01.558543 # revision identifiers, used by Alembic. #FIXME this could be done better in a more recent version of sqlalchemy #once 0.8.3 is released #op.execute(user_roles_table.insert()\ #.from_select(['user_id'], users)\ #.from_select(['role_id'], role)\ #.values(context_id=None,)) #FIXME workaround until we can do the proper static generation of the sql #statement Intentionally does nothing as we can't distinguish between migration added assignments and not. | 1.969402 | 2 |
tensorflow_probability/python/bijectors/hypothesis_testlib.py | axch/probability | 0 | 6625211 | <reponame>axch/probability
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for hypothesis testing of bijectors."""
import collections
import inspect
from absl import logging
import hypothesis.strategies as hps
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps
from tensorflow_probability.python.internal import tensorshape_util
tfb = tfp.bijectors
tfd = tfp.distributions

# Bijectors that need special-case handling in `instantiable_bijectors`:
# `Inline` takes callables and `Invert` wraps another bijector, so neither
# is constructible from `validate_args` alone.
SPECIAL_BIJECTORS = [
    'Inline',
    'Invert',
]

# INSTANTIABLE_BIJECTORS is a map from str->(BijectorClass,)
# Lazily populated cache; see `instantiable_bijectors`.
INSTANTIABLE_BIJECTORS = None
def instantiable_bijectors():
  """Identifies bijectors that are trivially instantiable.

  Here, "trivially" means things like `Exp` for which no parameters need to be
  generated; i.e., the only arguments to the constructor are `self`, `name`,
  and `validate_args`.

  This finds the bijectors by traversing the `tfp.bijectors` namespace. The
  traversal is cached so it only happens once per Python process.

  Returns:
    instantiable: A Python `dict` mapping the `str` bijector name to a
      singleton tuple containing the bijector class object.
  """
  global INSTANTIABLE_BIJECTORS
  if INSTANTIABLE_BIJECTORS is not None:
    return INSTANTIABLE_BIJECTORS

  result = {}
  for bijector_name in dir(tfb):
    bijector_class = getattr(tfb, bijector_name)
    if (not inspect.isclass(bijector_class) or
        not issubclass(bijector_class, tfb.Bijector) or
        bijector_name in SPECIAL_BIJECTORS):
      continue
    # `inspect.getargspec` was deprecated and removed in Python 3.11, and it
    # raised ValueError for constructors with keyword-only arguments.
    # `getfullargspec` is the drop-in replacement; its `varkw` field replaces
    # `keywords`, and keyword-only parameters live in `kwonlyargs`.
    spec = inspect.getfullargspec(bijector_class.__init__)
    ctor_args = set(spec.args) | set(spec.kwonlyargs) | set(
        arg for arg in (spec.varargs, spec.varkw) if arg is not None)
    unsupported_args = ctor_args - set(['name', 'self', 'validate_args'])
    if unsupported_args:
      logging.warning('Unable to test tfb.%s: unsupported args %s',
                      bijector_name, unsupported_args)
      continue
    if not bijector_class()._is_injective:  # pylint: disable=protected-access
      logging.warning('Unable to test non-injective tfb.%s.', bijector_name)
      continue
    result[bijector_name] = (bijector_class,)
  # `Invert` is excluded from the scan above but is still testable when
  # wrapped around another instantiable bijector.
  result['Invert'] = (tfb.Invert,)
  for bijector_name in sorted(result):
    logging.warning('Supported bijector: tfb.%s', bijector_name)
  INSTANTIABLE_BIJECTORS = result
  return INSTANTIABLE_BIJECTORS
class BijectorSupport(collections.namedtuple(
    'BijectorSupport', ['forward', 'inverse'])):
  """Domain/codomain specification for a bijector.

  The `forward` field is the support of the forward computation (the
  domain) and the `inverse` field is the support of the inverse
  computation (the codomain).
  """
  __slots__ = ()

  def invert(self):
    """Returns the `BijectorSupport` of the inverse bijector."""
    fwd, inv = self
    return BijectorSupport(inv, fwd)
# Lazily populated cache; see `bijector_supports`.
BIJECTOR_SUPPORTS = None


def bijector_supports():
  """Returns a dict of supports for each instantiable bijector.

  Warns if any `instantiable_bijectors` are found to have no declared supports,
  once per Python process.

  Returns:
    supports: Python `dict` mapping `str` bijector name to the corresponding
      `BijectorSupport` object.
  """
  global BIJECTOR_SUPPORTS
  if BIJECTOR_SUPPORTS is not None:
    return BIJECTOR_SUPPORTS
  Support = tfp_hps.Support  # pylint: disable=invalid-name
  # Hand-curated table: one entry per instantiable bijector (checked below).
  # `Support.OTHER` marks supports that cannot be expressed as one of the
  # standard enum values, e.g. because they depend on bijector parameters.
  supports = {
      '_Invert':
          BijectorSupport(Support.OTHER, Support.OTHER),
      'Ascending':
          BijectorSupport(Support.VECTOR_UNCONSTRAINED,
                          Support.VECTOR_STRICTLY_INCREASING),
      'BatchNormalization':
          BijectorSupport(Support.VECTOR_UNCONSTRAINED,
                          Support.VECTOR_UNCONSTRAINED),
      'CholeskyOuterProduct':
          BijectorSupport(Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE,
                          Support.MATRIX_POSITIVE_DEFINITE),
      'CholeskyToInvCholesky':
          BijectorSupport(Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE,
                          Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE),
      'CorrelationCholesky':
          BijectorSupport(Support.VECTOR_SIZE_TRIANGULAR,
                          Support.CORRELATION_CHOLESKY),
      'Cumsum':
          BijectorSupport(Support.VECTOR_UNCONSTRAINED,
                          Support.VECTOR_UNCONSTRAINED),
      'DiscreteCosineTransform':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_UNCONSTRAINED),
      'Exp':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_POSITIVE),
      'Expm1':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED, Support.SCALAR_GT_NEG1),
      'FillScaleTriL':
          BijectorSupport(Support.VECTOR_SIZE_TRIANGULAR,
                          Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE),
      'FillTriangular':
          BijectorSupport(Support.VECTOR_SIZE_TRIANGULAR,
                          Support.MATRIX_LOWER_TRIL),
      'FrechetCDF':  # The domain is parameter dependent.
          BijectorSupport(Support.OTHER, Support.SCALAR_IN_0_1),
      'GeneralizedExtremeValueCDF':  # The domain is parameter dependent.
          BijectorSupport(Support.OTHER, Support.SCALAR_IN_0_1),
      'GeneralizedPareto':  # The range is parameter dependent.
          BijectorSupport(Support.SCALAR_UNCONSTRAINED, Support.OTHER),
      'GompertzCDF':
          BijectorSupport(Support.SCALAR_POSITIVE, Support.SCALAR_IN_0_1),
      'GumbelCDF':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED, Support.SCALAR_IN_0_1),
      'Identity':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_UNCONSTRAINED),
      'Inline':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_UNCONSTRAINED),
      'Invert':
          BijectorSupport(Support.OTHER, Support.OTHER),
      'IteratedSigmoidCentered':
          BijectorSupport(Support.VECTOR_UNCONSTRAINED,
                          Support.VECTOR_WITH_L1_NORM_1_SIZE_GT1),
      'KumaraswamyCDF':
          BijectorSupport(Support.SCALAR_IN_0_1, Support.SCALAR_IN_0_1),
      'Log':
          BijectorSupport(Support.SCALAR_POSITIVE,
                          Support.SCALAR_UNCONSTRAINED),
      'Log1p':
          BijectorSupport(Support.SCALAR_GT_NEG1, Support.SCALAR_UNCONSTRAINED),
      'MatrixInverseTriL':
          BijectorSupport(Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE,
                          Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE),
      'MatvecLU':
          BijectorSupport(Support.VECTOR_UNCONSTRAINED,
                          Support.VECTOR_UNCONSTRAINED),
      'MoyalCDF':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED, Support.SCALAR_IN_0_1),
      'NormalCDF':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED, Support.SCALAR_IN_0_1),
      'Ordered':
          BijectorSupport(Support.VECTOR_STRICTLY_INCREASING,
                          Support.VECTOR_UNCONSTRAINED),
      'Permute':
          BijectorSupport(Support.VECTOR_UNCONSTRAINED,
                          Support.VECTOR_UNCONSTRAINED),
      'Power':
          BijectorSupport(Support.SCALAR_POSITIVE,
                          Support.SCALAR_POSITIVE),
      'PowerTransform':  # The domain is parameter dependent.
          BijectorSupport(Support.OTHER, Support.SCALAR_POSITIVE),
      'RationalQuadraticSpline':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_UNCONSTRAINED),
      'RayleighCDF':
          BijectorSupport(Support.SCALAR_NON_NEGATIVE,
                          Support.SCALAR_IN_0_1),
      'Reciprocal':
          BijectorSupport(Support.SCALAR_NON_ZERO, Support.SCALAR_NON_ZERO),
      'Reshape':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_UNCONSTRAINED),
      'Scale':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_UNCONSTRAINED),
      'ScaleMatvecDiag':
          BijectorSupport(Support.VECTOR_UNCONSTRAINED,
                          Support.VECTOR_UNCONSTRAINED),
      'ScaleMatvecLU':
          BijectorSupport(Support.VECTOR_UNCONSTRAINED,
                          Support.VECTOR_UNCONSTRAINED),
      'ScaleMatvecTriL':
          BijectorSupport(Support.VECTOR_UNCONSTRAINED,
                          Support.VECTOR_UNCONSTRAINED),
      'Shift':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_UNCONSTRAINED),
      'ShiftedGompertzCDF':
          BijectorSupport(Support.SCALAR_POSITIVE, Support.SCALAR_IN_0_1),
      'Sigmoid':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED, Support.SCALAR_IN_0_1),
      'Sinh':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_UNCONSTRAINED),
      'SinhArcsinh':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_UNCONSTRAINED),
      'SoftClip':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_UNCONSTRAINED),
      'Softfloor':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_UNCONSTRAINED),
      'Softplus':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_POSITIVE),
      'Softsign':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_IN_NEG1_1),
      'SoftmaxCentered':
          BijectorSupport(Support.VECTOR_UNCONSTRAINED,
                          Support.VECTOR_WITH_L1_NORM_1_SIZE_GT1),
      'Square':
          BijectorSupport(Support.SCALAR_NON_NEGATIVE,
                          Support.SCALAR_NON_NEGATIVE),
      'Tanh':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_IN_NEG1_1),
      'TransformDiagonal':
          BijectorSupport(Support.MATRIX_UNCONSTRAINED, Support.OTHER),
      'Transpose':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_UNCONSTRAINED),
      'WeibullCDF':
          BijectorSupport(Support.SCALAR_NON_NEGATIVE, Support.SCALAR_IN_0_1),
  }
  # Fail loudly if a newly instantiable bijector has no declared support.
  missing_keys = set(instantiable_bijectors().keys()) - set(supports.keys())
  if missing_keys:
    raise ValueError('Missing bijector supports: {}'.format(missing_keys))
  BIJECTOR_SUPPORTS = supports
  return BIJECTOR_SUPPORTS
@hps.composite
def unconstrained_bijectors(draw, max_forward_event_ndims=None,
                            must_preserve_event_ndims=False,
                            validate_args=True):
  """Strategy for drawing forward-unconstrained bijectors.

  The bijectors emitted by this are those whose `forward` computation
  can act on all of R^n, with n <= `max_forward_event_ndims`.

  Args:
    draw: Strategy sampler supplied by `@hps.composite`.
    max_forward_event_ndims: Optional python `int`, maximum acceptable bijector
      `forward_event_ndims`.
    must_preserve_event_ndims: Optional python `bool`, `True` if the returned
      bijector must preserve the rank of the event.
    validate_args: Python `bool`; whether to enable runtime assertions.

  Returns:
    unconstrained: A strategy for drawing such bijectors.
  """
  if max_forward_event_ndims is None:
    max_forward_event_ndims = float('inf')
  # Event rank implied by the prefix of a `Support` enum name.
  ndims_by_prefix = dict(SCALAR=0, VECTOR=1, MATRIX=2)

  def is_acceptable(support):
    """Determines if a `BijectorSupport` object is acceptable."""
    if 'UNCONSTRAINED' not in support.forward:
      return False
    forward_prefix = support.forward.split('_')[0]
    if ndims_by_prefix[forward_prefix] > max_forward_event_ndims:
      return False
    if must_preserve_event_ndims:
      inverse_prefix = support.inverse.split('_')[0]
      if ndims_by_prefix[forward_prefix] != ndims_by_prefix[inverse_prefix]:
        return False
    return True

  supports = bijector_supports()
  acceptable_keys = sorted([k for k in instantiable_bijectors().keys()
                            if k == 'Invert' or is_acceptable(supports[k])])
  bijector_name = draw(hps.sampled_from(acceptable_keys))
  if bijector_name == 'Invert':
    # For `Invert`, draw an underlying bijector whose *inverse* support is
    # acceptable, so that the inverted bijector's forward support is.
    acceptable_keys = [k for k in instantiable_bijectors().keys()
                       if is_acceptable(supports[k].invert())]
    underlying = draw(hps.sampled_from(acceptable_keys))
    underlying = instantiable_bijectors()[underlying][0](
        validate_args=validate_args)
    return tfb.Invert(underlying, validate_args=validate_args)
  return instantiable_bijectors()[bijector_name][0](validate_args=validate_args)
def distribution_eligilibility_filter_for(bijector):
  """Returns a function which filters distribution names, where possible."""
  # NOTE: the spelling of the function name is kept for API compatibility.
  if isinstance(bijector, tfb.CorrelationCholesky):
    # Only LKJ samples live on CorrelationCholesky's codomain.
    return 'LKJ'.__eq__

  def accept_any_name(name):
    del name  # Unused: every distribution name is eligible.
    return True

  return accept_any_name
def distribution_filter_for(bijector):
  """Returns a function checking Distribution compatibility with this bijector.

  That is, `distribution_filter_for(bijector)(dist) == True` implies
  that `bijector` can act on `dist` (i.e., they are safe to compose with
  `TransformedDistribution`).

  TODO(bjp): Make this sensitive to supports. Currently assumes `bijector` acts
  on an unconstrained space, and just checks compatible ranks.

  Args:
    bijector: A `Bijector` instance to check compatibility with.

  Returns:
    filter: A Python callable filtering Distributions for compatibility with
      this bijector.
  """
  if isinstance(bijector, tfb.CholeskyToInvCholesky):
    # Requires a square (rank-2) event.
    def additional_check(dist):
      return (tensorshape_util.rank(dist.event_shape) == 2 and
              int(dist.event_shape[0]) == int(dist.event_shape[1]))
  elif isinstance(bijector, tfb.CorrelationCholesky):
    def additional_check(dist):
      # The isinstance check will be redundant when the
      # `distribution_eligilibility_filter_for` above has been used, but we keep
      # it here for safety.
      return isinstance(dist, tfd.LKJ) and dist.input_output_cholesky
  else:
    additional_check = lambda dist: True

  def distribution_filter(dist):
    # Only floating-point distributions can be sensibly transformed.
    if not dtype_util.is_floating(dist.dtype):
      return False
    # The bijector must not need more event dimensions than the
    # distribution's events provide.
    if bijector.forward_min_event_ndims > tensorshape_util.rank(
        dist.event_shape):
      return False
    return additional_check(dist)

  return distribution_filter
def padded(t, lhs, rhs=None):
  """Left pads and optionally right pads the innermost axis of `t`."""
  t = tf.convert_to_tensor(t)
  # No padding on any axis except the last.
  outer = tf.zeros([tf.rank(t) - 1, 2], dtype=tf.int32)
  result = tf.pad(
      t,
      paddings=tf.concat([outer, [[1, 0]]], axis=0),
      constant_values=tf.convert_to_tensor(lhs, dtype=t.dtype))
  if rhs is None:
    return result
  return tf.pad(
      result,
      paddings=tf.concat([outer, [[0, 1]]], axis=0),
      constant_values=tf.convert_to_tensor(rhs, dtype=t.dtype))
def spline_bin_size_constraint(x, lo=-1, hi=1, dtype=tf.float32):
  """Maps innermost axis of `x` to positive values."""
  min_width = 1e-2
  # Reserve `min_width` per bin and softmax-distribute the remaining width.
  nbins = tf.cast(tf.shape(x)[-1], dtype)
  free_width = hi - lo - nbins * min_width
  return tf.math.softmax(tf.cast(x, dtype)) * free_width + min_width
def spline_slope_constraint(s, dtype=tf.float32):
  """Maps `s` to all positive with `s[..., 0] == s[..., -1] == 1`."""
  # Drop the last position: slopes need nknots - 2 entries versus the
  # nknots - 1 entries needed for bin sizes.
  trimmed = tf.cast(s[..., :-1], dtype)
  return tf.math.softplus(trimmed) + 1e-2
def power_transform_constraint(power):
  """Returns a map onto the PowerTransform domain, [-1 / power, inf).

  When `power == 0` the domain is all of R and the map is the identity.
  """
  if power == 0:
    return lambda x: x

  def constrain(x):
    return tf.math.softplus(x) - 1. / power

  return constrain
def frechet_constraint(loc):
  """Returns a map from R onto the Frechet support, [loc, inf)."""
  return lambda x: tf.math.softplus(x) + loc
def gev_constraint(loc, scale, conc):
  """Maps `s` to support based on `loc`, `scale` and `conc`."""
  def constrain(x):
    c = tf.convert_to_tensor(conc)
    # We intentionally compute the endpoint with (1.0 / concentration) * scale,
    # for the same reason as in GeneralizedExtremeValueCDF._maybe_assert_valid_x
    endpoint = loc - (1.0 / c) * scale
    # conc > 0: support is (endpoint, inf); conc == 0: all of R;
    # conc < 0: support is (-inf, endpoint).
    return tf.where(c > 0.,
                    tf.math.softplus(x) + endpoint,
                    tf.where(
                        tf.equal(0., c),
                        x, endpoint - tf.math.softplus(x)))
  return constrain
def generalized_pareto_constraint(loc, scale, conc):
  """Maps `s` to support based on `loc`, `scale` and `conc`."""
  def constrain(x):
    conc_ = tf.convert_to_tensor(conc)
    loc_ = tf.convert_to_tensor(loc)
    # conc >= 0: support is [loc, inf); conc < 0: support is the bounded
    # interval [loc, loc - scale / conc] (note scale / conc is negative
    # there, and sigmoid(x) sweeps (0, 1)).
    return tf.where(conc_ >= 0.,
                    tf.math.softplus(x) + loc_,
                    loc_ - tf.math.sigmoid(x) * scale / conc_)
  return constrain
| # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for hypothesis testing of bijectors."""
import collections
import inspect
from absl import logging
import hypothesis.strategies as hps
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps
from tensorflow_probability.python.internal import tensorshape_util
tfb = tfp.bijectors
tfd = tfp.distributions
SPECIAL_BIJECTORS = [
'Inline',
'Invert',
]
# INSTANTIABLE_BIJECTORS is a map from str->(BijectorClass,)
INSTANTIABLE_BIJECTORS = None
def instantiable_bijectors():
"""Identifies bijectors that are trivially instantiable.
Here, "trivially" means things like `Exp` for which no parameters need to be
generated; i.e., the only arguments to the constructor are `self`, `name`, and
`validate_args`.
This finds the bijectors by traversing the `tfp.bijectors` namespace. The
traversal is cached so it only happens once per Python process.
Returns:
instantiable: A Python `dict` mapping the `str` bijector name to a singleton
tuple containing the bijector class object.
"""
global INSTANTIABLE_BIJECTORS
if INSTANTIABLE_BIJECTORS is not None:
return INSTANTIABLE_BIJECTORS
result = {}
for bijector_name in dir(tfb):
bijector_class = getattr(tfb, bijector_name)
if (not inspect.isclass(bijector_class) or
not issubclass(bijector_class, tfb.Bijector) or
bijector_name in SPECIAL_BIJECTORS):
continue
# ArgSpec(args, varargs, keywords, defaults)
spec = inspect.getargspec(bijector_class.__init__)
ctor_args = set(spec.args) | set(
[arg for arg in (spec.varargs, spec.keywords) if arg is not None])
unsupported_args = set(ctor_args) - set(['name', 'self', 'validate_args'])
if unsupported_args:
logging.warning('Unable to test tfb.%s: unsupported args %s',
bijector_name, unsupported_args)
continue
if not bijector_class()._is_injective: # pylint: disable=protected-access
logging.warning('Unable to test non-injective tfb.%s.', bijector_name)
continue
result[bijector_name] = (bijector_class,)
result['Invert'] = (tfb.Invert,)
for bijector_name in sorted(result):
logging.warning('Supported bijector: tfb.%s', bijector_name)
INSTANTIABLE_BIJECTORS = result
return INSTANTIABLE_BIJECTORS
class BijectorSupport(collections.namedtuple(
'BijectorSupport', ['forward', 'inverse'])):
"""Specification of the domain and codomain of a bijector.
The `forward` slot is the support of the forward computation, i.e., the
domain, and the `inverse` slot is the support of the inverse computation,
i.e., the codomain.
"""
__slots__ = ()
def invert(self):
"""Returns the inverse of this `BijectorSupport`."""
return BijectorSupport(self.inverse, self.forward)
BIJECTOR_SUPPORTS = None
def bijector_supports():
"""Returns a dict of supports for each instantiable bijector.
Warns if any `instantiable_bijectors` are found to have no declared supports,
once per Python process.
Returns:
supports: Python `dict` mapping `str` bijector name to the corresponding
`BijectorSupport` object.
"""
global BIJECTOR_SUPPORTS
if BIJECTOR_SUPPORTS is not None:
return BIJECTOR_SUPPORTS
Support = tfp_hps.Support # pylint: disable=invalid-name
supports = {
'_Invert':
BijectorSupport(Support.OTHER, Support.OTHER),
'Ascending':
BijectorSupport(Support.VECTOR_UNCONSTRAINED,
Support.VECTOR_STRICTLY_INCREASING),
'BatchNormalization':
BijectorSupport(Support.VECTOR_UNCONSTRAINED,
Support.VECTOR_UNCONSTRAINED),
'CholeskyOuterProduct':
BijectorSupport(Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE,
Support.MATRIX_POSITIVE_DEFINITE),
'CholeskyToInvCholesky':
BijectorSupport(Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE,
Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE),
'CorrelationCholesky':
BijectorSupport(Support.VECTOR_SIZE_TRIANGULAR,
Support.CORRELATION_CHOLESKY),
'Cumsum':
BijectorSupport(Support.VECTOR_UNCONSTRAINED,
Support.VECTOR_UNCONSTRAINED),
'DiscreteCosineTransform':
BijectorSupport(Support.SCALAR_UNCONSTRAINED,
Support.SCALAR_UNCONSTRAINED),
'Exp':
BijectorSupport(Support.SCALAR_UNCONSTRAINED,
Support.SCALAR_POSITIVE),
'Expm1':
BijectorSupport(Support.SCALAR_UNCONSTRAINED, Support.SCALAR_GT_NEG1),
'FillScaleTriL':
BijectorSupport(Support.VECTOR_SIZE_TRIANGULAR,
Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE),
'FillTriangular':
BijectorSupport(Support.VECTOR_SIZE_TRIANGULAR,
Support.MATRIX_LOWER_TRIL),
'FrechetCDF': # The domain is parameter dependent.
BijectorSupport(Support.OTHER, Support.SCALAR_IN_0_1),
'GeneralizedExtremeValueCDF': # The domain is parameter dependent.
BijectorSupport(Support.OTHER, Support.SCALAR_IN_0_1),
'GeneralizedPareto': # The range is parameter dependent.
BijectorSupport(Support.SCALAR_UNCONSTRAINED, Support.OTHER),
'GompertzCDF':
BijectorSupport(Support.SCALAR_POSITIVE, Support.SCALAR_IN_0_1),
'GumbelCDF':
BijectorSupport(Support.SCALAR_UNCONSTRAINED, Support.SCALAR_IN_0_1),
'Identity':
BijectorSupport(Support.SCALAR_UNCONSTRAINED,
Support.SCALAR_UNCONSTRAINED),
'Inline':
BijectorSupport(Support.SCALAR_UNCONSTRAINED,
Support.SCALAR_UNCONSTRAINED),
'Invert':
BijectorSupport(Support.OTHER, Support.OTHER),
'IteratedSigmoidCentered':
BijectorSupport(Support.VECTOR_UNCONSTRAINED,
Support.VECTOR_WITH_L1_NORM_1_SIZE_GT1),
'KumaraswamyCDF':
BijectorSupport(Support.SCALAR_IN_0_1, Support.SCALAR_IN_0_1),
'Log':
BijectorSupport(Support.SCALAR_POSITIVE,
Support.SCALAR_UNCONSTRAINED),
'Log1p':
BijectorSupport(Support.SCALAR_GT_NEG1, Support.SCALAR_UNCONSTRAINED),
'MatrixInverseTriL':
BijectorSupport(Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE,
Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE),
'MatvecLU':
BijectorSupport(Support.VECTOR_UNCONSTRAINED,
Support.VECTOR_UNCONSTRAINED),
'MoyalCDF':
BijectorSupport(Support.SCALAR_UNCONSTRAINED, Support.SCALAR_IN_0_1),
'NormalCDF':
BijectorSupport(Support.SCALAR_UNCONSTRAINED, Support.SCALAR_IN_0_1),
'Ordered':
BijectorSupport(Support.VECTOR_STRICTLY_INCREASING,
Support.VECTOR_UNCONSTRAINED),
'Permute':
BijectorSupport(Support.VECTOR_UNCONSTRAINED,
Support.VECTOR_UNCONSTRAINED),
'Power':
BijectorSupport(Support.SCALAR_POSITIVE,
Support.SCALAR_POSITIVE),
'PowerTransform': # The domain is parameter dependent.
BijectorSupport(Support.OTHER, Support.SCALAR_POSITIVE),
'RationalQuadraticSpline':
BijectorSupport(Support.SCALAR_UNCONSTRAINED,
Support.SCALAR_UNCONSTRAINED),
'RayleighCDF':
BijectorSupport(Support.SCALAR_NON_NEGATIVE,
Support.SCALAR_IN_0_1),
'Reciprocal':
BijectorSupport(Support.SCALAR_NON_ZERO, Support.SCALAR_NON_ZERO),
'Reshape':
BijectorSupport(Support.SCALAR_UNCONSTRAINED,
Support.SCALAR_UNCONSTRAINED),
'Scale':
BijectorSupport(Support.SCALAR_UNCONSTRAINED,
Support.SCALAR_UNCONSTRAINED),
'ScaleMatvecDiag':
BijectorSupport(Support.VECTOR_UNCONSTRAINED,
Support.VECTOR_UNCONSTRAINED),
'ScaleMatvecLU':
BijectorSupport(Support.VECTOR_UNCONSTRAINED,
Support.VECTOR_UNCONSTRAINED),
'ScaleMatvecTriL':
BijectorSupport(Support.VECTOR_UNCONSTRAINED,
Support.VECTOR_UNCONSTRAINED),
'Shift':
BijectorSupport(Support.SCALAR_UNCONSTRAINED,
Support.SCALAR_UNCONSTRAINED),
'ShiftedGompertzCDF':
BijectorSupport(Support.SCALAR_POSITIVE, Support.SCALAR_IN_0_1),
'Sigmoid':
BijectorSupport(Support.SCALAR_UNCONSTRAINED, Support.SCALAR_IN_0_1),
'Sinh':
BijectorSupport(Support.SCALAR_UNCONSTRAINED,
Support.SCALAR_UNCONSTRAINED),
'SinhArcsinh':
BijectorSupport(Support.SCALAR_UNCONSTRAINED,
Support.SCALAR_UNCONSTRAINED),
'SoftClip':
BijectorSupport(Support.SCALAR_UNCONSTRAINED,
Support.SCALAR_UNCONSTRAINED),
'Softfloor':
BijectorSupport(Support.SCALAR_UNCONSTRAINED,
Support.SCALAR_UNCONSTRAINED),
'Softplus':
BijectorSupport(Support.SCALAR_UNCONSTRAINED,
Support.SCALAR_POSITIVE),
'Softsign':
BijectorSupport(Support.SCALAR_UNCONSTRAINED,
Support.SCALAR_IN_NEG1_1),
'SoftmaxCentered':
BijectorSupport(Support.VECTOR_UNCONSTRAINED,
Support.VECTOR_WITH_L1_NORM_1_SIZE_GT1),
'Square':
BijectorSupport(Support.SCALAR_NON_NEGATIVE,
Support.SCALAR_NON_NEGATIVE),
'Tanh':
BijectorSupport(Support.SCALAR_UNCONSTRAINED,
Support.SCALAR_IN_NEG1_1),
'TransformDiagonal':
BijectorSupport(Support.MATRIX_UNCONSTRAINED, Support.OTHER),
'Transpose':
BijectorSupport(Support.SCALAR_UNCONSTRAINED,
Support.SCALAR_UNCONSTRAINED),
'WeibullCDF':
BijectorSupport(Support.SCALAR_NON_NEGATIVE, Support.SCALAR_IN_0_1),
}
missing_keys = set(instantiable_bijectors().keys()) - set(supports.keys())
if missing_keys:
raise ValueError('Missing bijector supports: {}'.format(missing_keys))
BIJECTOR_SUPPORTS = supports
return BIJECTOR_SUPPORTS
@hps.composite
def unconstrained_bijectors(draw, max_forward_event_ndims=None,
must_preserve_event_ndims=False,
validate_args=True):
"""Strategy for drawing forward-unconstrained bijectors.
The bijectors emitted by this are those whose `forward` computation
can act on all of R^n, with n <= `max_forward_event_ndims`.
Args:
draw: Strategy sampler supplied by `@hps.composite`.
max_forward_event_ndims: Optional python `int`, maximum acceptable bijector
`forward_event_ndims`.
must_preserve_event_ndims: Optional python `bool`, `True` if the returned
bijector must preserve the rank of the event.
validate_args: Python `bool`; whether to enable runtime assertions.
Returns:
unconstrained: A strategy for drawing such bijectors.
"""
if max_forward_event_ndims is None:
max_forward_event_ndims = float('inf')
ndims_by_prefix = dict(SCALAR=0, VECTOR=1, MATRIX=2)
def is_acceptable(support):
"""Determines if a `BijectorSupport` object is acceptable."""
if 'UNCONSTRAINED' not in support.forward:
return False
forward_prefix = support.forward.split('_')[0]
if ndims_by_prefix[forward_prefix] > max_forward_event_ndims:
return False
if must_preserve_event_ndims:
inverse_prefix = support.inverse.split('_')[0]
if ndims_by_prefix[forward_prefix] != ndims_by_prefix[inverse_prefix]:
return False
return True
supports = bijector_supports()
acceptable_keys = sorted([k for k in instantiable_bijectors().keys()
if k == 'Invert' or is_acceptable(supports[k])])
bijector_name = draw(hps.sampled_from(acceptable_keys))
if bijector_name == 'Invert':
acceptable_keys = [k for k in instantiable_bijectors().keys()
if is_acceptable(supports[k].invert())]
underlying = draw(hps.sampled_from(acceptable_keys))
underlying = instantiable_bijectors()[underlying][0](
validate_args=validate_args)
return tfb.Invert(underlying, validate_args=validate_args)
return instantiable_bijectors()[bijector_name][0](validate_args=validate_args)
def distribution_eligilibility_filter_for(bijector):
"""Returns a function which filters distribution names, where possible."""
if isinstance(bijector, tfb.CorrelationCholesky):
return 'LKJ'.__eq__
return lambda name: True
def distribution_filter_for(bijector):
"""Returns a function checking Distribution compatibility with this bijector.
That is, `distribution_filter_for(bijector)(dist) == True` implies
that `bijector` can act on `dist` (i.e., they are safe to compose with
`TransformedDistribution`).
TODO(bjp): Make this sensitive to supports. Currently assumes `bijector` acts
on an unconstrained space, and just checks compatible ranks.
Args:
bijector: A `Bijector` instance to check compatibility with.
Returns:
filter: A Python callable filtering Distributions for compatibility with
this bijector.
"""
if isinstance(bijector, tfb.CholeskyToInvCholesky):
def additional_check(dist):
return (tensorshape_util.rank(dist.event_shape) == 2 and
int(dist.event_shape[0]) == int(dist.event_shape[1]))
elif isinstance(bijector, tfb.CorrelationCholesky):
def additional_check(dist):
# The isinstance check will be redundant when the
# `distribution_eligilibility_filter_for` above has been used, but we keep
# it here for safety.
return isinstance(dist, tfd.LKJ) and dist.input_output_cholesky
else:
additional_check = lambda dist: True
def distribution_filter(dist):
if not dtype_util.is_floating(dist.dtype):
return False
if bijector.forward_min_event_ndims > tensorshape_util.rank(
dist.event_shape):
return False
return additional_check(dist)
return distribution_filter
def padded(t, lhs, rhs=None):
"""Left pads and optionally right pads the innermost axis of `t`."""
t = tf.convert_to_tensor(t)
lhs = tf.convert_to_tensor(lhs, dtype=t.dtype)
zeros = tf.zeros([tf.rank(t) - 1, 2], dtype=tf.int32)
lhs_paddings = tf.concat([zeros, [[1, 0]]], axis=0)
result = tf.pad(t, paddings=lhs_paddings, constant_values=lhs)
if rhs is not None:
rhs = tf.convert_to_tensor(rhs, dtype=t.dtype)
rhs_paddings = tf.concat([zeros, [[0, 1]]], axis=0)
result = tf.pad(result, paddings=rhs_paddings, constant_values=rhs)
return result
def spline_bin_size_constraint(x, lo=-1, hi=1, dtype=tf.float32):
"""Maps innermost axis of `x` to positive values."""
nbins = tf.cast(tf.shape(x)[-1], dtype)
min_width = 1e-2
scale = hi - lo - nbins * min_width
return tf.math.softmax(tf.cast(x, dtype)) * scale + min_width
def spline_slope_constraint(s, dtype=tf.float32):
"""Maps `s` to all positive with `s[..., 0] == s[..., -1] == 1`."""
# Slice off a position since this is nknots - 2 vs nknots - 1 for bin sizes.
min_slope = 1e-2
return tf.math.softplus(tf.cast(s[..., :-1], dtype)) + min_slope
def power_transform_constraint(power):
"""Maps `s` to [-1 / power, inf)."""
def constrain(x):
if power == 0:
return x
return tf.math.softplus(x) - 1. / power
return constrain
def frechet_constraint(loc):
"""Maps `s` to [loc, inf)."""
def constrain(x):
return loc + tf.math.softplus(x)
return constrain
def gev_constraint(loc, scale, conc):
"""Maps `s` to support based on `loc`, `scale` and `conc`."""
def constrain(x):
c = tf.convert_to_tensor(conc)
# We intentionally compute the endpoint with (1.0 / concentration) * scale,
# for the same reason as in GeneralizedExtremeValueCDF._maybe_assert_valid_x
endpoint = loc - (1.0 / c) * scale
return tf.where(c > 0.,
tf.math.softplus(x) + endpoint,
tf.where(
tf.equal(0., c),
x, endpoint - tf.math.softplus(x)))
return constrain
def generalized_pareto_constraint(loc, scale, conc):
"""Maps `s` to support based on `loc`, `scale` and `conc`."""
def constrain(x):
conc_ = tf.convert_to_tensor(conc)
loc_ = tf.convert_to_tensor(loc)
return tf.where(conc_ >= 0.,
tf.math.softplus(x) + loc_,
loc_ - tf.math.sigmoid(x) * scale / conc_)
return constrain | en | 0.787911 | # Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ Utilities for hypothesis testing of bijectors. # INSTANTIABLE_BIJECTORS is a map from str->(BijectorClass,) Identifies bijectors that are trivially instantiable. Here, "trivially" means things like `Exp` for which no parameters need to be generated; i.e., the only arguments to the constructor are `self`, `name`, and `validate_args`. This finds the bijectors by traversing the `tfp.bijectors` namespace. The traversal is cached so it only happens once per Python process. Returns: instantiable: A Python `dict` mapping the `str` bijector name to a singleton tuple containing the bijector class object. # ArgSpec(args, varargs, keywords, defaults) # pylint: disable=protected-access Specification of the domain and codomain of a bijector. The `forward` slot is the support of the forward computation, i.e., the domain, and the `inverse` slot is the support of the inverse computation, i.e., the codomain. Returns the inverse of this `BijectorSupport`. Returns a dict of supports for each instantiable bijector. Warns if any `instantiable_bijectors` are found to have no declared supports, once per Python process. Returns: supports: Python `dict` mapping `str` bijector name to the corresponding `BijectorSupport` object. # pylint: disable=invalid-name # The domain is parameter dependent. 
# The domain is parameter dependent. # The range is parameter dependent. # The domain is parameter dependent. Strategy for drawing forward-unconstrained bijectors. The bijectors emitted by this are those whose `forward` computation can act on all of R^n, with n <= `max_forward_event_ndims`. Args: draw: Strategy sampler supplied by `@hps.composite`. max_forward_event_ndims: Optional python `int`, maximum acceptable bijector `forward_event_ndims`. must_preserve_event_ndims: Optional python `bool`, `True` if the returned bijector must preserve the rank of the event. validate_args: Python `bool`; whether to enable runtime assertions. Returns: unconstrained: A strategy for drawing such bijectors. Determines if a `BijectorSupport` object is acceptable. Returns a function which filters distribution names, where possible. Returns a function checking Distribution compatibility with this bijector. That is, `distribution_filter_for(bijector)(dist) == True` implies that `bijector` can act on `dist` (i.e., they are safe to compose with `TransformedDistribution`). TODO(bjp): Make this sensitive to supports. Currently assumes `bijector` acts on an unconstrained space, and just checks compatible ranks. Args: bijector: A `Bijector` instance to check compatibility with. Returns: filter: A Python callable filtering Distributions for compatibility with this bijector. # The isinstance check will be redundant when the # `distribution_eligilibility_filter_for` above has been used, but we keep # it here for safety. Left pads and optionally right pads the innermost axis of `t`. Maps innermost axis of `x` to positive values. Maps `s` to all positive with `s[..., 0] == s[..., -1] == 1`. # Slice off a position since this is nknots - 2 vs nknots - 1 for bin sizes. Maps `s` to [-1 / power, inf). Maps `s` to [loc, inf). Maps `s` to support based on `loc`, `scale` and `conc`. 
# We intentionally compute the endpoint with (1.0 / concentration) * scale, # for the same reason as in GeneralizedExtremeValueCDF._maybe_assert_valid_x Maps `s` to support based on `loc`, `scale` and `conc`. | 2.066301 | 2 |
dataset_loading/load_datasets.py | matthewbehrend/BNC | 4 | 6625212 | <reponame>matthewbehrend/BNC
import os
from dataset_loading.officehome import OfficeHomeArt, OfficeHomeClipart, OfficeHomeProduct, OfficeHomeReal
from dataset_loading.dataset import DatasetGroup
import numpy as np
from mxnet import gluon
from mxnet.gluon.data import ArrayDataset
class OfficeHomeDatasets(object):
def __init__(self, useResNetFeatures=True, asdataloader=True):
self.k_classes = 65
self.useFeatures = useResNetFeatures
print('OfficeHome Dataset. classes: ', self.k_classes)
if(self.useFeatures):
self.fn_cache = 'data_cache/officehomefeatures.npz'
else:
self.fn_cache = 'data_cache/officehome.npz'
if(not os.path.exists( self.fn_cache )):
self.readAndCacheData()
self.load(asdataloader)
def readAndCacheData(self):
print('Loading data...')
net = None
if(self.useFeatures):
net = getResNetFeatureExtractor()
art = OfficeHomeArt(extractor=net)
clipart = OfficeHomeClipart(extractor=net)
product = OfficeHomeProduct(extractor=net)
real = OfficeHomeReal(extractor=net)
np.savez_compressed(self.fn_cache,
art.train._data[0], art.train._data[1], art.test._data[0], art.test._data[1],
clipart.train._data[0], clipart.train._data[1], clipart.test._data[0], clipart.test._data[1],
product.train._data[0], product.train._data[1], product.test._data[0], product.test._data[1],
real.train._data[0], real.train._data[1], real.test._data[0], real.test._data[1]
)
def load(self, asdataloader):
print('Loading data from cache')
dat = np.load(self.fn_cache)
batch_size = 256
i = 0
self.art = _addDataset('art', dat, i, batch_size, asdataloader)
i += 4
self.clipart = _addDataset('clipart', dat, i, batch_size, asdataloader)
i += 4
self.product = _addDataset('product', dat, i, batch_size, asdataloader)
i += 4
self.real = _addDataset('real', dat, i, batch_size, asdataloader)
self.domains = {'art':self.art, 'clipart':self.clipart, 'product':self.product, 'real':self.real}
def getResNetFeatureExtractor():
net = gluon.model_zoo.vision.resnet50_v1(pretrained=True).features
return net
def _addDataset(name, dat, idx, batch_size, asdataloader):
fl = dat.files
train = ArrayDataset(dat[fl[idx]], dat[fl[idx+1]])
test = ArrayDataset(dat[fl[idx+2]], dat[fl[idx+3]])
dat_set = DatasetGroup( name )
if(asdataloader):
dat_set.makeDomainDatasetLoader(train, test, batch_size)
else:
dat_set.train = train
dat_set.test = test
return dat_set
| import os
from dataset_loading.officehome import OfficeHomeArt, OfficeHomeClipart, OfficeHomeProduct, OfficeHomeReal
from dataset_loading.dataset import DatasetGroup
import numpy as np
from mxnet import gluon
from mxnet.gluon.data import ArrayDataset
class OfficeHomeDatasets(object):
def __init__(self, useResNetFeatures=True, asdataloader=True):
self.k_classes = 65
self.useFeatures = useResNetFeatures
print('OfficeHome Dataset. classes: ', self.k_classes)
if(self.useFeatures):
self.fn_cache = 'data_cache/officehomefeatures.npz'
else:
self.fn_cache = 'data_cache/officehome.npz'
if(not os.path.exists( self.fn_cache )):
self.readAndCacheData()
self.load(asdataloader)
def readAndCacheData(self):
print('Loading data...')
net = None
if(self.useFeatures):
net = getResNetFeatureExtractor()
art = OfficeHomeArt(extractor=net)
clipart = OfficeHomeClipart(extractor=net)
product = OfficeHomeProduct(extractor=net)
real = OfficeHomeReal(extractor=net)
np.savez_compressed(self.fn_cache,
art.train._data[0], art.train._data[1], art.test._data[0], art.test._data[1],
clipart.train._data[0], clipart.train._data[1], clipart.test._data[0], clipart.test._data[1],
product.train._data[0], product.train._data[1], product.test._data[0], product.test._data[1],
real.train._data[0], real.train._data[1], real.test._data[0], real.test._data[1]
)
def load(self, asdataloader):
print('Loading data from cache')
dat = np.load(self.fn_cache)
batch_size = 256
i = 0
self.art = _addDataset('art', dat, i, batch_size, asdataloader)
i += 4
self.clipart = _addDataset('clipart', dat, i, batch_size, asdataloader)
i += 4
self.product = _addDataset('product', dat, i, batch_size, asdataloader)
i += 4
self.real = _addDataset('real', dat, i, batch_size, asdataloader)
self.domains = {'art':self.art, 'clipart':self.clipart, 'product':self.product, 'real':self.real}
def getResNetFeatureExtractor():
net = gluon.model_zoo.vision.resnet50_v1(pretrained=True).features
return net
def _addDataset(name, dat, idx, batch_size, asdataloader):
fl = dat.files
train = ArrayDataset(dat[fl[idx]], dat[fl[idx+1]])
test = ArrayDataset(dat[fl[idx+2]], dat[fl[idx+3]])
dat_set = DatasetGroup( name )
if(asdataloader):
dat_set.makeDomainDatasetLoader(train, test, batch_size)
else:
dat_set.train = train
dat_set.test = test
return dat_set | none | 1 | 2.310858 | 2 | |
HelloWorld/Python01/Core Python Applications Programming 3rd/ch10/friendsC.py | grtlinux/KieaPython | 1 | 6625213 | #!/usr/bin/env python
import cgi
from urllib import quote_plus
header = 'Content-Type: text/html\n\n'
url = '/cgi-bin/friendsC.py'
errhtml = '''<HTML><HEAD><TITLE>
Friends CGI Demo</TITLE></HEAD>
<BODY><H3>ERROR</H3>
<B>%s</B><P>
<FORM><INPUT TYPE=button VALUE=Back
ONCLICK="window.history.back()"></FORM>
</BODY></HTML>'''
def showError(error_str):
print header + errhtml % error_str
formhtml = '''<HTML><HEAD><TITLE>
Friends CGI Demo</TITLE></HEAD>
<BODY><H3>Friends list for: <I>%s</I></H3>
<FORM ACTION="%s">
<B>Enter your Name:</B>
<INPUT TYPE=hidden NAME=action VALUE=edit>
<INPUT TYPE=text NAME=person VALUE="%s" SIZE=15>
<P><B>How many friends do you have?</B>
%s
<P><INPUT TYPE=submit></FORM></BODY></HTML>'''
fradio = '<INPUT TYPE=radio NAME=howmany VALUE="%s" %s> %s\n'
def showForm(who, howmany):
friends = []
for i in (0, 10, 25, 50, 100):
checked = ''
if str(i) == howmany:
checked = 'CHECKED'
friends.append(fradio % (str(i), checked, str(i)))
print '%s%s' % (header, formhtml % (
who, url, who, ''.join(friends)))
reshtml = '''<HTML><HEAD><TITLE>
Friends CGI Demo</TITLE></HEAD>
<BODY><H3>Friends list for: <I>%s</I></H3>
Your name is: <B>%s</B><P>
You have <B>%s</B> friends.
<P>Click <A HREF="%s">here</A> to edit your data again.
</BODY></HTML>'''
def doResults(who, howmany):
newurl = url + '?action=reedit&person=%s&howmany=%s' % (
quote_plus(who), howmany)
print header + reshtml % (who, who, howmany, newurl)
def process():
error = ''
form = cgi.FieldStorage()
if 'person' in form:
who = form['person'].value.title()
else:
who = 'NEW USER'
if 'howmany' in form:
howmany = form['howmany'].value
else:
if 'action' in form and \
form['action'].value == 'edit':
error = 'Please select number of friends.'
else:
howmany = 0
if not error:
if 'action' in form and \
form['action'].value != 'reedit':
doResults(who, howmany)
else:
showForm(who, howmany)
else:
showError(error)
if __name__ == '__main__':
process()
| #!/usr/bin/env python
import cgi
from urllib import quote_plus
header = 'Content-Type: text/html\n\n'
url = '/cgi-bin/friendsC.py'
errhtml = '''<HTML><HEAD><TITLE>
Friends CGI Demo</TITLE></HEAD>
<BODY><H3>ERROR</H3>
<B>%s</B><P>
<FORM><INPUT TYPE=button VALUE=Back
ONCLICK="window.history.back()"></FORM>
</BODY></HTML>'''
def showError(error_str):
print header + errhtml % error_str
formhtml = '''<HTML><HEAD><TITLE>
Friends CGI Demo</TITLE></HEAD>
<BODY><H3>Friends list for: <I>%s</I></H3>
<FORM ACTION="%s">
<B>Enter your Name:</B>
<INPUT TYPE=hidden NAME=action VALUE=edit>
<INPUT TYPE=text NAME=person VALUE="%s" SIZE=15>
<P><B>How many friends do you have?</B>
%s
<P><INPUT TYPE=submit></FORM></BODY></HTML>'''
fradio = '<INPUT TYPE=radio NAME=howmany VALUE="%s" %s> %s\n'
def showForm(who, howmany):
friends = []
for i in (0, 10, 25, 50, 100):
checked = ''
if str(i) == howmany:
checked = 'CHECKED'
friends.append(fradio % (str(i), checked, str(i)))
print '%s%s' % (header, formhtml % (
who, url, who, ''.join(friends)))
reshtml = '''<HTML><HEAD><TITLE>
Friends CGI Demo</TITLE></HEAD>
<BODY><H3>Friends list for: <I>%s</I></H3>
Your name is: <B>%s</B><P>
You have <B>%s</B> friends.
<P>Click <A HREF="%s">here</A> to edit your data again.
</BODY></HTML>'''
def doResults(who, howmany):
newurl = url + '?action=reedit&person=%s&howmany=%s' % (
quote_plus(who), howmany)
print header + reshtml % (who, who, howmany, newurl)
def process():
error = ''
form = cgi.FieldStorage()
if 'person' in form:
who = form['person'].value.title()
else:
who = 'NEW USER'
if 'howmany' in form:
howmany = form['howmany'].value
else:
if 'action' in form and \
form['action'].value == 'edit':
error = 'Please select number of friends.'
else:
howmany = 0
if not error:
if 'action' in form and \
form['action'].value != 'reedit':
doResults(who, howmany)
else:
showForm(who, howmany)
else:
showError(error)
if __name__ == '__main__':
process()
| en | 0.32823 | #!/usr/bin/env python <HTML><HEAD><TITLE>
Friends CGI Demo</TITLE></HEAD>
<BODY><H3>ERROR</H3>
<B>%s</B><P>
<FORM><INPUT TYPE=button VALUE=Back
ONCLICK="window.history.back()"></FORM>
</BODY></HTML> <HTML><HEAD><TITLE>
Friends CGI Demo</TITLE></HEAD>
<BODY><H3>Friends list for: <I>%s</I></H3>
<FORM ACTION="%s">
<B>Enter your Name:</B>
<INPUT TYPE=hidden NAME=action VALUE=edit>
<INPUT TYPE=text NAME=person VALUE="%s" SIZE=15>
<P><B>How many friends do you have?</B>
%s
<P><INPUT TYPE=submit></FORM></BODY></HTML> <HTML><HEAD><TITLE>
Friends CGI Demo</TITLE></HEAD>
<BODY><H3>Friends list for: <I>%s</I></H3>
Your name is: <B>%s</B><P>
You have <B>%s</B> friends.
<P>Click <A HREF="%s">here</A> to edit your data again.
</BODY></HTML> | 3.491135 | 3 |
domonic/constants/entities.py | Jordan-Cottle/domonic | 1 | 6625214 | """
domonic.constants.entities
====================================
"""
class Entity():
def __init__(self, entity: str):
self.entity = entity
def __str__(self):
import html
return html.unescape(self.character)
class Char():
def __init__(self, character: str):
self.character = character
def __str__(self):
import html
return html.escape(self.character)
# def __repr__(self):
# return self.character
# web
# ASCII Characters (Printable)
SPACE = ' '
EXCLAMATION_MARK = '!' #: !
QUOTATION_MARK = '"' #: "
NUMBER_SIGN = '#' #: #
DOLLAR_SIGN = '$' #: $
PERCENT_SIGN = '%' #: %
AMPERSAND = '&' #: &
APOSTROPHE = ''' #: '
OPENING_PARENTHESIS = '(' #: (
LEFT_PARENTHESIS = '(' #: (
CLOSING_PARENTHESIS = ')' #: )
RIGHT_PARENTHESIS = ')' #: )
ASTERISK = '*' #: *
PLUS_SIGN = '+' #: +
COMMA = ',' #: ,
HYPHEN = '-' #: -
PERIOD = '.' #: .
SLASH = '/' #: /
ZERO = '0' #: 0
ONE = '1' #: 1
TWO = '2' #: 2
THREE = '3' #: 3
FOUR = '4' #: 4
FIVE = '5' #: 5
SIX = '6' #: 6
SEVEN = '7' #: 7
EIGHT = '8' #: 8
NINE = '9' #: 9
COLON = ':' #: :
SEMICOLON = ';' #: ;
LESS_THAN = '<' #: <
EQUALS_SIGN = '=' #: =
GREATER_THAN = '>' #: >
QUESTION_MARK = '?' #: ?
AT_SIGN = '@' #: @
UPPERCASE_A = 'A' #: A
UPPERCASE_B = 'B' #: B
UPPERCASE_C = 'C' #: C
UPPERCASE_D = 'D' #: D
UPPERCASE_E = 'E' #: E
UPPERCASE_F = 'F' #: F
UPPERCASE_G = 'G' #: G
UPPERCASE_H = 'H' #: H
UPPERCASE_I = 'I' #: I
UPPERCASE_J = 'J' #: J
UPPERCASE_K = 'K' #: K
UPPERCASE_L = 'L' #: L
UPPERCASE_M = 'M' #: M
UPPERCASE_N = 'N' #: N
UPPERCASE_O = 'O' #: O
UPPERCASE_P = 'P' #: P
UPPERCASE_Q = 'Q' #: Q
UPPERCASE_R = 'R' #: R
UPPERCASE_S = 'S' #: S
UPPERCASE_T = 'T' #: T
UPPERCASE_U = 'U' #: U
UPPERCASE_V = 'V' #: V
UPPERCASE_W = 'W' #: W
UPPERCASE_X = 'X' #: X
UPPERCASE_Y = 'Y' #: Y
UPPERCASE_Z = 'Z' #: Z
OPENING_SQUARE_BRACKET = '[' #: [
BACKSLASH = '\' #: \
CLOSING_SQUARE_BRACKET = ']' #: ]
CARET = '^' #: ^
UNDERSCORE = '_' #: _
GRAVE_ACCENT = '`' #:
LOWERCASE_A = 'a' #: a
LOWERCASE_B = 'b' #: b
LOWERCASE_C = 'c' #: c
LOWERCASE_D = 'd' #: d
LOWERCASE_E = 'e' #: e
LOWERCASE_F = 'f' #: f
LOWERCASE_G = 'g' #: g
LOWERCASE_H = 'h' #: h
LOWERCASE_I = 'i' #: i
LOWERCASE_J = 'j' #: j
LOWERCASE_K = 'k' #: k
LOWERCASE_L = 'l' #: l
LOWERCASE_M = 'm' #: m
LOWERCASE_N = 'n' #: n
LOWERCASE_O = 'o' #: o
LOWERCASE_P = 'p' #: p
LOWERCASE_Q = 'q' #: q
LOWERCASE_R = 'r' #: r
LOWERCASE_S = 's' #: s
LOWERCASE_T = 't' #: t
LOWERCASE_U = 'u' #: u
LOWERCASE_V = 'v' #: v
LOWERCASE_W = 'w' #: w
LOWERCASE_X = 'x' #: x
LOWERCASE_Y = 'y' #: y
LOWERCASE_Z = 'z' #: z
OPENING_CURLY_BRACE = '{' #: {
LEFT_CURLY_BRACE = '{' #: {
VERTICAL_BAR = '|' #: |
CLOSING_CURLY_BRACE = '}' #: }
RIGHT_CURLY_BRACE = '}' #: }
TILDE = '~' #: ~
# ISO-8859-1 Characters
AGRAVE = 'À' #: À
AACUTE = 'Á' #: Á
ACIRC = 'Â' #: Â
ATILDE = 'Ã' #: Ã
AUML = 'Ä' #: Ä
ARING = 'Å' #: Å
AELIG = 'Æ' #: Æ
CCEDIL = 'Ç' #: Ç
EGRAVE = 'È' #: È
EACUTE = 'É' #: É
ECIRC = 'Ê' #: Ê
EUML = 'Ë' #: Ë
IGRAVE = 'Ì' #: Ì
IACUTE = 'Í' #: Í
ICIRC = 'Î' #: Î
IUML = 'Ï' #: Ï
ETH = 'Ð' #: Ð
NTILDE = 'Ñ' #: Ñ
OGRAVE = 'Ò' #: Ò
OACUTE = 'Ó' #: Ó
OCIRC = 'Ô' #: Ô
OTILDE = 'Õ' #: Õ
OUML = 'Ö' #: Ö
OSLASH = 'Ø' #: Ø
UGRAVE = 'Ù' #: Ù
UACUTE = 'Ú' #: Ú
UCIRC = 'Û' #: Û
UUML = 'Ü' #: Ü
YACUTE = 'Ý' #: Ý
THORN = 'Þ' #: Þ
SZLIG = 'ß' #: ß
AGRAVE = 'à' #: à
AACUTE = 'á' #: á
ACIRC = 'â' #: â
ATILDE = 'ã' #: ã
AUML = 'ä' #: ä
ARING = 'å' #: å
AELIG = 'æ' #: æ
CCEDIL = 'ç' #: ç
EGRAVE = 'è' #: è
EACUTE = 'é' #: é
ECIRC = 'ê' #: ê
EUML = 'ë' #: ë
IGRAVE = 'ì' #: ì
IACUTE = 'í' #: í
ICIRC = 'î' #: î
IUML = 'ï' #: ï
ETH = 'ð' #: ð
NTILDE = 'ñ' #: ñ
OGRAVE = 'ò' #: ò
OACUTE = 'ó' #: ó
OCIRC = 'ô' #: ô
OTILDE = 'õ' #: õ
OUML = 'ö' #: ö
OSLASH = 'ø' #: ø
UGRAVE = 'ù' #: ù
UACUTE = 'ú' #: ú
UCIRC = 'û' #: û
UUML = 'ü' #: ü
YACUTE = 'ý' #: ý
THORN = 'þ' #: þ
YUML = 'ÿ' #: ÿ
# ISO-8859-1 Symbols
NBSP = ' ' #:
IEXCL = '¡' #: ¡
CENT = '¢' #: ¢
POUND = '£' #: £
CURREN = '¤' #: ¤
YEN = '¥' #: ¥
BRVBAR = '¦' #: ¦
SECT = '§' #: §
UML = '¨' #: ¨
COPY = '©' #: ©
COPYRIGHT = '©' #: ©
ORDF = 'ª' #: ª
LAQUO = '«' #: «
NOT = '¬' #: ¬
# ­ ­ Soft hyphen
REG = '®' #: ®
MACR = '¯' #: ¯
DEG = '°' #: °
PLUSMN = '±' #: ±
SUP2 = '²' #: ²
SUP3 = '³' #: ³
ACUTE = '´' #: ´
MICRO = 'µ' #: µ
PARA = '¶' #: ¶
CEDIL = '¸' #: ¸
SUP1 = '¹' #: ¹
ORDM = 'º' #: º
RAQUO = '»' #: »
FRAC14 = '¼' #: ¼
FRAC12 = '½' #: ½
FRAC34 = '¾' #: ¾
IQUEST = '¿' #: ¿
TIMES = '×' #: ×
DIVIDE = '÷' #: ÷
# Math Symbols
FORALL = '∀' #: ∀
PART = '∂' #: ∂
EXIST = '∃' #: ∃
EMPTY = '∅' #: ∅
NABLA = '∇' #: ∇
ISIN = '∈' #: ∈
NOTIN = '∉' #: ∉
NI = '∋' #: ∋
PROD = '∏' #: ∏
SUM = '∑' #: ∑
MINUS = '−' #: −
LOWAST = '∗' #: ∗
RADIC = '√' #: √
PROP = '∝' #: ∝
INFIN = '∞' #: ∞
ANG = '∠' #: ∠
AND = '∧' #: ∧
OR = '∨' #: ∨
CAP = '∩' #: ∩
CUP = '∪' #: ∪
INT = '∫' #: ∫
THERE4 = '∴' #: ∴
SIM = '∼' #: ∼
CONG = '≅' #: ≅
ASYMP = '≈' #: ≈
NE = '≠' #: ≠
EQUIV = '≡' #: ≡
LE = '≤' #: ≤
GE = '≥' #: ≥
SUB = '⊂' #: ⊂
SUP = '⊃' #: ⊃
NSUB = '⊄' #: ⊄
SUBE = '⊆' #: ⊆
SUPE = '⊇' #: ⊇
OPLUS = '⊕' #: ⊕
OTIMES = '⊗' #: ⊗
PERP = '⊥' #: ⊥
SDOT = '⋅' #: ⋅
# Greek Letters
ALPHA = 'Α' #: Α
BETA = 'Β' #: Β
GAMMA = 'Γ' #: Γ
DELTA = 'Δ' #: Δ
EPSILON = 'Ε' #: Ε
ZETA = 'Ζ' #: Ζ
ETA = 'Η' #: Η
THETA = 'Θ' #: Θ
IOTA = 'Ι' #: Ι
KAPPA = 'Κ' #: Κ
LAMBDA = 'Λ' #: Λ
MU = 'Μ' #: Μ
NU = 'Ν' #: Ν
XI = 'Ξ' #: Ξ
OMICRON = 'Ο' #: Ο
PI = 'Π' #: Π
RHO = 'Ρ' #: Ρ
SIGMA = 'Σ' #: Σ
TAU = 'Τ' #: Τ
UPSILON = 'Υ' #: Υ
PHI = 'Φ' #: Φ
CHI = 'Χ' #: Χ
PSI = 'Ψ' #: Ψ
OMEGA = 'Ω' #: Ω
ALPHA = 'α' #: α
BETA = 'β' #: β
GAMMA = 'γ' #: γ
DELTA = 'δ' #: δ
EPSILON = 'ε' #: ε
ZETA = 'ζ' #: ζ
ETA = 'η' #: η
THETA = 'θ' #: θ
IOTA = 'ι' #: ι
KAPPA = 'κ' #: κ
LAMBDA = 'λ' #: λ
MU = 'μ' #: μ
NU = 'ν' #: ν
XI = 'ξ' #: ξ
OMICRON = 'ο' #: ο
PI = 'π' #: π
RHO = 'ρ' #: ρ
SIGMAF = 'ς' #: ς
SIGMA = 'σ' #: σ
TAU = 'τ' #: τ
UPSILON = 'υ' #: υ
PHI = 'φ' #: φ
CHI = 'χ' #: χ
PSI = 'ψ' #: ψ
OMEGA = 'ω' #: ω
THETASYM = 'ϑ' #: ϑ
UPSIH = 'ϒ' #: ϒ
PIV = 'ϖ' #: ϖ
OELIG = 'Œ' #: Œ
oeLIG = 'œ' #: œ
SCARON = 'Š' #: Š
Scaron = 'Š' #: Š
scaron = 'š' #: š
YUML = 'Ÿ' #: Ÿ
FNOF = 'ƒ' #: ƒ
CIRC = 'ˆ' #: ˆ
TILDE = '˜' #: ˜
#     En space
#     Em space
#     Thin space
# ‌ ‌ Zero width non-joiner
# ‍ ‍ Zero width joiner
# ‎ ‎ Left-to-right mark
# ‏ ‏ Right-to-left mark
NDASH = '–' #: –
MDASH = '—' #: —
LSQUO = '‘' #: ‘
RSQUO = '’' #: ’
SBQUO = '‚' #: ‚
LDQUO = '“' #: “
RDQUO = '”' #: ”
BDQUO = '„' #: „
DAGGER = '†' #: †
DAGGER = '‡' #: ‡
BULL = '•' #: •
HELLIP = '…' #: …
PERMIL = '‰' #: ‰
PRIME = '′' #: ′
PRIME = '″' #: ″
LSAQUO = '‹' #: ‹
RSAQUO = '›' #: ›
OLINE = '‾' #: ‾
EURO = '€' #: €
TRADE = '™' #: ™
TRADEMARK = '™' #: ™
# ARROWS
LARR = '←' #: ←
LEFT = '←' #: ←
UARR = '↑' #: ↑
UP = '↑' #: ↑
RARR = '→' #: →
RIGHT = '→' #: →
DARR = '↓' #: ↓
DOWN = '↓' #: ↓
HARR = '↔' #: ↔
CRARR = '↵' #: ↵
LCEIL = '⌈' #: ⌈
RCEIL = '⌉' #: ⌉
LFLOOR = '⌊' #: ⌊
RFLOOR = '⌋' #: ⌋
LOZ = '◊' #: ◊
SPADES = '♠' #: ♠
CLUBS = '♣' #: ♣
HEARTS = '♥' #: ♥
DIAMS = '♦' #: ♦
DIAMONDS = '♦' #: ♦
SUNG = '♪' #: ♪
FLAT = '♭' #: ♭
NATUR = '♮' #: ♮
NATURAL = '♮' #: ♮
SHARP = '♯' #: ♯
CHECK = "✓" #: ✓
CHECKMARK = "✓" #: ✓
TICK = "✓" #: ✓
CROSS = "✗" #: ✗
OHM = 'Ω' #: Ω
MHO = '℧' #: ℧
FRAC13 = '⅓' #: ⅓
FRAC23 = '⅔' #: ⅔
FRAC15 = '⅕' #: ⅕
FRAC25 = '⅖' #: ⅖
FRAC35 = '⅗' #: ⅗
FRAC45 = '⅘' #: ⅘
FRAC16 = '⅙' #: ⅙
FRAC56 = '⅚' #: ⅚
FRAC18 = '⅛' #: ⅛
FRAC38 = '⅜' #: ⅜
FRAC58 = '⅝' #: ⅝
FRAC78 = '⅞' #: ⅞
STAR = "☆" #: ☆
STARF = "★" #: ★
BIGSTAR = "★"
PHONE = "☎" #: ☎
FEMALE = "♀" #: ♀
MALE = "♂" #: ♂
| """
domonic.constants.entities
====================================
"""
class Entity():
def __init__(self, entity: str):
self.entity = entity
def __str__(self):
import html
return html.unescape(self.character)
class Char():
def __init__(self, character: str):
self.character = character
def __str__(self):
import html
return html.escape(self.character)
# def __repr__(self):
# return self.character
# web
# ASCII Characters (Printable)
SPACE = ' '
EXCLAMATION_MARK = '!' #: !
QUOTATION_MARK = '"' #: "
NUMBER_SIGN = '#' #: #
DOLLAR_SIGN = '$' #: $
PERCENT_SIGN = '%' #: %
AMPERSAND = '&' #: &
APOSTROPHE = ''' #: '
OPENING_PARENTHESIS = '(' #: (
LEFT_PARENTHESIS = '(' #: (
CLOSING_PARENTHESIS = ')' #: )
RIGHT_PARENTHESIS = ')' #: )
ASTERISK = '*' #: *
PLUS_SIGN = '+' #: +
COMMA = ',' #: ,
HYPHEN = '-' #: -
PERIOD = '.' #: .
SLASH = '/' #: /
ZERO = '0' #: 0
ONE = '1' #: 1
TWO = '2' #: 2
THREE = '3' #: 3
FOUR = '4' #: 4
FIVE = '5' #: 5
SIX = '6' #: 6
SEVEN = '7' #: 7
EIGHT = '8' #: 8
NINE = '9' #: 9
COLON = ':' #: :
SEMICOLON = ';' #: ;
LESS_THAN = '<' #: <
EQUALS_SIGN = '=' #: =
GREATER_THAN = '>' #: >
QUESTION_MARK = '?' #: ?
AT_SIGN = '@' #: @
UPPERCASE_A = 'A' #: A
UPPERCASE_B = 'B' #: B
UPPERCASE_C = 'C' #: C
UPPERCASE_D = 'D' #: D
UPPERCASE_E = 'E' #: E
UPPERCASE_F = 'F' #: F
UPPERCASE_G = 'G' #: G
UPPERCASE_H = 'H' #: H
UPPERCASE_I = 'I' #: I
UPPERCASE_J = 'J' #: J
UPPERCASE_K = 'K' #: K
UPPERCASE_L = 'L' #: L
UPPERCASE_M = 'M' #: M
UPPERCASE_N = 'N' #: N
UPPERCASE_O = 'O' #: O
UPPERCASE_P = 'P' #: P
UPPERCASE_Q = 'Q' #: Q
UPPERCASE_R = 'R' #: R
UPPERCASE_S = 'S' #: S
UPPERCASE_T = 'T' #: T
UPPERCASE_U = 'U' #: U
UPPERCASE_V = 'V' #: V
UPPERCASE_W = 'W' #: W
UPPERCASE_X = 'X' #: X
UPPERCASE_Y = 'Y' #: Y
UPPERCASE_Z = 'Z' #: Z
OPENING_SQUARE_BRACKET = '[' #: [
BACKSLASH = '\' #: \
CLOSING_SQUARE_BRACKET = ']' #: ]
CARET = '^' #: ^
UNDERSCORE = '_' #: _
GRAVE_ACCENT = '`' #:
LOWERCASE_A = 'a' #: a
LOWERCASE_B = 'b' #: b
LOWERCASE_C = 'c' #: c
LOWERCASE_D = 'd' #: d
LOWERCASE_E = 'e' #: e
LOWERCASE_F = 'f' #: f
LOWERCASE_G = 'g' #: g
LOWERCASE_H = 'h' #: h
LOWERCASE_I = 'i' #: i
LOWERCASE_J = 'j' #: j
LOWERCASE_K = 'k' #: k
LOWERCASE_L = 'l' #: l
LOWERCASE_M = 'm' #: m
LOWERCASE_N = 'n' #: n
LOWERCASE_O = 'o' #: o
LOWERCASE_P = 'p' #: p
LOWERCASE_Q = 'q' #: q
LOWERCASE_R = 'r' #: r
LOWERCASE_S = 's' #: s
LOWERCASE_T = 't' #: t
LOWERCASE_U = 'u' #: u
LOWERCASE_V = 'v' #: v
LOWERCASE_W = 'w' #: w
LOWERCASE_X = 'x' #: x
LOWERCASE_Y = 'y' #: y
LOWERCASE_Z = 'z' #: z
OPENING_CURLY_BRACE = '{' #: {
LEFT_CURLY_BRACE = '{' #: {
VERTICAL_BAR = '|' #: |
CLOSING_CURLY_BRACE = '}' #: }
RIGHT_CURLY_BRACE = '}' #: }
TILDE = '~' #: ~
# ISO-8859-1 Characters
AGRAVE = 'À' #: À
AACUTE = 'Á' #: Á
ACIRC = 'Â' #: Â
ATILDE = 'Ã' #: Ã
AUML = 'Ä' #: Ä
ARING = 'Å' #: Å
AELIG = 'Æ' #: Æ
CCEDIL = 'Ç' #: Ç
EGRAVE = 'È' #: È
EACUTE = 'É' #: É
ECIRC = 'Ê' #: Ê
EUML = 'Ë' #: Ë
IGRAVE = 'Ì' #: Ì
IACUTE = 'Í' #: Í
ICIRC = 'Î' #: Î
IUML = 'Ï' #: Ï
ETH = 'Ð' #: Ð
NTILDE = 'Ñ' #: Ñ
OGRAVE = 'Ò' #: Ò
OACUTE = 'Ó' #: Ó
OCIRC = 'Ô' #: Ô
OTILDE = 'Õ' #: Õ
OUML = 'Ö' #: Ö
OSLASH = 'Ø' #: Ø
UGRAVE = 'Ù' #: Ù
UACUTE = 'Ú' #: Ú
UCIRC = 'Û' #: Û
UUML = 'Ü' #: Ü
YACUTE = 'Ý' #: Ý
THORN = 'Þ' #: Þ
SZLIG = 'ß' #: ß
AGRAVE = 'à' #: à
AACUTE = 'á' #: á
ACIRC = 'â' #: â
ATILDE = 'ã' #: ã
AUML = 'ä' #: ä
ARING = 'å' #: å
AELIG = 'æ' #: æ
CCEDIL = 'ç' #: ç
EGRAVE = 'è' #: è
EACUTE = 'é' #: é
ECIRC = 'ê' #: ê
EUML = 'ë' #: ë
IGRAVE = 'ì' #: ì
IACUTE = 'í' #: í
ICIRC = 'î' #: î
IUML = 'ï' #: ï
ETH = 'ð' #: ð
NTILDE = 'ñ' #: ñ
OGRAVE = 'ò' #: ò
OACUTE = 'ó' #: ó
OCIRC = 'ô' #: ô
OTILDE = 'õ' #: õ
OUML = 'ö' #: ö
OSLASH = 'ø' #: ø
UGRAVE = 'ù' #: ù
UACUTE = 'ú' #: ú
UCIRC = 'û' #: û
UUML = 'ü' #: ü
YACUTE = 'ý' #: ý
THORN = 'þ' #: þ
YUML = 'ÿ' #: ÿ
# ISO-8859-1 Symbols
NBSP = ' ' #:
IEXCL = '¡' #: ¡
CENT = '¢' #: ¢
POUND = '£' #: £
CURREN = '¤' #: ¤
YEN = '¥' #: ¥
BRVBAR = '¦' #: ¦
SECT = '§' #: §
UML = '¨' #: ¨
COPY = '©' #: ©
COPYRIGHT = '©' #: ©
ORDF = 'ª' #: ª
LAQUO = '«' #: «
NOT = '¬' #: ¬
# ­ ­ Soft hyphen
REG = '®' #: ®
MACR = '¯' #: ¯
DEG = '°' #: °
PLUSMN = '±' #: ±
SUP2 = '²' #: ²
SUP3 = '³' #: ³
ACUTE = '´' #: ´
MICRO = 'µ' #: µ
PARA = '¶' #: ¶
CEDIL = '¸' #: ¸
SUP1 = '¹' #: ¹
ORDM = 'º' #: º
RAQUO = '»' #: »
FRAC14 = '¼' #: ¼
FRAC12 = '½' #: ½
FRAC34 = '¾' #: ¾
IQUEST = '¿' #: ¿
TIMES = '×' #: ×
DIVIDE = '÷' #: ÷
# Math Symbols
FORALL = '∀' #: ∀
PART = '∂' #: ∂
EXIST = '∃' #: ∃
EMPTY = '∅' #: ∅
NABLA = '∇' #: ∇
ISIN = '∈' #: ∈
NOTIN = '∉' #: ∉
NI = '∋' #: ∋
PROD = '∏' #: ∏
SUM = '∑' #: ∑
MINUS = '−' #: −
LOWAST = '∗' #: ∗
RADIC = '√' #: √
PROP = '∝' #: ∝
INFIN = '∞' #: ∞
ANG = '∠' #: ∠
AND = '∧' #: ∧
OR = '∨' #: ∨
CAP = '∩' #: ∩
CUP = '∪' #: ∪
INT = '∫' #: ∫
THERE4 = '∴' #: ∴
SIM = '∼' #: ∼
CONG = '≅' #: ≅
ASYMP = '≈' #: ≈
NE = '≠' #: ≠
EQUIV = '≡' #: ≡
LE = '≤' #: ≤
GE = '≥' #: ≥
SUB = '⊂' #: ⊂
SUP = '⊃' #: ⊃
NSUB = '⊄' #: ⊄
SUBE = '⊆' #: ⊆
SUPE = '⊇' #: ⊇
OPLUS = '⊕' #: ⊕
OTIMES = '⊗' #: ⊗
PERP = '⊥' #: ⊥
SDOT = '⋅' #: ⋅
# Greek Letters
ALPHA = 'Α' #: Α
BETA = 'Β' #: Β
GAMMA = 'Γ' #: Γ
DELTA = 'Δ' #: Δ
EPSILON = 'Ε' #: Ε
ZETA = 'Ζ' #: Ζ
ETA = 'Η' #: Η
THETA = 'Θ' #: Θ
IOTA = 'Ι' #: Ι
KAPPA = 'Κ' #: Κ
LAMBDA = 'Λ' #: Λ
MU = 'Μ' #: Μ
NU = 'Ν' #: Ν
XI = 'Ξ' #: Ξ
OMICRON = 'Ο' #: Ο
PI = 'Π' #: Π
RHO = 'Ρ' #: Ρ
SIGMA = 'Σ' #: Σ
TAU = 'Τ' #: Τ
UPSILON = 'Υ' #: Υ
PHI = 'Φ' #: Φ
CHI = 'Χ' #: Χ
PSI = 'Ψ' #: Ψ
OMEGA = 'Ω' #: Ω
ALPHA = 'α' #: α
BETA = 'β' #: β
GAMMA = 'γ' #: γ
DELTA = 'δ' #: δ
EPSILON = 'ε' #: ε
ZETA = 'ζ' #: ζ
ETA = 'η' #: η
THETA = 'θ' #: θ
IOTA = 'ι' #: ι
KAPPA = 'κ' #: κ
LAMBDA = 'λ' #: λ
MU = 'μ' #: μ
NU = 'ν' #: ν
XI = 'ξ' #: ξ
OMICRON = 'ο' #: ο
PI = 'π' #: π
RHO = 'ρ' #: ρ
SIGMAF = 'ς' #: ς
SIGMA = 'σ' #: σ
TAU = 'τ' #: τ
UPSILON = 'υ' #: υ
PHI = 'φ' #: φ
CHI = 'χ' #: χ
PSI = 'ψ' #: ψ
OMEGA = 'ω' #: ω
THETASYM = 'ϑ' #: ϑ
UPSIH = 'ϒ' #: ϒ
PIV = 'ϖ' #: ϖ
OELIG = 'Œ' #: Œ
oeLIG = 'œ' #: œ
SCARON = 'Š' #: Š
Scaron = 'Š' #: Š
scaron = 'š' #: š
YUML = 'Ÿ' #: Ÿ
FNOF = 'ƒ' #: ƒ
CIRC = 'ˆ' #: ˆ
TILDE = '˜' #: ˜
#     En space
#     Em space
#     Thin space
# ‌ ‌ Zero width non-joiner
# ‍ ‍ Zero width joiner
# ‎ ‎ Left-to-right mark
# ‏ ‏ Right-to-left mark
NDASH = '–' #: –
MDASH = '—' #: —
LSQUO = '‘' #: ‘
RSQUO = '’' #: ’
SBQUO = '‚' #: ‚
LDQUO = '“' #: “
RDQUO = '”' #: ”
BDQUO = '„' #: „
DAGGER = '†' #: †
DAGGER = '‡' #: ‡
BULL = '•' #: •
HELLIP = '…' #: …
PERMIL = '‰' #: ‰
PRIME = '′' #: ′
PRIME = '″' #: ″
LSAQUO = '‹' #: ‹
RSAQUO = '›' #: ›
OLINE = '‾' #: ‾
EURO = '€' #: €
TRADE = '™' #: ™
TRADEMARK = '™' #: ™
# ARROWS
LARR = '←' #: ←
LEFT = '←' #: ←
UARR = '↑' #: ↑
UP = '↑' #: ↑
RARR = '→' #: →
RIGHT = '→' #: →
DARR = '↓' #: ↓
DOWN = '↓' #: ↓
HARR = '↔' #: ↔
CRARR = '↵' #: ↵
LCEIL = '⌈' #: ⌈
RCEIL = '⌉' #: ⌉
LFLOOR = '⌊' #: ⌊
RFLOOR = '⌋' #: ⌋
LOZ = '◊' #: ◊
SPADES = '♠' #: ♠
CLUBS = '♣' #: ♣
HEARTS = '♥' #: ♥
DIAMS = '♦' #: ♦
DIAMONDS = '♦' #: ♦
SUNG = '♪' #: ♪
FLAT = '♭' #: ♭
NATUR = '♮' #: ♮
NATURAL = '♮' #: ♮
SHARP = '♯' #: ♯
CHECK = "✓" #: ✓
CHECKMARK = "✓" #: ✓
TICK = "✓" #: ✓
CROSS = "✗" #: ✗
OHM = 'Ω' #: Ω
MHO = '℧' #: ℧
FRAC13 = '⅓' #: ⅓
FRAC23 = '⅔' #: ⅔
FRAC15 = '⅕' #: ⅕
FRAC25 = '⅖' #: ⅖
FRAC35 = '⅗' #: ⅗
FRAC45 = '⅘' #: ⅘
FRAC16 = '⅙' #: ⅙
FRAC56 = '⅚' #: ⅚
FRAC18 = '⅛' #: ⅛
FRAC38 = '⅜' #: ⅜
FRAC58 = '⅝' #: ⅝
FRAC78 = '⅞' #: ⅞
STAR = "☆" #: ☆
STARF = "★" #: ★
BIGSTAR = "★"
PHONE = "☎" #: ☎
FEMALE = "♀" #: ♀
MALE = "♂" #: ♂
| ja | 0.398194 | domonic.constants.entities ==================================== # def __repr__(self): # return self.character # web # ASCII Characters (Printable) #32;' #33;' #: ! #34;' #: " #35;' #: # #36;' #: $ #37;' #: % #: & #39;' #: ' #40;' #: ( #40;' #: ( #41;' #: ) #41;' #: ) #42;' #: * #43;' #: + #44;' #: , #45;' #: - #46;' #: . #47;' #: / #48;' #: 0 #49;' #: 1 #50;' #: 2 #51;' #: 3 #52;' #: 4 #53;' #: 5 #54;' #: 6 #55;' #: 7 #56;' #: 8 #57;' #: 9 #58;' #: : #59;' #: ; #: < #61;' #: = #: > #63;' #: ? #64;' #: @ #65;' #: A #66;' #: B #67;' #: C #68;' #: D #69;' #: E #70;' #: F #71;' #: G #72;' #: H #73;' #: I #74;' #: J #75;' #: K #76;' #: L #77;' #: M #78;' #: N #79;' #: O #80;' #: P #81;' #: Q #82;' #: R #83;' #: S #84;' #: T #85;' #: U #86;' #: V #87;' #: W #88;' #: X #89;' #: Y #90;' #: Z #91;' #: [ #92;' #: \ #93;' #: ] #94;' #: ^ #95;' #: _ #96;' #: #97;' #: a #98;' #: b #99;' #: c #100;' #: d #101;' #: e #102;' #: f #103;' #: g #104;' #: h #105;' #: i #106;' #: j #107;' #: k #108;' #: l #109;' #: m #110;' #: n #111;' #: o #112;' #: p #113;' #: q #114;' #: r #115;' #: s #116;' #: t #117;' #: u #118;' #: v #119;' #: w #120;' #: x #121;' #: y #122;' #: z #123;' #: { #123;' #: { #124;' #: | #125;' #: } #125;' #: } #126;' #: ~ # ISO-8859-1 Characters #: À #: Á #:  #: à #: Ä #: Å #: Æ #: Ç #: È #: É #: Ê #: Ë #: Ì #: Í #: Î #: Ï #: Ð #: Ñ #: Ò #: Ó #: Ô #: Õ #: Ö #: Ø #: Ù #: Ú #: Û #: Ü #: Ý #: Þ #: ß #: à #: á #: â #: ã #: ä #: å #: æ #: ç #: è #: é #: ê #: ë #: ì #: í #: î #: ï #: ð #: ñ #: ò #: ó #: ô #: õ #: ö #: ø #: ù #: ú #: û #: ü #: ý #: þ #: ÿ # ISO-8859-1 Symbols #: #: ¡ #: ¢ #: £ #: ¤ #: ¥ #: ¦ #: § #: ¨ #: © #: © #: ª #: « #: ¬ # ­ ­ Soft hyphen #: ® #: ¯ #: ° #: ± #: ² #: ³ #: ´ #: µ #: ¶ #: ¸ #: ¹ #: º #: » #: ¼ #: ½ #: ¾ #: ¿ #: × #: ÷ # Math Symbols #: ∀ #: ∂ #: ∃ #: ∅ #: ∇ #: ∈ #: ∉ #: ∋ #: ∏ #: ∑ #: − #: ∗ #: √ #: ∝ #: ∞ #: ∠ #: ∧ #: ∨ #: ∩ #: ∪ #: ∫ #: ∴ #: ∼ #: ≅ #: ≈ #: ≠ #: ≡ #: ≤ #: ≥ #: ⊂ #: ⊃ #: ⊄ #: ⊆ #: ⊇ #: ⊕ #: ⊗ #: ⊥ #: ⋅ # 
Greek Letters #: Α #: Β #: Γ #: Δ #: Ε #: Ζ #: Η #: Θ #: Ι #: Κ #: Λ #: Μ #: Ν #: Ξ #: Ο #: Π #: Ρ #: Σ #: Τ #: Υ #: Φ #: Χ #: Ψ #: Ω #: α #: β #: γ #: δ #: ε #: ζ #: η #: θ #: ι #: κ #: λ #: μ #: ν #: ξ #: ο #: π #: ρ #: ς #: σ #: τ #: υ #: φ #: χ #: ψ #: ω #: ϑ #: ϒ #: ϖ #: Œ #: œ #: Š #: Š #: š #: Ÿ #: ƒ #: ˆ #: ˜ #     En space #     Em space #     Thin space # ‌ ‌ Zero width non-joiner # ‍ ‍ Zero width joiner # ‎ ‎ Left-to-right mark # ‏ ‏ Right-to-left mark #: – #: — #: ‘ #: ’ #: ‚ #: “ #: ” #: „ #: † #: ‡ #: • #: … #: ‰ #: ′ #: ″ #: ‹ #: › #: ‾ #: € #: ™ #: ™ # ARROWS #: ← #: ← #: ↑ #: ↑ #: → #: → #: ↓ #: ↓ #: ↔ #: ↵ #: ⌈ #: ⌉ #: ⌊ #: ⌋ #: ◊ #: ♠ #: ♣ #: ♥ #: ♦ #: ♦ #: ♪ #: ♭ #: ♮ #: ♮ #: ♯ #: ✓ #: ✓ #: ✓ #: ✗ #: Ω #: ℧ #: ⅓ #: ⅔ #: ⅕ #: ⅖ #: ⅗ #: ⅘ #: ⅙ #: ⅚ #: ⅛ #: ⅜ #: ⅝ #: ⅞ #: ☆ #: ★ #: ☎ #: ♀ #: ♂ | 2.963118 | 3 |
scripts/postprocess_midas_data.py | sarahbald/BIG_2021_microbiome_evolution | 0 | 6625215 | <reponame>sarahbald/BIG_2021_microbiome_evolution<filename>scripts/postprocess_midas_data.py
#!/usr/bin/env python
### This script runs the necessary post-processing of the MIDAS output so that we can start analyzing
import os
import sys
import parse_midas_data
########################################################################################
#
# Standard header to read in argument information
#
########################################################################################
if len(sys.argv)>1:
if len(sys.argv)>2:
debug=True # debug does nothing in this script
species_name=sys.argv[2]
else:
debug=False
species_name=sys.argv[1]
else:
sys.stderr.write("Usage: python postprocess_midas_data.py [debug] species_name")
########################################################################################
sys.stderr.write('Postprocessing species: %s\n' % species_name)
# the following creates this file: marker_coverage.txt.bz2
# It consists of a line recapitulating MIDAS output in terms of coverage for the species of interest
# It also outputs a line summing over the coverage across all species for each sample.
sys.stderr.write('Calculating species-specific marker gene coverages...\n')
#os.system('python %scalculate_marker_gene_coverage.py %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating species-specific marker gene coverages!\n')
# the following step outputs three files:
# 1) coverage distribution for each sample without respect to prevalence of a site (full_coverage_distribution.txt.bz2)
# 2) coverage distribution for each sample with respect to prevalence (coverage_distribution.txt.bz2)
# 3) coverage distribution for each gene x sample using the reads from the SNPs output (gene_coverage.txt.bz2)
sys.stderr.write('Calculating coverage distributions...\n')
#os.system('python %scalculate_coverage_distribution.py %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating coverage distribution!\n')
# Calculate error pvalues
# this produces the file annotated_snps.txt.bz2, which contains SNPs that fall between 0.3*median and 3*median, where median=median coverage of a SNP in a sample. The output is in the form of Alt, Ref, where Ref=consensus allele across samples (so, the output is polarized relative to the major allele in the sample).
sys.stderr.write('Calculating error pvalues...\n')
#os.system('python %scalculate_error_pvalues.py %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating error pvalues!\n')
# Calculate snp prevalences
# this produces a list in snp_prevalences/ directory to be loaded later
# (can disable this and supply the list externally.)
sys.stderr.write('Calculating SNP prevalences...\n')
#os.system('python %scalculate_snp_prevalences.py %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating SNP prevalences!\n')
# Calculate within person SFS
# this produces within_sample_sfs.txt.bz2.
sys.stderr.write('Calculating within-sample SFSs...\n')
#os.system('python %scalculate_within_person_sfs.py %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating within-sample SFSs!\n')
# Calculate substitution rates between samples
sys.stderr.write('Calculating substitution rates...\n')
#os.system('python %scalculate_substitution_rates.py %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating substitution rates!\n')
# Calculate singleton substitution rates
sys.stderr.write('Calculating singleton rates...\n')
#os.system('python %scalculate_singletons.py %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating singleton rates!\n')
# Calculate private SNVs
sys.stderr.write('Calculating private SNVs...\n')
#os.system('python %scalculate_private_snvs.py %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating private SNVs!\n')
# Calculate temporal changes
#sys.stderr.write('Calculating temporal changes...\n')
#os.system('python %scalculate_temporal_changes.py %s' % (parse_midas_data.scripts_directory, species_name))
#sys.stderr.write('Done calculating temporal changes!\n')
# Calculate SNV inconsistency (for recombination figure
sys.stderr.write('Calculating SNV inconsistency...\n')
#os.system('python %scalculate_snv_distances.py --species %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating SNV inconsistency!\n')
# Calculating linkage disequilibrium inconsistency (for recombination figure
sys.stderr.write('Calculating LD...\n')
os.system('python %scalculate_linkage_disequilibria.py --species %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write("Done!\n")
sys.stderr.write("Done postprocessing %s!\n\n" % species_name)
| #!/usr/bin/env python
### This script runs the necessary post-processing of the MIDAS output so that we can start analyzing
import os
import sys
import parse_midas_data
########################################################################################
#
# Standard header to read in argument information
#
########################################################################################
if len(sys.argv)>1:
if len(sys.argv)>2:
debug=True # debug does nothing in this script
species_name=sys.argv[2]
else:
debug=False
species_name=sys.argv[1]
else:
sys.stderr.write("Usage: python postprocess_midas_data.py [debug] species_name")
########################################################################################
sys.stderr.write('Postprocessing species: %s\n' % species_name)
# the following creates this file: marker_coverage.txt.bz2
# It consists of a line recapitulating MIDAS output in terms of coverage for the species of interest
# It also outputs a line summing over the coverage across all species for each sample.
sys.stderr.write('Calculating species-specific marker gene coverages...\n')
#os.system('python %scalculate_marker_gene_coverage.py %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating species-specific marker gene coverages!\n')
# the following step outputs three files:
# 1) coverage distribution for each sample without respect to prevalence of a site (full_coverage_distribution.txt.bz2)
# 2) coverage distribution for each sample with respect to prevalence (coverage_distribution.txt.bz2)
# 3) coverage distribution for each gene x sample using the reads from the SNPs output (gene_coverage.txt.bz2)
sys.stderr.write('Calculating coverage distributions...\n')
#os.system('python %scalculate_coverage_distribution.py %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating coverage distribution!\n')
# Calculate error pvalues
# this produces the file annotated_snps.txt.bz2, which contains SNPs that fall between 0.3*median and 3*median, where median=median coverage of a SNP in a sample. The output is in the form of Alt, Ref, where Ref=consensus allele across samples (so, the output is polarized relative to the major allele in the sample).
sys.stderr.write('Calculating error pvalues...\n')
#os.system('python %scalculate_error_pvalues.py %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating error pvalues!\n')
# Calculate snp prevalences
# this produces a list in snp_prevalences/ directory to be loaded later
# (can disable this and supply the list externally.)
sys.stderr.write('Calculating SNP prevalences...\n')
#os.system('python %scalculate_snp_prevalences.py %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating SNP prevalences!\n')
# Calculate within person SFS
# this produces within_sample_sfs.txt.bz2.
sys.stderr.write('Calculating within-sample SFSs...\n')
#os.system('python %scalculate_within_person_sfs.py %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating within-sample SFSs!\n')
# Calculate substitution rates between samples
sys.stderr.write('Calculating substitution rates...\n')
#os.system('python %scalculate_substitution_rates.py %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating substitution rates!\n')
# Calculate singleton substitution rates
sys.stderr.write('Calculating singleton rates...\n')
#os.system('python %scalculate_singletons.py %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating singleton rates!\n')
# Calculate private SNVs
sys.stderr.write('Calculating private SNVs...\n')
#os.system('python %scalculate_private_snvs.py %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating private SNVs!\n')
# Calculate temporal changes
#sys.stderr.write('Calculating temporal changes...\n')
#os.system('python %scalculate_temporal_changes.py %s' % (parse_midas_data.scripts_directory, species_name))
#sys.stderr.write('Done calculating temporal changes!\n')
# Calculate SNV inconsistency (for recombination figure
sys.stderr.write('Calculating SNV inconsistency...\n')
#os.system('python %scalculate_snv_distances.py --species %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write('Done calculating SNV inconsistency!\n')
# Calculating linkage disequilibrium inconsistency (for recombination figure
sys.stderr.write('Calculating LD...\n')
os.system('python %scalculate_linkage_disequilibria.py --species %s' % (parse_midas_data.scripts_directory, species_name))
sys.stderr.write("Done!\n")
sys.stderr.write("Done postprocessing %s!\n\n" % species_name) | en | 0.469975 | #!/usr/bin/env python ### This script runs the necessary post-processing of the MIDAS output so that we can start analyzing ######################################################################################## # # Standard header to read in argument information # ######################################################################################## # debug does nothing in this script ######################################################################################## # the following creates this file: marker_coverage.txt.bz2 # It consists of a line recapitulating MIDAS output in terms of coverage for the species of interest # It also outputs a line summing over the coverage across all species for each sample. #os.system('python %scalculate_marker_gene_coverage.py %s' % (parse_midas_data.scripts_directory, species_name)) # the following step outputs three files: # 1) coverage distribution for each sample without respect to prevalence of a site (full_coverage_distribution.txt.bz2) # 2) coverage distribution for each sample with respect to prevalence (coverage_distribution.txt.bz2) # 3) coverage distribution for each gene x sample using the reads from the SNPs output (gene_coverage.txt.bz2) #os.system('python %scalculate_coverage_distribution.py %s' % (parse_midas_data.scripts_directory, species_name)) # Calculate error pvalues # this produces the file annotated_snps.txt.bz2, which contains SNPs that fall between 0.3*median and 3*median, where median=median coverage of a SNP in a sample. The output is in the form of Alt, Ref, where Ref=consensus allele across samples (so, the output is polarized relative to the major allele in the sample). 
#os.system('python %scalculate_error_pvalues.py %s' % (parse_midas_data.scripts_directory, species_name)) # Calculate snp prevalences # this produces a list in snp_prevalences/ directory to be loaded later # (can disable this and supply the list externally.) #os.system('python %scalculate_snp_prevalences.py %s' % (parse_midas_data.scripts_directory, species_name)) # Calculate within person SFS # this produces within_sample_sfs.txt.bz2. #os.system('python %scalculate_within_person_sfs.py %s' % (parse_midas_data.scripts_directory, species_name)) # Calculate substitution rates between samples #os.system('python %scalculate_substitution_rates.py %s' % (parse_midas_data.scripts_directory, species_name)) # Calculate singleton substitution rates #os.system('python %scalculate_singletons.py %s' % (parse_midas_data.scripts_directory, species_name)) # Calculate private SNVs #os.system('python %scalculate_private_snvs.py %s' % (parse_midas_data.scripts_directory, species_name)) # Calculate temporal changes #sys.stderr.write('Calculating temporal changes...\n') #os.system('python %scalculate_temporal_changes.py %s' % (parse_midas_data.scripts_directory, species_name)) #sys.stderr.write('Done calculating temporal changes!\n') # Calculate SNV inconsistency (for recombination figure #os.system('python %scalculate_snv_distances.py --species %s' % (parse_midas_data.scripts_directory, species_name)) # Calculating linkage disequilibrium inconsistency (for recombination figure | 2.644365 | 3 |
src/VioNet/models/anomaly_detector.py | davidGCR/VioDenseDuplication | 3 | 6625216 | import torch
from torch import nn
class AnomalyDetector(nn.Module):
def __init__(self, input_dim=4096):
super(AnomalyDetector, self).__init__()
self.fc1 = nn.Linear(input_dim, 128) #original was 512
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(0.6)
self.fc2 = nn.Linear(128, 32)
self.dropout2 = nn.Dropout(0.6)
self.fc3 = nn.Linear(32, 1)
self.sig = nn.Sigmoid()
# In the original keras code they use "glorot_normal"
# As I understand, this is the same as xavier normal in Pytorch
nn.init.xavier_normal_(self.fc1.weight)
nn.init.xavier_normal_(self.fc2.weight)
nn.init.xavier_normal_(self.fc3.weight)
def forward(self, x):
print('fc1 input: ', x.size())
x = self.fc1(x)
print('fc1 out: ', x.size())
x = self.relu1(x)
# x = self.dropout1(x)
# x = self.dropout1(self.relu1(self.fc1(x)))
x = self.dropout2(self.fc2(x))
x = self.sig(self.fc3(x))
return x
class RegularizedLoss(torch.nn.Module):
def __init__(self, model, original_objective, lambdas=0.001):
super(RegularizedLoss, self).__init__()
self.lambdas = lambdas
self.model = model
self.objective = original_objective
def forward(self, y_pred, y_true):
# loss
# Our loss is defined with respect to l2 regularization, as used in the original keras code
fc1_params = torch.cat(tuple([x.view(-1) for x in self.model.fc1.parameters()]))
fc2_params = torch.cat(tuple([x.view(-1) for x in self.model.fc2.parameters()]))
fc3_params = torch.cat(tuple([x.view(-1) for x in self.model.fc3.parameters()]))
l1_regularization = self.lambdas * torch.norm(fc1_params, p=2)
l2_regularization = self.lambdas * torch.norm(fc2_params, p=2)
l3_regularization = self.lambdas * torch.norm(fc3_params, p=2)
return self.objective(y_pred, y_true) + l1_regularization + l2_regularization + l3_regularization
def custom_objective(y_pred, y_true):
# print("y_true:", y_true, y_true.size())
# print("y_pred:", y_pred.size())
# y_pred (batch_size, 32, 1)
# y_true (batch_size)
lambdas = 8e-5
normal_vids_indices = (y_true == 0).nonzero().flatten()
anomal_vids_indices = (y_true == 1).nonzero().flatten()
print("normal_vids_indices:", normal_vids_indices)
print("anomal_vids_indices:", anomal_vids_indices)
normal_segments_scores = y_pred[normal_vids_indices] # (batch/2, 32, 1)
anomal_segments_scores = y_pred[anomal_vids_indices] # (batch/2, 32, 1)
print("normal_segments_scores:", normal_segments_scores)
print("anomal_segments_scores:", anomal_segments_scores)
# just for reducing the last dimension
normal_segments_scores = torch.sum(normal_segments_scores, dim=(-1,)) # (batch/2, 32)
anomal_segments_scores = torch.sum(anomal_segments_scores, dim=(-1,)) # (batch/2, 32)
# get the max score for each video
normal_segments_scores_maxes = normal_segments_scores.max(dim=-1)[0]
anomal_segments_scores_maxes = anomal_segments_scores.max(dim=-1)[0]
hinge_loss = 1 - anomal_segments_scores_maxes + normal_segments_scores_maxes
hinge_loss = torch.max(hinge_loss, torch.zeros_like(hinge_loss))
"""
Smoothness of anomalous video
"""
smoothed_scores = anomal_segments_scores[:, 1:] - anomal_segments_scores[:, :-1]
smoothed_scores_squared = smoothed_scores.pow(2)
smoothness_loss = smoothed_scores_squared.sum(dim=-1)
"""
Sparsity of anomalous video
"""
sparsity_loss = anomal_segments_scores.sum(dim=-1)
final_loss = (hinge_loss + lambdas*smoothness_loss + lambdas*sparsity_loss).mean()
return final_loss
if __name__=='__main__':
print('___Anomaly_Detector___')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
input_dim = 528
detector = AnomalyDetector(input_dim=input_dim).to(device)
input = torch.rand(1,input_dim).to(device)
out = detector(input)
print('out: ', out.size()) | import torch
from torch import nn
class AnomalyDetector(nn.Module):
def __init__(self, input_dim=4096):
super(AnomalyDetector, self).__init__()
self.fc1 = nn.Linear(input_dim, 128) #original was 512
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(0.6)
self.fc2 = nn.Linear(128, 32)
self.dropout2 = nn.Dropout(0.6)
self.fc3 = nn.Linear(32, 1)
self.sig = nn.Sigmoid()
# In the original keras code they use "glorot_normal"
# As I understand, this is the same as xavier normal in Pytorch
nn.init.xavier_normal_(self.fc1.weight)
nn.init.xavier_normal_(self.fc2.weight)
nn.init.xavier_normal_(self.fc3.weight)
def forward(self, x):
print('fc1 input: ', x.size())
x = self.fc1(x)
print('fc1 out: ', x.size())
x = self.relu1(x)
# x = self.dropout1(x)
# x = self.dropout1(self.relu1(self.fc1(x)))
x = self.dropout2(self.fc2(x))
x = self.sig(self.fc3(x))
return x
class RegularizedLoss(torch.nn.Module):
def __init__(self, model, original_objective, lambdas=0.001):
super(RegularizedLoss, self).__init__()
self.lambdas = lambdas
self.model = model
self.objective = original_objective
def forward(self, y_pred, y_true):
# loss
# Our loss is defined with respect to l2 regularization, as used in the original keras code
fc1_params = torch.cat(tuple([x.view(-1) for x in self.model.fc1.parameters()]))
fc2_params = torch.cat(tuple([x.view(-1) for x in self.model.fc2.parameters()]))
fc3_params = torch.cat(tuple([x.view(-1) for x in self.model.fc3.parameters()]))
l1_regularization = self.lambdas * torch.norm(fc1_params, p=2)
l2_regularization = self.lambdas * torch.norm(fc2_params, p=2)
l3_regularization = self.lambdas * torch.norm(fc3_params, p=2)
return self.objective(y_pred, y_true) + l1_regularization + l2_regularization + l3_regularization
def custom_objective(y_pred, y_true):
# print("y_true:", y_true, y_true.size())
# print("y_pred:", y_pred.size())
# y_pred (batch_size, 32, 1)
# y_true (batch_size)
lambdas = 8e-5
normal_vids_indices = (y_true == 0).nonzero().flatten()
anomal_vids_indices = (y_true == 1).nonzero().flatten()
print("normal_vids_indices:", normal_vids_indices)
print("anomal_vids_indices:", anomal_vids_indices)
normal_segments_scores = y_pred[normal_vids_indices] # (batch/2, 32, 1)
anomal_segments_scores = y_pred[anomal_vids_indices] # (batch/2, 32, 1)
print("normal_segments_scores:", normal_segments_scores)
print("anomal_segments_scores:", anomal_segments_scores)
# just for reducing the last dimension
normal_segments_scores = torch.sum(normal_segments_scores, dim=(-1,)) # (batch/2, 32)
anomal_segments_scores = torch.sum(anomal_segments_scores, dim=(-1,)) # (batch/2, 32)
# get the max score for each video
normal_segments_scores_maxes = normal_segments_scores.max(dim=-1)[0]
anomal_segments_scores_maxes = anomal_segments_scores.max(dim=-1)[0]
hinge_loss = 1 - anomal_segments_scores_maxes + normal_segments_scores_maxes
hinge_loss = torch.max(hinge_loss, torch.zeros_like(hinge_loss))
"""
Smoothness of anomalous video
"""
smoothed_scores = anomal_segments_scores[:, 1:] - anomal_segments_scores[:, :-1]
smoothed_scores_squared = smoothed_scores.pow(2)
smoothness_loss = smoothed_scores_squared.sum(dim=-1)
"""
Sparsity of anomalous video
"""
sparsity_loss = anomal_segments_scores.sum(dim=-1)
final_loss = (hinge_loss + lambdas*smoothness_loss + lambdas*sparsity_loss).mean()
return final_loss
if __name__=='__main__':
print('___Anomaly_Detector___')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
input_dim = 528
detector = AnomalyDetector(input_dim=input_dim).to(device)
input = torch.rand(1,input_dim).to(device)
out = detector(input)
print('out: ', out.size()) | en | 0.794814 | #original was 512 # In the original keras code they use "glorot_normal" # As I understand, this is the same as xavier normal in Pytorch # x = self.dropout1(x) # x = self.dropout1(self.relu1(self.fc1(x))) # loss # Our loss is defined with respect to l2 regularization, as used in the original keras code # print("y_true:", y_true, y_true.size()) # print("y_pred:", y_pred.size()) # y_pred (batch_size, 32, 1) # y_true (batch_size) # (batch/2, 32, 1) # (batch/2, 32, 1) # just for reducing the last dimension # (batch/2, 32) # (batch/2, 32) # get the max score for each video Smoothness of anomalous video Sparsity of anomalous video | 3.024478 | 3 |
src/sfctl/helps/cluster_upgrade.py | mrdakj/service-fabric-cli | 17 | 6625217 | # -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
"""Help documentation for Service Fabric cluster upgrade commands"""
from knack.help_files import helps
helps['cluster upgrade'] = """
type: command
short-summary: Start upgrading the code or configuration version of a
Service Fabric cluster
long-summary: Validate the supplied upgrade parameters and start upgrading the code
or configuration version of a Service Fabric cluster if the parameters
are valid.
parameters:
- name: --code-version
type: string
short-summary: The cluster code version
- name: --config-version
type: string
short-summary: The cluster configuration version
- name: --rolling-upgrade-mode
type: string
short-summary: "Possible values include: 'Invalid',
'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'"
- name: --replica-set-check-timeout
type: string
short-summary: The maximum amount of time to block processing of an upgrade domain
and prevent loss of availability when there are unexpected issues.
long-summary: When this timeout expires, processing of the upgrade domain will
proceed regardless of availability loss issues.
The timeout is reset at the start of each upgrade domain.
Valid values are between 0 and 42949672925 inclusive.
- name: --force-restart
type: bool
short-summary: Processes are forcefully restarted during upgrade even when the
code version has not changed
long-summary: The upgrade only changes configuration or data
- name: --failure-action
type: string
short-summary: "Possible values include: 'Invalid', 'Rollback',
'Manual'"
- name: --health-check-wait
type: string
short-summary: The length of time to wait after completing an upgrade domain
before starting the health checks process.
- name: --health-check-stable
type: string
short-summary: The amount of time that the application or cluster must remain healthy
before the upgrade proceeds to the next upgrade domain.
long-summary: It is first interpreted as a string representing an ISO 8601 duration.
If that fails, then it is interpreted as a number representing the total number
of milliseconds.
- name: --health-check-retry
type: string
short-summary: The length of time between attempts to perform health checks if
the application or cluster is not healthy.
- name: --upgrade-timeout
type: string
short-summary: The amount of time the overall upgrade has to complete before
FailureAction is executed.
long-summary: It is first interpreted as a string representing an
ISO 8601 duration. If that fails, then it is interpreted as a number
representing the total number of milliseconds.
- name: --upgrade-domain-timeout
type: string
short-summary: The amount of time each upgrade domain has to complete before
FailureAction is executed.
long-summary: It is first interpreted as a string representing an
ISO 8601 duration. If that fails, then it is interpreted as a number
representing the total number of milliseconds.
- name: --warning-as-error
type: bool
short-summary: Indicates whether warnings are treated with the same severity as errors
- name: --unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of unhealthy nodes
before reporting an error
long-summary: For example, to allow 10% of nodes to be unhealthy,
this value would be 10. The percentage represents the maximum
tolerated percentage of nodes that can be unhealthy before the
cluster is considered in error. If the percentage is respected but
there is at least one unhealthy node, the health is evaluated as
Warning. The percentage is calculated by dividing the number of
unhealthy nodes over the total number of nodes in the cluster. The
computation rounds up to tolerate one failure on small numbers of
nodes. In large clusters, some nodes will always be down or out for
repairs, so this percentage should be configured to tolerate that.
- name: --unhealthy-applications
type: int
short-summary: The maximum allowed percentage of unhealthy
applications before reporting an error
long-summary: For example, to allow 10% of applications to be
unhealthy, this value would be 10. The percentage represents the
maximum tolerated percentage of applications that can be unhealthy
before the cluster is considered in error. If the percentage is
respected but there is at least one unhealthy application, the
health is evaluated as Warning. This is calculated by dividing the
number of unhealthy applications over the total number of
application instances in the cluster, excluding applications of
application types that are included in the
ApplicationTypeHealthPolicyMap. The computation rounds up to
tolerate one failure on small numbers of applications.
- name: --app-type-health-map
type: string
short-summary: JSON encoded dictionary of pairs of application type
name and maximum percentage unhealthy before raising error
- name: --delta-health-evaluation
type: bool
short-summary: Enables delta health evaluation rather than absolute
health evaluation after completion of each upgrade domain
- name: --delta-unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of nodes health
degradation allowed during cluster upgrades
long-summary: The delta is measured between the state of the nodes at
the beginning of upgrade and the state of the nodes at the time of
the health evaluation. The check is performed after every upgrade
domain upgrade completion to make sure the global state of the
cluster is within tolerated limits.
- name: --upgrade-domain-delta-unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of upgrade domain nodes
health degradation allowed during cluster upgrades
long-summary: The delta is measured between the state of the
upgrade domain nodes at the beginning of upgrade and the state of
the upgrade domain nodes at the time of the health evaluation. The
check is performed after every upgrade domain upgrade completion
for all completed upgrade domains to make sure the state of the
upgrade domains is within tolerated limits.
- name: --app-health-map
type: string
short-summary: JSON encoded dictionary of pairs of application name
and maximum percentage unhealthy before raising error
"""
helps['sa-cluster config-upgrade'] = """
type: command
short-summary: Start upgrading the configuration of a Service Fabric
standalone cluster
long-summary:
Validate the supplied configuration upgrade parameters and start
upgrading the cluster configuration if the parameters are valid.
parameters:
- name: --cluster-config
type: string
short-summary: The cluster configuration.
- name: --health-check-retry
type: string
short-summary: The length of time between attempts to perform health checks if
the application or cluster is not healthy.
- name: --health-check-wait
type: string
short-summary: The length of time to wait after completing an upgrade domain
before starting the health checks process.
- name: --health-check-stable
type: string
short-summary: The amount of time that the application or cluster must remain healthy
before the upgrade proceeds to the next upgrade domain.
long-summary: It is first interpreted as a string representing an ISO 8601 duration.
If that fails, then it is interpreted as a number representing the total number
of milliseconds.
- name: --upgrade-domain-timeout
type: string
short-summary: The amount of time each upgrade domain has to complete before
FailureAction is executed.
long-summary: It is first interpreted as a string representing an
ISO 8601 duration. If that fails, then it is interpreted as a number
representing the total number of milliseconds.
- name: --upgrade-timeout
type: string
short-summary: The amount of time the overall upgrade has to complete before
FailureAction is executed.
long-summary: It is first interpreted as a string representing an
ISO 8601 duration. If that fails, then it is interpreted as a number
representing the total number of milliseconds.
- name: --unhealthy-applications
type: int
short-summary: The maximum allowed percentage of unhealthy
applications during the upgrade. Allowed values are integer values
from zero to 100.
- name: --unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of unhealthy nodes
during the upgrade. Allowed values are integer values from zero
to 100.
- name: --delta-unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of delta health
degradation during the upgrade. Allowed values are integer values
from zero to 100.
- name: --upgrade-domain-delta-unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of upgrade domain delta
health degradation during the upgrade. Allowed values are integer
values from zero to 100.
- name: --application-health-policies
type: string
short-summary: JSON encoded dictionary of pairs of application type
name and maximum percentage unhealthy before raising error
examples:
- name: Start a cluster configuration update
text: sfctl sa-cluster config-upgrade --cluster-config <YOUR CLUSTER CONFIG> --application-health-policies "{\"fabric:/System\":{\"ConsiderWarningAsError\":true}}"
"""
helps['cluster upgrade-update'] = """
type: command
short-summary: Update the upgrade parameters of a Service Fabric cluster
upgrade
parameters:
- name: --upgrade-kind
type: string
short-summary: "Possible values include: 'Invalid', 'Rolling',
'Rolling_ForceRestart'"
- name: --rolling-upgrade-mode
type: string
short-summary: "Possible values include: 'Invalid',
'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'"
- name: --replica-set-check-timeout
type: string
short-summary: The maximum amount of time to block processing of an upgrade domain
and prevent loss of availability when there are unexpected issues.
long-summary: When this timeout expires, processing of the upgrade domain will
proceed regardless of availability loss issues.
The timeout is reset at the start of each upgrade domain.
Valid values are between 0 and 42949672925 inclusive.
- name: --force-restart
type: bool
short-summary: Processes are forcefully restarted during upgrade even when the
code version has not changed
long-summary: The upgrade only changes configuration or data
- name: --failure-action
type: string
short-summary: "Possible values include: 'Invalid', 'Rollback',
'Manual'"
- name: --health-check-wait
type: string
short-summary: The length of time to wait after completing an upgrade domain
before starting the health checks process.
- name: --health-check-stable
type: string
short-summary: The amount of time that the application or cluster must remain healthy
before the upgrade proceeds to the next upgrade domain.
long-summary: It is first interpreted as a string representing an ISO 8601 duration.
If that fails, then it is interpreted as a number representing the total number
of milliseconds.
- name: --health-check-retry
type: string
short-summary: The length of time between attempts to perform health checks if
the application or cluster is not healthy.
- name: --upgrade-timeout
type: string
short-summary: The amount of time the overall upgrade has to complete before
FailureAction is executed.
long-summary: It is first interpreted as a string representing an
ISO 8601 duration. If that fails, then it is interpreted as a number
representing the total number of milliseconds.
- name: --upgrade-domain-timeout
type: string
short-summary: The amount of time each upgrade domain has to complete before
FailureAction is executed.
long-summary: It is first interpreted as a string representing an
ISO 8601 duration. If that fails, then it is interpreted as a number
representing the total number of milliseconds.
- name: --warning-as-error
type: bool
short-summary: Indicates whether warnings are treated with the same severity as errors
- name: --unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of unhealthy nodes
before reporting an error
long-summary: For example, to allow 10% of nodes to be unhealthy,
this value would be 10. The percentage represents the maximum
tolerated percentage of nodes that can be unhealthy before the
cluster is considered in error. If the percentage is respected but
there is at least one unhealthy node, the health is evaluated as
Warning. The percentage is calculated by dividing the number of
unhealthy nodes over the total number of nodes in the cluster. The
computation rounds up to tolerate one failure on small numbers of
nodes. In large clusters, some nodes will always be down or out for
repairs, so this percentage should be configured to tolerate that.
- name: --unhealthy-applications
type: int
short-summary: The maximum allowed percentage of unhealthy
applications before reporting an error
long-summary: For example, to allow 10% of applications to be
unhealthy, this value would be 10. The percentage represents the
maximum tolerated percentage of applications that can be unhealthy
before the cluster is considered in error. If the percentage is
respected but there is at least one unhealthy application, the
health is evaluated as Warning. This is calculated by dividing the
number of unhealthy applications over the total number of
application instances in the cluster, excluding applications of
application types that are included in the
ApplicationTypeHealthPolicyMap. The computation rounds up to
tolerate one failure on small numbers of applications.
- name: --app-type-health-map
type: string
short-summary: JSON encoded dictionary of pairs of application type
name and maximum percentage unhealthy before raising error
- name: --delta-health-evaluation
type: bool
short-summary: Enables delta health evaluation rather than absolute
health evaluation after completion of each upgrade domain
- name: --delta-unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of nodes health
degradation allowed during cluster upgrades
long-summary: The delta is measured between the state of the nodes at
the beginning of upgrade and the state of the nodes at the time of
the health evaluation. The check is performed after every upgrade
domain upgrade completion to make sure the global state of the
cluster is within tolerated limits.
- name: --upgrade-domain-delta-unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of upgrade domain nodes
health degradation allowed during cluster upgrades
long-summary: The delta is measured between the state of the
upgrade domain nodes at the beginning of upgrade and the state of
the upgrade domain nodes at the time of the health evaluation. The
check is performed after every upgrade domain upgrade completion
for all completed upgrade domains to make sure the state of the
upgrade domains is within tolerated limits.
- name: --app-health-map
type: string
short-summary: JSON encoded dictionary of pairs of application name
and maximum percentage unhealthy before raising error
"""
| # -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
"""Help documentation for Service Fabric cluster upgrade commands"""
from knack.help_files import helps
helps['cluster upgrade'] = """
type: command
short-summary: Start upgrading the code or configuration version of a
Service Fabric cluster
long-summary: Validate the supplied upgrade parameters and start upgrading the code
or configuration version of a Service Fabric cluster if the parameters
are valid.
parameters:
- name: --code-version
type: string
short-summary: The cluster code version
- name: --config-version
type: string
short-summary: The cluster configuration version
- name: --rolling-upgrade-mode
type: string
short-summary: "Possible values include: 'Invalid',
'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'"
- name: --replica-set-check-timeout
type: string
short-summary: The maximum amount of time to block processing of an upgrade domain
and prevent loss of availability when there are unexpected issues.
long-summary: When this timeout expires, processing of the upgrade domain will
proceed regardless of availability loss issues.
The timeout is reset at the start of each upgrade domain.
Valid values are between 0 and 42949672925 inclusive.
- name: --force-restart
type: bool
short-summary: Processes are forcefully restarted during upgrade even when the
code version has not changed
long-summary: The upgrade only changes configuration or data
- name: --failure-action
type: string
short-summary: "Possible values include: 'Invalid', 'Rollback',
'Manual'"
- name: --health-check-wait
type: string
short-summary: The length of time to wait after completing an upgrade domain
before starting the health checks process.
- name: --health-check-stable
type: string
short-summary: The amount of time that the application or cluster must remain healthy
before the upgrade proceeds to the next upgrade domain.
long-summary: It is first interpreted as a string representing an ISO 8601 duration.
If that fails, then it is interpreted as a number representing the total number
of milliseconds.
- name: --health-check-retry
type: string
short-summary: The length of time between attempts to perform health checks if
the application or cluster is not healthy.
- name: --upgrade-timeout
type: string
short-summary: The amount of time the overall upgrade has to complete before
FailureAction is executed.
long-summary: It is first interpreted as a string representing an
ISO 8601 duration. If that fails, then it is interpreted as a number
representing the total number of milliseconds.
- name: --upgrade-domain-timeout
type: string
short-summary: The amount of time each upgrade domain has to complete before
FailureAction is executed.
long-summary: It is first interpreted as a string representing an
ISO 8601 duration. If that fails, then it is interpreted as a number
representing the total number of milliseconds.
- name: --warning-as-error
type: bool
short-summary: Indicates whether warnings are treated with the same severity as errors
- name: --unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of unhealthy nodes
before reporting an error
long-summary: For example, to allow 10% of nodes to be unhealthy,
this value would be 10. The percentage represents the maximum
tolerated percentage of nodes that can be unhealthy before the
cluster is considered in error. If the percentage is respected but
there is at least one unhealthy node, the health is evaluated as
Warning. The percentage is calculated by dividing the number of
unhealthy nodes over the total number of nodes in the cluster. The
computation rounds up to tolerate one failure on small numbers of
nodes. In large clusters, some nodes will always be down or out for
repairs, so this percentage should be configured to tolerate that.
- name: --unhealthy-applications
type: int
short-summary: The maximum allowed percentage of unhealthy
applications before reporting an error
long-summary: For example, to allow 10% of applications to be
unhealthy, this value would be 10. The percentage represents the
maximum tolerated percentage of applications that can be unhealthy
before the cluster is considered in error. If the percentage is
respected but there is at least one unhealthy application, the
health is evaluated as Warning. This is calculated by dividing the
number of unhealthy applications over the total number of
application instances in the cluster, excluding applications of
application types that are included in the
ApplicationTypeHealthPolicyMap. The computation rounds up to
tolerate one failure on small numbers of applications.
- name: --app-type-health-map
type: string
short-summary: JSON encoded dictionary of pairs of application type
name and maximum percentage unhealthy before raising error
- name: --delta-health-evaluation
type: bool
short-summary: Enables delta health evaluation rather than absolute
health evaluation after completion of each upgrade domain
- name: --delta-unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of nodes health
degradation allowed during cluster upgrades
long-summary: The delta is measured between the state of the nodes at
the beginning of upgrade and the state of the nodes at the time of
the health evaluation. The check is performed after every upgrade
domain upgrade completion to make sure the global state of the
cluster is within tolerated limits.
- name: --upgrade-domain-delta-unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of upgrade domain nodes
health degradation allowed during cluster upgrades
long-summary: The delta is measured between the state of the
upgrade domain nodes at the beginning of upgrade and the state of
the upgrade domain nodes at the time of the health evaluation. The
check is performed after every upgrade domain upgrade completion
for all completed upgrade domains to make sure the state of the
upgrade domains is within tolerated limits.
- name: --app-health-map
type: string
short-summary: JSON encoded dictionary of pairs of application name
and maximum percentage unhealthy before raising error
"""
helps['sa-cluster config-upgrade'] = """
type: command
short-summary: Start upgrading the configuration of a Service Fabric
standalone cluster
long-summary:
Validate the supplied configuration upgrade parameters and start
upgrading the cluster configuration if the parameters are valid.
parameters:
- name: --cluster-config
type: string
short-summary: The cluster configuration.
- name: --health-check-retry
type: string
short-summary: The length of time between attempts to perform health checks if
the application or cluster is not healthy.
- name: --health-check-wait
type: string
short-summary: The length of time to wait after completing an upgrade domain
before starting the health checks process.
- name: --health-check-stable
type: string
short-summary: The amount of time that the application or cluster must remain healthy
before the upgrade proceeds to the next upgrade domain.
long-summary: It is first interpreted as a string representing an ISO 8601 duration.
If that fails, then it is interpreted as a number representing the total number
of milliseconds.
- name: --upgrade-domain-timeout
type: string
short-summary: The amount of time each upgrade domain has to complete before
FailureAction is executed.
long-summary: It is first interpreted as a string representing an
ISO 8601 duration. If that fails, then it is interpreted as a number
representing the total number of milliseconds.
- name: --upgrade-timeout
type: string
short-summary: The amount of time the overall upgrade has to complete before
FailureAction is executed.
long-summary: It is first interpreted as a string representing an
ISO 8601 duration. If that fails, then it is interpreted as a number
representing the total number of milliseconds.
- name: --unhealthy-applications
type: int
short-summary: The maximum allowed percentage of unhealthy
applications during the upgrade. Allowed values are integer values
from zero to 100.
- name: --unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of unhealthy nodes
during the upgrade. Allowed values are integer values from zero
to 100.
- name: --delta-unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of delta health
degradation during the upgrade. Allowed values are integer values
from zero to 100.
- name: --upgrade-domain-delta-unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of upgrade domain delta
health degradation during the upgrade. Allowed values are integer
values from zero to 100.
- name: --application-health-policies
type: string
short-summary: JSON encoded dictionary of pairs of application type
name and maximum percentage unhealthy before raising error
examples:
- name: Start a cluster configuration update
text: sfctl sa-cluster config-upgrade --cluster-config <YOUR CLUSTER CONFIG> --application-health-policies "{\"fabric:/System\":{\"ConsiderWarningAsError\":true}}"
"""
helps['cluster upgrade-update'] = """
type: command
short-summary: Update the upgrade parameters of a Service Fabric cluster
upgrade
parameters:
- name: --upgrade-kind
type: string
short-summary: "Possible values include: 'Invalid', 'Rolling',
'Rolling_ForceRestart'"
- name: --rolling-upgrade-mode
type: string
short-summary: "Possible values include: 'Invalid',
'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'"
- name: --replica-set-check-timeout
type: string
short-summary: The maximum amount of time to block processing of an upgrade domain
and prevent loss of availability when there are unexpected issues.
long-summary: When this timeout expires, processing of the upgrade domain will
proceed regardless of availability loss issues.
The timeout is reset at the start of each upgrade domain.
Valid values are between 0 and 42949672925 inclusive.
- name: --force-restart
type: bool
short-summary: Processes are forcefully restarted during upgrade even when the
code version has not changed
long-summary: The upgrade only changes configuration or data
- name: --failure-action
type: string
short-summary: "Possible values include: 'Invalid', 'Rollback',
'Manual'"
- name: --health-check-wait
type: string
short-summary: The length of time to wait after completing an upgrade domain
before starting the health checks process.
- name: --health-check-stable
type: string
short-summary: The amount of time that the application or cluster must remain healthy
before the upgrade proceeds to the next upgrade domain.
long-summary: It is first interpreted as a string representing an ISO 8601 duration.
If that fails, then it is interpreted as a number representing the total number
of milliseconds.
- name: --health-check-retry
type: string
short-summary: The length of time between attempts to perform health checks if
the application or cluster is not healthy.
- name: --upgrade-timeout
type: string
short-summary: The amount of time the overall upgrade has to complete before
FailureAction is executed.
long-summary: It is first interpreted as a string representing an
ISO 8601 duration. If that fails, then it is interpreted as a number
representing the total number of milliseconds.
- name: --upgrade-domain-timeout
type: string
short-summary: The amount of time each upgrade domain has to complete before
FailureAction is executed.
long-summary: It is first interpreted as a string representing an
ISO 8601 duration. If that fails, then it is interpreted as a number
representing the total number of milliseconds.
- name: --warning-as-error
type: bool
short-summary: Indicates whether warnings are treated with the same severity as errors
- name: --unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of unhealthy nodes
before reporting an error
long-summary: For example, to allow 10% of nodes to be unhealthy,
this value would be 10. The percentage represents the maximum
tolerated percentage of nodes that can be unhealthy before the
cluster is considered in error. If the percentage is respected but
there is at least one unhealthy node, the health is evaluated as
Warning. The percentage is calculated by dividing the number of
unhealthy nodes over the total number of nodes in the cluster. The
computation rounds up to tolerate one failure on small numbers of
nodes. In large clusters, some nodes will always be down or out for
repairs, so this percentage should be configured to tolerate that.
- name: --unhealthy-applications
type: int
short-summary: The maximum allowed percentage of unhealthy
applications before reporting an error
long-summary: For example, to allow 10% of applications to be
unhealthy, this value would be 10. The percentage represents the
maximum tolerated percentage of applications that can be unhealthy
before the cluster is considered in error. If the percentage is
respected but there is at least one unhealthy application, the
health is evaluated as Warning. This is calculated by dividing the
number of unhealthy applications over the total number of
application instances in the cluster, excluding applications of
application types that are included in the
ApplicationTypeHealthPolicyMap. The computation rounds up to
tolerate one failure on small numbers of applications.
- name: --app-type-health-map
type: string
short-summary: JSON encoded dictionary of pairs of application type
name and maximum percentage unhealthy before raising error
- name: --delta-health-evaluation
type: bool
short-summary: Enables delta health evaluation rather than absolute
health evaluation after completion of each upgrade domain
- name: --delta-unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of nodes health
degradation allowed during cluster upgrades
long-summary: The delta is measured between the state of the nodes at
the beginning of upgrade and the state of the nodes at the time of
the health evaluation. The check is performed after every upgrade
domain upgrade completion to make sure the global state of the
cluster is within tolerated limits.
- name: --upgrade-domain-delta-unhealthy-nodes
type: int
short-summary: The maximum allowed percentage of upgrade domain nodes
health degradation allowed during cluster upgrades
long-summary: The delta is measured between the state of the
upgrade domain nodes at the beginning of upgrade and the state of
the upgrade domain nodes at the time of the health evaluation. The
check is performed after every upgrade domain upgrade completion
for all completed upgrade domains to make sure the state of the
upgrade domains is within tolerated limits.
- name: --app-health-map
type: string
short-summary: JSON encoded dictionary of pairs of application name
and maximum percentage unhealthy before raising error
"""
| en | 0.854071 | # ----------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # ----------------------------------------------------------------------------- Help documentation for Service Fabric cluster upgrade commands type: command short-summary: Start upgrading the code or configuration version of a Service Fabric cluster long-summary: Validate the supplied upgrade parameters and start upgrading the code or configuration version of a Service Fabric cluster if the parameters are valid. parameters: - name: --code-version type: string short-summary: The cluster code version - name: --config-version type: string short-summary: The cluster configuration version - name: --rolling-upgrade-mode type: string short-summary: "Possible values include: 'Invalid', 'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'" - name: --replica-set-check-timeout type: string short-summary: The maximum amount of time to block processing of an upgrade domain and prevent loss of availability when there are unexpected issues. long-summary: When this timeout expires, processing of the upgrade domain will proceed regardless of availability loss issues. The timeout is reset at the start of each upgrade domain. Valid values are between 0 and 42949672925 inclusive. - name: --force-restart type: bool short-summary: Processes are forcefully restarted during upgrade even when the code version has not changed long-summary: The upgrade only changes configuration or data - name: --failure-action type: string short-summary: "Possible values include: 'Invalid', 'Rollback', 'Manual'" - name: --health-check-wait type: string short-summary: The length of time to wait after completing an upgrade domain before starting the health checks process. 
- name: --health-check-stable type: string short-summary: The amount of time that the application or cluster must remain healthy before the upgrade proceeds to the next upgrade domain. long-summary: It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. - name: --health-check-retry type: string short-summary: The length of time between attempts to perform health checks if the application or cluster is not healthy. - name: --upgrade-timeout type: string short-summary: The amount of time the overall upgrade has to complete before FailureAction is executed. long-summary: It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. - name: --upgrade-domain-timeout type: string short-summary: The amount of time each upgrade domain has to complete before FailureAction is executed. long-summary: It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. - name: --warning-as-error type: bool short-summary: Indicates whether warnings are treated with the same severity as errors - name: --unhealthy-nodes type: int short-summary: The maximum allowed percentage of unhealthy nodes before reporting an error long-summary: For example, to allow 10% of nodes to be unhealthy, this value would be 10. The percentage represents the maximum tolerated percentage of nodes that can be unhealthy before the cluster is considered in error. If the percentage is respected but there is at least one unhealthy node, the health is evaluated as Warning. The percentage is calculated by dividing the number of unhealthy nodes over the total number of nodes in the cluster. The computation rounds up to tolerate one failure on small numbers of nodes. 
In large clusters, some nodes will always be down or out for repairs, so this percentage should be configured to tolerate that. - name: --unhealthy-applications type: int short-summary: The maximum allowed percentage of unhealthy applications before reporting an error long-summary: For example, to allow 10% of applications to be unhealthy, this value would be 10. The percentage represents the maximum tolerated percentage of applications that can be unhealthy before the cluster is considered in error. If the percentage is respected but there is at least one unhealthy application, the health is evaluated as Warning. This is calculated by dividing the number of unhealthy applications over the total number of application instances in the cluster, excluding applications of application types that are included in the ApplicationTypeHealthPolicyMap. The computation rounds up to tolerate one failure on small numbers of applications. - name: --app-type-health-map type: string short-summary: JSON encoded dictionary of pairs of application type name and maximum percentage unhealthy before raising error - name: --delta-health-evaluation type: bool short-summary: Enables delta health evaluation rather than absolute health evaluation after completion of each upgrade domain - name: --delta-unhealthy-nodes type: int short-summary: The maximum allowed percentage of nodes health degradation allowed during cluster upgrades long-summary: The delta is measured between the state of the nodes at the beginning of upgrade and the state of the nodes at the time of the health evaluation. The check is performed after every upgrade domain upgrade completion to make sure the global state of the cluster is within tolerated limits. 
- name: --upgrade-domain-delta-unhealthy-nodes type: int short-summary: The maximum allowed percentage of upgrade domain nodes health degradation allowed during cluster upgrades long-summary: The delta is measured between the state of the upgrade domain nodes at the beginning of upgrade and the state of the upgrade domain nodes at the time of the health evaluation. The check is performed after every upgrade domain upgrade completion for all completed upgrade domains to make sure the state of the upgrade domains is within tolerated limits. - name: --app-health-map type: string short-summary: JSON encoded dictionary of pairs of application name and maximum percentage unhealthy before raising error type: command short-summary: Start upgrading the configuration of a Service Fabric standalone cluster long-summary: Validate the supplied configuration upgrade parameters and start upgrading the cluster configuration if the parameters are valid. parameters: - name: --cluster-config type: string short-summary: The cluster configuration. - name: --health-check-retry type: string short-summary: The length of time between attempts to perform health checks if the application or cluster is not healthy. - name: --health-check-wait type: string short-summary: The length of time to wait after completing an upgrade domain before starting the health checks process. - name: --health-check-stable type: string short-summary: The amount of time that the application or cluster must remain healthy before the upgrade proceeds to the next upgrade domain. long-summary: It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. - name: --upgrade-domain-timeout type: string short-summary: The amount of time each upgrade domain has to complete before FailureAction is executed. long-summary: It is first interpreted as a string representing an ISO 8601 duration. 
If that fails, then it is interpreted as a number representing the total number of milliseconds. - name: --upgrade-timeout type: string short-summary: The amount of time the overall upgrade has to complete before FailureAction is executed. long-summary: It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. - name: --unhealthy-applications type: int short-summary: The maximum allowed percentage of unhealthy applications during the upgrade. Allowed values are integer values from zero to 100. - name: --unhealthy-nodes type: int short-summary: The maximum allowed percentage of unhealthy nodes during the upgrade. Allowed values are integer values from zero to 100. - name: --delta-unhealthy-nodes type: int short-summary: The maximum allowed percentage of delta health degradation during the upgrade. Allowed values are integer values from zero to 100. - name: --upgrade-domain-delta-unhealthy-nodes type: int short-summary: The maximum allowed percentage of upgrade domain delta health degradation during the upgrade. Allowed values are integer values from zero to 100. 
- name: --application-health-policies type: string short-summary: JSON encoded dictionary of pairs of application type name and maximum percentage unhealthy before raising error examples: - name: Start a cluster configuration update text: sfctl sa-cluster config-upgrade --cluster-config <YOUR CLUSTER CONFIG> --application-health-policies "{\"fabric:/System\":{\"ConsiderWarningAsError\":true}}" type: command short-summary: Update the upgrade parameters of a Service Fabric cluster upgrade parameters: - name: --upgrade-kind type: string short-summary: "Possible values include: 'Invalid', 'Rolling', 'Rolling_ForceRestart'" - name: --rolling-upgrade-mode type: string short-summary: "Possible values include: 'Invalid', 'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'" - name: --replica-set-check-timeout type: string short-summary: The maximum amount of time to block processing of an upgrade domain and prevent loss of availability when there are unexpected issues. long-summary: When this timeout expires, processing of the upgrade domain will proceed regardless of availability loss issues. The timeout is reset at the start of each upgrade domain. Valid values are between 0 and 42949672925 inclusive. - name: --force-restart type: bool short-summary: Processes are forcefully restarted during upgrade even when the code version has not changed long-summary: The upgrade only changes configuration or data - name: --failure-action type: string short-summary: "Possible values include: 'Invalid', 'Rollback', 'Manual'" - name: --health-check-wait type: string short-summary: The length of time to wait after completing an upgrade domain before starting the health checks process. - name: --health-check-stable type: string short-summary: The amount of time that the application or cluster must remain healthy before the upgrade proceeds to the next upgrade domain. long-summary: It is first interpreted as a string representing an ISO 8601 duration. 
If that fails, then it is interpreted as a number representing the total number of milliseconds. - name: --health-check-retry type: string short-summary: The length of time between attempts to perform health checks if the application or cluster is not healthy. - name: --upgrade-timeout type: string short-summary: The amount of time the overall upgrade has to complete before FailureAction is executed. long-summary: It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. - name: --upgrade-domain-timeout type: string short-summary: The amount of time each upgrade domain has to complete before FailureAction is executed. long-summary: It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. - name: --warning-as-error type: bool short-summary: Indicates whether warnings are treated with the same severity as errors - name: --unhealthy-nodes type: int short-summary: The maximum allowed percentage of unhealthy nodes before reporting an error long-summary: For example, to allow 10% of nodes to be unhealthy, this value would be 10. The percentage represents the maximum tolerated percentage of nodes that can be unhealthy before the cluster is considered in error. If the percentage is respected but there is at least one unhealthy node, the health is evaluated as Warning. The percentage is calculated by dividing the number of unhealthy nodes over the total number of nodes in the cluster. The computation rounds up to tolerate one failure on small numbers of nodes. In large clusters, some nodes will always be down or out for repairs, so this percentage should be configured to tolerate that. 
- name: --unhealthy-applications type: int short-summary: The maximum allowed percentage of unhealthy applications before reporting an error long-summary: For example, to allow 10% of applications to be unhealthy, this value would be 10. The percentage represents the maximum tolerated percentage of applications that can be unhealthy before the cluster is considered in error. If the percentage is respected but there is at least one unhealthy application, the health is evaluated as Warning. This is calculated by dividing the number of unhealthy applications over the total number of application instances in the cluster, excluding applications of application types that are included in the ApplicationTypeHealthPolicyMap. The computation rounds up to tolerate one failure on small numbers of applications. - name: --app-type-health-map type: string short-summary: JSON encoded dictionary of pairs of application type name and maximum percentage unhealthy before raising error - name: --delta-health-evaluation type: bool short-summary: Enables delta health evaluation rather than absolute health evaluation after completion of each upgrade domain - name: --delta-unhealthy-nodes type: int short-summary: The maximum allowed percentage of nodes health degradation allowed during cluster upgrades long-summary: The delta is measured between the state of the nodes at the beginning of upgrade and the state of the nodes at the time of the health evaluation. The check is performed after every upgrade domain upgrade completion to make sure the global state of the cluster is within tolerated limits. - name: --upgrade-domain-delta-unhealthy-nodes type: int short-summary: The maximum allowed percentage of upgrade domain nodes health degradation allowed during cluster upgrades long-summary: The delta is measured between the state of the upgrade domain nodes at the beginning of upgrade and the state of the upgrade domain nodes at the time of the health evaluation. 
The check is performed after every upgrade domain upgrade completion for all completed upgrade domains to make sure the state of the upgrade domains is within tolerated limits. - name: --app-health-map type: string short-summary: JSON encoded dictionary of pairs of application name and maximum percentage unhealthy before raising error | 1.520553 | 2 |
src/smach_actionlib.py | josuearaujo/sistema-alocador-para-robos | 2 | 6625218 | <filename>src/smach_actionlib.py<gh_stars>1-10
#!/usr/bin/env python
import roslib; roslib.load_manifest('smach_ros')
import rospy
import rostest
import unittest
from actionlib import *
from actionlib.msg import *
from smach import *
from smach_ros import *
from smach_msgs.msg import *
# Static goals
g1 = TestGoal(1) # This goal should succeed
g2 = TestGoal(2) # This goal should abort
g3 = TestGoal(3) # This goal should be rejected
class AssertUDState(State):
def __init__(self, keys):
State.__init__(self, outcomes=['succeeded', 'aborted'])
self._keys = keys
self.register_input_keys(keys)
def execute(self, ud):
for key in self._keys:
if key not in ud:
rospy.logerr("Key '%s' not in userdata. Available keys are: %s" % (key, ud.keys()))
return 'aborted'
return 'succeeded'
# ## Test harness
class TestActionlib(unittest.TestCase):
def test_action_client(self):
"""Test simple action states"""
sq = Sequence(['succeeded', 'aborted', 'preempted', 'foobar'], 'succeeded')
sq.userdata['g1'] = g1
sq.userdata['g2'] = g2
sq.userdata['goal'] = 1
sq.userdata['goal_alias'] = 1
with sq:
# Test single goal policy
Sequence.add('GOAL_STATIC',
SimpleActionState(
"reference_action", TestAction, goal=g1))
Sequence.add('GOAL_KEY',
SimpleActionState(
"reference_action", TestAction, goal_key='g1'))
Sequence.add('GOAL_SLOTS',
SimpleActionState(
"reference_action", TestAction, goal_slots=['goal']))
Sequence.add('GOAL_SLOTS_REMAP',
SimpleActionState(
"reference_action", TestAction, goal_slots=['goal']),
remapping={'goal':'goal_alias'})
# Test goal callback
def goal_cb_0(ud, default_goal):
return TestGoal(1)
Sequence.add('GOAL_CB',
SimpleActionState(
"reference_action", TestAction,
goal_cb=goal_cb_0))
Sequence.add('GOAL_CB_LAMBDA',
SimpleActionState(
"reference_action", TestAction,
goal_cb=lambda ud, goal: TestGoal(1)))
Sequence.add('GOAL_CB_UD',
SimpleActionState(
"reference_action", TestAction,
goal_cb=lambda ud, goal: ud.g1,
input_keys=['g1']))
@cb_interface(input_keys=['g1'])
def goal_cb_1(ud, default_goal):
return ud.g1
Sequence.add('GOAL_CB_UD_DECORATOR',
SimpleActionState(
"reference_action", TestAction,
goal_cb=goal_cb_1))
Sequence.add('GOAL_CB_ARGS',
SimpleActionState(
"reference_action", TestAction,
goal_cb=lambda ud, goal, g: TestGoal(g),
goal_cb_args=[1]))
Sequence.add('GOAL_CB_KWARGS',
SimpleActionState(
"reference_action", TestAction,
goal_cb=lambda ud, goal, gg: TestGoal(gg),
goal_cb_kwargs={'gg':1}))
Sequence.add('GOAL_CB_ARGS_KWARGS',
SimpleActionState(
"reference_action", TestAction,
goal_cb=lambda ud, goal, g, gg: TestGoal(g - gg),
goal_cb_args=[2],
goal_cb_kwargs={'gg':1}))
# Test overriding goal policies
Sequence.add('GOAL_STATIC_SLOTS',
SimpleActionState(
"reference_action", TestAction,
goal=g2,
goal_slots=['goal']))
Sequence.add('GOAL_STATIC_CB',
SimpleActionState(
"reference_action", TestAction,
goal=g2,
goal_cb=CBInterface(
lambda ud, goal: setattr(goal, 'goal', 1),
output_keys=['goal'])))
# Test result policies
Sequence.add('RESULT_KEY',
SimpleActionState(
"reference_action", TestAction,
goal=g1,
result_key='res_key'))
Sequence.add('RESULT_KEY_CHECK', AssertUDState(['res_key']))
Sequence.add('RESULT_CB',
SimpleActionState(
"reference_action", TestAction,
goal=g1,
result_cb=CBInterface(
lambda ud, res_stat, res: setattr(ud, 'res_cb', res),
output_keys=['res_cb'])))
Sequence.add('RESULT_CB_CHECK', AssertUDState(['res_cb']))
Sequence.add('RESULT_SLOTS',
SimpleActionState(
"reference_action", TestAction,
goal=g1,
result_slots=['result']))
Sequence.add('RESULT_SLOTS_CHECK', AssertUDState(['result']))
Sequence.add('RESULT_SLOTS_REMAP',
SimpleActionState(
"reference_action", TestAction,
goal=g1,
result_slots=['result']),
remapping={'result': 'res_alias'})
Sequence.add('RESULT_SLOTS_MAP_CHECK', AssertUDState(['res_alias']))
Sequence.add('RESULT_CB_OUTCOME',
SimpleActionState(
"reference_action", TestAction,
goal=g1,
result_cb=CBInterface(
lambda ud, res_stat, res: 'foobar',
outcomes=['foobar'])))
sq_outcome = sq.execute()
assert sq_outcome == 'foobar'
def test_action_server_wrapper(self):
"""Test action server wrapper."""
sq = Sequence(['succeeded', 'aborted', 'preempted'], 'succeeded')
sq.register_input_keys(['goal', 'action_goal', 'action_result'])
sq.register_output_keys(['action_result'])
with sq:
Sequence.add('GOAL_KEY',
SimpleActionState(
"reference_action", TestAction, goal_key='action_goal'))
Sequence.add('GOAL_SLOTS',
SimpleActionState(
"reference_action", TestAction, goal_slots=['goal']))
@cb_interface(input_keys=['action_result'], output_keys=['action_result'])
def res_cb(ud, status, res):
ud.action_result.result = res.result + 1
Sequence.add('RESULT_CB',
SimpleActionState(
"reference_action", TestAction,
goal=g1,
result_cb=res_cb))
asw = ActionServerWrapper(
'reference_action_sm', TestAction, sq,
succeeded_outcomes=['succeeded'],
aborted_outcomes=['aborted'],
preempted_outcomes=['preempted'],
expand_goal_slots=True)
asw.run_server()
ac = SimpleActionClient('reference_action_sm', TestAction)
ac.wait_for_server(rospy.Duration(30))
assert ac.send_goal_and_wait(g1, rospy.Duration(30)) == GoalStatus.SUCCEEDED
assert ac.get_result().result == 1
def test_action_preemption(self):
"""Test action preemption"""
sq = Sequence(['succeeded', 'aborted', 'preempted'], 'succeeded')
class SlowRunningState(State):
def __init__(self):
State.__init__(self, outcomes=['succeeded', 'aborted', 'preempted'])
def execute(self, ud):
start_time = rospy.Time.now()
while rospy.Time.now() - start_time < rospy.Duration(10):
rospy.sleep(0.05)
if self.preempt_requested():
self.service_preempt()
return 'preempted'
return 'succeeded'
with sq:
Sequence.add('PREEMPT_ME', SlowRunningState())
asw = ActionServerWrapper(
'preempt_action_sm', TestAction, sq,
succeeded_outcomes=['succeeded'],
aborted_outcomes=['aborted'],
preempted_outcomes=['preempted'])
asw.run_server()
ac = SimpleActionClient('preempt_action_sm', TestAction)
ac.wait_for_server(rospy.Duration(30))
ac.send_goal(g1)
rospy.sleep(5.0)
ac.cancel_goal()
start_time = rospy.Time.now()
while ac.get_state() == GoalStatus.ACTIVE and rospy.Time.now() - start_time < rospy.Duration(30):
rospy.sleep(0.5)
assert ac.get_state() == GoalStatus.PREEMPTED
def test_action_client_timeout(self):
"""Test simple action state server timeout"""
sq = Sequence(['succeeded', 'aborted', 'preempted'], 'succeeded')
sq.userdata['g1'] = g1
with sq:
# Test single goal policy
Sequence.add(
'GOAL_STATIC',
SimpleActionState(
"reference_action_not_available", TestAction,
goal=g1,
server_wait_timeout=rospy.Duration(1.0)))
sq_outcome = sq.execute()
def main():
rospy.init_node('smach_actionlib', log_level=rospy.DEBUG)
rostest.rosrun('smach', 'smach_actionlib', TestActionlib)
if __name__ == "__main__":
main(); | <filename>src/smach_actionlib.py<gh_stars>1-10
#!/usr/bin/env python
import roslib; roslib.load_manifest('smach_ros')
import rospy
import rostest
import unittest
from actionlib import *
from actionlib.msg import *
from smach import *
from smach_ros import *
from smach_msgs.msg import *
# Static goals
g1 = TestGoal(1) # This goal should succeed
g2 = TestGoal(2) # This goal should abort
g3 = TestGoal(3) # This goal should be rejected
class AssertUDState(State):
def __init__(self, keys):
State.__init__(self, outcomes=['succeeded', 'aborted'])
self._keys = keys
self.register_input_keys(keys)
def execute(self, ud):
for key in self._keys:
if key not in ud:
rospy.logerr("Key '%s' not in userdata. Available keys are: %s" % (key, ud.keys()))
return 'aborted'
return 'succeeded'
# ## Test harness
class TestActionlib(unittest.TestCase):
def test_action_client(self):
"""Test simple action states"""
sq = Sequence(['succeeded', 'aborted', 'preempted', 'foobar'], 'succeeded')
sq.userdata['g1'] = g1
sq.userdata['g2'] = g2
sq.userdata['goal'] = 1
sq.userdata['goal_alias'] = 1
with sq:
# Test single goal policy
Sequence.add('GOAL_STATIC',
SimpleActionState(
"reference_action", TestAction, goal=g1))
Sequence.add('GOAL_KEY',
SimpleActionState(
"reference_action", TestAction, goal_key='g1'))
Sequence.add('GOAL_SLOTS',
SimpleActionState(
"reference_action", TestAction, goal_slots=['goal']))
Sequence.add('GOAL_SLOTS_REMAP',
SimpleActionState(
"reference_action", TestAction, goal_slots=['goal']),
remapping={'goal':'goal_alias'})
# Test goal callback
def goal_cb_0(ud, default_goal):
return TestGoal(1)
Sequence.add('GOAL_CB',
SimpleActionState(
"reference_action", TestAction,
goal_cb=goal_cb_0))
Sequence.add('GOAL_CB_LAMBDA',
SimpleActionState(
"reference_action", TestAction,
goal_cb=lambda ud, goal: TestGoal(1)))
Sequence.add('GOAL_CB_UD',
SimpleActionState(
"reference_action", TestAction,
goal_cb=lambda ud, goal: ud.g1,
input_keys=['g1']))
@cb_interface(input_keys=['g1'])
def goal_cb_1(ud, default_goal):
return ud.g1
Sequence.add('GOAL_CB_UD_DECORATOR',
SimpleActionState(
"reference_action", TestAction,
goal_cb=goal_cb_1))
Sequence.add('GOAL_CB_ARGS',
SimpleActionState(
"reference_action", TestAction,
goal_cb=lambda ud, goal, g: TestGoal(g),
goal_cb_args=[1]))
Sequence.add('GOAL_CB_KWARGS',
SimpleActionState(
"reference_action", TestAction,
goal_cb=lambda ud, goal, gg: TestGoal(gg),
goal_cb_kwargs={'gg':1}))
Sequence.add('GOAL_CB_ARGS_KWARGS',
SimpleActionState(
"reference_action", TestAction,
goal_cb=lambda ud, goal, g, gg: TestGoal(g - gg),
goal_cb_args=[2],
goal_cb_kwargs={'gg':1}))
# Test overriding goal policies
Sequence.add('GOAL_STATIC_SLOTS',
SimpleActionState(
"reference_action", TestAction,
goal=g2,
goal_slots=['goal']))
Sequence.add('GOAL_STATIC_CB',
SimpleActionState(
"reference_action", TestAction,
goal=g2,
goal_cb=CBInterface(
lambda ud, goal: setattr(goal, 'goal', 1),
output_keys=['goal'])))
# Test result policies
Sequence.add('RESULT_KEY',
SimpleActionState(
"reference_action", TestAction,
goal=g1,
result_key='res_key'))
Sequence.add('RESULT_KEY_CHECK', AssertUDState(['res_key']))
Sequence.add('RESULT_CB',
SimpleActionState(
"reference_action", TestAction,
goal=g1,
result_cb=CBInterface(
lambda ud, res_stat, res: setattr(ud, 'res_cb', res),
output_keys=['res_cb'])))
Sequence.add('RESULT_CB_CHECK', AssertUDState(['res_cb']))
Sequence.add('RESULT_SLOTS',
SimpleActionState(
"reference_action", TestAction,
goal=g1,
result_slots=['result']))
Sequence.add('RESULT_SLOTS_CHECK', AssertUDState(['result']))
Sequence.add('RESULT_SLOTS_REMAP',
SimpleActionState(
"reference_action", TestAction,
goal=g1,
result_slots=['result']),
remapping={'result': 'res_alias'})
Sequence.add('RESULT_SLOTS_MAP_CHECK', AssertUDState(['res_alias']))
Sequence.add('RESULT_CB_OUTCOME',
SimpleActionState(
"reference_action", TestAction,
goal=g1,
result_cb=CBInterface(
lambda ud, res_stat, res: 'foobar',
outcomes=['foobar'])))
sq_outcome = sq.execute()
assert sq_outcome == 'foobar'
def test_action_server_wrapper(self):
"""Test action server wrapper."""
sq = Sequence(['succeeded', 'aborted', 'preempted'], 'succeeded')
sq.register_input_keys(['goal', 'action_goal', 'action_result'])
sq.register_output_keys(['action_result'])
with sq:
Sequence.add('GOAL_KEY',
SimpleActionState(
"reference_action", TestAction, goal_key='action_goal'))
Sequence.add('GOAL_SLOTS',
SimpleActionState(
"reference_action", TestAction, goal_slots=['goal']))
@cb_interface(input_keys=['action_result'], output_keys=['action_result'])
def res_cb(ud, status, res):
ud.action_result.result = res.result + 1
Sequence.add('RESULT_CB',
SimpleActionState(
"reference_action", TestAction,
goal=g1,
result_cb=res_cb))
asw = ActionServerWrapper(
'reference_action_sm', TestAction, sq,
succeeded_outcomes=['succeeded'],
aborted_outcomes=['aborted'],
preempted_outcomes=['preempted'],
expand_goal_slots=True)
asw.run_server()
ac = SimpleActionClient('reference_action_sm', TestAction)
ac.wait_for_server(rospy.Duration(30))
assert ac.send_goal_and_wait(g1, rospy.Duration(30)) == GoalStatus.SUCCEEDED
assert ac.get_result().result == 1
def test_action_preemption(self):
"""Test action preemption"""
sq = Sequence(['succeeded', 'aborted', 'preempted'], 'succeeded')
class SlowRunningState(State):
def __init__(self):
State.__init__(self, outcomes=['succeeded', 'aborted', 'preempted'])
def execute(self, ud):
start_time = rospy.Time.now()
while rospy.Time.now() - start_time < rospy.Duration(10):
rospy.sleep(0.05)
if self.preempt_requested():
self.service_preempt()
return 'preempted'
return 'succeeded'
with sq:
Sequence.add('PREEMPT_ME', SlowRunningState())
asw = ActionServerWrapper(
'preempt_action_sm', TestAction, sq,
succeeded_outcomes=['succeeded'],
aborted_outcomes=['aborted'],
preempted_outcomes=['preempted'])
asw.run_server()
ac = SimpleActionClient('preempt_action_sm', TestAction)
ac.wait_for_server(rospy.Duration(30))
ac.send_goal(g1)
rospy.sleep(5.0)
ac.cancel_goal()
start_time = rospy.Time.now()
while ac.get_state() == GoalStatus.ACTIVE and rospy.Time.now() - start_time < rospy.Duration(30):
rospy.sleep(0.5)
assert ac.get_state() == GoalStatus.PREEMPTED
def test_action_client_timeout(self):
"""Test simple action state server timeout"""
sq = Sequence(['succeeded', 'aborted', 'preempted'], 'succeeded')
sq.userdata['g1'] = g1
with sq:
# Test single goal policy
Sequence.add(
'GOAL_STATIC',
SimpleActionState(
"reference_action_not_available", TestAction,
goal=g1,
server_wait_timeout=rospy.Duration(1.0)))
sq_outcome = sq.execute()
def main():
rospy.init_node('smach_actionlib', log_level=rospy.DEBUG)
rostest.rosrun('smach', 'smach_actionlib', TestActionlib)
if __name__ == "__main__":
main(); | en | 0.687828 | #!/usr/bin/env python # Static goals # This goal should succeed # This goal should abort # This goal should be rejected # ## Test harness Test simple action states # Test single goal policy # Test goal callback # Test overriding goal policies # Test result policies Test action server wrapper. Test action preemption Test simple action state server timeout # Test single goal policy | 2.037288 | 2 |
upcloud_api/cloud_manager/ip_address_mixin.py | akx/upcloud-python-api | 0 | 6625219 | <filename>upcloud_api/cloud_manager/ip_address_mixin.py
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import six
from upcloud_api import IPAddress
class IPManager(object):
"""
Functions for managing IP-addresses. Intended to be used as a mixin for CloudManager.
"""
def get_ip(self, address):
"""
Get an IPAddress object with the IP address (string) from the API.
e.g manager.get_ip('192.168.3.11')
"""
res = self.get_request('/ip_address/' + address)
return IPAddress(cloud_manager=self, **res['ip_address'])
def get_ips(self, ignore_ips_without_server=False):
"""
Get all IPAddress objects from the API.
"""
res = self.get_request('/ip_address')
IPs = IPAddress._create_ip_address_objs(res['ip_addresses'], self, ignore_ips_without_server)
return IPs
def attach_ip(self, server, family='IPv4'):
"""
Attach a new (random) IPAddress to the given server (object or UUID).
"""
body = {
'ip_address': {
'server': str(server),
'family': family
}
}
res = self.post_request('/ip_address', body)
return IPAddress(cloud_manager=self, **res['ip_address'])
def modify_ip(self, ip_addr, ptr_record):
"""
Modify an IP address' ptr-record (Reverse DNS).
Accepts an IPAddress instance (object) or its address (string).
"""
body = {
'ip_address': {
'ptr_record': ptr_record
}
}
res = self.put_request('/ip_address/' + str(ip_addr), body)
return IPAddress(cloud_manager=self, **res['ip_address'])
def release_ip(self, ip_addr):
"""
Destroy an IPAddress. Returns an empty object.
Accepts an IPAddress instance (object) or its address (string).
"""
return self.delete_request('/ip_address/' + str(ip_addr))
| <filename>upcloud_api/cloud_manager/ip_address_mixin.py
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import six
from upcloud_api import IPAddress
class IPManager(object):
"""
Functions for managing IP-addresses. Intended to be used as a mixin for CloudManager.
"""
def get_ip(self, address):
"""
Get an IPAddress object with the IP address (string) from the API.
e.g manager.get_ip('192.168.3.11')
"""
res = self.get_request('/ip_address/' + address)
return IPAddress(cloud_manager=self, **res['ip_address'])
def get_ips(self, ignore_ips_without_server=False):
"""
Get all IPAddress objects from the API.
"""
res = self.get_request('/ip_address')
IPs = IPAddress._create_ip_address_objs(res['ip_addresses'], self, ignore_ips_without_server)
return IPs
def attach_ip(self, server, family='IPv4'):
"""
Attach a new (random) IPAddress to the given server (object or UUID).
"""
body = {
'ip_address': {
'server': str(server),
'family': family
}
}
res = self.post_request('/ip_address', body)
return IPAddress(cloud_manager=self, **res['ip_address'])
def modify_ip(self, ip_addr, ptr_record):
"""
Modify an IP address' ptr-record (Reverse DNS).
Accepts an IPAddress instance (object) or its address (string).
"""
body = {
'ip_address': {
'ptr_record': ptr_record
}
}
res = self.put_request('/ip_address/' + str(ip_addr), body)
return IPAddress(cloud_manager=self, **res['ip_address'])
def release_ip(self, ip_addr):
"""
Destroy an IPAddress. Returns an empty object.
Accepts an IPAddress instance (object) or its address (string).
"""
return self.delete_request('/ip_address/' + str(ip_addr))
| en | 0.790552 | Functions for managing IP-addresses. Intended to be used as a mixin for CloudManager. Get an IPAddress object with the IP address (string) from the API. e.g manager.get_ip('192.168.3.11') Get all IPAddress objects from the API. Attach a new (random) IPAddress to the given server (object or UUID). Modify an IP address' ptr-record (Reverse DNS). Accepts an IPAddress instance (object) or its address (string). Destroy an IPAddress. Returns an empty object. Accepts an IPAddress instance (object) or its address (string). | 2.720425 | 3 |
python/rgz.py | willettk/rgz-analysis | 3 | 6625220 | <reponame>willettk/rgz-analysis<gh_stars>1-10
# import necessary python packages
import numpy as np
import pandas as pd
import datetime
import os
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
from collections import Counter
from matplotlib import pyplot as plt
from pymongo import MongoClient
from astropy.io import fits
from astropy import wcs
from scipy import stats
from scipy import optimize
from scipy.linalg.basic import LinAlgError
#from astropy import coordinates as coord
#from astropy.io import votable
#------------------------------------------------------------------------------------------------------------
# Setup path locations for analysis outputs; the plot/ann/dat directories are
# created on import if missing (csv_dir is assumed to already exist).
plot_dir = '../plots'
if not os.path.isdir(plot_dir):
    os.mkdir(plot_dir)

csv_dir = '../csv'

ann_dir = '../annfiles'
if not os.path.isdir(ann_dir):
    os.mkdir(ann_dir)

dat_dir = '../datfiles'
if not os.path.isdir(dat_dir):
    os.mkdir(dat_dir)

# Set constants
beta_release_date = datetime.datetime(2013, 10, 20, 12, 0, 0, 0) # date of RGZ beta release (YYYY,MM,DD,HH,MM,SS,MS)
main_release_date = datetime.datetime(2013, 12, 17, 0, 0, 0, 0)  # date of RGZ main release

IMG_HEIGHT = 424.0          # number of pixels in the JPG image along the y axis
IMG_WIDTH = 424.0           # number of pixels in the JPG image along the x axis
FITS_HEIGHT = 301.0         # number of pixels in the FITS image along the y axis
FITS_WIDTH = 301.0          # number of pixels in the FITS image along the x axis
PIXEL_SIZE = 0.00016667#/3600.0 # pixel scale of the FITS image; 0.00016667 deg = 0.6 arcsec. NOTE(review): original comment said "arcseconds per pixel" but the value looks like degrees -- confirm units

# Pixel limits of the JPG image used for plotting / KDE grids
xmin = 1.
xmax = IMG_HEIGHT   # NOTE(review): xmax uses IMG_HEIGHT and ymax uses IMG_WIDTH; harmless while both are 424, but confirm intent
ymin = 1.
ymax = IMG_WIDTH

xjpg2fits = float(IMG_WIDTH/FITS_WIDTH)     # scale factor mapping JPG pixels to FITS pixels in x
yjpg2fits = float(IMG_HEIGHT/FITS_HEIGHT)   # scale factor mapping JPG pixels to FITS pixels in y
def getWCSObj(subject):
    """Return an astropy WCS object for an RGZ subject's FITS image.

    Parameters
    ----------
    subject : dict
        RGZ subject document; the FITS file path is built from
        subject['metadata']['source'] under ./IMGS/.

    Returns
    -------
    astropy.wcs.WCS
        World coordinate system of the image's primary HDU.
    """
    src = subject["metadata"]["source"]
    path = "./IMGS/%s.fits" % src
    # Context manager ensures the FITS file handle is closed; the original
    # left it open, leaking a file descriptor per call.
    with fits.open(path) as hdulist:
        w = wcs.WCS(hdulist[0].header)
    return w
def plot_npeaks():
    """Histogram the number of IR peaks per RGZ image and save a hard copy."""

    # Load the per-image IR peak counts from the CSV written by the pipeline
    peaks_path = '%s/npeaks_ir.csv' % csv_dir
    with open(peaks_path, 'rb') as infile:
        peak_counts = [int(row.rstrip()) for row in infile]

    # Distribution of the total number of IR sources per image
    fig = plt.figure(figsize=(8, 7))
    axis = fig.add_subplot(111)
    plt.hist(peak_counts, bins=np.arange(np.max(peak_counts) + 1), axes=axis)

    axis.set_title('RGZ source distribution')
    axis.set_xlabel('Number of IR peaks per image')
    axis.set_ylabel('Count')

    fig.show()
    fig.tight_layout()

    # Save hard copy of the figure
    fig.savefig('%s/ir_peaks_histogram.png' % plot_dir)

    return None
def powerlaw_fit(xdata, ydata, epsilon=1e-3, pinit=(3.0, -1.0)):
    """Fit a power law y ~ amp * (x+1)**index via weighted least squares in log space.

    Parameters
    ----------
    xdata, ydata : array-like
        Abscissa and ordinate values; ydata must be strictly positive.
    epsilon : float, optional
        Small offset guarding the weight computation against division by zero.
    pinit : sequence of two floats, optional
        Initial guess for (log10(amp), index). Changed from a mutable list
        default to a tuple; values are unchanged.

    Returns
    -------
    amp, amperr, index, indexerr : float
        Best-fit amplitude and power-law index with 1-sigma uncertainties
        (both zero if the covariance matrix could not be estimated).
    """
    # The fit is linear in log space: log10(y) = p[0] + p[1] * log10(x + 1).
    # The +1 offset keeps x = 0 points finite.
    logx = np.log10(xdata + 1)
    logy = np.log10(ydata)
    # Heuristic weights: larger log(y) values get smaller errors.
    # NOTE(review): assumes logy + epsilon > 0; negative values yield NaN weights.
    logyerr = 1. / np.sqrt(logy + epsilon)

    # Line fitting function
    fitfunc = lambda p, x: p[0] + p[1] * x
    errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err

    out = optimize.leastsq(errfunc, pinit, args=(logx, logy, logyerr), full_output=1)
    pfinal, covar = out[0], out[1]

    amp, index = 10.0 ** pfinal[0], pfinal[1]

    if covar is not None:
        # pfinal[0] is log10(amp), so its variance is covar[0][0]; the index
        # variance is covar[1][1]. (The original code had these two swapped.)
        # Propagating through amp = 10**p0 gives sigma_amp = ln(10)*amp*sigma_p0.
        amperr = np.sqrt(covar[0][0]) * amp * np.log(10.0)
        indexerr = np.sqrt(covar[1][1])
    else:
        amperr, indexerr = 0., 0.

    return amp, amperr, index, indexerr
def plot_empirical_distribution_function(dfc):
    """Plot the empirical distribution of work among RGZ volunteers.

    Shows the cumulative fraction of all classifications contributed by the
    top-N volunteers (anonymous classifications lumped into one pseudo-user),
    prints summary percentages, and saves the figure.

    Parameters
    ----------
    dfc : pandas.DataFrame
        Classification records; must have '_id' and 'user_name' columns
        (user_name is NaN for anonymous classifications).
    """
    fig = plt.figure(figsize=(8,7))
    ax1 = fig.add_subplot(111)

    # Classifications per logged-in volunteer
    volunteers = pd.value_counts(dfc.user_name)

    # Rows without a user_name are anonymous; aggregate them into one entry
    anonymous_count = dfc._id.count() - dfc.user_name.count()
    volunteers = volunteers.set_value("anonymous", anonymous_count)
    volunteers.sort(ascending=False)

    # Cumulative fraction of total work, most prolific volunteer first
    # (vectorized; replaces the original Python-level running-total loop)
    vnorm = volunteers/volunteers.sum()
    cdf = np.cumsum(vnorm.values)

    ax1.plot(np.arange(len(volunteers))+1,cdf)
    ax1.set_xlabel('Number of volunteers',fontsize=18)
    ax1.set_ylabel('Percent of total classifications',fontsize=18)
    ax1.set_xscale('log')
    ax1.set_ylim(0,1)

    # Guide lines marking the contribution of the top 100 and 1000 users
    varr = (100,1000)
    lsarr = ('--','-.')
    for v,ls in zip(varr,lsarr):
        ax1.plot([1,v],[cdf[v]]*2,'k'+ls)
        ax1.plot([v]*2,[0,cdf[v]],'k'+ls)

    ax1.text(1.3,cdf[0],'Anonymous users',ha='left',fontsize=12)

    print('Anonymous users have done %2i%% of the total work.' % (cdf[0]*100.))
    print('The top 100 logged-in users have done %2i%% of the total work.' % ((cdf[100] - cdf[0])*100.))
    print('The top 1000 logged-in users have done %2i%% of the total work.' % ((cdf[1000] - cdf[0])*100.))

    fig.show()
    fig.set_tight_layout(True)

    # Save hard copy of the figure
    fig.savefig('%s/distribution_of_work.png' % plot_dir)

    # Personal paper-figure copy; only attempt it where the directory exists
    # so the function does not crash with IOError on other machines.
    eps_dir = '/Users/willettk/Dropbox/RGZ'
    if os.path.isdir(eps_dir):
        fig.savefig('%s/fig4.eps' % eps_dir)

    return None
def plot_zipf(dfc):
    """Plot RGZ volunteer classification counts against Zipf's/Lotka's law.

    For each unique classification total x, plots the number of logged-in
    volunteers with >= x classifications on log-log axes, fits a broken power
    law to the two regimes, and saves the figure.

    Parameters
    ----------
    dfc : pandas.DataFrame
        Classification records; must have a 'user_name' column.
    """
    fig = plt.figure(figsize=(8,8))
    ax1 = fig.add_subplot(111)

    # Note: does not include anonymous users
    volunteers = pd.value_counts(dfc.user_name)
    volunteers.sort(ascending=False)

    # For each unique classification total x, count volunteers with >= x
    # classifications. Vectorized with a sorted array + searchsorted; the
    # original ran one O(N) comparison scan per unique value.
    counts = volunteers.values
    xpoints = pd.Series(counts.ravel()).unique()
    sorted_counts = np.sort(counts)
    ypoints = len(counts) - np.searchsorted(sorted_counts, xpoints, side='left')

    ax1.loglog(xpoints,ypoints,'ro')

    # Fit a broken power law; xpoints is in descending order, so the last 50
    # unique values form the low-count regime.
    brk = -50
    xdata1 = xpoints[brk:]
    ydata1 = ypoints[brk:]
    amp1,amperr1,index1,indexerr1 = powerlaw_fit(xdata1,ydata1)

    xdata2 = xpoints[:brk]
    ydata2 = ypoints[:brk]
    amp2,amperr2,index2,indexerr2 = powerlaw_fit(xdata2,ydata2)

    # print() form runs under both Python 2 and 3 for a single argument and
    # matches the call style used elsewhere in this module.
    print('Fit 1: index = %5.2f, amp = %5.2f' % (index1,amp1))
    print('Fit 2: index = %5.2f, amp = %5.2f' % (index2,amp2))

    # Overplot the fits
    xplot = np.arange(xpoints.max() - 1)+1
    ax1.plot(xplot,amp1 * (xplot**index1),'k--')
    ax1.plot(xplot,amp2 * (xplot**index2),'k--')

    ax1.text(0.98,0.9,r'$\alpha_1 =$ %4.1f $\pm$ %3.1f' % (index1,indexerr1),ha='right',fontsize=12,transform=ax1.transAxes)
    ax1.text(0.98,0.8,r'$\alpha_2 =$ %4.1f $\pm$ %3.1f' % (index2,indexerr2),ha='right',fontsize=12,transform=ax1.transAxes)

    ax1.set_title("Zipf's Law in Radio Galaxy Zoo?")
    ax1.set_xlabel('Number of classifications')
    ax1.set_ylabel('Number of volunteers with '+r'$\geq N$'+' classifications')

    fig.show()
    fig.set_tight_layout(True)

    # Save hard copy of the figure
    fig.savefig('%s/zipf_plot.png' % plot_dir)

    return None
def plot_user_counts(dfc):
    """Plot the distribution of classification counts per RGZ volunteer.

    Top panel: ranked classification totals per volunteer with broken
    power-law fits overplotted. Bottom panel: histogram of the same counts.
    The figure is saved to the plots directory.
    """
    fig = plt.figure(figsize=(8,8))
    ax_rank = fig.add_subplot(211)

    # Classifications per logged-in volunteer, plus one aggregated
    # "anonymous" entry for rows with no user_name
    counts = pd.value_counts(dfc.user_name)
    n_anonymous = dfc._id.count() - dfc.user_name.count()
    counts = counts.set_value("anonymous", n_anonymous)
    counts.sort(ascending=False)

    rank_plot = counts.plot(ax=ax_rank,use_index=True,marker='.',color='red')

    # Broken power law: fit the first `break_rank` volunteers and the
    # remainder separately
    break_rank = 1000
    ranks_head = np.arange(break_rank)
    values_head = counts[:break_rank]
    amp1,amperr1,index1,indexerr1 = powerlaw_fit(ranks_head,values_head)

    ranks_tail = np.arange(len(counts)-break_rank) + break_rank
    values_tail = counts[break_rank:]
    amp2,amperr2,index2,indexerr2 = powerlaw_fit(ranks_tail,values_tail)

    # Overplot both fits across the full rank range
    rank_grid = np.arange(len(counts))
    ax_rank.plot(rank_grid,amp1 * (rank_grid**index1),'k--')
    ax_rank.plot(rank_grid,amp2 * (rank_grid**index2),'k--')

    ax_rank.text(0.98,0.9,r'$\alpha_1 =$ %4.1f $\pm$ %3.1f' % (index1,indexerr1),ha='right',fontsize=12,transform=ax_rank.transAxes)
    ax_rank.text(0.98,0.8,r'$\alpha_2 =$ %4.1f $\pm$ %3.1f' % (index2,indexerr2),ha='right',fontsize=12,transform=ax_rank.transAxes)

    rank_plot.set_title('RGZ volunteer distribution')
    rank_plot.set_xlabel('Volunteer')
    rank_plot.set_ylabel('Number of classifications')
    rank_plot.set_ylim((1,1e5))
    rank_plot.set_xscale('log')
    rank_plot.set_yscale('log')

    # Histogram of counts, excluding the anonymous aggregate at rank 0
    ax_hist = fig.add_subplot(212)
    hist_plot = counts[1:].hist(ax=ax_hist,bins=50,bottom=0.1)
    hist_plot.set_ylabel('Classifications per volunteer')
    hist_plot.set_xlabel('Number of classifications')
    hist_plot.set_yscale('log')
    ax_hist.text(0.95,0.9,'Also %i anonymous classifications' % counts[0],ha='right',fontsize=12,transform=ax_hist.transAxes)

    fig.show()
    fig.set_tight_layout(True)

    # Save hard copy of the figure
    fig.savefig('%s/classifications_per_user.png' % plot_dir)

    return None
def plot_classification_counts(dfs):
    """Histogram the number of classifications per RGZ subject.

    Subjects with zero classifications or runaway totals (>= 50, e.g. the
    tutorial image) are excluded from the histogram. The figure is saved to
    the plots directory.
    """
    fig = plt.figure(figsize=(8,6))
    axis = fig.add_subplot(111)

    # Keep only subjects with between 1 and 49 classifications
    subjects = dfs[(dfs.classification_count < 50) & (dfs.classification_count > 0)]
    hist_axes = subjects.classification_count.hist(ax=axis,bins=50,grid=False)
    hist_axes.set_xlabel('Classifications per subject')
    hist_axes.set_ylabel('Number of classifications')

    # Annotate with the count of subjects that have any classifications at all
    n_nonzero = (dfs.classification_count > 0).sum()
    x_lo,x_hi = hist_axes.get_xlim()
    y_lo,y_hi = hist_axes.get_ylim()
    hist_axes.text(0.7*x_hi,0.9*y_hi,r'$N_{non-zero} = %i$' % n_nonzero,fontsize=20)

    fig.show()
    fig.tight_layout()

    # Save hard copy of the figure
    fig.savefig('%s/classifications_per_subject.png' % plot_dir)

    return None
def find_ir_peak(x, y, srcid, bounds=(1., 424., 1., 424.)):
    """Kernel-density estimate of IR click positions and count of its peaks.

    Parameters
    ----------
    x, y : array-like
        Pixel coordinates of the individual IR clicks.
    srcid : str
        Source identifier (unused here; kept for interface compatibility).
    bounds : tuple of four floats, optional
        (xmin, xmax, ymin, ymax) pixel limits of the evaluation grid.
        Defaults match the 424x424 RGZ JPG images, so existing three-argument
        callers are unaffected.

    Returns
    -------
    X, Y : ndarray
        Grid coordinate arrays.
    Z : ndarray
        Gaussian KDE evaluated on the grid.
    npeaks : int
        Number of detected local maxima in Z.
    """
    x_lo, x_hi, y_lo, y_hi = bounds

    # Perform a kernel density estimate on the click positions
    X, Y = np.mgrid[x_lo:x_hi, y_lo:y_hi]
    positions = np.vstack([X.ravel(), Y.ravel()])
    values = np.vstack([x, y])
    kernel = stats.gaussian_kde(values)
    Z = np.reshape(kernel(positions).T, X.shape)

    # Count the peaks: a pixel is a peak if it equals the maximum of its
    # 10x10 neighborhood and is not part of the flat zero background.
    # http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array
    neighborhood = np.ones((10, 10))
    local_max = maximum_filter(Z, footprint=neighborhood) == Z
    background = (Z == 0)
    eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
    # Subtracting boolean arrays raises TypeError on numpy >= 1.13; the
    # logical equivalent of the original intent is "& ~".
    detected_peaks = local_max & ~eroded_background
    npeaks = detected_peaks.sum()

    return X, Y, Z, npeaks
def plot_image(x,y,srcid,zid,X,Y,Z,npeaks,all_radio,radio_unique):
    """Plot the IR click KDE, vote tallies and radio boxes for one subject.

    x, y : IR click pixel coordinates.
    srcid, zid : source ID and Zooniverse ID (used in title/filename).
    X, Y, Z : KDE grids/density from find_ir_peak.
    npeaks : number of detected IR peaks.
    all_radio : list of tuples of '%.6f'-formatted xmax strings, one tuple
        per user classification.
    radio_unique : unique radio boxes as (xmin, xmax, ymin, ymax) tuples.

    Saves <plot_dir>/ir_peaks/<srcid>_ir_peak.png; returns None.
    """
    # Find the peak (first grid cell where the KDE is maximal)
    xpeak = X[Z==Z.max()][0]
    ypeak = Y[Z==Z.max()][0]
    # Plot the infrared results
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Plot the KDE map
    ax.imshow(np.rot90(Z), cmap=plt.cm.hot_r,extent=[xmin, xmax, ymin, ymax])
    # Plot the individual sources
    ax.plot(x, y, 'go', markersize=4)
    ax.text(270,40,r'IR peak: $(%i,%i)$'%(xpeak,ypeak),color='k',fontsize=14)
    ax.text(270,70,r'$N_{peaks}$ = %i' % npeaks,color='k',fontsize=14)
    ax.text(270,100,r'$N_{IR}$ = %i' % len(x),color='k',fontsize=14)
    ax.plot([xpeak],[ypeak],'c*',markersize=12)
    # Plot the radio counts: map each unique xmax string to a component
    # number and tally the votes for each combination of components.
    radio_flattened = [item for sublist in all_radio for item in sublist]
    uniques = set(radio_flattened)
    d = dict(zip(uniques,np.arange(len(uniques))))
    c = Counter(all_radio)
    for idx,ckeys in enumerate(c.keys()):
        if len(ckeys) > 1:
            t = ' and R'.join([str(d[x]) for x in ckeys])
        else:
            t = d[ckeys[0]]
        singular = 's' if c[ckeys] != 1 else ''
        ax.text(150,400-idx*20,'%3i vote%s: R%s' % (c[ckeys],singular,t))
    # Rectangle showing the radio box size; line width scales with votes.
    # NOTE(review): 435./132 appears to convert radio-panel units to IR
    # pixels -- confirm against the web interface geometry.
    radio_ir_scaling_factor = 435./132
    box_counts = Counter(radio_flattened)
    for ru in radio_unique:
        x0,x1,y0,y1 = [float(ru_) * radio_ir_scaling_factor for ru_ in ru]
        # Assume xmax matching is still good
        xmax_index = '%.6f' % float(ru[1])
        component_number = d[xmax_index]
        number_votes = box_counts[xmax_index]
        rectangle = plt.Rectangle((x0,y0), x1-x0, y1-y0, fill=False, linewidth=number_votes/5., edgecolor = 'c')
        ax.add_patch(rectangle)
        ax.text(x0-15,y0-15,'R%s' % component_number)
    ax.set_xlim([xmin, xmax])
    # y axis is inverted to match image orientation
    ax.set_ylim([ymax, ymin])
    ax.set_title('%s\n%s' % (zid,srcid))
    #fig.show()
    # Save hard copy of the figure
    fig.savefig('%s/ir_peaks/%s_ir_peak.png' % (plot_dir,srcid))
    # Close figure after it's done; otherwise mpl complains about having thousands of stuff open
    plt.close()
    return None
def find_consensus(sub,classifications,verbose=False,completed_only=False):
    """Aggregate all volunteer classifications for one RGZ subject.

    Writes a per-subject annotation file listing each user's radio-box and
    IR-click markings, estimates the number of distinct IR peaks via a 2-D
    KDE (find_ir_peak/plot_image), and returns that peak count.

    sub : one subject document from the radio_subjects collection.
    classifications : the radio_classifications Mongo collection.
    verbose : if True, print per-subject summary statistics.
    completed_only : currently unused (the branch that would switch
        dat_dir is commented out below).

    Returns npeaks (int), the estimated number of IR peaks.
    """
    Nclass = sub["classification_count"] # number of classifications made per image
    srcid = sub["metadata"]["source"] # determine the image source id
    zid = sub["zooniverse_id"] # determine the image source id
    '''
    if completed_only:
        dat_dir = '../datfiles/completed_20'
    '''
    classfile2 = open('%s/RGZBETA2-%s-classifications.txt' % (dat_dir,srcid), 'w')
    imgid = sub["_id"] # grab the ObjectId corresponding for this image
    # locate all the classifications of this image by user
    user_classifications = classifications.find({"subject_ids": imgid, "updated_at": {"$gt": main_release_date}})
    # count the number of users who classified this object
    Nusers = classifications.find({"subject_ids": imgid, "updated_at": {"$gt": main_release_date}}).count()
    # loop over the number of classifications
    if Nclass == Nusers: # the number of classifications should equal the number of users who classified
        # initialise coordinate variables
        radio_ra = []
        radio_dec = []
        radio_x = []
        radio_y = []
        radio_w = []
        radio_h = []
        ir_ra = []
        ir_dec = []
        ir_radius = []
        ir_x = []
        ir_y = []
        radio_comp = []
        ir_comp = []
        all_radio = []
        all_radio_markings = []
        Nuser_id = 0 # User id number
        #---------------------------------------------------------------------------------------------------------------------
        #---START: loop through the users who classified the image
        for classification in list(user_classifications):
            compid = 0 # Component id per image
            rclass = classification["annotations"] # For now, analyze only the first set of continuous regions selected.
            # Note that last two fields in annotations are timestamp and user_agent
            Nuser_id += 1 # Increase the number of users who classified by 1.
            #-------------------------------------------------------------------------------------------------------------------
            #---START: loop through the keys in the annotation array, making sure that a classification has been made
            for ann in rclass:
                # Skip bookkeeping entries (timestamps, browser info)
                if ann.has_key('started_at') or ann.has_key('finished_at') or ann.has_key('user_agent') or ann.has_key('lang'):
                    continue
                Nradio = 0 # counter for the number of radio components per classification
                Nir = 0 # counter for the number of IR components per classification
                if (ann.has_key('radio') and ann['radio'] != 'No Contours'): # get the radio annotations
                    radio = ann["radio"]
                    Nradio = len(radio) # count the number of radio components per classification
                    '''
                    print 'RADIO:'
                    print radio
                    '''
                    compid += 1 # we have a radio source - all components will be id with this number
                    list_radio = []
                    #---------------------------------------------------------------------------------------------------------------
                    #---STAR: loop through number of radio components in user classification
                    for rr in radio:
                        radio_marking = radio[rr]
                        # Find the location and size of the radio box in pixels
                        # Component identity is keyed on the xmax value formatted to 6 decimals
                        list_radio.append('%.6f' % float(radio_marking['xmax']))
                        all_radio_markings.append(radio_marking)
                        print >> classfile2, Nuser_id, compid,'RADIO', radio_marking['xmin'], radio_marking['xmax'], radio_marking['ymin'], radio_marking['ymax']
                    all_radio.append(tuple(sorted(list_radio)))
                    #---END: loop through number of radio components in user classification
                    #---------------------------------------------------------------------------------------------------------------
                    # get IR counterpart
                    irkey = ann.has_key('ir')
                    ir_nosources = True if (irkey and ann['ir'] == 'No Sources') else False
                    if (irkey and not ir_nosources): # get the infrared annotation for the radio classification.
                        ir = ann["ir"]
                        Nir = 1 #len(ir) # number of IR counterparts.
                        '''
                        print 'IR:'
                        print ir
                        '''
                        #exit()
                        #jj = 0
                        for ii in ir:
                            ir_marking = ir[ii]
                            # write to annotation file
                            print >> classfile2, Nuser_id, compid, 'IR', float(ir_marking['x']), float(ir_marking['y'])
                            ir_x.append(float(ir_marking['x']))
                            ir_y.append(float(ir_marking['y']))
                    else: # user did not classify an infrared source
                        Nir = 0
                        xir = -99.
                        yir = -99.
                        radiusir = -99.
                        print >> classfile2, Nuser_id, compid, 'IR', xir, yir
                else: # user did not classify a radio source
                    Nradio = 0
                    Nir = 0
                    # there should always be a radio source, bug in program if we reach this part.
                    if not ann.has_key('radio'):
                        print >> classfile2,'%i No radio source - error in processing on image %s' % (Nuser_id, srcid)
                    elif ann['radio'] == 'No Contours':
                        print >> classfile2,'%i No radio source labeled by user for image %s' % (Nuser_id,srcid)
                    else:
                        print >> classfile2,'Unknown error processing radio source'
                radio_comp.append( Nradio ) # add the number of radio components per user source to array.
                ir_comp.append( Nir ) # add the number of IR counterparts per user soruce to array.
        #---END: loop through the users who classified the image
        #---------------------------------------------------------------------------------------------------------------------
    else: # Nclass != Nusers
        # NOTE(review): when this branch is taken, ir_x / radio_comp /
        # all_radio_markings are never initialised, so the code below will
        # raise NameError -- confirm this mismatch cannot occur in practice.
        print 'Number of users who classified subject (%i) does not equal classification count (%i).' % (Nusers,Nclass)
    # Process the radio markings into unique components
    rlist = [(rr['xmin'],rr['xmax'],rr['ymin'],rr['ymax']) for rr in all_radio_markings]
    # NOTE(review): radio_unique is only bound when there are >1 markings;
    # plot_image below would raise NameError for a single marking -- verify.
    if len(all_radio_markings) > 1:
        radio_unique = [rlist[0]]
        for rr in rlist[1:]:
            if rr not in radio_unique:
                radio_unique.append(rr)
    # Use a 2-D Gaussian kernel to find the center of the IR sources and plot the analysis images
    if len(ir_x) > 2:
        try:
            xpeak,ypeak,Z,npeaks = find_ir_peak(ir_x,ir_y,srcid)
            plot_image(ir_x,ir_y,srcid,zid,xpeak,ypeak,Z,npeaks,all_radio,radio_unique)
        except LinAlgError:
            # KDE fails when the clicks are degenerate (singular covariance)
            npeaks = len(ir_x)
            print 'LinAlgError - only %i non-unique IR peaks labeled for %s' % (npeaks,srcid)
    else:
        npeaks = len(ir_x)
        print 'Only %i IR peaks labeled for %s' % (npeaks,srcid)
    # calculate the median number of components for both IR and radio for each object in image.
    radio_med = np.median(radio_comp) # median number of radio components
    Ncomp_radio = np.size(np.where(radio_comp == radio_med)) # number of classifications = median number
    ir_med = np.median(ir_comp) # median number of infrared components
    Ncomp_ir = np.size(np.where(ir_comp == ir_med)) # number of classifications = median number
    if verbose:
        print ' '
        print 'Source.....................................................................................: %s' % srcid
        print 'Number of users who classified the object..................................................: %d' % len(radio_comp)
        print '................'
        print 'Number of users who classified the radio source with the median value of radio components..: %d' % Ncomp_radio
        print 'Median number of radio components per user.................................................: %f' % radio_med
        print 'Number of users who classified the IR source with the median value of IR components........: %d' % Ncomp_ir
        print 'Median number of IR components per user....................................................: %f' % ir_med
        print ' '
    classfile2.close()
    return npeaks
def load_rgz_data():
    """Open the local MongoDB 'radio' database and return the two RGZ
    collections as a (subjects, classifications) tuple.

    Requires a local mongod instance; restore dumps with
    ``mongorestore /path/to/database``.
    """
    client = MongoClient('localhost', 27017)
    db = client['radio']
    # subjects are the images; classifications hold each user's markings
    return db['radio_subjects'], db['radio_classifications']
def load_catalog():
    """Return the 'catalog' collection from the local MongoDB 'radio'
    database.

    Requires a local mongod instance; restore dumps with
    ``mongorestore /path/to/database``.
    """
    client = MongoClient('localhost', 27017)
    return client['radio']['catalog']
def overall_stats(subjects,classifications,verbose=True):
    """Print summary statistics for the RGZ data dump and make the
    per-user and per-subject plots.

    subjects, classifications : the Mongo collections from load_rgz_data.
    verbose : if True, print the counts to stdout.
    """
    # Retrieve RGZ data, convert into data frames
    batch_classifications = classifications.find({"updated_at": {"$gt": main_release_date}})
    batch_subjects = subjects.find()
    dfc = pd.DataFrame( list(batch_classifications) )
    dfs = pd.DataFrame( list(batch_subjects) )
    # Get some quick statistics on the dataset so far
    n_subjects = subjects.count() # determine the number of images in the data set
    n_classifications = classifications.find({"updated_at": {"$gt": main_release_date}}).count() # total number of classifications
    users = classifications.distinct('user_name')
    n_users = len(users)
    # Find the most recent classification in this data dump
    mrc = classifications.find().sort([("updated_at", -1)]).limit(1)
    most_recent_date = [x for x in mrc][0]['updated_at']
    # Find number of anonymous classifications: rows with no user_name
    # (pandas count() skips NaN, so the difference is the anonymous total)
    total_count = dfc._id.count()
    loggedin_count = dfc.user_name.count()
    anonymous_count = total_count - loggedin_count
    anonymous_percent = float(anonymous_count)/total_count * 100
    if verbose:
        print ' '
        print 'RGZ data as of %s' % most_recent_date.strftime("%H:%M:%S%Z %b %d, %Y")
        print '---------------------------------'
        print 'Total classifications : %i' % n_classifications
        print 'Total distinct subjects : %i' % n_subjects
        print 'Total distinct users : %i' % n_users
        print ' '
        print 'Percent of classifications by anonymous users: %.1f (%i,%i)' % (anonymous_percent,anonymous_count,loggedin_count)
        print ' '
    # Make some plots
    plot_user_counts(dfc)
    plot_classification_counts(dfs)
    return None
def run_sample(subjects,classifications,n_subjects=1000,completed=False):
    """Run find_consensus over a sample of subjects and record the number
    of IR peaks per subject to <csv_dir>/npeaks_ir<suffix>.csv.

    subjects, classifications : the Mongo collections from load_rgz_data.
    n_subjects : maximum number of subjects to process.
    completed : if True, restrict to subjects in the 'complete' state and
        use the '_completed' filename suffix.
    """
    N = 0
    if completed:
        suffix = '_completed'
        class_lim = {'state':'complete'}
    else:
        suffix = ''
        class_lim = {'classification_count':{'$gt':0}}
    # Look at just the newly retired ones (single-contour, 5 classifications)
    # suffix = '_radio1'
    # class_lim = {'state':'complete','metadata.contour_count':1,'classification_count':5}
    with open('%s/npeaks_ir%s.csv' % (csv_dir,suffix),'wb') as f:
        for sub in list(subjects.find(class_lim).limit(n_subjects)):
            Nclass = sub["classification_count"] # number of classifications made per image
            if Nclass > 0: # if no classifications move to next image (shouldn't happen)
                npeak = find_consensus(sub,classifications,completed_only=completed)
                print >> f, npeak
                N += 1
                # Check progress by printing to screen every 100 classifications
                if not N % 100:
                    print N, datetime.datetime.now().strftime('%H:%M:%S.%f')
    return None
def onemillion(classifications,users):
    # DEPRECATED
    # Does not work with new sanitized RGZ dumps (starting Feb 2016)
    '''
    Discrepancy between the API count and the number of classifications in MongoDB.
    For example, on 14 Jan 2015, the counts were:
    API = 997,395
    MongoDB = 1,036,501
    Consulting with Ivy and <NAME>., we decided to go with the count on the API. So the correct classification for the
    1 millionth ID for RGZ will be the 100000 + (Mongo - API) = 1,039,106th entry sorted by date in MongoDB.
    First data dump that got to this was 15 Jan 2015, which had 1,040,566 documents in radio_classifications.
    '''
    # Limit the number of records to pull from this data dump.
    ntot = classifications.count()
    onemillionth = 1039106
    diff1M = ntot - onemillionth
    # Return the classifications surrounding 1 million
    classifications_sorted = classifications.find().sort([("updated_at",-1)]).limit(diff1M)
    # Reverse into chronological order so index 0 is the 1-millionth entry
    lc = list(classifications_sorted)
    lc.reverse()
    names = set()
    nu = 0   # number of distinct logged-in users found so far (prize order)
    for idx,c in enumerate(lc):
        idx1M = idx + 1000000
        try:
            username = c['user_name']
            if username not in names:
                names.add(username)
                usr = users.find_one({'name':username})
                email = usr['email']
                # How many classifications have they done? Are these our "power" users?
                nclass = classifications.find({'user_name':username}).count()
                print 'Classification: %7i, Prize order: %2i, Date: %s, N_class = %5i, Username: %20s, Email: %s ' % (idx1M, nu+1, c['updated_at'], nclass, username, email)
                nu += 1
        except KeyError:
            # Classification has no user_name field, i.e. anonymous
            username = "Anonymous"
        # Stop after the first ten distinct logged-in users
        if nu >= 10:
            break
    return None
# If program is called from the command line, process the full dataset:
# connect to the local MongoDB dump, compute per-subject IR peak counts,
# then plot the distribution of peaks.
if __name__ == '__main__':
    subjects,classifications = load_rgz_data()
    run_sample(subjects,classifications)
    plot_npeaks()
# import necessary python packages
import numpy as np
import pandas as pd
import datetime
import os
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
from collections import Counter
from matplotlib import pyplot as plt
from pymongo import MongoClient
from astropy.io import fits
from astropy import wcs
from scipy import stats
from scipy import optimize
from scipy.linalg.basic import LinAlgError
#from astropy import coordinates as coord
#from astropy.io import votable
#------------------------------------------------------------------------------------------------------------
# Setup path locations
# Output directories are created on import if they do not already exist.
plot_dir = '../plots'
if not os.path.isdir(plot_dir):
    os.mkdir(plot_dir)
csv_dir = '../csv'
# NOTE(review): csv_dir is never created here, unlike the other output
# directories -- confirm ../csv exists before run_sample writes to it.
ann_dir = '../annfiles'
if not os.path.isdir(ann_dir):
    os.mkdir(ann_dir)
dat_dir = '../datfiles'
if not os.path.isdir(dat_dir):
    os.mkdir(dat_dir)
# Set constants
beta_release_date = datetime.datetime(2013, 10, 20, 12, 0, 0, 0) # date of beta release (YYY,MM,DD,HH,MM,SS,MS)
main_release_date = datetime.datetime(2013, 12, 17, 0, 0, 0, 0) # date of the main release; earlier classifications are excluded
IMG_HEIGHT = 424.0 # number of pixels in the JPG image along the y axis
IMG_WIDTH = 424.0 # number of pixels in the JPG image along the x axis
FITS_HEIGHT = 301.0 # number of pixels in the FITS image along the y axis
FITS_WIDTH = 301.0 # number of pixels in the FITS image along the x axis
PIXEL_SIZE = 0.00016667#/3600.0 # the number of arcseconds per pixel in the FITS image
# Clickable image bounds in JPG pixel coordinates (used by the KDE grid)
xmin = 1.
xmax = IMG_HEIGHT
ymin = 1.
ymax = IMG_WIDTH
xjpg2fits = float(IMG_WIDTH/FITS_WIDTH) # map the JPG pixels to the FITS pixels in x
yjpg2fits = float(IMG_HEIGHT/FITS_HEIGHT) # map the JPG pixels to the FITS pixels in y
def getWCSObj(subject):
    """Build an astropy WCS object from the FITS image of an RGZ subject.

    The FITS file is looked up as ./IMGS/<source>.fits from the subject's
    metadata.  The file handle is closed once the header has been read
    (the original implementation leaked it).
    """
    src = subject["metadata"]["source"]
    path = "./IMGS/%s.fits" % src
    hdulist = fits.open(path)
    try:
        # Only the primary header is needed to construct the WCS
        w = wcs.WCS(hdulist[0].header)
    finally:
        # BUG FIX: close the FITS file instead of leaking the handle
        hdulist.close()
    return w
def plot_npeaks():
    """Histogram the per-subject IR peak counts read from
    <csv_dir>/npeaks_ir.csv and save the figure as
    ir_peaks_histogram.png."""
    # Read in the peak counts, one integer per line
    with open('%s/npeaks_ir.csv' % csv_dir,'rb') as f:
        npeaks = [int(line.rstrip()) for line in f]
    fig = plt.figure(figsize=(8,7))
    ax1 = fig.add_subplot(111)
    # One bin per integer peak count
    h = plt.hist(npeaks,bins=np.arange(np.max(npeaks)+1),axes=ax1)
    ax1.set_ylabel('Count')
    ax1.set_xlabel('Number of IR peaks per image')
    ax1.set_title('RGZ source distribution')
    fig.show()
    fig.tight_layout()
    # Save hard copy of the figure
    fig.savefig('%s/ir_peaks_histogram.png' % plot_dir)
    return None
def powerlaw_fit(xdata,ydata,epsilon=1e-3,pinit=(3.0,-1.0)):
    """Fit y = amp * (x+1)**index by weighted least squares in log10 space.

    Parameters
    ----------
    xdata, ydata : array-like data to fit; ydata must be positive.
    epsilon : softening added to log10(y) inside the 1/sqrt weighting so
        near-zero log values do not blow up the weights.
    pinit : initial guess for (log10-amplitude, index).  Default changed
        from a list to a tuple to avoid a shared mutable default.

    Returns
    -------
    amp, amperr, index, indexerr : best-fit amplitude and index with
        their propagated 1-sigma errors (0.0 when the covariance matrix
        is unavailable).
    """
    logx = np.log10(xdata+1)
    logy = np.log10(ydata)
    # Approximate per-point weights; assumes logy + epsilon > 0
    logyerr = 1./np.sqrt(logy+epsilon)
    # Straight line in log-log space: log10(y) = p0 + p1*log10(x+1)
    fitfunc = lambda p,x: p[0] + p[1]*x
    errfunc = lambda p,x,y,err: (y - fitfunc(p,x)) / err
    out = optimize.leastsq(errfunc,pinit,args=(logx,logy,logyerr),full_output=1)
    pfinal,covar = out[0],out[1]
    amp,index = 10.0**pfinal[0],pfinal[1]
    if covar is not None:
        # BUG FIX: the diagonal covariance terms were swapped -- parameter 0
        # is the (log10-)amplitude and parameter 1 is the index.  The
        # log10-amplitude error is propagated through amp = 10**p0, which
        # contributes a factor amp*ln(10).
        # NOTE(review): leastsq returns the *fractional* covariance; for
        # absolute errors it should also be scaled by the residual variance.
        amperr = np.sqrt(covar[0][0])*amp*np.log(10.)
        indexerr = np.sqrt(covar[1][1])
    else:
        amperr,indexerr = 0.,0.
    return amp,amperr,index,indexerr
def plot_empirical_distribution_function(dfc):
    """Plot the cumulative share of classifications contributed by the
    top-N volunteers (empirical distribution of work) and save it as
    distribution_of_work.png.

    dfc : pandas.DataFrame of classifications with ``_id`` and
        ``user_name`` columns (user_name is NaN for anonymous users).
    """
    # Plot the empirical distribution function (eg, how many users contribute to the total amount of work)
    # for the RGZ data
    fig = plt.figure(figsize=(8,7))
    ax1 = fig.add_subplot(111)
    volunteers = pd.value_counts(dfc.user_name)
    # Calculate number of anonymous users and include in data
    anonymous_count = dfc._id.count() - dfc.user_name.count()
    # NOTE(review): Series.set_value and in-place Series.sort were removed
    # in modern pandas -- this code targets an old pandas release.
    volunteers = volunteers.set_value("anonymous", anonymous_count)
    volunteers.sort(ascending=False)
    # Normalise and accumulate into a running CDF over ranked volunteers
    vnorm = volunteers/volunteers.sum()
    cdf = []
    running_total = 0.
    for v in vnorm:
        running_total += v
        cdf.append(running_total)
    ax1.plot(np.arange(len(volunteers))+1,cdf)
    #ax1.set_title('Empirical distribution of work in RGZ')
    ax1.set_xlabel('Number of volunteers',fontsize=18)
    ax1.set_ylabel('Percent of total classifications',fontsize=18)
    ax1.set_xscale('log')
    ax1.set_ylim(0,1)
    # Guide lines at the 100th and 1000th ranked volunteers
    varr = (100,1000)
    lsarr = ('--','-.')
    for v,ls in zip(varr,lsarr):
        ax1.plot([1,v],[cdf[v]]*2,'k'+ls)
        ax1.plot([v]*2,[0,cdf[v]],'k'+ls)
    ax1.text(1.3,cdf[0],'Anonymous users',ha='left',fontsize=12)
    #ax1.text(100,cdf[100]*1.1,'Anon. + 100',ha='right',va='baseline',fontsize=8)
    #ax1.text(1000,cdf[1000]*1.1,'Anon. + 1000',ha='right',va='bottom',fontsize=8)
    '''
    ax1.text(0.95,0.30,'Anonymous users have done %2i%% of the total work.' % (cdf[0]*100.),ha='right',fontsize=12,transform=ax1.transAxes)
    ax1.text(0.95,0.25,'The top 100 logged-in users have done %2i%% of the total work.' % ((cdf[100] - cdf[0])*100.),ha='right',fontsize=12,transform=ax1.transAxes)
    ax1.text(0.95,0.20,'The top 1000 logged-in users have done %2i%% of the total work.' % ((cdf[1000] - cdf[0])*100.),ha='right',fontsize=12,transform=ax1.transAxes)
    '''
    print('Anonymous users have done %2i%% of the total work.' % (cdf[0]*100.))
    print('The top 100 logged-in users have done %2i%% of the total work.' % ((cdf[100] - cdf[0])*100.))
    print('The top 1000 logged-in users have done %2i%% of the total work.' % ((cdf[1000] - cdf[0])*100.))
    fig.show()
    fig.set_tight_layout(True)
    # Save hard copy of the figure
    # NOTE(review): the second savefig path is a hard-coded personal
    # Dropbox location and will fail on other machines.
    fig.savefig('%s/distribution_of_work.png' % plot_dir)
    fig.savefig('/Users/willettk/Dropbox/RGZ/fig4.eps')
    return None
def plot_zipf(dfc):
    """Plot the number of volunteers with >= N classifications on log-log
    axes with a broken power-law fit (Zipf/Lotka check) and save it as
    zipf_plot.png.

    dfc : pandas.DataFrame of classifications with a ``user_name`` column.
    """
    # This can (and should) absolutely be re-factored to use the example in zipf.py. Way too slow
    # Plotting user classifications in a more specific way as requested by <NAME>,
    # to see if it corresponds to Zipf's Law or Lotka's Law
    fig = plt.figure(figsize=(8,8))
    ax1 = fig.add_subplot(111)
    # Note: does not include anonymous users
    volunteers = pd.value_counts(dfc.user_name)
    volunteers.sort(ascending=False)
    # For each distinct classification count, how many volunteers reach it
    xpoints = pd.Series(volunteers.values.ravel()).unique()
    ypoints = [(volunteers >= x).sum() for x in xpoints]
    ypoints = np.array(ypoints)
    ax1.loglog(xpoints,ypoints,'ro')
    # Fitting results to broken power law; break at the 50 highest counts
    brk = -50
    xdata1 = xpoints[brk:]
    ydata1 = ypoints[brk:]
    amp1,amperr1,index1,indexerr1 = powerlaw_fit(xdata1,ydata1)
    xdata2 = xpoints[:brk]
    ydata2 = ypoints[:brk]
    amp2,amperr2,index2,indexerr2 = powerlaw_fit(xdata2,ydata2)
    print 'Fit 1: index = %5.2f, amp = %5.2f' % (index1,amp1)
    print 'Fit 2: index = %5.2f, amp = %5.2f' % (index2,amp2)
    # Overplot the fits
    xplot = np.arange(xpoints.max() - 1)+1
    ax1.plot(xplot,amp1 * (xplot**index1),'k--')
    ax1.plot(xplot,amp2 * (xplot**index2),'k--')
    ax1.text(0.98,0.9,r'$\alpha_1 =$ %4.1f $\pm$ %3.1f' % (index1,indexerr1),ha='right',fontsize=12,transform=ax1.transAxes)
    ax1.text(0.98,0.8,r'$\alpha_2 =$ %4.1f $\pm$ %3.1f' % (index2,indexerr2),ha='right',fontsize=12,transform=ax1.transAxes)
    ax1.set_title("Zipf's Law in Radio Galaxy Zoo?")
    ax1.set_xlabel('Number of classifications')
    ax1.set_ylabel('Number of volunteers with '+r'$\geq N$'+' classifications')
    fig.show()
    fig.set_tight_layout(True)
    # Save hard copy of the figure
    fig.savefig('%s/zipf_plot.png' % plot_dir)
    return None
def plot_user_counts(dfc):
    """Plot classifications per volunteer: a ranked curve with broken
    power-law fits (top panel) and a histogram (bottom panel); saves
    classifications_per_user.png.

    dfc : pandas.DataFrame of classifications with ``_id`` and
        ``user_name`` columns (user_name is NaN for anonymous users).
    """
    # Plot the total number of classifications per volunteer in the data
    fig = plt.figure(figsize=(8,8))
    ax1 = fig.add_subplot(211)
    volunteers = pd.value_counts(dfc.user_name)
    # Calculate number of anonymous users and include in data
    anonymous_count = dfc._id.count() - dfc.user_name.count()
    # NOTE(review): Series.set_value and in-place Series.sort were removed
    # in modern pandas -- this code targets an old pandas release.
    volunteers = volunteers.set_value("anonymous", anonymous_count)
    volunteers.sort(ascending=False)
    vcplot = volunteers.plot(ax=ax1,use_index=True,marker='.',color='red')
    # Fitting results to broken power law, split at rank 1000
    brk = 1000
    xdata1 = np.arange(brk)
    ydata1 = volunteers[:brk]
    amp1,amperr1,index1,indexerr1 = powerlaw_fit(xdata1,ydata1)
    xdata2 = np.arange(len(volunteers)-brk) + brk
    ydata2 = volunteers[brk:]
    amp2,amperr2,index2,indexerr2 = powerlaw_fit(xdata2,ydata2)
    # Overplot the fits
    xplot = np.arange(len(volunteers))
    ax1.plot(xplot,amp1 * (xplot**index1),'k--')
    ax1.plot(xplot,amp2 * (xplot**index2),'k--')
    ax1.text(0.98,0.9,r'$\alpha_1 =$ %4.1f $\pm$ %3.1f' % (index1,indexerr1),ha='right',fontsize=12,transform=ax1.transAxes)
    ax1.text(0.98,0.8,r'$\alpha_2 =$ %4.1f $\pm$ %3.1f' % (index2,indexerr2),ha='right',fontsize=12,transform=ax1.transAxes)
    vcplot.set_title('RGZ volunteer distribution')
    vcplot.set_xlabel('Volunteer')
    vcplot.set_ylabel('Number of classifications')
    vcplot.set_ylim((1,1e5))
    vcplot.set_xscale('log')
    vcplot.set_yscale('log')
    ax2 = fig.add_subplot(212)
    # Histogram excludes the first entry (assumed anonymous after the
    # descending sort -- confirm anonymous is always the largest count)
    vchist = volunteers[1:].hist(ax=ax2,bins=50,bottom=0.1)
    vchist.set_ylabel('Classifications per volunteer')
    vchist.set_xlabel('Number of classifications')
    vchist.set_yscale('log')
    ax2.text(0.95,0.9,'Also %i anonymous classifications' % volunteers[0],ha='right',fontsize=12,transform=ax2.transAxes)
    fig.show()
    fig.set_tight_layout(True)
    # Save hard copy of the figure
    fig.savefig('%s/classifications_per_user.png' % plot_dir)
    return None
def plot_classification_counts(dfs):
    """Histogram the number of classifications per RGZ subject.

    dfs : pandas.DataFrame of subjects with a ``classification_count``
        column.
    Saves classifications_per_subject.png in the module-level plot_dir.
    """
    fig = plt.figure(figsize=(8,6))
    ax1 = fig.add_subplot(111)
    # Drop unclassified subjects and the tutorial image (N >= 50)
    counts = dfs.classification_count
    trimmed = dfs[(counts < 50) & (counts > 0)]
    h = trimmed.classification_count.hist(ax=ax1,bins=50,grid=False)
    h.set_xlabel('Classifications per subject')
    h.set_ylabel('Number of classifications')
    # Annotate with the total number of subjects that have any classifications
    n_nonzero = (counts > 0).sum()
    xlim = h.get_xlim()
    ylim = h.get_ylim()
    h.text(0.7*xlim[1],0.9*ylim[1],r'$N_{non-zero} = %i$' % n_nonzero,fontsize=20)
    fig.show()
    fig.tight_layout()
    fig.savefig('%s/classifications_per_subject.png' % plot_dir)
    return None
def find_ir_peak(x,y,srcid):
    """Locate IR click peaks via a 2-D Gaussian kernel density estimate.

    x, y : pixel coordinates of the IR clicks for one subject.
    srcid : source ID (unused here; kept for interface compatibility).

    Returns the KDE evaluation grids X, Y, the density Z, and npeaks,
    the number of local maxima detected in Z.
    """
    # Perform a kernel density estimate on the data over the full image
    # grid (module-level xmin/xmax/ymin/ymax bounds):
    X, Y = np.mgrid[xmin:xmax, ymin:ymax]
    positions = np.vstack([X.ravel(), Y.ravel()])
    values = np.vstack([x, y])
    kernel = stats.gaussian_kde(values)
    Z = np.reshape(kernel(positions).T, X.shape)
    # Find the number of peaks
    # http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array
    #neighborhood = generate_binary_structure(2,2)
    neighborhood = np.ones((10,10))
    local_max = maximum_filter(Z, footprint=neighborhood)==Z
    background = (Z==0)
    eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
    # BUG FIX: the original used boolean subtraction (local_max -
    # eroded_background), which raises TypeError on modern NumPy and
    # relied on int coercion before; mask the background out explicitly.
    detected_peaks = local_max & ~eroded_background
    npeaks = detected_peaks.sum()
    return X,Y,Z,npeaks
def plot_image(x,y,srcid,zid,X,Y,Z,npeaks,all_radio,radio_unique):
    """Plot the IR click KDE, vote tallies and radio boxes for one subject.

    x, y : IR click pixel coordinates.
    srcid, zid : source ID and Zooniverse ID (used in title/filename).
    X, Y, Z : KDE grids/density from find_ir_peak.
    npeaks : number of detected IR peaks.
    all_radio : list of tuples of '%.6f'-formatted xmax strings, one tuple
        per user classification.
    radio_unique : unique radio boxes as (xmin, xmax, ymin, ymax) tuples.

    Saves <plot_dir>/ir_peaks/<srcid>_ir_peak.png; returns None.
    """
    # Find the peak (first grid cell where the KDE is maximal)
    xpeak = X[Z==Z.max()][0]
    ypeak = Y[Z==Z.max()][0]
    # Plot the infrared results
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Plot the KDE map
    ax.imshow(np.rot90(Z), cmap=plt.cm.hot_r,extent=[xmin, xmax, ymin, ymax])
    # Plot the individual sources
    ax.plot(x, y, 'go', markersize=4)
    ax.text(270,40,r'IR peak: $(%i,%i)$'%(xpeak,ypeak),color='k',fontsize=14)
    ax.text(270,70,r'$N_{peaks}$ = %i' % npeaks,color='k',fontsize=14)
    ax.text(270,100,r'$N_{IR}$ = %i' % len(x),color='k',fontsize=14)
    ax.plot([xpeak],[ypeak],'c*',markersize=12)
    # Plot the radio counts: map each unique xmax string to a component
    # number and tally the votes for each combination of components.
    radio_flattened = [item for sublist in all_radio for item in sublist]
    uniques = set(radio_flattened)
    d = dict(zip(uniques,np.arange(len(uniques))))
    c = Counter(all_radio)
    for idx,ckeys in enumerate(c.keys()):
        if len(ckeys) > 1:
            t = ' and R'.join([str(d[x]) for x in ckeys])
        else:
            t = d[ckeys[0]]
        singular = 's' if c[ckeys] != 1 else ''
        ax.text(150,400-idx*20,'%3i vote%s: R%s' % (c[ckeys],singular,t))
    # Rectangle showing the radio box size; line width scales with votes.
    # NOTE(review): 435./132 appears to convert radio-panel units to IR
    # pixels -- confirm against the web interface geometry.
    radio_ir_scaling_factor = 435./132
    box_counts = Counter(radio_flattened)
    for ru in radio_unique:
        x0,x1,y0,y1 = [float(ru_) * radio_ir_scaling_factor for ru_ in ru]
        # Assume xmax matching is still good
        xmax_index = '%.6f' % float(ru[1])
        component_number = d[xmax_index]
        number_votes = box_counts[xmax_index]
        rectangle = plt.Rectangle((x0,y0), x1-x0, y1-y0, fill=False, linewidth=number_votes/5., edgecolor = 'c')
        ax.add_patch(rectangle)
        ax.text(x0-15,y0-15,'R%s' % component_number)
    ax.set_xlim([xmin, xmax])
    # y axis is inverted to match image orientation
    ax.set_ylim([ymax, ymin])
    ax.set_title('%s\n%s' % (zid,srcid))
    #fig.show()
    # Save hard copy of the figure
    fig.savefig('%s/ir_peaks/%s_ir_peak.png' % (plot_dir,srcid))
    # Close figure after it's done; otherwise mpl complains about having thousands of stuff open
    plt.close()
    return None
def find_consensus(sub,classifications,verbose=False,completed_only=False):
    """Aggregate all volunteer classifications for one RGZ subject.

    Writes a per-subject annotation file listing each user's radio-box and
    IR-click markings, estimates the number of distinct IR peaks via a 2-D
    KDE (find_ir_peak/plot_image), and returns that peak count.

    sub : one subject document from the radio_subjects collection.
    classifications : the radio_classifications Mongo collection.
    verbose : if True, print per-subject summary statistics.
    completed_only : currently unused (the branch that would switch
        dat_dir is commented out below).

    Returns npeaks (int), the estimated number of IR peaks.
    """
    Nclass = sub["classification_count"] # number of classifications made per image
    srcid = sub["metadata"]["source"] # determine the image source id
    zid = sub["zooniverse_id"] # determine the image source id
    '''
    if completed_only:
        dat_dir = '../datfiles/completed_20'
    '''
    classfile2 = open('%s/RGZBETA2-%s-classifications.txt' % (dat_dir,srcid), 'w')
    imgid = sub["_id"] # grab the ObjectId corresponding for this image
    # locate all the classifications of this image by user
    user_classifications = classifications.find({"subject_ids": imgid, "updated_at": {"$gt": main_release_date}})
    # count the number of users who classified this object
    Nusers = classifications.find({"subject_ids": imgid, "updated_at": {"$gt": main_release_date}}).count()
    # loop over the number of classifications
    if Nclass == Nusers: # the number of classifications should equal the number of users who classified
        # initialise coordinate variables
        radio_ra = []
        radio_dec = []
        radio_x = []
        radio_y = []
        radio_w = []
        radio_h = []
        ir_ra = []
        ir_dec = []
        ir_radius = []
        ir_x = []
        ir_y = []
        radio_comp = []
        ir_comp = []
        all_radio = []
        all_radio_markings = []
        Nuser_id = 0 # User id number
        #---------------------------------------------------------------------------------------------------------------------
        #---START: loop through the users who classified the image
        for classification in list(user_classifications):
            compid = 0 # Component id per image
            rclass = classification["annotations"] # For now, analyze only the first set of continuous regions selected.
            # Note that last two fields in annotations are timestamp and user_agent
            Nuser_id += 1 # Increase the number of users who classified by 1.
            #-------------------------------------------------------------------------------------------------------------------
            #---START: loop through the keys in the annotation array, making sure that a classification has been made
            for ann in rclass:
                # Skip bookkeeping entries (timestamps, browser info)
                if ann.has_key('started_at') or ann.has_key('finished_at') or ann.has_key('user_agent') or ann.has_key('lang'):
                    continue
                Nradio = 0 # counter for the number of radio components per classification
                Nir = 0 # counter for the number of IR components per classification
                if (ann.has_key('radio') and ann['radio'] != 'No Contours'): # get the radio annotations
                    radio = ann["radio"]
                    Nradio = len(radio) # count the number of radio components per classification
                    '''
                    print 'RADIO:'
                    print radio
                    '''
                    compid += 1 # we have a radio source - all components will be id with this number
                    list_radio = []
                    #---------------------------------------------------------------------------------------------------------------
                    #---STAR: loop through number of radio components in user classification
                    for rr in radio:
                        radio_marking = radio[rr]
                        # Find the location and size of the radio box in pixels
                        # Component identity is keyed on the xmax value formatted to 6 decimals
                        list_radio.append('%.6f' % float(radio_marking['xmax']))
                        all_radio_markings.append(radio_marking)
                        print >> classfile2, Nuser_id, compid,'RADIO', radio_marking['xmin'], radio_marking['xmax'], radio_marking['ymin'], radio_marking['ymax']
                    all_radio.append(tuple(sorted(list_radio)))
                    #---END: loop through number of radio components in user classification
                    #---------------------------------------------------------------------------------------------------------------
                    # get IR counterpart
                    irkey = ann.has_key('ir')
                    ir_nosources = True if (irkey and ann['ir'] == 'No Sources') else False
                    if (irkey and not ir_nosources): # get the infrared annotation for the radio classification.
                        ir = ann["ir"]
                        Nir = 1 #len(ir) # number of IR counterparts.
                        '''
                        print 'IR:'
                        print ir
                        '''
                        #exit()
                        #jj = 0
                        for ii in ir:
                            ir_marking = ir[ii]
                            # write to annotation file
                            print >> classfile2, Nuser_id, compid, 'IR', float(ir_marking['x']), float(ir_marking['y'])
                            ir_x.append(float(ir_marking['x']))
                            ir_y.append(float(ir_marking['y']))
                    else: # user did not classify an infrared source
                        Nir = 0
                        xir = -99.
                        yir = -99.
                        radiusir = -99.
                        print >> classfile2, Nuser_id, compid, 'IR', xir, yir
                else: # user did not classify a radio source
                    Nradio = 0
                    Nir = 0
                    # there should always be a radio source, bug in program if we reach this part.
                    if not ann.has_key('radio'):
                        print >> classfile2,'%i No radio source - error in processing on image %s' % (Nuser_id, srcid)
                    elif ann['radio'] == 'No Contours':
                        print >> classfile2,'%i No radio source labeled by user for image %s' % (Nuser_id,srcid)
                    else:
                        print >> classfile2,'Unknown error processing radio source'
                radio_comp.append( Nradio ) # add the number of radio components per user source to array.
                ir_comp.append( Nir ) # add the number of IR counterparts per user soruce to array.
        #---END: loop through the users who classified the image
        #---------------------------------------------------------------------------------------------------------------------
    else: # Nclass != Nusers
        # NOTE(review): when this branch is taken, ir_x / radio_comp /
        # all_radio_markings are never initialised, so the code below will
        # raise NameError -- confirm this mismatch cannot occur in practice.
        print 'Number of users who classified subject (%i) does not equal classification count (%i).' % (Nusers,Nclass)
    # Process the radio markings into unique components
    rlist = [(rr['xmin'],rr['xmax'],rr['ymin'],rr['ymax']) for rr in all_radio_markings]
    # NOTE(review): radio_unique is only bound when there are >1 markings;
    # plot_image below would raise NameError for a single marking -- verify.
    if len(all_radio_markings) > 1:
        radio_unique = [rlist[0]]
        for rr in rlist[1:]:
            if rr not in radio_unique:
                radio_unique.append(rr)
    # Use a 2-D Gaussian kernel to find the center of the IR sources and plot the analysis images
    if len(ir_x) > 2:
        try:
            xpeak,ypeak,Z,npeaks = find_ir_peak(ir_x,ir_y,srcid)
            plot_image(ir_x,ir_y,srcid,zid,xpeak,ypeak,Z,npeaks,all_radio,radio_unique)
        except LinAlgError:
            # KDE fails when the clicks are degenerate (singular covariance)
            npeaks = len(ir_x)
            print 'LinAlgError - only %i non-unique IR peaks labeled for %s' % (npeaks,srcid)
    else:
        npeaks = len(ir_x)
        print 'Only %i IR peaks labeled for %s' % (npeaks,srcid)
    # calculate the median number of components for both IR and radio for each object in image.
    radio_med = np.median(radio_comp) # median number of radio components
    Ncomp_radio = np.size(np.where(radio_comp == radio_med)) # number of classifications = median number
    ir_med = np.median(ir_comp) # median number of infrared components
    Ncomp_ir = np.size(np.where(ir_comp == ir_med)) # number of classifications = median number
    if verbose:
        print ' '
        print 'Source.....................................................................................: %s' % srcid
        print 'Number of users who classified the object..................................................: %d' % len(radio_comp)
        print '................'
        print 'Number of users who classified the radio source with the median value of radio components..: %d' % Ncomp_radio
        print 'Median number of radio components per user.................................................: %f' % radio_med
        print 'Number of users who classified the IR source with the median value of IR components........: %d' % Ncomp_ir
        print 'Median number of IR components per user....................................................: %f' % ir_med
        print ' '
    classfile2.close()
    return npeaks
def load_rgz_data():
    """Connect to the local MongoDB server and return the two Radio
    Galaxy Zoo collections.

    Returns a ``(subjects, classifications)`` tuple: ``subjects`` holds
    one document per image, ``classifications`` one document per user
    classification of a subject.

    Run ``mongorestore /path/to/database`` first to load the data dump;
    a ``mongod`` server must be running locally on the default port.
    """
    radio_db = MongoClient('localhost', 27017)['radio']
    return radio_db['radio_subjects'], radio_db['radio_classifications']
def load_catalog():
    """Connect to the local MongoDB server and return the RGZ
    ``catalog`` collection.

    Requires a locally running ``mongod`` with the restored RGZ data
    dump (see ``mongorestore``).
    """
    radio_db = MongoClient('localhost', 27017)['radio']
    return radio_db['catalog']
def overall_stats(subjects,classifications,verbose=True):
    """Print and plot summary statistics for the RGZ data dump.

    `subjects` and `classifications` are the MongoDB collections
    returned by load_rgz_data(); `verbose` toggles the printed report.
    Relies on module-level names defined earlier in this file:
    `main_release_date`, `pd` (pandas), and the plotting helpers
    plot_user_counts / plot_classification_counts -- TODO confirm.
    Always returns None; output is the console report and saved plots.
    """
    # Retrieve RGZ data, convert into data frames
    batch_classifications = classifications.find({"updated_at": {"$gt": main_release_date}})
    batch_subjects = subjects.find()
    dfc = pd.DataFrame( list(batch_classifications) )
    dfs = pd.DataFrame( list(batch_subjects) )
    # Get some quick statistics on the dataset so far
    n_subjects = subjects.count()  # determine the number of images in the data set
    n_classifications = classifications.find({"updated_at": {"$gt": main_release_date}}).count()  # total number of classifications
    users = classifications.distinct('user_name')
    n_users = len(users)
    # Find the most recent classification in this data dump
    mrc = classifications.find().sort([("updated_at", -1)]).limit(1)
    most_recent_date = [x for x in mrc][0]['updated_at']
    # Find number of anonymous classifications: documents with no
    # user_name field are anonymous, so the difference between the
    # total row count and the non-null user_name count gives them.
    total_count = dfc._id.count()
    loggedin_count = dfc.user_name.count()
    anonymous_count = total_count - loggedin_count
    anonymous_percent = float(anonymous_count)/total_count * 100
    if verbose:
        print ' '
        print 'RGZ data as of %s' % most_recent_date.strftime("%H:%M:%S%Z %b %d, %Y")
        print '---------------------------------'
        print 'Total classifications : %i' % n_classifications
        print 'Total distinct subjects : %i' % n_subjects
        print 'Total distinct users : %i' % n_users
        print ' '
        print 'Percent of classifications by anonymous users: %.1f (%i,%i)' % (anonymous_percent,anonymous_count,loggedin_count)
        print ' '
    # Make some plots
    plot_user_counts(dfc)
    plot_classification_counts(dfs)
    return None
def run_sample(subjects,classifications,n_subjects=1000,completed=False):
    """Compute the IR-peak consensus for up to `n_subjects` subjects
    and write one peak count per line to ``npeaks_ir<suffix>.csv``.

    When `completed` is True, only subjects whose classification state
    is 'complete' are processed and the output file gets a
    '_completed' suffix.  Relies on module-level `csv_dir` and the
    find_consensus() helper defined earlier in this file -- TODO
    confirm.  Returns None.
    """
    N = 0
    if completed:
        suffix = '_completed'
        class_lim = {'state':'complete'}
    else:
        suffix = ''
        class_lim = {'classification_count':{'$gt':0}}
    # Look at just the newly retired ones (single-contour, 5 classifications)
    # suffix = '_radio1'
    # class_lim = {'state':'complete','metadata.contour_count':1,'classification_count':5}
    with open('%s/npeaks_ir%s.csv' % (csv_dir,suffix),'wb') as f:
        for sub in list(subjects.find(class_lim).limit(n_subjects)):
            Nclass = sub["classification_count"] # number of classifications made per image
            if Nclass > 0: # if no classifications move to next image (shouldn't happen)
                npeak = find_consensus(sub,classifications,completed_only=completed)
                print >> f, npeak
                N += 1
                # Check progress by printing to screen every 100 classifications
                if not N % 100:
                    print N, datetime.datetime.now().strftime('%H:%M:%S.%f')
    return None
def onemillion(classifications,users):
    # DEPRECATED
    # Does not work with new sanitized RGZ dumps (starting Feb 2016)
    '''
    Print the ten logged-in users surrounding the one-millionth RGZ
    classification (for prize purposes).

    Discrepancy between the API count and the number of classifications in MongoDB.
    For example, on 14 Jan 2015, the counts were:
    API = 997,395
    MongoDB = 1,036,501
    Consulting with Ivy and <NAME>., we decided to go with the count on the API. So the correct classification for the
    1 millionth ID for RGZ will be the 100000 + (Mongo - API) = 1,039,106th entry sorted by date in MongoDB.
    First data dump that got to this was 15 Jan 2015, which had 1,040,566 documents in radio_classifications.
    '''
    # Limit the number of records to pull from this data dump.
    ntot = classifications.count()
    onemillionth = 1039106
    diff1M = ntot - onemillionth
    # Return the classifications surrounding 1 million
    classifications_sorted = classifications.find().sort([("updated_at",-1)]).limit(diff1M)
    lc = list(classifications_sorted)
    # Sorted newest-first above; reverse to walk forward in time from
    # the one-millionth classification.
    lc.reverse()
    names = set()
    nu = 0
    for idx,c in enumerate(lc):
        idx1M = idx + 1000000
        try:
            username = c['user_name']
            # Only count each distinct logged-in user once.
            if username not in names:
                names.add(username)
                usr = users.find_one({'name':username})
                email = usr['email']
                # How many classifications have they done? Are these our "power" users?
                nclass = classifications.find({'user_name':username}).count()
                print 'Classification: %7i, Prize order: %2i, Date: %s, N_class = %5i, Username: %20s, Email: %s ' % (idx1M, nu+1, c['updated_at'], nclass, username, email)
                nu += 1
        except KeyError:
            # Document has no user_name field: anonymous classification.
            username = "Anonymous"
        # Stop after the first ten distinct logged-in users.
        if nu >= 10:
            break
    return None
# If the program is called from the command line: load the MongoDB
# collections, compute the consensus for a sample of subjects, and
# plot the resulting distribution of IR peak counts.
if __name__ == '__main__':
    subjects,classifications = load_rgz_data()
    run_sample(subjects,classifications)
    plot_npeaks()
Way too slow # Plotting user classifications in a more specific way as requested by <NAME>, # to see if it corresponds to Zipf's Law or Lotka's Law # Note: does not include anonymous users # Fitting results to broken power law # Overplot the fits # Save hard copy of the figure # Plot the total number of classifications per volunteer in the data # Calculate number of anonymous users and include in data # Fitting results to broken power law # Overplot the fits # Save hard copy of the figure # Plot the total number of classifications per subject in the data # Eliminate N=0 counts and tutorial image # Save hard copy of the figure # Perform a kernel density estimate on the data: # Find the number of peaks # http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array #neighborhood = generate_binary_structure(2,2) # Find the peak # Plot the infrared results # Plot the KDE map # Plot the individual sources # Plot the radio counts # Rectangle showing the radio box size # Assume xmax matching is still good #fig.show() # Save hard copy of the figure # Close figure after it's done; otherwise mpl complains about having thousands of stuff open # number of classifications made per image # determine the image source id # determine the image source id if completed_only: dat_dir = '../datfiles/completed_20' # grab the ObjectId corresponding for this image # locate all the classifications of this image by user # count the number of users who classified this object # loop over the number of classifications # the number of classifications should equal the number of users who classified # initialise coordinate variables # User id number #--------------------------------------------------------------------------------------------------------------------- #---START: loop through the users who classified the image # Component id per image # For now, analyze only the first set of continuous regions selected. 
# Note that last two fields in annotations are timestamp and user_agent # Increase the number of users who classified by 1. #------------------------------------------------------------------------------------------------------------------- #---START: loop through the keys in the annotation array, making sure that a classification has been made # counter for the number of radio components per classification # counter for the number of IR components per classification # get the radio annotations # count the number of radio components per classification print 'RADIO:' print radio # we have a radio source - all components will be id with this number #--------------------------------------------------------------------------------------------------------------- #---STAR: loop through number of radio components in user classification # Find the location and size of the radio box in pixels #---END: loop through number of radio components in user classification #--------------------------------------------------------------------------------------------------------------- # get IR counterpart # get the infrared annotation for the radio classification. #len(ir) # number of IR counterparts. print 'IR:' print ir #exit() #jj = 0 # write to annotation file # user did not classify an infrared source # user did not classify a radio source # there should always be a radio source, bug in program if we reach this part. # add the number of radio components per user source to array. # add the number of IR counterparts per user soruce to array. #---END: loop through the users who classified the image #--------------------------------------------------------------------------------------------------------------------- # Nclass != Nusers # Process the radio markings into unique components # Use a 2-D Gaussian kernel to find the center of the IR sources and plot the analysis images # calculate the median number of components for both IR and radio for each object in image. 
# median number of radio components # number of classifications = median number # median number of infrared components # number of classifications = median number # Connect to Mongo database # Make sure to run mongorestore /path/to/database to restore the updated files # mongod client must be running locally # subjects = images # classifications = classifications of each subject per user # Connect to Mongo database # Make sure to run mongorestore /path/to/database to restore the updated files # mongod client must be running locally # Retrieve RGZ data, convert into data frames # Get some quick statistics on the dataset so far # determine the number of images in the data set # total number of classifications # Find the most recent classification in this data dump # Find number of anonymous classifications # Make some plots # Look at just the newly retired ones (single-contour, 5 classifications) # suffix = '_radio1' # class_lim = {'state':'complete','metadata.contour_count':1,'classification_count':5} # number of classifications made per image # if no classifications move to next image (shouldn't happen) # Check progress by printing to screen every 100 classifications # DEPRECATED # Does not work with new sanitized RGZ dumps (starting Feb 2016) Discrepancy between the API count and the number of classifications in MongoDB. For example, on 14 Jan 2015, the counts were: API = 997,395 MongoDB = 1,036,501 Consulting with Ivy and <NAME>., we decided to go with the count on the API. So the correct classification for the 1 millionth ID for RGZ will be the 100000 + (Mongo - API) = 1,039,106th entry sorted by date in MongoDB. First data dump that got to this was 15 Jan 2015, which had 1,040,566 documents in radio_classifications. # Limit the number of records to pull from this data dump. # Return the classifications surrounding 1 million # How many classifications have they done? Are these our "power" users? 
# If program is called from the command line, process the full dataset | 2.144907 | 2 |
libs/mediafile.py | magne4000/festival | 14 | 6625221 | # This file is part of beets.
# Copyright 2015, <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Handles low-level interfacing for files' tags. Wraps Mutagen to
automatically detect file types and provide a unified interface for a
useful subset of music files' tags.
Usage:
>>> f = MediaFile('Lucy.mp3')
>>> f.title
'Lucy in the Sky with Diamonds'
>>> f.artist = 'The Beatles'
>>> f.save()
A field will always return a reasonable value of the correct type, even
if no tag is present. If no value is available, the value will be false
(e.g., zero or the empty string).
Internally ``MediaFile`` uses ``MediaField`` descriptors to access the
data from the tags. In turn ``MediaField`` uses a number of
``StorageStyle`` strategies to handle format specific logic.
"""
import mutagen
import datetime
import re
import base64
import math
import struct
import imghdr
import os
import traceback
import enum
import logging
__all__ = ['UnreadableFileError', 'FileTypeError', 'MediaFile']
log = logging.getLogger('beets')
# Mapping from MediaFile's internal format keys to human-readable
# format names suitable for display to users.
TYPES = {
    'mp3':  'MP3',
    'aac':  'AAC',
    'alac':  'ALAC',
    'ogg':  'OGG',
    'opus': 'Opus',
    'flac': 'FLAC',
    'ape':  'APE',
    'wv':   'WavPack',
    'mpc':  'Musepack',
    'asf':  'Windows Media',
    'aiff': 'AIFF',
}
# Exceptions.
class UnreadableFileError(Exception):
    """Mutagen is not able to extract information from the file.

    Base class for the errors raised when a media file cannot be read;
    the exception argument is the path of the offending file.
    """
    def __init__(self, path):
        Exception.__init__(self, path)
class FileTypeError(UnreadableFileError):
    """Reading this type of file is not supported.

    If passed the `mutagen_type` argument this indicates that the
    mutagen type is not supported by `Mediafile`.
    """
    def __init__(self, path, mutagen_type=None):
        # With no mutagen type, the message is just the path; otherwise
        # the unsupported mutagen class name is appended.
        if mutagen_type is None:
            msg = path
        else:
            msg = '{0}: of mutagen type {1}'.format(path, mutagen_type)
        Exception.__init__(self, msg)
class MutagenError(UnreadableFileError):
    """Raised when Mutagen fails unexpectedly---probably due to a bug.

    Combines the file path and the original mutagen exception into a
    single message.
    """
    def __init__(self, path, mutagen_exc):
        msg = '{0}: {1}'.format(path, mutagen_exc)
        Exception.__init__(self, msg)
# Utility.
def _safe_cast(out_type, val):
"""Try to covert val to out_type but never raise an exception. If
the value can't be converted, then a sensible default value is
returned. out_type should be bool, int, or str; otherwise, the
value is just passed through.
"""
if val is None:
return None
if out_type == int:
if isinstance(val, int) or isinstance(val, float):
# Just a number.
return int(val)
else:
# Process any other type as a string.
if not isinstance(val, str):
val = str(val)
# Get a number from the front of the string.
val = re.match(r'[0-9]*', val.strip()).group(0)
if not val:
return 0
else:
return int(val)
elif out_type == bool:
try:
# Should work for strings, bools, ints:
return bool(int(val))
except ValueError:
return False
elif out_type == str:
if isinstance(val, bytes):
return val.decode('utf8', 'ignore')
elif isinstance(val, str):
return val
else:
return str(val)
elif out_type == float:
if isinstance(val, int) or isinstance(val, float):
return float(val)
else:
if not isinstance(val, str):
val = str(val)
match = re.match(r'[\+-]?[0-9\.]+', val.strip())
if match:
val = match.group(0)
if val:
return float(val)
return 0.0
else:
return val
# Image coding for ASF/WMA.
def _unpack_asf_image(data):
"""Unpack image data from a WM/Picture tag. Return a tuple
containing the MIME type, the raw image data, a type indicator, and
the image's description.
This function is treated as "untrusted" and could throw all manner
of exceptions (out-of-bounds, etc.). We should clean this up
sometime so that the failure modes are well-defined.
"""
type, size = struct.unpack_from(b'<bi', data)
pos = 5
mime = ""
while data[pos:pos + 2] != b'\x00\x00':
mime += data[pos:pos + 2]
pos += 2
pos += 2
description = ""
while data[pos:pos + 2] != b'\x00\x00':
description += data[pos:pos + 2]
pos += 2
pos += 2
image_data = data[pos:pos + size]
return (mime.decode("utf-16-le"), image_data, type,
description.decode("utf-16-le"))
def _pack_asf_image(mime, data, type=3, description=""):
"""Pack image data for a WM/Picture tag.
"""
tag_data = struct.pack('<bi', type, len(data))
tag_data += mime.encode("utf-16-le") + b'\x00\x00'
tag_data += description.encode("utf-16-le") + b'\x00\x00'
tag_data += data
return tag_data
# iTunes Sound Check encoding.
def _sc_decode(soundcheck):
"""Convert a Sound Check string value to a (gain, peak) tuple as
used by ReplayGain.
"""
# SoundCheck tags consist of 10 numbers, each represented by 8
# characters of ASCII hex preceded by a space.
try:
soundcheck = soundcheck.replace(' ', '').decode('hex')
soundcheck = struct.unpack('!iiiiiiiiii', soundcheck)
except (struct.error, TypeError):
# SoundCheck isn't in the format we expect, so return default
# values.
return 0.0, 0.0
# SoundCheck stores absolute calculated/measured RMS value in an
# unknown unit. We need to find the ratio of this measurement
# compared to a reference value of 1000 to get our gain in dB. We
# play it safe by using the larger of the two values (i.e., the most
# attenuation).
maxgain = max(soundcheck[:2])
if maxgain > 0:
gain = math.log10(maxgain / 1000.0) * -10
else:
# Invalid gain value found.
gain = 0.0
# SoundCheck stores peak values as the actual value of the sample,
# and again separately for the left and right channels. We need to
# convert this to a percentage of full scale, which is 32768 for a
# 16 bit sample. Once again, we play it safe by using the larger of
# the two values.
peak = max(soundcheck[6:8]) / 32768.0
return round(gain, 2), round(peak, 6)
def _sc_encode(gain, peak):
"""Encode ReplayGain gain/peak values as a Sound Check string.
"""
# SoundCheck stores the peak value as the actual value of the
# sample, rather than the percentage of full scale that RG uses, so
# we do a simple conversion assuming 16 bit samples.
peak *= 32768.0
# SoundCheck stores absolute RMS values in some unknown units rather
# than the dB values RG uses. We can calculate these absolute values
# from the gain ratio using a reference value of 1000 units. We also
# enforce the maximum value here, which is equivalent to about
# -18.2dB.
g1 = min(round((10 ** (gain / -10)) * 1000), 65534)
# Same as above, except our reference level is 2500 units.
g2 = min(round((10 ** (gain / -10)) * 2500), 65534)
# The purpose of these values are unknown, but they also seem to be
# unused so we just use zero.
uk = 0
values = (g1, g1, g2, g2, uk, uk, peak, peak, uk, uk)
return (' %08X' * 10) % values
# Cover art and other images.
def _image_mime_type(data):
"""Return the MIME type of the image data (a bytestring).
"""
kind = imghdr.what(None, h=data)
if kind in ['gif', 'jpeg', 'png', 'tiff', 'bmp']:
return 'image/{0}'.format(kind)
elif kind == 'pgm':
return 'image/x-portable-graymap'
elif kind == 'pbm':
return 'image/x-portable-bitmap'
elif kind == 'ppm':
return 'image/x-portable-pixmap'
elif kind == 'xbm':
return 'image/x-xbitmap'
else:
return 'image/x-{0}'.format(kind)
class ImageType(enum.Enum):
    """Indicates the kind of an `Image` stored in a file's tag.

    The member values appear to mirror the ID3v2 APIC picture-type
    codes (0 = other ... 20 = publisher logo) -- TODO confirm against
    the ID3v2 specification.  `Image` relies on the members being
    declared in ascending value order when translating integer type
    indices by position.
    """
    other = 0
    icon = 1
    other_icon = 2
    front = 3
    back = 4
    leaflet = 5
    media = 6
    lead_artist = 7
    artist = 8
    conductor = 9
    group = 10
    composer = 11
    lyricist = 12
    recording_location = 13
    recording_session = 14
    performance = 15
    screen_capture = 16
    fish = 17
    illustration = 18
    artist_logo = 19
    publisher_logo = 20
class Image(object):
    """Structure representing image data and metadata that can be
    stored and retrieved from tags.

    Properties:

    * ``data``: the binary data of the image.
    * ``desc``: an optional description of the image.
    * ``type``: an `ImageType` instance (or None) giving the image's
      role.
    * ``mime_type``: read-only; the MIME type guessed from ``data``.
    * ``type_index``: read-only; the integer code for ``type``.
    """
    def __init__(self, data, desc=None, type=None):
        self.data = data
        self.desc = desc
        # Integer type codes are translated to ImageType members by
        # position; unknown codes degrade to ImageType.other.
        if isinstance(type, int):
            try:
                type = list(ImageType)[type]
            except IndexError:
                log.debug(u"ignoring unknown image type index {0}", type)
                type = ImageType.other
        self.type = type

    @property
    def mime_type(self):
        """The MIME type of ``data``, or None when there is no data."""
        if not self.data:
            return None
        return _image_mime_type(self.data)

    @property
    def type_index(self):
        """Integer value of ``type``.

        Falls back to 0 ("other") when the type is unset, for tag
        formats that require a type index to be present.
        """
        return 0 if self.type is None else self.type.value
# StorageStyle classes describe strategies for accessing values in
# Mutagen file objects.
class StorageStyle(object):
    """A strategy for reading and writing a value in a particular tag
    format (or family of formats).

    This base class implements a plain 1:1 mapping between a key in a
    Mutagen file object and a raw value; subclasses layer on
    format-specific translations or access patterns.

    MediaFile drives a StorageStyle through three methods -- ``get()``,
    ``set()``, and ``delete()`` -- each of which receives a Mutagen
    file object.  Internally, ``get()`` is ``fetch()`` (raw value by
    key) followed by ``deserialize()`` (raw value to Python value),
    and ``set()`` is ``serialize()`` (Python value to raw value)
    followed by ``store()`` (assign raw value under the key).
    Subclasses usually override only the step they need to change.

    The class-level `formats` attribute lists the mutagen class names
    this style applies to; MediaFile only uses styles whose `formats`
    match the audio file at hand.
    """
    formats = ['FLAC', 'OggOpus', 'OggTheora', 'OggSpeex', 'OggVorbis',
               'OggFlac', 'APEv2File', 'WavPack', 'Musepack', 'MonkeysAudio']
    """List of mutagen classes the StorageStyle can handle.
    """

    def __init__(self, key, as_type=str, suffix=None, float_places=2):
        """Create a basic storage strategy.

        - `key`: the key on the Mutagen file object used to access the
          field's data.
        - `as_type`: the Python type values are stored as internally
          (`str`, `int`, `bool`, or `bytes`).
        - `suffix`: when `as_type` is `str`, appended to values before
          storing (and stripped again on read).
        - `float_places`: number of digits kept after the decimal
          point when a float is encoded as a string.
        """
        self.key = key
        self.as_type = as_type
        self.suffix = suffix
        self.float_places = float_places

        # Normalize a bytes suffix to text when values are stored as
        # strings so concatenation and endswith() work uniformly.
        if self.suffix and self.as_type is str \
                and not isinstance(self.suffix, str):
            self.suffix = self.suffix.decode('utf8')

    # Getter.

    def get(self, mutagen_file):
        """Get the value for the field using this style."""
        return self.deserialize(self.fetch(mutagen_file))

    def fetch(self, mutagen_file):
        """Retrieve the raw value for this tag from the Mutagen file
        object, or None when the key is missing or its list is empty.
        """
        try:
            return mutagen_file[self.key][0]
        except (KeyError, IndexError):
            return None

    def deserialize(self, mutagen_value):
        """Decode a raw Mutagen value into the represented value,
        stripping the configured suffix when present.
        """
        strip = (self.suffix and isinstance(mutagen_value, str)
                 and mutagen_value.endswith(self.suffix))
        if strip:
            return mutagen_value[:-len(self.suffix)]
        return mutagen_value

    # Setter.

    def set(self, mutagen_file, value):
        """Assign the value for the field using this style."""
        self.store(mutagen_file, self.serialize(value))

    def store(self, mutagen_file, value):
        """Store a serialized value in the Mutagen file object."""
        mutagen_file[self.key] = [value]

    def serialize(self, value):
        """Convert the external Python value to a type that is
        suitable for storing in a Mutagen file object, appending the
        suffix when one is configured.
        """
        if self.as_type is str:
            if isinstance(value, float):
                serialized = '{0:.{1}f}'.format(value, self.float_places)
            elif isinstance(value, bool):
                # Store bools as 1/0 instead of True/False.
                serialized = str(int(value))
            elif isinstance(value, bytes):
                serialized = value.decode('utf8', 'ignore')
            else:
                serialized = str(value)
        else:
            serialized = self.as_type(value)

        if self.suffix:
            serialized += self.suffix

        return serialized

    def delete(self, mutagen_file):
        """Remove the tag from the file (no-op when absent)."""
        if self.key in mutagen_file:
            del mutagen_file[self.key]
class ListStorageStyle(StorageStyle):
    """Abstract storage style that provides list-valued access.

    The ListMediaField descriptor drives this through ``get_list()``
    and ``set_list()``, each receiving a Mutagen file object.

    Subclasses may override ``fetch`` (must return a possibly empty
    list of raw values) and ``store`` (receives the full serialized
    list).  The single-value ``serialize``/``deserialize`` hooks from
    `StorageStyle` are applied element-wise; this class handles the
    packing and unpacking of the lists themselves.
    """
    def get(self, mutagen_file):
        """Get the first value in the field's value list, or None when
        the list is empty.
        """
        values = self.get_list(mutagen_file)
        if values:
            return values[0]
        return None

    def get_list(self, mutagen_file):
        """Get a list of all values for the field using this style."""
        return [self.deserialize(raw) for raw in self.fetch(mutagen_file)]

    def fetch(self, mutagen_file):
        """Get the list of raw (serialized) values, or [] when the key
        is absent.
        """
        try:
            return mutagen_file[self.key]
        except KeyError:
            return []

    def set(self, mutagen_file, value):
        """Set an individual value as the only value for the field."""
        self.set_list(mutagen_file, [value])

    def set_list(self, mutagen_file, values):
        """Set all values for the field from the iterable `values`."""
        self.store(mutagen_file, [self.serialize(v) for v in values])

    def store(self, mutagen_file, values):
        """Set the list of all raw (serialized) values for this field."""
        mutagen_file[self.key] = values
class SoundCheckStorageStyleMixin(object):
    """A mixin for storage styles that read and write iTunes
    SoundCheck analysis values.

    The host class must provide an `index` attribute selecting which
    half of the (gain, peak) pair this field maps to: 0 for gain, 1
    for peak.
    """
    def get(self, mutagen_file):
        raw = self.fetch(mutagen_file)
        if raw is None:
            return None
        return _sc_decode(raw)[self.index]

    def set(self, mutagen_file, value):
        # Read the existing pair (defaulting to zeros), replace our
        # half, and write the re-encoded pair back.
        raw = self.fetch(mutagen_file)
        gain_peak = [0, 0] if raw is None else list(_sc_decode(raw))
        gain_peak[self.index] = value or 0
        self.store(mutagen_file, self.serialize(_sc_encode(*gain_peak)))
class ASFStorageStyle(ListStorageStyle):
    """A general storage style for Windows Media/ASF files.
    """
    formats = ['ASF']
    def deserialize(self, data):
        # ASF tag values come back wrapped in mutagen attribute
        # objects; unwrap them to the plain Python value.
        if isinstance(data, mutagen.asf.ASFBaseAttribute):
            data = data.value
        return data
class MP4StorageStyle(StorageStyle):
    """A general storage style for MPEG-4 tags.
    """
    formats = ['MP4']

    def serialize(self, value):
        """Serialize via the base class, then encode values for
        freeform (``----:``-prefixed) atoms to UTF-8 bytes as MP4
        requires.
        """
        serialized = super(MP4StorageStyle, self).serialize(value)
        if self.key.startswith('----:') and isinstance(serialized, str):
            serialized = serialized.encode('utf8')
        return serialized
class MP4TupleStorageStyle(MP4StorageStyle):
    """A style for storing one half of a pair of numbers kept in a
    single MPEG-4 atom (e.g. track number and track total).
    """
    def __init__(self, key, index=0, **kwargs):
        """`index` selects which element of the stored pair this field
        reads and writes.
        """
        super(MP4TupleStorageStyle, self).__init__(key, **kwargs)
        self.index = index

    def deserialize(self, mutagen_value):
        """Return the stored pair as a list, zero-padded to at least
        two elements.
        """
        pair = list(mutagen_value or [])
        while len(pair) < 2:
            pair.append(0)
        return pair

    def get(self, mutagen_file):
        value = super(MP4TupleStorageStyle, self).get(mutagen_file)[self.index]
        # The values are always present and saved as integers, so a
        # stored zero is interpreted as "not set".
        if value == 0:
            return None
        return value

    def set(self, mutagen_file, value):
        # None means "unset", which is represented by a stored zero.
        pair = self.deserialize(self.fetch(mutagen_file))
        pair[self.index] = int(value) if value is not None else 0
        self.store(mutagen_file, pair)

    def delete(self, mutagen_file):
        # Deleting the first element removes the whole atom; deleting
        # the second merely zeroes it out.
        if self.index == 0:
            super(MP4TupleStorageStyle, self).delete(mutagen_file)
        else:
            self.set(mutagen_file, None)
class MP4ListStorageStyle(ListStorageStyle, MP4StorageStyle):
    """List-valued storage for MPEG-4 tags: combines the list access
    of `ListStorageStyle` with MP4-specific serialization.
    """
    pass
class MP4SoundCheckStorageStyle(SoundCheckStorageStyleMixin, MP4StorageStyle):
    """iTunes SoundCheck access for MPEG-4 files.
    """
    def __init__(self, key, index=0, **kwargs):
        # `index` selects the gain (0) or peak (1) half of the encoded
        # SoundCheck pair, as required by SoundCheckStorageStyleMixin.
        super(MP4SoundCheckStorageStyle, self).__init__(key, **kwargs)
        self.index = index
class MP4BoolStorageStyle(MP4StorageStyle):
    """A style for booleans in MPEG-4 files. (MPEG-4 has an atom type
    specifically for representing booleans.)

    Unlike the other styles, values are stored bare rather than
    wrapped in lists, so the list accessors are unsupported.
    """
    def get(self, mutagen_file):
        if self.key in mutagen_file:
            return mutagen_file[self.key]
        return None

    def get_list(self, mutagen_file):
        raise NotImplementedError('MP4 bool storage does not support lists')

    def set(self, mutagen_file, value):
        mutagen_file[self.key] = value

    def set_list(self, mutagen_file, values):
        raise NotImplementedError('MP4 bool storage does not support lists')
class MP4ImageStorageStyle(MP4ListStorageStyle):
    """Store images as MPEG-4 image atoms. Values are `Image` objects.
    """
    def __init__(self, **kwargs):
        # NOTE(review): mutagen's MP4 tags are normally keyed by text
        # strings; confirm that the bytes key b'covr' (rather than
        # 'covr') is intended under Python 3.
        super(MP4ImageStorageStyle, self).__init__(key=b'covr', **kwargs)
    def deserialize(self, data):
        # Wrap the raw atom payload; MP4 cover atoms carry no type or
        # description metadata.
        return Image(data)
    def serialize(self, image):
        # MP4Cover supports only PNG and JPEG payloads; the cover kind
        # is chosen from the image data's detected MIME type.
        if image.mime_type == 'image/png':
            kind = mutagen.mp4.MP4Cover.FORMAT_PNG
        elif image.mime_type == 'image/jpeg':
            kind = mutagen.mp4.MP4Cover.FORMAT_JPEG
        else:
            raise ValueError('MP4 files only supports PNG and JPEG images')
        return mutagen.mp4.MP4Cover(image.data, kind)
class MP3StorageStyle(StorageStyle):
    """Store data in ID3 frames.
    """
    formats = ['MP3', 'AIFF']

    def __init__(self, key, id3_lang=None, **kwargs):
        """Create a new ID3 storage style. `id3_lang` is the value for
        the language field of newly created frames.
        """
        self.id3_lang = id3_lang
        super(MP3StorageStyle, self).__init__(key, **kwargs)

    def fetch(self, mutagen_file):
        """Return the frame's first text value, or None when the frame
        is missing or has no text.
        """
        try:
            frame = mutagen_file[self.key]
        except KeyError:
            return None
        try:
            return frame.text[0]
        except IndexError:
            return None

    def store(self, mutagen_file, value):
        """Replace all frames for this key with one new frame holding
        `value`.
        """
        frame = mutagen.id3.Frames[self.key](encoding=3, text=[value])
        mutagen_file.tags.setall(self.key, [frame])
class MP3ListStorageStyle(ListStorageStyle, MP3StorageStyle):
    """Store lists of data in multiple ID3 frames.
    """
    def fetch(self, mutagen_file):
        # All values live in the text list of a single frame for this
        # key; missing frame means an empty list.
        try:
            return mutagen_file[self.key].text
        except KeyError:
            return []
    def store(self, mutagen_file, values):
        # Replace any existing frames for this key with one frame
        # carrying the whole value list.
        frame = mutagen.id3.Frames[self.key](encoding=3, text=values)
        mutagen_file.tags.setall(self.key, [frame])
class MP3UFIDStorageStyle(MP3StorageStyle):
    """Store data in a UFID ID3 frame with a particular owner.
    """
    def __init__(self, owner, **kwargs):
        """`owner` is the UFID owner identifier; it also forms part of
        the frame's lookup key.
        """
        self.owner = owner
        super(MP3UFIDStorageStyle, self).__init__('UFID:' + owner, **kwargs)

    def fetch(self, mutagen_file):
        """Return the raw UFID payload, or None when absent."""
        try:
            return mutagen_file[self.key].data
        except KeyError:
            return None

    def store(self, mutagen_file, value):
        """Set the UFID payload: update an existing frame with our
        owner in place, or create a new frame otherwise.
        """
        frames = mutagen_file.tags.getall(self.key)
        for frame in frames:
            # Replace existing frame data.
            if frame.owner == self.owner:
                frame.data = value
                break
        else:
            # No frame with our owner exists yet; create one.  (The
            # previous version attached this branch to the inner `if`,
            # so a first-time write with no existing frames silently
            # stored nothing, and any non-matching frame replaced all
            # frames for the key.)
            frame = mutagen.id3.UFID(owner=self.owner, data=value)
            mutagen_file.tags.setall(self.key, [frame])
class MP3DescStorageStyle(MP3StorageStyle):
    """Store data in a TXXX (or similar) ID3 frame. The frame is
    selected based on its ``desc`` field.
    """
    def __init__(self, desc='', key='TXXX', **kwargs):
        """`desc` is the description string that identifies the frame
        among all frames sharing `key`.
        """
        self.description = desc
        super(MP3DescStorageStyle, self).__init__(key=key, **kwargs)

    def store(self, mutagen_file, value):
        """Write `value` into the frame matching our description,
        creating the frame when none exists.
        """
        frames = mutagen_file.tags.getall(self.key)
        if self.key != 'USLT':
            # Most frames hold a list of strings; USLT keeps one blob.
            value = [value]

        # Try modifying in place.
        found = False
        for frame in frames:
            if frame.desc.lower() == self.description.lower():
                frame.text = value
                found = True

        # Need to make a new frame?
        if not found:
            frame = mutagen.id3.Frames[self.key](
                # Frame descriptions are text strings.  The previous
                # `bytes(self.description)` raised TypeError under
                # Python 3, since bytes(str) needs an encoding.
                desc=self.description,
                text=value,
                encoding=3
            )
            if self.id3_lang:
                frame.lang = self.id3_lang
            mutagen_file.tags.add(frame)

    def fetch(self, mutagen_file):
        """Return the value of the first frame matching our
        description, or None when no frame matches.
        """
        for frame in mutagen_file.tags.getall(self.key):
            if frame.desc.lower() == self.description.lower():
                if self.key == 'USLT':
                    return frame.text
                try:
                    return frame.text[0]
                except IndexError:
                    return None

    def delete(self, mutagen_file):
        """Remove the frame matching our description, if any."""
        found_frame = None
        for frame in mutagen_file.tags.getall(self.key):
            if frame.desc.lower() == self.description.lower():
                found_frame = frame
                break

        if found_frame is not None:
            # Delete by the matched frame's hash key (the previous
            # code indexed with the loop variable, which only worked
            # because the loop broke on the match).
            del mutagen_file[found_frame.HashKey]
class MP3SlashPackStorageStyle(MP3StorageStyle):
    """Store value as part of pair that is serialized as a slash-
    separated string.
    """
    def __init__(self, key, pack_pos=0, **kwargs):
        # pack_pos selects which half of the "A/B" pair this style
        # reads and writes (0 = left, 1 = right).
        super(MP3SlashPackStorageStyle, self).__init__(key, **kwargs)
        self.pack_pos = pack_pos

    def _fetch_unpacked(self, mutagen_file):
        """Return the stored string split on '/', padded with None up
        to two items.
        """
        data = self.fetch(mutagen_file)
        if data:
            items = str(data).split('/')
        else:
            items = []
        packing_length = 2
        return list(items) + [None] * (packing_length - len(items))

    def get(self, mutagen_file):
        return self._fetch_unpacked(mutagen_file)[self.pack_pos]

    def set(self, mutagen_file, value):
        """Update one half of the pair and re-serialize both."""
        items = self._fetch_unpacked(mutagen_file)
        items[self.pack_pos] = value
        if items[0] is None:
            items[0] = ''
        if items[1] is None:
            items.pop()  # Do not store last value
        self.store(mutagen_file, '/'.join(map(str, items)))

    def delete(self, mutagen_file):
        # Deleting the left half removes the whole frame; deleting the
        # right half only clears that component.
        if self.pack_pos == 0:
            super(MP3SlashPackStorageStyle, self).delete(mutagen_file)
        else:
            self.set(mutagen_file, None)
class MP3ImageStorageStyle(ListStorageStyle, MP3StorageStyle):
    """Converts between APIC frames and ``Image`` instances.

    The `get_list` method inherited from ``ListStorageStyle`` returns a
    list of ``Image``s. Similarly, the `set_list` method accepts a
    list of ``Image``s as its ``values`` argument.
    """
    def __init__(self):
        super(MP3ImageStorageStyle, self).__init__(key='APIC')
        # Serialized image payloads are raw bytes.
        self.as_type = bytes

    def deserialize(self, apic_frame):
        """Convert APIC frame into Image."""
        return Image(data=apic_frame.data, desc=apic_frame.desc,
                     type=apic_frame.type)

    def fetch(self, mutagen_file):
        # One APIC frame per embedded image.
        return mutagen_file.tags.getall(self.key)

    def store(self, mutagen_file, frames):
        mutagen_file.tags.setall(self.key, frames)

    def delete(self, mutagen_file):
        mutagen_file.tags.delall(self.key)

    def serialize(self, image):
        """Return an APIC frame populated with data from ``image``.
        """
        assert isinstance(image, Image)
        frame = mutagen.id3.Frames[self.key]()
        frame.data = image.data
        frame.mime = image.mime_type
        frame.desc = (image.desc or '').encode('utf8')
        frame.encoding = 3  # UTF-8 encoding of desc
        frame.type = image.type_index
        return frame
class MP3SoundCheckStorageStyle(SoundCheckStorageStyleMixin,
                                MP3DescStorageStyle):
    """Store a value at slot `index` of a SoundCheck blob kept in a
    desc-keyed ID3 frame (see `SoundCheckStorageStyleMixin`). Used by
    the ReplayGain fields with desc='iTunNORM' COMM frames.
    """
    def __init__(self, index=0, **kwargs):
        super(MP3SoundCheckStorageStyle, self).__init__(**kwargs)
        self.index = index
class ASFImageStorageStyle(ListStorageStyle):
    """Store images packed into Windows Media/ASF byte array attributes.
    Values are `Image` objects.
    """
    formats = ['ASF']

    def __init__(self):
        super(ASFImageStorageStyle, self).__init__(key='WM/Picture')

    def deserialize(self, asf_picture):
        """Unpack an ASF picture attribute into an `Image`."""
        mime, data, image_type, desc = _unpack_asf_image(asf_picture.value)
        return Image(data, desc=desc, type=image_type)

    def serialize(self, image):
        """Pack an `Image` into an ASF byte-array attribute."""
        attr = mutagen.asf.ASFByteArrayAttribute()
        attr.value = _pack_asf_image(image.mime_type, image.data,
                                     type=image.type_index,
                                     description=image.desc or '')
        return attr
class VorbisImageStorageStyle(ListStorageStyle):
    """Store images in Vorbis comments. Both legacy COVERART fields and
    modern METADATA_BLOCK_PICTURE tags are supported. Data is
    base64-encoded. Values are `Image` objects.
    """
    formats = ['OggOpus', 'OggTheora', 'OggSpeex', 'OggVorbis',
               'OggFlac']

    def __init__(self):
        super(VorbisImageStorageStyle, self).__init__(
            key='metadata_block_picture'
        )
        self.as_type = bytes

    def fetch(self, mutagen_file):
        """Return the `Image`s found in the file's comments."""
        images = []
        if 'metadata_block_picture' not in mutagen_file:
            # Try legacy COVERART tags.
            if 'coverart' in mutagen_file:
                for data in mutagen_file['coverart']:
                    images.append(Image(base64.b64decode(data)))
            return images
        for data in mutagen_file["metadata_block_picture"]:
            try:
                pic = mutagen.flac.Picture(base64.b64decode(data))
            except (TypeError, AttributeError):
                # Skip malformed or undecodable picture blocks.
                continue
            images.append(Image(data=pic.data, desc=pic.desc,
                                type=pic.type))
        return images

    def store(self, mutagen_file, image_data):
        # Strip all art, including legacy COVERART.
        if 'coverart' in mutagen_file:
            del mutagen_file['coverart']
        if 'coverartmime' in mutagen_file:
            del mutagen_file['coverartmime']
        super(VorbisImageStorageStyle, self).store(mutagen_file, image_data)

    def serialize(self, image):
        """Turn a Image into a base64 encoded FLAC picture block.
        """
        pic = mutagen.flac.Picture()
        pic.data = image.data
        pic.type = image.type_index
        pic.mime = image.mime_type
        pic.desc = image.desc or ''
        return base64.b64encode(pic.write())
class FlacImageStorageStyle(ListStorageStyle):
    """Converts between ``mutagen.flac.Picture`` and ``Image`` instances.
    """
    formats = ['FLAC']

    def __init__(self):
        super(FlacImageStorageStyle, self).__init__(key='')

    def fetch(self, mutagen_file):
        # FLAC keeps pictures as dedicated metadata blocks.
        return mutagen_file.pictures

    def deserialize(self, flac_picture):
        """Wrap a FLAC picture block in an `Image`."""
        return Image(data=flac_picture.data, desc=flac_picture.desc,
                     type=flac_picture.type)

    def store(self, mutagen_file, pictures):
        """``pictures`` is a list of mutagen.flac.Picture instances.
        """
        mutagen_file.clear_pictures()
        for picture in pictures:
            mutagen_file.add_picture(picture)

    def serialize(self, image):
        """Turn an `Image` into a mutagen.flac.Picture.
        """
        picture = mutagen.flac.Picture()
        picture.data = image.data
        picture.type = image.type_index
        picture.mime = image.mime_type
        picture.desc = image.desc or ''
        return picture

    def delete(self, mutagen_file):
        """Remove all images from the file.
        """
        mutagen_file.clear_pictures()
class APEv2ImageStorageStyle(ListStorageStyle):
    """Store images in APEv2 tags. Values are `Image` objects.
    """
    formats = ['APEv2File', 'WavPack', 'Musepack', 'MonkeysAudio', 'OptimFROG']

    # One APEv2 tag name per image type; consequently at most one image
    # of each type can be stored.
    TAG_NAMES = {
        ImageType.other: 'Cover Art (other)',
        ImageType.icon: 'Cover Art (icon)',
        ImageType.other_icon: 'Cover Art (other icon)',
        ImageType.front: 'Cover Art (front)',
        ImageType.back: 'Cover Art (back)',
        ImageType.leaflet: 'Cover Art (leaflet)',
        ImageType.media: 'Cover Art (media)',
        ImageType.lead_artist: 'Cover Art (lead)',
        ImageType.artist: 'Cover Art (artist)',
        ImageType.conductor: 'Cover Art (conductor)',
        ImageType.group: 'Cover Art (band)',
        ImageType.composer: 'Cover Art (composer)',
        ImageType.lyricist: 'Cover Art (lyricist)',
        ImageType.recording_location: 'Cover Art (studio)',
        ImageType.recording_session: 'Cover Art (recording)',
        ImageType.performance: 'Cover Art (performance)',
        ImageType.screen_capture: 'Cover Art (movie scene)',
        ImageType.fish: 'Cover Art (colored fish)',
        ImageType.illustration: 'Cover Art (illustration)',
        ImageType.artist_logo: 'Cover Art (band logo)',
        ImageType.publisher_logo: 'Cover Art (publisher logo)',
    }

    def __init__(self):
        super(APEv2ImageStorageStyle, self).__init__(key='')

    def fetch(self, mutagen_file):
        """Collect `Image`s from every known cover-art tag. Each tag
        value is laid out as "<description>\\x00<image bytes>".
        """
        images = []
        for cover_type, cover_tag in self.TAG_NAMES.items():
            try:
                frame = mutagen_file[cover_tag]
                text_delimiter_index = frame.value.find('\x00')
                comment = frame.value[0:text_delimiter_index] \
                    if text_delimiter_index > 0 else None
                image_data = frame.value[text_delimiter_index + 1:]
                images.append(Image(data=image_data, type=cover_type,
                                    desc=comment))
            except KeyError:
                # No image of this type stored.
                pass
        return images

    def set_list(self, mutagen_file, values):
        """Replace all stored images with `values` (a list of
        `Image`s), one tag per image type.
        """
        self.delete(mutagen_file)
        for image in values:
            image_type = image.type or ImageType.other
            comment = image.desc or ''
            image_data = comment.encode('utf8') + b'\x00' + image.data
            cover_tag = self.TAG_NAMES[image_type]
            mutagen_file[cover_tag] = image_data

    def delete(self, mutagen_file):
        """Remove all images from the file.
        """
        for cover_tag in self.TAG_NAMES.values():
            try:
                del mutagen_file[cover_tag]
            except KeyError:
                pass
# MediaField is a descriptor that represents a single logical field. It
# aggregates several StorageStyles describing how to access the data for
# each file type.
class MediaField(object):
    """A descriptor providing access to a particular (abstract) metadata
    field.
    """
    def __init__(self, *styles, **kwargs):
        """Creates a new MediaField.

        :param styles: `StorageStyle` instances that describe how to
            read and write the field in particular formats. There must
            be at least one style for each possible file format.
        :param out_type: the type of the value returned when getting
            this property (default: str).
        """
        self.out_type = kwargs.get('out_type', str)
        self._styles = styles

    def styles(self, mutagen_file):
        """Yields the list of storage styles of this field that can
        handle the MediaFile's format.
        """
        file_format = mutagen_file.__class__.__name__
        for candidate in self._styles:
            if file_format in candidate.formats:
                yield candidate

    def __get__(self, mediafile, owner=None):
        # Return the first truthy value any applicable style yields,
        # coerced to the field's output type.
        value = None
        for style in self.styles(mediafile.mgfile):
            value = style.get(mediafile.mgfile)
            if value:
                break
        return _safe_cast(self.out_type, value)

    def __set__(self, mediafile, value):
        if value is None:
            value = self._none_value()
        for style in self.styles(mediafile.mgfile):
            style.set(mediafile.mgfile, value)

    def __delete__(self, mediafile):
        for style in self.styles(mediafile.mgfile):
            style.delete(mediafile.mgfile)

    def _none_value(self):
        """Get an appropriate "null" value for this field's type. This
        is used internally when setting the field to None.
        """
        if self.out_type == int:
            return 0
        elif self.out_type == float:
            return 0.0
        elif self.out_type == bool:
            return False
        elif self.out_type == str:
            return ''
class ListMediaField(MediaField):
    """Property descriptor that retrieves a list of multiple values from
    a tag, using the ``get_list``/``set_list`` methods of its
    ``StorageStyle`` strategies.
    """
    def __get__(self, mediafile, _):
        collected = []
        for style in self.styles(mediafile.mgfile):
            collected.extend(style.get_list(mediafile.mgfile))
        return [_safe_cast(self.out_type, item) for item in collected]

    def __set__(self, mediafile, values):
        for style in self.styles(mediafile.mgfile):
            style.set_list(mediafile.mgfile, values)

    def single_field(self):
        """Returns a ``MediaField`` descriptor over the same styles
        that gets and sets only the first item.
        """
        return MediaField(*self._styles, out_type=self.out_type)
class DateField(MediaField):
    """Descriptor that handles serializing and deserializing dates

    The getter parses value from tags into a ``datetime.date`` instance
    and setter serializes such an instance into a string.

    For granular access to year, month, and day, use the ``*_field``
    methods to create corresponding `DateItemField`s.
    """
    def __init__(self, *date_styles, **kwargs):
        """``date_styles`` is a list of ``StorageStyle``s to store and
        retrieve the whole date from. The ``year`` option is an
        additional list of fallback styles for the year. The year is
        always set on this style, but is only retrieved if the main
        storage styles do not return a value.
        """
        super(DateField, self).__init__(*date_styles)
        year_style = kwargs.get('year', None)
        if year_style:
            self._year_field = MediaField(*year_style)

    def __get__(self, mediafile, owner=None):
        """Parse the stored date into a ``datetime.date``, or None when
        no (valid) year is available.
        """
        year, month, day = self._get_date_tuple(mediafile)
        if not year:
            return None
        try:
            return datetime.date(
                year,
                month or 1,
                day or 1
            )
        except ValueError:  # Out of range values.
            return None

    def __set__(self, mediafile, date):
        if date is None:
            self._set_date_tuple(mediafile, None, None, None)
        else:
            self._set_date_tuple(mediafile, date.year, date.month, date.day)

    def __delete__(self, mediafile):
        super(DateField, self).__delete__(mediafile)
        if hasattr(self, '_year_field'):
            self._year_field.__delete__(mediafile)

    def _get_date_tuple(self, mediafile):
        """Get a 3-item sequence representing the date consisting of a
        year, month, and day number. Each number is either an integer or
        None.
        """
        # Get the underlying data and split on hyphens and slashes.
        datestring = super(DateField, self).__get__(mediafile, None)
        if isinstance(datestring, str):
            # Strip any time-of-day suffix ("T..." or " ...") first.
            datestring = re.sub(r'[Tt ].*$', '', str(datestring))
            items = re.split('[-/]', str(datestring))
        else:
            items = []

        # Ensure that we have exactly 3 components, possibly by
        # truncating or padding.
        items = items[:3]
        if len(items) < 3:
            items += [None] * (3 - len(items))

        # Use year field if year is missing.
        if not items[0] and hasattr(self, '_year_field'):
            items[0] = self._year_field.__get__(mediafile)

        # Convert each component to an integer if possible.
        items_ = []
        for item in items:
            try:
                items_.append(int(item))
            except (ValueError, TypeError):
                # Fixed: the original bare `except:` swallowed every
                # exception. int() only raises ValueError (bad string)
                # or TypeError (None) here; anything else should
                # propagate.
                items_.append(None)
        return items_

    def _set_date_tuple(self, mediafile, year, month=None, day=None):
        """Set the value of the field given a year, month, and day
        number. Each number can be an integer or None to indicate an
        unset component.
        """
        if year is None:
            self.__delete__(mediafile)
            return
        date = ['{0:04d}'.format(int(year))]
        if month:
            date.append('{0:02d}'.format(int(month)))
        if month and day:
            date.append('{0:02d}'.format(int(day)))
        date = map(str, date)
        super(DateField, self).__set__(mediafile, '-'.join(date))
        if hasattr(self, '_year_field'):
            self._year_field.__set__(mediafile, year)

    def year_field(self):
        return DateItemField(self, 0)

    def month_field(self):
        return DateItemField(self, 1)

    def day_field(self):
        return DateItemField(self, 2)
class DateItemField(MediaField):
    """Descriptor that gets and sets constituent parts of a `DateField`:
    the month, day, or year.
    """
    def __init__(self, date_field, item_pos):
        # item_pos: 0 = year, 1 = month, 2 = day.
        self.date_field = date_field
        self.item_pos = item_pos

    def __get__(self, mediafile, _):
        components = self.date_field._get_date_tuple(mediafile)
        return components[self.item_pos]

    def __set__(self, mediafile, value):
        components = self.date_field._get_date_tuple(mediafile)
        components[self.item_pos] = value
        self.date_field._set_date_tuple(mediafile, *components)

    def __delete__(self, mediafile):
        self.__set__(mediafile, None)
class CoverArtField(MediaField):
    """A descriptor that provides access to the *raw image data* for the
    cover image on a file. This is used for backwards compatibility: the
    full `ImageListField` provides richer `Image` objects.

    When there are multiple images we try to pick the most likely to be a front
    cover.
    """
    def __init__(self):
        pass

    def __get__(self, mediafile, _):
        images = mediafile.images
        if not images:
            return None
        return self.guess_cover_image(images).data

    @staticmethod
    def guess_cover_image(candidates):
        """Pick the image tagged as a front cover if there is one;
        otherwise fall back to the first image.
        """
        if len(candidates) == 1:
            return candidates[0]
        for candidate in candidates:
            if candidate.type == ImageType.front:
                return candidate
        return candidates[0]

    def __set__(self, mediafile, data):
        mediafile.images = [Image(data=data)] if data else []

    def __delete__(self, mediafile):
        delattr(mediafile, 'images')
class ImageListField(ListMediaField):
    """Descriptor to access the list of images embedded in tags.

    The getter returns a list of `Image` instances obtained from
    the tags. The setter accepts a list of `Image` instances to be
    written to the tags.
    """
    def __init__(self):
        # The storage styles used here must implement the
        # `ListStorageStyle` interface and get and set lists of
        # `Image`s. One style per container format;
        # MediaField.styles() selects the applicable one at runtime.
        super(ImageListField, self).__init__(
            MP3ImageStorageStyle(),
            MP4ImageStorageStyle(),
            ASFImageStorageStyle(),
            VorbisImageStorageStyle(),
            FlacImageStorageStyle(),
            APEv2ImageStorageStyle(),
            out_type=Image,
        )
# MediaFile is a collection of fields.
class MediaFile(object):
    """Represents a multimedia file on disk and provides access to its
    metadata.
    """
    def __init__(self, path, id3v23=False):
        """Constructs a new `MediaFile` reflecting the file at path. May
        throw `UnreadableFileError`.

        By default, MP3 files are saved with ID3v2.4 tags. You can use
        the older ID3v2.3 standard by specifying the `id3v23` option.
        """
        self.path = path
        try:
            self.mgfile = mutagen.File(path)
        except mutagen.MutagenError as exc:
            log.debug('header parsing failed: {0}', str(exc))
            raise UnreadableFileError(path)
        except IOError as exc:
            if type(exc) == IOError:
                # This is a base IOError, not a subclass from Mutagen or
                # anywhere else. (Exact-type check is deliberate here;
                # isinstance would also match Mutagen subclasses.)
                raise
            else:
                log.debug('{}', traceback.format_exc())
                raise MutagenError(path, exc)
        except Exception as exc:
            # Isolate bugs in Mutagen.
            log.debug('{}', traceback.format_exc())
            log.error('uncaught Mutagen exception in open: {0}', exc)
            raise MutagenError(path, exc)

        # Map the mutagen file class to beets' short type code.
        if self.mgfile is None:
            # Mutagen couldn't guess the type
            raise FileTypeError(path)
        elif (type(self.mgfile).__name__ == 'M4A' or
              type(self.mgfile).__name__ == 'MP4'):
            info = self.mgfile.info
            if hasattr(info, 'codec'):
                if info.codec and info.codec.startswith('alac'):
                    self.type = 'alac'
                else:
                    self.type = 'aac'
            else:
                # This hack differentiates AAC and ALAC on versions of
                # Mutagen < 1.26. Once Mutagen > 1.26 is out and
                # required by beets, we can remove this.
                if hasattr(self.mgfile.info, 'bitrate') and \
                   self.mgfile.info.bitrate > 0:
                    self.type = 'aac'
                else:
                    self.type = 'alac'
        elif (type(self.mgfile).__name__ == 'ID3' or
              type(self.mgfile).__name__ == 'MP3'):
            self.type = 'mp3'
        elif type(self.mgfile).__name__ == 'FLAC':
            self.type = 'flac'
        elif type(self.mgfile).__name__ == 'OggOpus':
            self.type = 'opus'
        elif type(self.mgfile).__name__ == 'OggVorbis':
            self.type = 'ogg'
        elif type(self.mgfile).__name__ == 'MonkeysAudio':
            self.type = 'ape'
        elif type(self.mgfile).__name__ == 'WavPack':
            self.type = 'wv'
        elif type(self.mgfile).__name__ == 'Musepack':
            self.type = 'mpc'
        elif type(self.mgfile).__name__ == 'ASF':
            self.type = 'asf'
        elif type(self.mgfile).__name__ == 'AIFF':
            self.type = 'aiff'
        else:
            raise FileTypeError(path, type(self.mgfile).__name__)

        # Add a set of tags if it's missing.
        if self.mgfile.tags is None:
            self.mgfile.add_tags()

        # Set the ID3v2.3 flag only for MP3s.
        self.id3v23 = id3v23 and self.type == 'mp3'
    def save(self):
        """Write the object's tags back to the file.
        """
        # Possibly save the tags to ID3v2.3. `kwargs` is passed through
        # to mutagen's save().
        kwargs = {}
        if self.id3v23:
            id3 = self.mgfile
            if hasattr(id3, 'tags'):
                # In case this is an MP3 object, not an ID3 object.
                id3 = id3.tags
            id3.update_to_v23()
            kwargs['v2_version'] = 3

        # Isolate bugs in Mutagen.
        try:
            self.mgfile.save(**kwargs)
        except (IOError, OSError):
            # Propagate these through: they don't represent Mutagen bugs.
            raise
        except Exception as exc:
            log.debug('{}', traceback.format_exc())
            log.error('uncaught Mutagen exception in save: {0}', exc)
            raise MutagenError(self.path, exc)
def delete(self):
"""Remove the current metadata tag from the file.
"""
try:
self.mgfile.delete()
except NotImplementedError:
# For Mutagen types that don't support deletion (notably,
# ASF), just delete each tag individually.
for tag in self.mgfile.keys():
del self.mgfile[tag]
    # Convenient access to the set of available fields.
    @classmethod
    def fields(cls):
        """Get the names of all writable properties that reflect
        metadata tags (i.e., those that are instances of
        :class:`MediaField`).
        """
        for property, descriptor in cls.__dict__.items():
            if isinstance(descriptor, MediaField):
                # NOTE(review): `str.decode` exists only on Python 2;
                # under Python 3 this line raises AttributeError —
                # confirm which interpreter this file targets.
                yield property.decode('utf8')

    @classmethod
    def readable_fields(cls):
        """Get all metadata fields: the writable ones from
        :meth:`fields` and also other audio properties.
        """
        for property in cls.fields():
            yield property
        # Read-only audio characteristics exposed as plain properties.
        for property in ('length', 'samplerate', 'bitdepth', 'bitrate',
                         'channels', 'format'):
            yield property
@classmethod
def add_field(cls, name, descriptor):
"""Add a field to store custom tags.
:param name: the name of the property the field is accessed
through. It must not already exist on this class.
:param descriptor: an instance of :class:`MediaField`.
"""
if not isinstance(descriptor, MediaField):
raise ValueError(
'{0} must be an instance of MediaField'.format(descriptor))
if name in cls.__dict__:
raise ValueError(
'property "{0}" already exists on MediaField'.format(name))
setattr(cls, name, descriptor)
    def update(self, dict):
        """Set all field values from a dictionary.

        For any key in `dict` that is also a field to store tags the
        method retrieves the corresponding value from `dict` and updates
        the `MediaFile`. If a key has the value `None`, the
        corresponding property is deleted from the `MediaFile`.
        """
        # NOTE: the parameter name `dict` shadows the builtin; it is
        # kept because callers may pass it as a keyword argument.
        for field in self.fields():
            if field in dict:
                if dict[field] is None:
                    delattr(self, field)
                else:
                    setattr(self, field, dict[field])
# Field definitions.
title = MediaField(
MP3StorageStyle('TIT2'),
MP4StorageStyle("\xa9nam"),
StorageStyle('TITLE'),
ASFStorageStyle('Title'),
)
artist = MediaField(
MP3StorageStyle('TPE1'),
MP4StorageStyle("\xa9ART"),
StorageStyle('ARTIST'),
ASFStorageStyle('Author'),
)
album = MediaField(
MP3StorageStyle('TALB'),
MP4StorageStyle("\xa9alb"),
StorageStyle('ALBUM'),
ASFStorageStyle('WM/AlbumTitle'),
)
genres = ListMediaField(
MP3ListStorageStyle('TCON'),
MP4ListStorageStyle("\xa9gen"),
ListStorageStyle('GENRE'),
ASFStorageStyle('WM/Genre'),
)
genre = genres.single_field()
composer = MediaField(
MP3StorageStyle('TCOM'),
MP4StorageStyle("\xa9wrt"),
StorageStyle('COMPOSER'),
ASFStorageStyle('WM/Composer'),
)
grouping = MediaField(
MP3StorageStyle('TIT1'),
MP4StorageStyle("\xa9grp"),
StorageStyle('GROUPING'),
ASFStorageStyle('WM/ContentGroupDescription'),
)
track = MediaField(
MP3SlashPackStorageStyle('TRCK', pack_pos=0),
MP4TupleStorageStyle('trkn', index=0),
StorageStyle('TRACK'),
StorageStyle('TRACKNUMBER'),
ASFStorageStyle('WM/TrackNumber'),
out_type=int,
)
tracktotal = MediaField(
MP3SlashPackStorageStyle('TRCK', pack_pos=1),
MP4TupleStorageStyle('trkn', index=1),
StorageStyle('TRACKTOTAL'),
StorageStyle('TRACKC'),
StorageStyle('TOTALTRACKS'),
ASFStorageStyle('TotalTracks'),
out_type=int,
)
disc = MediaField(
MP3SlashPackStorageStyle('TPOS', pack_pos=0),
MP4TupleStorageStyle('disk', index=0),
StorageStyle('DISC'),
StorageStyle('DISCNUMBER'),
ASFStorageStyle('WM/PartOfSet'),
out_type=int,
)
disctotal = MediaField(
MP3SlashPackStorageStyle('TPOS', pack_pos=1),
MP4TupleStorageStyle('disk', index=1),
StorageStyle('DISCTOTAL'),
StorageStyle('DISCC'),
StorageStyle('TOTALDISCS'),
ASFStorageStyle('TotalDiscs'),
out_type=int,
)
lyrics = MediaField(
MP3DescStorageStyle(key='USLT'),
MP4StorageStyle("\xa9lyr"),
StorageStyle('LYRICS'),
ASFStorageStyle('WM/Lyrics'),
)
comments = MediaField(
MP3DescStorageStyle(key='COMM'),
MP4StorageStyle("\xa9cmt"),
StorageStyle('DESCRIPTION'),
StorageStyle('COMMENT'),
ASFStorageStyle('WM/Comments'),
ASFStorageStyle('Description')
)
bpm = MediaField(
MP3StorageStyle('TBPM'),
MP4StorageStyle('tmpo', as_type=int),
StorageStyle('BPM'),
ASFStorageStyle('WM/BeatsPerMinute'),
out_type=int,
)
comp = MediaField(
MP3StorageStyle('TCMP'),
MP4BoolStorageStyle('cpil'),
StorageStyle('COMPILATION'),
ASFStorageStyle('WM/IsCompilation', as_type=bool),
out_type=bool,
)
albumartist = MediaField(
MP3StorageStyle('TPE2'),
MP4StorageStyle('aART'),
StorageStyle('ALBUM ARTIST'),
StorageStyle('ALBUMARTIST'),
ASFStorageStyle('WM/AlbumArtist'),
)
albumtype = MediaField(
MP3DescStorageStyle('MusicBrainz Album Type'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Type'),
StorageStyle('MUSICBRAINZ_ALBUMTYPE'),
ASFStorageStyle('MusicBrainz/Album Type'),
)
label = MediaField(
MP3StorageStyle('TPUB'),
MP4StorageStyle('----:com.apple.iTunes:Label'),
MP4StorageStyle('----:com.apple.iTunes:publisher'),
StorageStyle('LABEL'),
StorageStyle('PUBLISHER'), # Traktor
ASFStorageStyle('WM/Publisher'),
)
artist_sort = MediaField(
MP3StorageStyle('TSOP'),
MP4StorageStyle("soar"),
StorageStyle('ARTISTSORT'),
ASFStorageStyle('WM/ArtistSortOrder'),
)
albumartist_sort = MediaField(
MP3DescStorageStyle('ALBUMARTISTSORT'),
MP4StorageStyle("soaa"),
StorageStyle('ALBUMARTISTSORT'),
ASFStorageStyle('WM/AlbumArtistSortOrder'),
)
asin = MediaField(
MP3DescStorageStyle('ASIN'),
MP4StorageStyle("----:com.apple.iTunes:ASIN"),
StorageStyle('ASIN'),
ASFStorageStyle('MusicBrainz/ASIN'),
)
catalognum = MediaField(
MP3DescStorageStyle('CATALOGNUMBER'),
MP4StorageStyle("----:com.apple.iTunes:CATALOGNUMBER"),
StorageStyle('CATALOGNUMBER'),
ASFStorageStyle('WM/CatalogNo'),
)
disctitle = MediaField(
MP3StorageStyle('TSST'),
MP4StorageStyle("----:com.apple.iTunes:DISCSUBTITLE"),
StorageStyle('DISCSUBTITLE'),
ASFStorageStyle('WM/SetSubTitle'),
)
encoder = MediaField(
MP3StorageStyle('TENC'),
MP4StorageStyle("\xa9too"),
StorageStyle('ENCODEDBY'),
StorageStyle('ENCODER'),
ASFStorageStyle('WM/EncodedBy'),
)
script = MediaField(
MP3DescStorageStyle('Script'),
MP4StorageStyle("----:com.apple.iTunes:SCRIPT"),
StorageStyle('SCRIPT'),
ASFStorageStyle('WM/Script'),
)
language = MediaField(
MP3StorageStyle('TLAN'),
MP4StorageStyle("----:com.apple.iTunes:LANGUAGE"),
StorageStyle('LANGUAGE'),
ASFStorageStyle('WM/Language'),
)
country = MediaField(
MP3DescStorageStyle('MusicBrainz Album Release Country'),
MP4StorageStyle("----:com.apple.iTunes:MusicBrainz "
"Album Release Country"),
StorageStyle('RELEASECOUNTRY'),
ASFStorageStyle('MusicBrainz/Album Release Country'),
)
albumstatus = MediaField(
MP3DescStorageStyle('MusicBrainz Album Status'),
MP4StorageStyle("----:com.apple.iTunes:MusicBrainz Album Status"),
StorageStyle('MUSICBRAINZ_ALBUMSTATUS'),
ASFStorageStyle('MusicBrainz/Album Status'),
)
media = MediaField(
MP3StorageStyle('TMED'),
MP4StorageStyle("----:com.apple.iTunes:MEDIA"),
StorageStyle('MEDIA'),
ASFStorageStyle('WM/Media'),
)
albumdisambig = MediaField(
# This tag mapping was invented for beets (not used by Picard, etc).
MP3DescStorageStyle('MusicBrainz Album Comment'),
MP4StorageStyle("----:com.apple.iTunes:MusicBrainz Album Comment"),
StorageStyle('MUSICBRAINZ_ALBUMCOMMENT'),
ASFStorageStyle('MusicBrainz/Album Comment'),
)
# Release date.
date = DateField(
MP3StorageStyle('TDRC'),
MP4StorageStyle("\xa9day"),
StorageStyle('DATE'),
ASFStorageStyle('WM/Year'),
year=(StorageStyle('YEAR'),))
year = date.year_field()
month = date.month_field()
day = date.day_field()
# *Original* release date.
original_date = DateField(
MP3StorageStyle('TDOR'),
MP4StorageStyle('----:com.apple.iTunes:ORIGINAL YEAR'),
StorageStyle('ORIGINALDATE'),
ASFStorageStyle('WM/OriginalReleaseYear'))
original_year = original_date.year_field()
original_month = original_date.month_field()
original_day = original_date.day_field()
# Nonstandard metadata.
artist_credit = MediaField(
MP3DescStorageStyle('Artist Credit'),
MP4StorageStyle("----:com.apple.iTunes:Artist Credit"),
StorageStyle('ARTIST_CREDIT'),
ASFStorageStyle('beets/Artist Credit'),
)
albumartist_credit = MediaField(
MP3DescStorageStyle('Album Artist Credit'),
MP4StorageStyle("----:com.apple.iTunes:Album Artist Credit"),
StorageStyle('ALBUMARTIST_CREDIT'),
ASFStorageStyle('beets/Album Artist Credit'),
)
# Legacy album art field
art = CoverArtField()
# Image list
images = ImageListField()
# MusicBrainz IDs.
mb_trackid = MediaField(
MP3UFIDStorageStyle(owner='http://musicbrainz.org'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Track Id'),
StorageStyle('MUSICBRAINZ_TRACKID'),
ASFStorageStyle('MusicBrainz/Track Id'),
)
mb_albumid = MediaField(
MP3DescStorageStyle('MusicBrainz Album Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Id'),
StorageStyle('MUSICBRAINZ_ALBUMID'),
ASFStorageStyle('MusicBrainz/Album Id'),
)
mb_artistid = MediaField(
MP3DescStorageStyle('MusicBrainz Artist Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Artist Id'),
StorageStyle('MUSICBRAINZ_ARTISTID'),
ASFStorageStyle('MusicBrainz/Artist Id'),
)
mb_albumartistid = MediaField(
MP3DescStorageStyle('MusicBrainz Album Artist Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Artist Id'),
StorageStyle('MUSICBRAINZ_ALBUMARTISTID'),
ASFStorageStyle('MusicBrainz/Album Artist Id'),
)
mb_releasegroupid = MediaField(
MP3DescStorageStyle('MusicBrainz Release Group Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Release Group Id'),
StorageStyle('MUSICBRAINZ_RELEASEGROUPID'),
ASFStorageStyle('MusicBrainz/Release Group Id'),
)
# Acoustid fields.
acoustid_fingerprint = MediaField(
MP3DescStorageStyle('Acoustid Fingerprint'),
MP4StorageStyle('----:com.apple.iTunes:Acoustid Fingerprint'),
StorageStyle('ACOUSTID_FINGERPRINT'),
ASFStorageStyle('Acoustid/Fingerprint'),
)
acoustid_id = MediaField(
MP3DescStorageStyle('Acoustid Id'),
MP4StorageStyle('----:com.apple.iTunes:Acoustid Id'),
StorageStyle('ACOUSTID_ID'),
ASFStorageStyle('Acoustid/Id'),
)
# ReplayGain fields.
rg_track_gain = MediaField(
MP3DescStorageStyle(
'REPLAYGAIN_TRACK_GAIN',
float_places=2, suffix=' dB'
),
MP3DescStorageStyle(
'replaygain_track_gain',
float_places=2, suffix=' dB'
),
MP3SoundCheckStorageStyle(
key='COMM',
index=0, desc='iTunNORM',
id3_lang='eng'
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_track_gain',
float_places=2, suffix=b' dB'
),
MP4SoundCheckStorageStyle(
'----:com.apple.iTunes:iTunNORM',
index=0
),
StorageStyle(
'REPLAYGAIN_TRACK_GAIN',
float_places=2, suffix=' dB'
),
ASFStorageStyle(
'replaygain_track_gain',
float_places=2, suffix=' dB'
),
out_type=float
)
rg_album_gain = MediaField(
MP3DescStorageStyle(
'REPLAYGAIN_ALBUM_GAIN',
float_places=2, suffix=' dB'
),
MP3DescStorageStyle(
'replaygain_album_gain',
float_places=2, suffix=' dB'
),
MP4SoundCheckStorageStyle(
'----:com.apple.iTunes:iTunNORM',
index=1
),
StorageStyle(
'REPLAYGAIN_ALBUM_GAIN',
float_places=2, suffix=' dB'
),
ASFStorageStyle(
'replaygain_album_gain',
float_places=2, suffix=' dB'
),
out_type=float
)
rg_track_peak = MediaField(
MP3DescStorageStyle(
'REPLAYGAIN_TRACK_PEAK',
float_places=6
),
MP3DescStorageStyle(
'replaygain_track_peak',
float_places=6
),
MP3SoundCheckStorageStyle(
key='COMM',
index=1, desc='iTunNORM',
id3_lang='eng'
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_track_peak',
float_places=6
),
MP4SoundCheckStorageStyle(
'----:com.apple.iTunes:iTunNORM',
index=1
),
StorageStyle('REPLAYGAIN_TRACK_PEAK', float_places=6),
ASFStorageStyle('replaygain_track_peak', float_places=6),
out_type=float,
)
rg_album_peak = MediaField(
MP3DescStorageStyle(
'REPLAYGAIN_ALBUM_PEAK',
float_places=6
),
MP3DescStorageStyle(
'replaygain_album_peak',
float_places=6
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_album_peak',
float_places=6
),
StorageStyle('REPLAYGAIN_ALBUM_PEAK', float_places=6),
ASFStorageStyle('replaygain_album_peak', float_places=6),
out_type=float,
)
initial_key = MediaField(
MP3StorageStyle('TKEY'),
MP4StorageStyle('----:com.apple.iTunes:initialkey'),
StorageStyle('INITIALKEY'),
ASFStorageStyle('INITIALKEY'),
)
@property
def length(self):
"""The duration of the audio in seconds (a float)."""
return self.mgfile.info.length
@property
def samplerate(self):
"""The audio's sample rate (an int)."""
if hasattr(self.mgfile.info, 'sample_rate'):
return self.mgfile.info.sample_rate
elif self.type == 'opus':
# Opus is always 48kHz internally.
return 48000
return 0
@property
def bitdepth(self):
"""The number of bits per sample in the audio encoding (an int).
Only available for certain file formats (zero where
unavailable).
"""
if hasattr(self.mgfile.info, 'bits_per_sample'):
return self.mgfile.info.bits_per_sample
return 0
@property
def channels(self):
"""The number of channels in the audio (an int)."""
if isinstance(self.mgfile.info, mutagen.mp3.MPEGInfo):
return {
mutagen.mp3.STEREO: 2,
mutagen.mp3.JOINTSTEREO: 2,
mutagen.mp3.DUALCHANNEL: 2,
mutagen.mp3.MONO: 1,
}[self.mgfile.info.mode]
if hasattr(self.mgfile.info, 'channels'):
return self.mgfile.info.channels
return 0
@property
def bitrate(self):
"""The number of bits per seconds used in the audio coding (an
int). If this is provided explicitly by the compressed file
format, this is a precise reflection of the encoding. Otherwise,
it is estimated from the on-disk file size. In this case, some
imprecision is possible because the file header is incorporated
in the file size.
"""
if hasattr(self.mgfile.info, 'bitrate') and self.mgfile.info.bitrate:
# Many formats provide it explicitly.
return self.mgfile.info.bitrate
else:
# Otherwise, we calculate bitrate from the file size. (This
# is the case for all of the lossless formats.)
if not self.length:
# Avoid division by zero if length is not available.
return 0
size = os.path.getsize(self.path)
return int(size * 8 / self.length)
    @property
    def format(self):
        """A string describing the file format/codec."""
        # self.type holds the lowercase format key (e.g. 'mp3'); TYPES
        # maps it to a human-readable name.
        return TYPES[self.type]
| # This file is part of beets.
# Copyright 2015, <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Handles low-level interfacing for files' tags. Wraps Mutagen to
automatically detect file types and provide a unified interface for a
useful subset of music files' tags.
Usage:
>>> f = MediaFile('Lucy.mp3')
>>> f.title
'Lucy in the Sky with Diamonds'
>>> f.artist = 'The Beatles'
>>> f.save()
A field will always return a reasonable value of the correct type, even
if no tag is present. If no value is available, the value will be false
(e.g., zero or the empty string).
Internally ``MediaFile`` uses ``MediaField`` descriptors to access the
data from the tags. In turn ``MediaField`` uses a number of
``StorageStyle`` strategies to handle format specific logic.
"""
import mutagen
import datetime
import re
import base64
import math
import struct
import imghdr
import os
import traceback
import enum
import logging
# Public interface of this module.
__all__ = ['UnreadableFileError', 'FileTypeError', 'MediaFile']

# Module-level logger shared with the rest of beets.
log = logging.getLogger('beets')

# Human-readable type names.
# Keyed by the lowercase format identifier stored in MediaFile.type
# (see the `format` property).
TYPES = {
    'mp3':  'MP3',
    'aac':  'AAC',
    'alac':  'ALAC',
    'ogg':  'OGG',
    'opus': 'Opus',
    'flac': 'FLAC',
    'ape':  'APE',
    'wv':   'WavPack',
    'mpc':  'Musepack',
    'asf':  'Windows Media',
    'aiff': 'AIFF',
}
# Exceptions.
class UnreadableFileError(Exception):
    """Raised when Mutagen is not able to extract any information from
    the file. The offending path is the exception's sole argument.
    """
    def __init__(self, path):
        super(UnreadableFileError, self).__init__(path)
class FileTypeError(UnreadableFileError):
    """Reading this type of file is not supported.

    When the `mutagen_type` argument is supplied, it indicates that
    Mutagen recognized the file but `MediaFile` does not support that
    Mutagen type.
    """
    def __init__(self, path, mutagen_type=None):
        if mutagen_type is None:
            message = path
        else:
            message = '{0}: of mutagen type {1}'.format(path, mutagen_type)
        # Bypass UnreadableFileError.__init__, which takes only a path.
        Exception.__init__(self, message)
class MutagenError(UnreadableFileError):
    """Raised when Mutagen fails unexpectedly---probably due to a bug.
    """
    def __init__(self, path, mutagen_exc):
        # Embed the underlying Mutagen exception in the message.
        Exception.__init__(self, '{0}: {1}'.format(path, mutagen_exc))
# Utility.
def _safe_cast(out_type, val):
"""Try to covert val to out_type but never raise an exception. If
the value can't be converted, then a sensible default value is
returned. out_type should be bool, int, or str; otherwise, the
value is just passed through.
"""
if val is None:
return None
if out_type == int:
if isinstance(val, int) or isinstance(val, float):
# Just a number.
return int(val)
else:
# Process any other type as a string.
if not isinstance(val, str):
val = str(val)
# Get a number from the front of the string.
val = re.match(r'[0-9]*', val.strip()).group(0)
if not val:
return 0
else:
return int(val)
elif out_type == bool:
try:
# Should work for strings, bools, ints:
return bool(int(val))
except ValueError:
return False
elif out_type == str:
if isinstance(val, bytes):
return val.decode('utf8', 'ignore')
elif isinstance(val, str):
return val
else:
return str(val)
elif out_type == float:
if isinstance(val, int) or isinstance(val, float):
return float(val)
else:
if not isinstance(val, str):
val = str(val)
match = re.match(r'[\+-]?[0-9\.]+', val.strip())
if match:
val = match.group(0)
if val:
return float(val)
return 0.0
else:
return val
# Image coding for ASF/WMA.
def _unpack_asf_image(data):
"""Unpack image data from a WM/Picture tag. Return a tuple
containing the MIME type, the raw image data, a type indicator, and
the image's description.
This function is treated as "untrusted" and could throw all manner
of exceptions (out-of-bounds, etc.). We should clean this up
sometime so that the failure modes are well-defined.
"""
type, size = struct.unpack_from(b'<bi', data)
pos = 5
mime = ""
while data[pos:pos + 2] != b'\x00\x00':
mime += data[pos:pos + 2]
pos += 2
pos += 2
description = ""
while data[pos:pos + 2] != b'\x00\x00':
description += data[pos:pos + 2]
pos += 2
pos += 2
image_data = data[pos:pos + size]
return (mime.decode("utf-16-le"), image_data, type,
description.decode("utf-16-le"))
def _pack_asf_image(mime, data, type=3, description=""):
"""Pack image data for a WM/Picture tag.
"""
tag_data = struct.pack('<bi', type, len(data))
tag_data += mime.encode("utf-16-le") + b'\x00\x00'
tag_data += description.encode("utf-16-le") + b'\x00\x00'
tag_data += data
return tag_data
# iTunes Sound Check encoding.
def _sc_decode(soundcheck):
"""Convert a Sound Check string value to a (gain, peak) tuple as
used by ReplayGain.
"""
# SoundCheck tags consist of 10 numbers, each represented by 8
# characters of ASCII hex preceded by a space.
try:
soundcheck = soundcheck.replace(' ', '').decode('hex')
soundcheck = struct.unpack('!iiiiiiiiii', soundcheck)
except (struct.error, TypeError):
# SoundCheck isn't in the format we expect, so return default
# values.
return 0.0, 0.0
# SoundCheck stores absolute calculated/measured RMS value in an
# unknown unit. We need to find the ratio of this measurement
# compared to a reference value of 1000 to get our gain in dB. We
# play it safe by using the larger of the two values (i.e., the most
# attenuation).
maxgain = max(soundcheck[:2])
if maxgain > 0:
gain = math.log10(maxgain / 1000.0) * -10
else:
# Invalid gain value found.
gain = 0.0
# SoundCheck stores peak values as the actual value of the sample,
# and again separately for the left and right channels. We need to
# convert this to a percentage of full scale, which is 32768 for a
# 16 bit sample. Once again, we play it safe by using the larger of
# the two values.
peak = max(soundcheck[6:8]) / 32768.0
return round(gain, 2), round(peak, 6)
def _sc_encode(gain, peak):
"""Encode ReplayGain gain/peak values as a Sound Check string.
"""
# SoundCheck stores the peak value as the actual value of the
# sample, rather than the percentage of full scale that RG uses, so
# we do a simple conversion assuming 16 bit samples.
peak *= 32768.0
# SoundCheck stores absolute RMS values in some unknown units rather
# than the dB values RG uses. We can calculate these absolute values
# from the gain ratio using a reference value of 1000 units. We also
# enforce the maximum value here, which is equivalent to about
# -18.2dB.
g1 = min(round((10 ** (gain / -10)) * 1000), 65534)
# Same as above, except our reference level is 2500 units.
g2 = min(round((10 ** (gain / -10)) * 2500), 65534)
# The purpose of these values are unknown, but they also seem to be
# unused so we just use zero.
uk = 0
values = (g1, g1, g2, g2, uk, uk, peak, peak, uk, uk)
return (' %08X' * 10) % values
# Cover art and other images.
def _image_mime_type(data):
"""Return the MIME type of the image data (a bytestring).
"""
kind = imghdr.what(None, h=data)
if kind in ['gif', 'jpeg', 'png', 'tiff', 'bmp']:
return 'image/{0}'.format(kind)
elif kind == 'pgm':
return 'image/x-portable-graymap'
elif kind == 'pbm':
return 'image/x-portable-bitmap'
elif kind == 'ppm':
return 'image/x-portable-pixmap'
elif kind == 'xbm':
return 'image/x-xbitmap'
else:
return 'image/x-{0}'.format(kind)
class ImageType(enum.Enum):
    """Indicates the kind of an `Image` stored in a file's tag.

    The integer values serve as the on-disk "picture type" index (see
    ``Image.type_index`` and the image storage styles), so the order
    and numbering of the members must not change.
    """
    other = 0
    icon = 1
    other_icon = 2
    front = 3
    back = 4
    leaflet = 5
    media = 6
    lead_artist = 7
    artist = 8
    conductor = 9
    group = 10
    composer = 11
    lyricist = 12
    recording_location = 13
    recording_session = 14
    performance = 15
    screen_capture = 16
    fish = 17
    illustration = 18
    artist_logo = 19
    publisher_logo = 20
class Image(object):
    """Structure representing image data and metadata that can be
    stored and retrieved from tags.

    The structure has four properties.
    * ``data``  The binary data of the image
    * ``desc``  An optional description of the image
    * ``type``  An instance of `ImageType` indicating the kind of image
    * ``mime_type``  Read-only property that contains the mime type of
      the binary data
    """
    def __init__(self, data, desc=None, type=None):
        self.data = data
        self.desc = desc
        # Tags store the image kind as an integer index; translate it
        # to an ImageType member, falling back to "other" for indices
        # we do not know about.
        if isinstance(type, int):
            try:
                type = list(ImageType)[type]
            except IndexError:
                log.debug(u"ignoring unknown image type index {0}", type)
                type = ImageType.other
        self.type = type

    @property
    def mime_type(self):
        """MIME type sniffed from ``data``; None when there is no
        data."""
        return _image_mime_type(self.data) if self.data else None

    @property
    def type_index(self):
        """Numeric value of ``type``; defaults to 0 ("other") when the
        type is unset, for tag formats that require an index."""
        return 0 if self.type is None else self.type.value
# StorageStyle classes describe strategies for accessing values in
# Mutagen file objects.
class StorageStyle(object):
    """A strategy for storing a value for a certain tag format (or set
    of tag formats). This basic StorageStyle describes simple 1:1
    mapping from raw values to keys in a Mutagen file object; subclasses
    describe more sophisticated translations or format-specific access
    strategies.

    MediaFile uses a StorageStyle via three methods: ``get()``,
    ``set()``, and ``delete()``. It passes a Mutagen file object to
    each.

    Internally, the StorageStyle implements ``get()`` and ``set()``
    using two steps that may be overridden by subtypes. To get a value,
    the StorageStyle first calls ``fetch()`` to retrieve the value
    corresponding to a key and then ``deserialize()`` to convert the raw
    Mutagen value to a consumable Python value. Similarly, to set a
    field, we call ``serialize()`` to encode the value and then
    ``store()`` to assign the result into the Mutagen object.

    Each StorageStyle type has a class-level `formats` attribute that is
    a list of strings indicating the formats that the style applies to.
    MediaFile only uses StorageStyles that apply to the correct type for
    a given audio file.
    """
    formats = ['FLAC', 'OggOpus', 'OggTheora', 'OggSpeex', 'OggVorbis',
               'OggFlac', 'APEv2File', 'WavPack', 'Musepack', 'MonkeysAudio']
    """List of mutagen classes the StorageStyle can handle.
    """

    def __init__(self, key, as_type=str, suffix=None, float_places=2):
        """Create a basic storage strategy. Parameters:

        - `key`: The key on the Mutagen file object used to access the
          field's data.
        - `as_type`: The Python type that the value is stored as
          internally (`str`, `int`, `bool`, or `bytes`).
        - `suffix`: When `as_type` is a string type, append this before
          storing the value.
        - `float_places`: When the value is a floating-point number and
          encoded as a string, the number of digits to store after the
          decimal point.
        """
        self.key = key
        self.as_type = as_type
        self.suffix = suffix
        self.float_places = float_places

        # Convert suffix to correct string type.
        # (A bytes suffix is decoded so concatenation in serialize()
        # stays str + str.)
        if self.suffix and self.as_type is str \
           and not isinstance(self.suffix, str):
            self.suffix = self.suffix.decode('utf8')

    # Getter.
    def get(self, mutagen_file):
        """Get the value for the field using this style.
        """
        return self.deserialize(self.fetch(mutagen_file))

    def fetch(self, mutagen_file):
        """Retrieve the raw value of for this tag from the Mutagen file
        object. Returns None when the key is missing or its list is
        empty.
        """
        try:
            return mutagen_file[self.key][0]
        except (KeyError, IndexError):
            return None

    def deserialize(self, mutagen_value):
        """Given a raw value stored on a Mutagen object, decode and
        return the represented value.
        """
        # Strip the configured suffix that serialize() appended.
        if self.suffix and isinstance(mutagen_value, str) \
           and mutagen_value.endswith(self.suffix):
            return mutagen_value[:-len(self.suffix)]
        else:
            return mutagen_value

    # Setter.
    def set(self, mutagen_file, value):
        """Assign the value for the field using this style.
        """
        self.store(mutagen_file, self.serialize(value))

    def store(self, mutagen_file, value):
        """Store a serialized value in the Mutagen file object.
        """
        # Mutagen's dict-like interface keeps values in lists.
        mutagen_file[self.key] = [value]

    def serialize(self, value):
        """Convert the external Python value to a type that is suitable for
        storing in a Mutagen file object.
        """
        if isinstance(value, float) and self.as_type is str:
            # Render floats with a fixed number of decimal places.
            value = '{0:.{1}f}'.format(value, self.float_places)
            value = self.as_type(value)
        elif self.as_type is str:
            if isinstance(value, bool):
                # Store bools as 1/0 instead of True/False.
                value = str(int(bool(value)))
            elif isinstance(value, bytes):
                value = value.decode('utf8', 'ignore')
            else:
                value = str(value)
        else:
            value = self.as_type(value)

        if self.suffix:
            value += self.suffix

        return value

    def delete(self, mutagen_file):
        """Remove the tag from the file.
        """
        if self.key in mutagen_file:
            del mutagen_file[self.key]
class ListStorageStyle(StorageStyle):
    """Abstract storage style that provides access to lists.

    The ListMediaField descriptor uses a ListStorageStyle via two
    methods: ``get_list()`` and ``set_list()``. It passes a Mutagen
    file object to each.

    Subclasses may overwrite ``fetch`` and ``store``. ``fetch`` must
    return a (possibly empty) list and ``store`` receives a serialized
    list of values as the second argument.

    The `serialize` and `deserialize` methods (from the base
    `StorageStyle`) are still called with individual values; this class
    handles packing and unpacking the values into lists.
    """
    def get(self, mutagen_file):
        """Return the first value of the field's list, or None when the
        list is empty."""
        values = self.get_list(mutagen_file)
        if values:
            return values[0]
        return None

    def get_list(self, mutagen_file):
        """Deserialize every raw value stored for this field."""
        return [self.deserialize(raw) for raw in self.fetch(mutagen_file)]

    def fetch(self, mutagen_file):
        """Get the list of raw (serialized) values; [] when absent."""
        try:
            return mutagen_file[self.key]
        except KeyError:
            return []

    def set(self, mutagen_file, value):
        """Store ``value`` as the single element of the field's list."""
        self.set_list(mutagen_file, [value])

    def set_list(self, mutagen_file, values):
        """Serialize and store an iterable of values for this field."""
        serialized = [self.serialize(value) for value in values]
        self.store(mutagen_file, serialized)

    def store(self, mutagen_file, values):
        """Assign the raw (serialized) value list for this field."""
        mutagen_file[self.key] = values
class SoundCheckStorageStyleMixin(object):
    """A mixin for storage styles that read and write iTunes SoundCheck
    analysis values. The object must have an `index` field that
    indicates which half of the gain/peak pair---0 or 1---the field
    represents.
    """
    def get(self, mutagen_file):
        # Decode the whole SoundCheck blob and select our half of the
        # (gain, peak) pair; None when the tag is absent.
        data = self.fetch(mutagen_file)
        if data is not None:
            return _sc_decode(data)[self.index]

    def set(self, mutagen_file, value):
        # Read-modify-write so that the *other* half of the pair is
        # preserved when only this field is updated.
        data = self.fetch(mutagen_file)
        if data is None:
            gain_peak = [0, 0]
        else:
            gain_peak = list(_sc_decode(data))
        gain_peak[self.index] = value or 0
        data = self.serialize(_sc_encode(*gain_peak))
        self.store(mutagen_file, data)
class ASFStorageStyle(ListStorageStyle):
    """A general storage style for Windows Media/ASF files.
    """
    formats = ['ASF']

    def deserialize(self, data):
        # Mutagen wraps ASF values in attribute objects; unwrap them to
        # the plain Python value before normal processing.
        if isinstance(data, mutagen.asf.ASFBaseAttribute):
            data = data.value
        return data
class MP4StorageStyle(StorageStyle):
    """A general storage style for MPEG-4 tags.
    """
    formats = ['MP4']

    def serialize(self, value):
        value = super(MP4StorageStyle, self).serialize(value)
        # Freeform atoms ('----:...') take raw bytes, while standard
        # atoms take text values.
        if self.key.startswith('----:') and isinstance(value, str):
            value = value.encode('utf8')
        return value
class MP4TupleStorageStyle(MP4StorageStyle):
    """A style for storing values as part of a pair of numbers in an
    MPEG-4 file.
    """
    def __init__(self, key, index=0, **kwargs):
        # `index` selects which half of the pair this field represents.
        super(MP4TupleStorageStyle, self).__init__(key, **kwargs)
        self.index = index

    def deserialize(self, mutagen_value):
        # Normalize the raw value to a 2-item list, padding missing
        # slots with 0.
        items = mutagen_value or []
        packing_length = 2
        return list(items) + [0] * (packing_length - len(items))

    def get(self, mutagen_file):
        value = super(MP4TupleStorageStyle, self).get(mutagen_file)[self.index]
        if value == 0:
            # The values are always present and saved as integers. So we
            # assume that "0" indicates it is not set.
            return None
        else:
            return value

    def set(self, mutagen_file, value):
        if value is None:
            value = 0
        # Preserve the other half of the pair when updating ours.
        items = self.deserialize(self.fetch(mutagen_file))
        items[self.index] = int(value)
        self.store(mutagen_file, items)

    def delete(self, mutagen_file):
        # Both halves share one tag: deleting the first slot removes
        # the whole tag; deleting the second just zeroes it out.
        if self.index == 0:
            super(MP4TupleStorageStyle, self).delete(mutagen_file)
        else:
            self.set(mutagen_file, None)
class MP4ListStorageStyle(ListStorageStyle, MP4StorageStyle):
    """List-valued storage for MPEG-4 tags: combines the list protocol
    of `ListStorageStyle` with the serialization of `MP4StorageStyle`.
    """
    pass
class MP4SoundCheckStorageStyle(SoundCheckStorageStyleMixin, MP4StorageStyle):
    """Reads and writes iTunes SoundCheck values stored in an MPEG-4
    atom (see `SoundCheckStorageStyleMixin`).
    """
    def __init__(self, key, index=0, **kwargs):
        # `index`: which half of the (gain, peak) pair this field is.
        super(MP4SoundCheckStorageStyle, self).__init__(key, **kwargs)
        self.index = index
class MP4BoolStorageStyle(MP4StorageStyle):
    """A style for booleans in MPEG-4 files. (MPEG-4 has an atom type
    specifically for representing booleans.)
    """
    def get(self, mutagen_file):
        # Boolean atoms are stored directly, not wrapped in a list, so
        # bypass the usual fetch()/deserialize() pipeline.
        try:
            return mutagen_file[self.key]
        except KeyError:
            return None

    def get_list(self, mutagen_file):
        raise NotImplementedError('MP4 bool storage does not support lists')

    def set(self, mutagen_file, value):
        mutagen_file[self.key] = value

    def set_list(self, mutagen_file, values):
        raise NotImplementedError('MP4 bool storage does not support lists')
class MP4ImageStorageStyle(MP4ListStorageStyle):
    """Store images as MPEG-4 image atoms. Values are `Image` objects.
    """
    def __init__(self, **kwargs):
        # Mutagen's MP4 tags are keyed by (unicode) strings on
        # Python 3; the previous bytes key b'covr' could never match
        # the cover-art atom.
        super(MP4ImageStorageStyle, self).__init__(key='covr', **kwargs)

    def deserialize(self, data):
        """Wrap a raw cover atom's bytes in an `Image`. The atom
        carries no description or type metadata.
        """
        return Image(data)

    def serialize(self, image):
        """Convert an `Image` into a `mutagen.mp4.MP4Cover`.

        Raises ValueError for anything other than PNG or JPEG, the
        only formats the covr atom supports.
        """
        if image.mime_type == 'image/png':
            kind = mutagen.mp4.MP4Cover.FORMAT_PNG
        elif image.mime_type == 'image/jpeg':
            kind = mutagen.mp4.MP4Cover.FORMAT_JPEG
        else:
            raise ValueError('MP4 files only supports PNG and JPEG images')
        return mutagen.mp4.MP4Cover(image.data, kind)
class MP3StorageStyle(StorageStyle):
    """Store data in ID3 frames.
    """
    formats = ['MP3', 'AIFF']

    def __init__(self, key, id3_lang=None, **kwargs):
        """Create a new ID3 storage style. `id3_lang` is the value for
        the language field of newly created frames.
        """
        self.id3_lang = id3_lang
        super(MP3StorageStyle, self).__init__(key, **kwargs)

    def fetch(self, mutagen_file):
        # ID3 frames keep their values in a `text` list attribute.
        try:
            return mutagen_file[self.key].text[0]
        except (KeyError, IndexError):
            return None

    def store(self, mutagen_file, value):
        # encoding=3 selects UTF-8 for the frame's text content.
        frame = mutagen.id3.Frames[self.key](encoding=3, text=[value])
        mutagen_file.tags.setall(self.key, [frame])
class MP3ListStorageStyle(ListStorageStyle, MP3StorageStyle):
    """Store lists of data in multiple ID3 frames.
    """
    def fetch(self, mutagen_file):
        # Return the frame's whole `text` list rather than one item.
        try:
            return mutagen_file[self.key].text
        except KeyError:
            return []

    def store(self, mutagen_file, values):
        # A single frame carries all values in its text list.
        frame = mutagen.id3.Frames[self.key](encoding=3, text=values)
        mutagen_file.tags.setall(self.key, [frame])
class MP3UFIDStorageStyle(MP3StorageStyle):
    """Store data in a UFID ID3 frame with a particular owner.
    """
    def __init__(self, owner, **kwargs):
        self.owner = owner
        super(MP3UFIDStorageStyle, self).__init__('UFID:' + owner, **kwargs)

    def fetch(self, mutagen_file):
        """Return the raw data of our owner's UFID frame, or None."""
        try:
            return mutagen_file[self.key].data
        except KeyError:
            return None

    def store(self, mutagen_file, value):
        """Set the UFID data, updating an existing frame with our owner
        in place or creating a new one.
        """
        frames = mutagen_file.tags.getall(self.key)
        for frame in frames:
            # Replace existing frame data.
            if frame.owner == self.owner:
                frame.data = value
                # The break is essential: without it the for/else "new
                # frame" branch below always ran and clobbered the
                # frame we just updated.
                break
        else:
            # No frame with our owner exists yet; create one.
            frame = mutagen.id3.UFID(owner=self.owner, data=value)
            mutagen_file.tags.setall(self.key, [frame])
class MP3DescStorageStyle(MP3StorageStyle):
    """Store data in a TXXX (or similar) ID3 frame. The frame is
    selected based its ``desc`` field.
    """
    def __init__(self, desc='', key='TXXX', **kwargs):
        self.description = desc
        super(MP3DescStorageStyle, self).__init__(key=key, **kwargs)

    def store(self, mutagen_file, value):
        """Update the matching frame's text in place, or add a new
        frame when none matches our description.
        """
        frames = mutagen_file.tags.getall(self.key)
        if self.key != 'USLT':
            # Except for lyrics (USLT), ID3 text frames hold a list.
            value = [value]

        # Try modifying in place.
        found = False
        for frame in frames:
            if frame.desc.lower() == self.description.lower():
                frame.text = value
                found = True

        # Need to make a new frame?
        if not found:
            frame = mutagen.id3.Frames[self.key](
                # The desc field is text. The old bytes(self.description)
                # call raised TypeError on Python 3 (bytes(str) needs an
                # encoding) and would hand Mutagen the wrong type anyway.
                desc=self.description,
                text=value,
                encoding=3
            )
            if self.id3_lang:
                frame.lang = self.id3_lang
            mutagen_file.tags.add(frame)

    def fetch(self, mutagen_file):
        """Return the value of the frame whose description matches
        ours (case-insensitively), or None.
        """
        for frame in mutagen_file.tags.getall(self.key):
            if frame.desc.lower() == self.description.lower():
                if self.key == 'USLT':
                    # Lyrics frames store a single string.
                    return frame.text
                try:
                    return frame.text[0]
                except IndexError:
                    return None

    def delete(self, mutagen_file):
        """Remove the frame matching our description, if any."""
        found_frame = None
        for frame in mutagen_file.tags.getall(self.key):
            if frame.desc.lower() == self.description.lower():
                found_frame = frame
                break
        if found_frame is not None:
            # Delete via the found frame's own hash key instead of
            # relying on the leaked loop variable.
            del mutagen_file[found_frame.HashKey]
class MP3SlashPackStorageStyle(MP3StorageStyle):
    """Store value as part of pair that is serialized as a slash-
    separated string.
    """
    def __init__(self, key, pack_pos=0, **kwargs):
        super(MP3SlashPackStorageStyle, self).__init__(key, **kwargs)
        # pack_pos: 0 for the value before the slash, 1 for the value
        # after it.
        self.pack_pos = pack_pos

    def _fetch_unpacked(self, mutagen_file):
        # Split "a/b" into a 2-item list, padding missing slots with
        # None.
        data = self.fetch(mutagen_file)
        if data:
            items = str(data).split('/')
        else:
            items = []
        packing_length = 2
        return list(items) + [None] * (packing_length - len(items))

    def get(self, mutagen_file):
        return self._fetch_unpacked(mutagen_file)[self.pack_pos]

    def set(self, mutagen_file, value):
        # Rewrite the whole pair, keeping the other half intact.
        items = self._fetch_unpacked(mutagen_file)
        items[self.pack_pos] = value
        if items[0] is None:
            items[0] = ''
        if items[1] is None:
            items.pop()  # Do not store last value
        self.store(mutagen_file, '/'.join(map(str, items)))

    def delete(self, mutagen_file):
        # Deleting the first slot drops the whole tag; deleting the
        # second leaves only the first half stored.
        if self.pack_pos == 0:
            super(MP3SlashPackStorageStyle, self).delete(mutagen_file)
        else:
            self.set(mutagen_file, None)
class MP3ImageStorageStyle(ListStorageStyle, MP3StorageStyle):
    """Converts between APIC frames and ``Image`` instances.

    The `get_list` method inherited from ``ListStorageStyle`` returns a
    list of ``Image``s. Similarly, the `set_list` method accepts a
    list of ``Image``s as its ``values`` argument.
    """
    def __init__(self):
        super(MP3ImageStorageStyle, self).__init__(key='APIC')
        self.as_type = bytes

    def deserialize(self, apic_frame):
        """Convert APIC frame into Image."""
        return Image(data=apic_frame.data, desc=apic_frame.desc,
                     type=apic_frame.type)

    def fetch(self, mutagen_file):
        # All APIC frames, one per image.
        return mutagen_file.tags.getall(self.key)

    def store(self, mutagen_file, frames):
        mutagen_file.tags.setall(self.key, frames)

    def delete(self, mutagen_file):
        mutagen_file.tags.delall(self.key)

    def serialize(self, image):
        """Return an APIC frame populated with data from ``image``.
        """
        assert isinstance(image, Image)
        frame = mutagen.id3.Frames[self.key]()
        frame.data = image.data
        frame.mime = image.mime_type
        # The description is a text field: Mutagen expects a unicode
        # string on Python 3, not UTF-8-encoded bytes as the previous
        # .encode('utf8') produced.
        frame.desc = image.desc or u''
        frame.encoding = 3  # UTF-8 encoding of desc
        frame.type = image.type_index
        return frame
class MP3SoundCheckStorageStyle(SoundCheckStorageStyleMixin,
                                MP3DescStorageStyle):
    """Reads and writes iTunes SoundCheck values stored in a
    description-selected ID3 frame (see `SoundCheckStorageStyleMixin`).
    """
    def __init__(self, index=0, **kwargs):
        # `index`: which half of the (gain, peak) pair this field is.
        super(MP3SoundCheckStorageStyle, self).__init__(**kwargs)
        self.index = index
class ASFImageStorageStyle(ListStorageStyle):
    """Store images packed into Windows Media/ASF byte array attributes.
    Values are `Image` objects.
    """
    formats = ['ASF']

    def __init__(self):
        super(ASFImageStorageStyle, self).__init__(key='WM/Picture')

    def deserialize(self, asf_picture):
        # WM/Picture values are opaque byte blobs; _unpack_asf_image
        # splits them into MIME type, raw data, type index, and
        # description. The MIME type is dropped: Image re-sniffs it
        # from the data.
        mime, data, type, desc = _unpack_asf_image(asf_picture.value)
        return Image(data, desc=desc, type=type)

    def serialize(self, image):
        pic = mutagen.asf.ASFByteArrayAttribute()
        pic.value = _pack_asf_image(image.mime_type, image.data,
                                    type=image.type_index,
                                    description=image.desc or '')
        return pic
class VorbisImageStorageStyle(ListStorageStyle):
    """Store images in Vorbis comments. Both legacy COVERART fields and
    modern METADATA_BLOCK_PICTURE tags are supported. Data is
    base64-encoded. Values are `Image` objects.
    """
    formats = ['OggOpus', 'OggTheora', 'OggSpeex', 'OggVorbis',
               'OggFlac']

    def __init__(self):
        super(VorbisImageStorageStyle, self).__init__(
            key='metadata_block_picture'
        )
        self.as_type = bytes

    def fetch(self, mutagen_file):
        images = []
        if 'metadata_block_picture' not in mutagen_file:
            # Try legacy COVERART tags.
            # (Those carry only bare base64 image data, with no type or
            # description metadata.)
            if 'coverart' in mutagen_file:
                for data in mutagen_file['coverart']:
                    images.append(Image(base64.b64decode(data)))
            return images
        for data in mutagen_file["metadata_block_picture"]:
            try:
                # Each modern value is a base64-encoded FLAC picture
                # block carrying type and description metadata.
                pic = mutagen.flac.Picture(base64.b64decode(data))
            except (TypeError, AttributeError):
                # Skip malformed picture blocks.
                continue
            images.append(Image(data=pic.data, desc=pic.desc,
                                type=pic.type))
        return images

    def store(self, mutagen_file, image_data):
        # Strip all art, including legacy COVERART.
        if 'coverart' in mutagen_file:
            del mutagen_file['coverart']
        if 'coverartmime' in mutagen_file:
            del mutagen_file['coverartmime']
        super(VorbisImageStorageStyle, self).store(mutagen_file, image_data)

    def serialize(self, image):
        """Turn a Image into a base64 encoded FLAC picture block.
        """
        pic = mutagen.flac.Picture()
        pic.data = image.data
        pic.type = image.type_index
        pic.mime = image.mime_type
        pic.desc = image.desc or ''
        return base64.b64encode(pic.write())
class FlacImageStorageStyle(ListStorageStyle):
    """Converts between ``mutagen.flac.Picture`` and ``Image`` instances.
    """
    formats = ['FLAC']

    def __init__(self):
        # FLAC pictures live in a dedicated metadata block, not in the
        # tag dictionary, so the key is unused.
        super(FlacImageStorageStyle, self).__init__(key='')

    def fetch(self, mutagen_file):
        return mutagen_file.pictures

    def deserialize(self, flac_picture):
        return Image(data=flac_picture.data, desc=flac_picture.desc,
                     type=flac_picture.type)

    def store(self, mutagen_file, pictures):
        """``pictures`` is a list of mutagen.flac.Picture instances.
        """
        # Replace, rather than append to, the existing picture set.
        mutagen_file.clear_pictures()
        for pic in pictures:
            mutagen_file.add_picture(pic)

    def serialize(self, image):
        """Turn a Image into a mutagen.flac.Picture.
        """
        pic = mutagen.flac.Picture()
        pic.data = image.data
        pic.type = image.type_index
        pic.mime = image.mime_type
        pic.desc = image.desc or ''
        return pic

    def delete(self, mutagen_file):
        """Remove all images from the file.
        """
        mutagen_file.clear_pictures()
class APEv2ImageStorageStyle(ListStorageStyle):
    """Store images in APEv2 tags. Values are `Image` objects.

    Each image type is stored under its own tag name (see TAG_NAMES),
    so at most one image per type can be stored.
    """
    formats = ['APEv2File', 'WavPack', 'Musepack', 'MonkeysAudio', 'OptimFROG']

    # Maps each ImageType to the APEv2 tag key used to store it.
    TAG_NAMES = {
        ImageType.other: 'Cover Art (other)',
        ImageType.icon: 'Cover Art (icon)',
        ImageType.other_icon: 'Cover Art (other icon)',
        ImageType.front: 'Cover Art (front)',
        ImageType.back: 'Cover Art (back)',
        ImageType.leaflet: 'Cover Art (leaflet)',
        ImageType.media: 'Cover Art (media)',
        ImageType.lead_artist: 'Cover Art (lead)',
        ImageType.artist: 'Cover Art (artist)',
        ImageType.conductor: 'Cover Art (conductor)',
        ImageType.group: 'Cover Art (band)',
        ImageType.composer: 'Cover Art (composer)',
        ImageType.lyricist: 'Cover Art (lyricist)',
        ImageType.recording_location: 'Cover Art (studio)',
        ImageType.recording_session: 'Cover Art (recording)',
        ImageType.performance: 'Cover Art (performance)',
        ImageType.screen_capture: 'Cover Art (movie scene)',
        ImageType.fish: 'Cover Art (colored fish)',
        ImageType.illustration: 'Cover Art (illustration)',
        ImageType.artist_logo: 'Cover Art (band logo)',
        ImageType.publisher_logo: 'Cover Art (publisher logo)',
    }

    def __init__(self):
        # Images span many tag keys, so the single `key` is unused.
        super(APEv2ImageStorageStyle, self).__init__(key='')

    def fetch(self, mutagen_file):
        images = []
        for cover_type, cover_tag in self.TAG_NAMES.items():
            try:
                frame = mutagen_file[cover_tag]
                # The value is a NUL-terminated comment/description
                # followed by the raw image bytes.
                # NOTE(review): frame.value may be bytes on Python 3,
                # in which case .find('\x00') needs a bytes argument
                # (b'\x00') -- confirm against Mutagen's APEv2 API.
                text_delimiter_index = frame.value.find('\x00')
                comment = frame.value[0:text_delimiter_index] \
                    if text_delimiter_index > 0 else None
                image_data = frame.value[text_delimiter_index + 1:]
                images.append(Image(data=image_data, type=cover_type,
                                    desc=comment))
            except KeyError:
                pass
        return images

    def set_list(self, mutagen_file, values):
        # Replace the full set of images: clear everything, then write
        # one tag per image (later images of the same type win).
        self.delete(mutagen_file)
        for image in values:
            image_type = image.type or ImageType.other
            comment = image.desc or ''
            image_data = comment.encode('utf8') + b'\x00' + image.data
            cover_tag = self.TAG_NAMES[image_type]
            mutagen_file[cover_tag] = image_data

    def delete(self, mutagen_file):
        """Remove all images from the file.
        """
        for cover_tag in self.TAG_NAMES.values():
            try:
                del mutagen_file[cover_tag]
            except KeyError:
                pass
# MediaField is a descriptor that represents a single logical field. It
# aggregates several StorageStyles describing how to access the data for
# each file type.
class MediaField(object):
    """A descriptor providing access to a particular (abstract) metadata
    field.
    """
    def __init__(self, *styles, **kwargs):
        """Creates a new MediaField.

        :param styles: `StorageStyle` instances that describe the strategy
                       for reading and writing the field in particular
                       formats. There must be at least one style for
                       each possible file format.

        :param out_type: the type of the value that should be returned when
                         getting this property.
        """
        self.out_type = kwargs.get('out_type', str)
        self._styles = styles

    def styles(self, mutagen_file):
        """Yields the list of storage styles of this field that can
        handle the MediaFile's format.
        """
        for style in self._styles:
            # Each style declares the Mutagen class names it supports
            # in its `formats` list.
            if mutagen_file.__class__.__name__ in style.formats:
                yield style

    def __get__(self, mediafile, owner=None):
        out = None
        for style in self.styles(mediafile.mgfile):
            out = style.get(mediafile.mgfile)
            # First style yielding a truthy value wins; falsy values
            # fall through to the remaining styles.
            if out:
                break
        return _safe_cast(self.out_type, out)

    def __set__(self, mediafile, value):
        if value is None:
            # Map None to this type's "null" value (0, '', etc.).
            value = self._none_value()
        # Write through every applicable style so all tag variants in
        # the file stay in sync.
        for style in self.styles(mediafile.mgfile):
            style.set(mediafile.mgfile, value)

    def __delete__(self, mediafile):
        for style in self.styles(mediafile.mgfile):
            style.delete(mediafile.mgfile)

    def _none_value(self):
        """Get an appropriate "null" value for this field's type. This
        is used internally when setting the field to None.
        """
        if self.out_type == int:
            return 0
        elif self.out_type == float:
            return 0.0
        elif self.out_type == bool:
            return False
        elif self.out_type == str:
            return ''
class ListMediaField(MediaField):
    """Property descriptor that retrieves a list of multiple values from
    a tag.

    Uses the ``get_list`` and ``set_list`` methods of its
    ``StorageStyle`` strategies to do the actual work.
    """
    def __get__(self, mediafile, _):
        values = []
        # Unlike MediaField.__get__, every applicable style contributes
        # its values rather than stopping at the first hit.
        for style in self.styles(mediafile.mgfile):
            values.extend(style.get_list(mediafile.mgfile))
        return [_safe_cast(self.out_type, value) for value in values]

    def __set__(self, mediafile, values):
        for style in self.styles(mediafile.mgfile):
            style.set_list(mediafile.mgfile, values)

    def single_field(self):
        """Returns a ``MediaField`` descriptor that gets and sets the
        first item.
        """
        options = {'out_type': self.out_type}
        return MediaField(*self._styles, **options)
class DateField(MediaField):
    """Descriptor that handles serializing and deserializing dates.

    The getter parses value from tags into a ``datetime.date`` instance
    and setter serializes such an instance into a string.

    For granular access to year, month, and day, use the ``*_field``
    methods to create corresponding `DateItemField`s.
    """
    def __init__(self, *date_styles, **kwargs):
        """``date_styles`` is a list of ``StorageStyle``s to store and
        retrieve the whole date from. The ``year`` option is an
        additional list of fallback styles for the year. The year is
        always set on this style, but is only retrieved if the main
        storage styles do not return a value.
        """
        super(DateField, self).__init__(*date_styles)
        year_style = kwargs.get('year', None)
        if year_style:
            self._year_field = MediaField(*year_style)

    def __get__(self, mediafile, owner=None):
        year, month, day = self._get_date_tuple(mediafile)
        if not year:
            return None
        try:
            # Missing month/day default to 1 to form a valid date.
            return datetime.date(
                year,
                month or 1,
                day or 1
            )
        except ValueError:  # Out of range values.
            return None

    def __set__(self, mediafile, date):
        if date is None:
            self._set_date_tuple(mediafile, None, None, None)
        else:
            self._set_date_tuple(mediafile, date.year, date.month, date.day)

    def __delete__(self, mediafile):
        super(DateField, self).__delete__(mediafile)
        if hasattr(self, '_year_field'):
            self._year_field.__delete__(mediafile)

    def _get_date_tuple(self, mediafile):
        """Get a 3-item sequence representing the date consisting of a
        year, month, and day number. Each number is either an integer or
        None.
        """
        # Get the underlying data and split on hyphens and slashes.
        datestring = super(DateField, self).__get__(mediafile, None)
        if isinstance(datestring, str):
            # Strip any time-of-day suffix (e.g. "2001-02-03T04:05").
            datestring = re.sub(r'[Tt ].*$', '', str(datestring))
            items = re.split('[-/]', str(datestring))
        else:
            items = []

        # Ensure that we have exactly 3 components, possibly by
        # truncating or padding.
        items = items[:3]
        if len(items) < 3:
            items += [None] * (3 - len(items))

        # Use year field if year is missing.
        if not items[0] and hasattr(self, '_year_field'):
            items[0] = self._year_field.__get__(mediafile)

        # Convert each component to an integer if possible.
        items_ = []
        for item in items:
            try:
                items_.append(int(item))
            except (ValueError, TypeError):
                # Non-numeric strings raise ValueError; None raises
                # TypeError. The previous bare `except:` also swallowed
                # KeyboardInterrupt and genuine bugs.
                items_.append(None)
        return items_

    def _set_date_tuple(self, mediafile, year, month=None, day=None):
        """Set the value of the field given a year, month, and day
        number. Each number can be an integer or None to indicate an
        unset component.
        """
        if year is None:
            # No year means no date at all.
            self.__delete__(mediafile)
            return

        date = ['{0:04d}'.format(int(year))]
        if month:
            date.append('{0:02d}'.format(int(month)))
        if month and day:
            # A day is only stored when a month is present too.
            date.append('{0:02d}'.format(int(day)))
        date = map(str, date)
        super(DateField, self).__set__(mediafile, '-'.join(date))

        if hasattr(self, '_year_field'):
            self._year_field.__set__(mediafile, year)

    def year_field(self):
        return DateItemField(self, 0)

    def month_field(self):
        return DateItemField(self, 1)

    def day_field(self):
        return DateItemField(self, 2)
class DateItemField(MediaField):
    """Descriptor that gets and sets constituent parts of a `DateField`:
    the month, day, or year.
    """
    def __init__(self, date_field, item_pos):
        # Parent `DateField` plus the index (0=year, 1=month, 2=day)
        # this descriptor exposes.
        self.date_field = date_field
        self.item_pos = item_pos

    def __get__(self, mediafile, _):
        parts = self.date_field._get_date_tuple(mediafile)
        return parts[self.item_pos]

    def __set__(self, mediafile, value):
        parts = self.date_field._get_date_tuple(mediafile)
        parts[self.item_pos] = value
        self.date_field._set_date_tuple(mediafile, *parts)

    def __delete__(self, mediafile):
        # Deleting a component is the same as setting it to "unset".
        self.__set__(mediafile, None)
class CoverArtField(MediaField):
    """A descriptor that provides access to the *raw image data* for the
    cover image on a file. This is used for backwards compatibility: the
    full `ImageListField` provides richer `Image` objects.

    When there are multiple images we try to pick the most likely to be a front
    cover.
    """
    def __init__(self):
        pass

    def __get__(self, mediafile, _):
        images = mediafile.images
        if not images:
            return None
        return self.guess_cover_image(images).data

    @staticmethod
    def guess_cover_image(candidates):
        # Single candidate: nothing to guess.
        if len(candidates) == 1:
            return candidates[0]
        # Prefer an explicit front cover; otherwise fall back to the
        # first image.
        for candidate in candidates:
            if candidate.type == ImageType.front:
                return candidate
        return candidates[0]

    def __set__(self, mediafile, data):
        # Assigning raw data replaces the whole image list; a falsy
        # value clears it.
        mediafile.images = [Image(data=data)] if data else []

    def __delete__(self, mediafile):
        delattr(mediafile, 'images')
class ImageListField(ListMediaField):
    """Descriptor to access the list of images embedded in tags.

    The getter returns a list of `Image` instances obtained from
    the tags. The setter accepts a list of `Image` instances to be
    written to the tags.
    """
    def __init__(self):
        # The storage styles used here must implement the
        # `ListStorageStyle` interface and get and set lists of
        # `Image`s. One style is registered per supported container
        # format; `ListMediaField.styles` picks the applicable ones.
        super(ImageListField, self).__init__(
            MP3ImageStorageStyle(),
            MP4ImageStorageStyle(),
            ASFImageStorageStyle(),
            VorbisImageStorageStyle(),
            FlacImageStorageStyle(),
            APEv2ImageStorageStyle(),
            out_type=Image,
        )
# MediaFile is a collection of fields.
class MediaFile(object):
    """Represents a multimedia file on disk and provides access to its
    metadata.
    """
    def __init__(self, path, id3v23=False):
        """Constructs a new `MediaFile` reflecting the file at path. May
        throw `UnreadableFileError`.

        By default, MP3 files are saved with ID3v2.4 tags. You can use
        the older ID3v2.3 standard by specifying the `id3v23` option.
        """
        self.path = path

        # Open the file with Mutagen, translating its failure modes
        # into this module's exception hierarchy.
        try:
            self.mgfile = mutagen.File(path)
        except mutagen.MutagenError as exc:
            log.debug('header parsing failed: {0}', str(exc))
            raise UnreadableFileError(path)
        except IOError as exc:
            if type(exc) == IOError:
                # This is a base IOError, not a subclass from Mutagen or
                # anywhere else.
                raise
            else:
                # An IOError subclass raised from inside Mutagen is
                # treated as a Mutagen bug.
                log.debug('{}', traceback.format_exc())
                raise MutagenError(path, exc)
        except Exception as exc:
            # Isolate bugs in Mutagen.
            log.debug('{}', traceback.format_exc())
            log.error('uncaught Mutagen exception in open: {0}', exc)
            raise MutagenError(path, exc)

        # Map the Mutagen object's class name to this module's short
        # `type` string (keys of the TYPES table).
        if self.mgfile is None:
            # Mutagen couldn't guess the type
            raise FileTypeError(path)
        elif (type(self.mgfile).__name__ == 'M4A' or
              type(self.mgfile).__name__ == 'MP4'):
            # MPEG-4 containers may hold lossy AAC or lossless ALAC.
            info = self.mgfile.info
            if hasattr(info, 'codec'):
                if info.codec and info.codec.startswith('alac'):
                    self.type = 'alac'
                else:
                    self.type = 'aac'
            else:
                # This hack differentiates AAC and ALAC on versions of
                # Mutagen < 1.26. Once Mutagen > 1.26 is out and
                # required by beets, we can remove this.
                if hasattr(self.mgfile.info, 'bitrate') and \
                   self.mgfile.info.bitrate > 0:
                    self.type = 'aac'
                else:
                    self.type = 'alac'
        elif (type(self.mgfile).__name__ == 'ID3' or
              type(self.mgfile).__name__ == 'MP3'):
            self.type = 'mp3'
        elif type(self.mgfile).__name__ == 'FLAC':
            self.type = 'flac'
        elif type(self.mgfile).__name__ == 'OggOpus':
            self.type = 'opus'
        elif type(self.mgfile).__name__ == 'OggVorbis':
            self.type = 'ogg'
        elif type(self.mgfile).__name__ == 'MonkeysAudio':
            self.type = 'ape'
        elif type(self.mgfile).__name__ == 'WavPack':
            self.type = 'wv'
        elif type(self.mgfile).__name__ == 'Musepack':
            self.type = 'mpc'
        elif type(self.mgfile).__name__ == 'ASF':
            self.type = 'asf'
        elif type(self.mgfile).__name__ == 'AIFF':
            self.type = 'aiff'
        else:
            raise FileTypeError(path, type(self.mgfile).__name__)

        # Add a set of tags if it's missing.
        if self.mgfile.tags is None:
            self.mgfile.add_tags()

        # Set the ID3v2.3 flag only for MP3s.
        self.id3v23 = id3v23 and self.type == 'mp3'
    def save(self):
        """Write the object's tags back to the file.

        Plain ``IOError``/``OSError`` from the filesystem propagate
        unchanged; any other exception from Mutagen is wrapped in
        ``MutagenError``.
        """
        # Possibly save the tags to ID3v2.3.
        kwargs = {}
        if self.id3v23:
            id3 = self.mgfile
            if hasattr(id3, 'tags'):
                # In case this is an MP3 object, not an ID3 object.
                id3 = id3.tags
            id3.update_to_v23()
            kwargs['v2_version'] = 3

        # Isolate bugs in Mutagen.
        try:
            self.mgfile.save(**kwargs)
        except (IOError, OSError):
            # Propagate these through: they don't represent Mutagen bugs.
            raise
        except Exception as exc:
            log.debug('{}', traceback.format_exc())
            log.error('uncaught Mutagen exception in save: {0}', exc)
            raise MutagenError(self.path, exc)
def delete(self):
"""Remove the current metadata tag from the file.
"""
try:
self.mgfile.delete()
except NotImplementedError:
# For Mutagen types that don't support deletion (notably,
# ASF), just delete each tag individually.
for tag in self.mgfile.keys():
del self.mgfile[tag]
# Convenient access to the set of available fields.
@classmethod
def fields(cls):
"""Get the names of all writable properties that reflect
metadata tags (i.e., those that are instances of
:class:`MediaField`).
"""
for property, descriptor in cls.__dict__.items():
if isinstance(descriptor, MediaField):
yield property.decode('utf8')
@classmethod
def readable_fields(cls):
"""Get all metadata fields: the writable ones from
:meth:`fields` and also other audio properties.
"""
for property in cls.fields():
yield property
for property in ('length', 'samplerate', 'bitdepth', 'bitrate',
'channels', 'format'):
yield property
@classmethod
def add_field(cls, name, descriptor):
"""Add a field to store custom tags.
:param name: the name of the property the field is accessed
through. It must not already exist on this class.
:param descriptor: an instance of :class:`MediaField`.
"""
if not isinstance(descriptor, MediaField):
raise ValueError(
'{0} must be an instance of MediaField'.format(descriptor))
if name in cls.__dict__:
raise ValueError(
'property "{0}" already exists on MediaField'.format(name))
setattr(cls, name, descriptor)
    def update(self, dict):
        """Set all field values from a dictionary.

        For any key in `dict` that is also a field to store tags the
        method retrieves the corresponding value from `dict` and updates
        the `MediaFile`. If a key has the value `None`, the
        corresponding property is deleted from the `MediaFile`.
        """
        # NOTE(review): the parameter name shadows the builtin `dict`;
        # kept as-is because renaming would break keyword-argument
        # callers.
        for field in self.fields():
            if field in dict:
                if dict[field] is None:
                    # None means "unset this tag".
                    delattr(self, field)
                else:
                    setattr(self, field, dict[field])
# Field definitions.
title = MediaField(
MP3StorageStyle('TIT2'),
MP4StorageStyle("\xa9nam"),
StorageStyle('TITLE'),
ASFStorageStyle('Title'),
)
artist = MediaField(
MP3StorageStyle('TPE1'),
MP4StorageStyle("\xa9ART"),
StorageStyle('ARTIST'),
ASFStorageStyle('Author'),
)
album = MediaField(
MP3StorageStyle('TALB'),
MP4StorageStyle("\xa9alb"),
StorageStyle('ALBUM'),
ASFStorageStyle('WM/AlbumTitle'),
)
genres = ListMediaField(
MP3ListStorageStyle('TCON'),
MP4ListStorageStyle("\xa9gen"),
ListStorageStyle('GENRE'),
ASFStorageStyle('WM/Genre'),
)
genre = genres.single_field()
composer = MediaField(
MP3StorageStyle('TCOM'),
MP4StorageStyle("\xa9wrt"),
StorageStyle('COMPOSER'),
ASFStorageStyle('WM/Composer'),
)
grouping = MediaField(
MP3StorageStyle('TIT1'),
MP4StorageStyle("\xa9grp"),
StorageStyle('GROUPING'),
ASFStorageStyle('WM/ContentGroupDescription'),
)
track = MediaField(
MP3SlashPackStorageStyle('TRCK', pack_pos=0),
MP4TupleStorageStyle('trkn', index=0),
StorageStyle('TRACK'),
StorageStyle('TRACKNUMBER'),
ASFStorageStyle('WM/TrackNumber'),
out_type=int,
)
tracktotal = MediaField(
MP3SlashPackStorageStyle('TRCK', pack_pos=1),
MP4TupleStorageStyle('trkn', index=1),
StorageStyle('TRACKTOTAL'),
StorageStyle('TRACKC'),
StorageStyle('TOTALTRACKS'),
ASFStorageStyle('TotalTracks'),
out_type=int,
)
disc = MediaField(
MP3SlashPackStorageStyle('TPOS', pack_pos=0),
MP4TupleStorageStyle('disk', index=0),
StorageStyle('DISC'),
StorageStyle('DISCNUMBER'),
ASFStorageStyle('WM/PartOfSet'),
out_type=int,
)
disctotal = MediaField(
MP3SlashPackStorageStyle('TPOS', pack_pos=1),
MP4TupleStorageStyle('disk', index=1),
StorageStyle('DISCTOTAL'),
StorageStyle('DISCC'),
StorageStyle('TOTALDISCS'),
ASFStorageStyle('TotalDiscs'),
out_type=int,
)
lyrics = MediaField(
MP3DescStorageStyle(key='USLT'),
MP4StorageStyle("\xa9lyr"),
StorageStyle('LYRICS'),
ASFStorageStyle('WM/Lyrics'),
)
comments = MediaField(
MP3DescStorageStyle(key='COMM'),
MP4StorageStyle("\xa9cmt"),
StorageStyle('DESCRIPTION'),
StorageStyle('COMMENT'),
ASFStorageStyle('WM/Comments'),
ASFStorageStyle('Description')
)
bpm = MediaField(
MP3StorageStyle('TBPM'),
MP4StorageStyle('tmpo', as_type=int),
StorageStyle('BPM'),
ASFStorageStyle('WM/BeatsPerMinute'),
out_type=int,
)
comp = MediaField(
MP3StorageStyle('TCMP'),
MP4BoolStorageStyle('cpil'),
StorageStyle('COMPILATION'),
ASFStorageStyle('WM/IsCompilation', as_type=bool),
out_type=bool,
)
albumartist = MediaField(
MP3StorageStyle('TPE2'),
MP4StorageStyle('aART'),
StorageStyle('ALBUM ARTIST'),
StorageStyle('ALBUMARTIST'),
ASFStorageStyle('WM/AlbumArtist'),
)
albumtype = MediaField(
MP3DescStorageStyle('MusicBrainz Album Type'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Type'),
StorageStyle('MUSICBRAINZ_ALBUMTYPE'),
ASFStorageStyle('MusicBrainz/Album Type'),
)
label = MediaField(
MP3StorageStyle('TPUB'),
MP4StorageStyle('----:com.apple.iTunes:Label'),
MP4StorageStyle('----:com.apple.iTunes:publisher'),
StorageStyle('LABEL'),
StorageStyle('PUBLISHER'), # Traktor
ASFStorageStyle('WM/Publisher'),
)
artist_sort = MediaField(
MP3StorageStyle('TSOP'),
MP4StorageStyle("soar"),
StorageStyle('ARTISTSORT'),
ASFStorageStyle('WM/ArtistSortOrder'),
)
albumartist_sort = MediaField(
MP3DescStorageStyle('ALBUMARTISTSORT'),
MP4StorageStyle("soaa"),
StorageStyle('ALBUMARTISTSORT'),
ASFStorageStyle('WM/AlbumArtistSortOrder'),
)
asin = MediaField(
MP3DescStorageStyle('ASIN'),
MP4StorageStyle("----:com.apple.iTunes:ASIN"),
StorageStyle('ASIN'),
ASFStorageStyle('MusicBrainz/ASIN'),
)
catalognum = MediaField(
MP3DescStorageStyle('CATALOGNUMBER'),
MP4StorageStyle("----:com.apple.iTunes:CATALOGNUMBER"),
StorageStyle('CATALOGNUMBER'),
ASFStorageStyle('WM/CatalogNo'),
)
disctitle = MediaField(
MP3StorageStyle('TSST'),
MP4StorageStyle("----:com.apple.iTunes:DISCSUBTITLE"),
StorageStyle('DISCSUBTITLE'),
ASFStorageStyle('WM/SetSubTitle'),
)
encoder = MediaField(
MP3StorageStyle('TENC'),
MP4StorageStyle("\xa9too"),
StorageStyle('ENCODEDBY'),
StorageStyle('ENCODER'),
ASFStorageStyle('WM/EncodedBy'),
)
script = MediaField(
MP3DescStorageStyle('Script'),
MP4StorageStyle("----:com.apple.iTunes:SCRIPT"),
StorageStyle('SCRIPT'),
ASFStorageStyle('WM/Script'),
)
language = MediaField(
MP3StorageStyle('TLAN'),
MP4StorageStyle("----:com.apple.iTunes:LANGUAGE"),
StorageStyle('LANGUAGE'),
ASFStorageStyle('WM/Language'),
)
country = MediaField(
MP3DescStorageStyle('MusicBrainz Album Release Country'),
MP4StorageStyle("----:com.apple.iTunes:MusicBrainz "
"Album Release Country"),
StorageStyle('RELEASECOUNTRY'),
ASFStorageStyle('MusicBrainz/Album Release Country'),
)
albumstatus = MediaField(
MP3DescStorageStyle('MusicBrainz Album Status'),
MP4StorageStyle("----:com.apple.iTunes:MusicBrainz Album Status"),
StorageStyle('MUSICBRAINZ_ALBUMSTATUS'),
ASFStorageStyle('MusicBrainz/Album Status'),
)
media = MediaField(
MP3StorageStyle('TMED'),
MP4StorageStyle("----:com.apple.iTunes:MEDIA"),
StorageStyle('MEDIA'),
ASFStorageStyle('WM/Media'),
)
albumdisambig = MediaField(
# This tag mapping was invented for beets (not used by Picard, etc).
MP3DescStorageStyle('MusicBrainz Album Comment'),
MP4StorageStyle("----:com.apple.iTunes:MusicBrainz Album Comment"),
StorageStyle('MUSICBRAINZ_ALBUMCOMMENT'),
ASFStorageStyle('MusicBrainz/Album Comment'),
)
# Release date.
date = DateField(
MP3StorageStyle('TDRC'),
MP4StorageStyle("\xa9day"),
StorageStyle('DATE'),
ASFStorageStyle('WM/Year'),
year=(StorageStyle('YEAR'),))
year = date.year_field()
month = date.month_field()
day = date.day_field()
# *Original* release date.
original_date = DateField(
MP3StorageStyle('TDOR'),
MP4StorageStyle('----:com.apple.iTunes:ORIGINAL YEAR'),
StorageStyle('ORIGINALDATE'),
ASFStorageStyle('WM/OriginalReleaseYear'))
original_year = original_date.year_field()
original_month = original_date.month_field()
original_day = original_date.day_field()
# Nonstandard metadata.
artist_credit = MediaField(
MP3DescStorageStyle('Artist Credit'),
MP4StorageStyle("----:com.apple.iTunes:Artist Credit"),
StorageStyle('ARTIST_CREDIT'),
ASFStorageStyle('beets/Artist Credit'),
)
albumartist_credit = MediaField(
MP3DescStorageStyle('Album Artist Credit'),
MP4StorageStyle("----:com.apple.iTunes:Album Artist Credit"),
StorageStyle('ALBUMARTIST_CREDIT'),
ASFStorageStyle('beets/Album Artist Credit'),
)
# Legacy album art field
art = CoverArtField()
# Image list
images = ImageListField()
# MusicBrainz IDs.
mb_trackid = MediaField(
MP3UFIDStorageStyle(owner='http://musicbrainz.org'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Track Id'),
StorageStyle('MUSICBRAINZ_TRACKID'),
ASFStorageStyle('MusicBrainz/Track Id'),
)
mb_albumid = MediaField(
MP3DescStorageStyle('MusicBrainz Album Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Id'),
StorageStyle('MUSICBRAINZ_ALBUMID'),
ASFStorageStyle('MusicBrainz/Album Id'),
)
mb_artistid = MediaField(
MP3DescStorageStyle('MusicBrainz Artist Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Artist Id'),
StorageStyle('MUSICBRAINZ_ARTISTID'),
ASFStorageStyle('MusicBrainz/Artist Id'),
)
mb_albumartistid = MediaField(
MP3DescStorageStyle('MusicBrainz Album Artist Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Artist Id'),
StorageStyle('MUSICBRAINZ_ALBUMARTISTID'),
ASFStorageStyle('MusicBrainz/Album Artist Id'),
)
mb_releasegroupid = MediaField(
MP3DescStorageStyle('MusicBrainz Release Group Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Release Group Id'),
StorageStyle('MUSICBRAINZ_RELEASEGROUPID'),
ASFStorageStyle('MusicBrainz/Release Group Id'),
)
# Acoustid fields.
acoustid_fingerprint = MediaField(
MP3DescStorageStyle('Acoustid Fingerprint'),
MP4StorageStyle('----:com.apple.iTunes:Acoustid Fingerprint'),
StorageStyle('ACOUSTID_FINGERPRINT'),
ASFStorageStyle('Acoustid/Fingerprint'),
)
acoustid_id = MediaField(
MP3DescStorageStyle('Acoustid Id'),
MP4StorageStyle('----:com.apple.iTunes:Acoustid Id'),
StorageStyle('ACOUSTID_ID'),
ASFStorageStyle('Acoustid/Id'),
)
# ReplayGain fields.
rg_track_gain = MediaField(
MP3DescStorageStyle(
'REPLAYGAIN_TRACK_GAIN',
float_places=2, suffix=' dB'
),
MP3DescStorageStyle(
'replaygain_track_gain',
float_places=2, suffix=' dB'
),
MP3SoundCheckStorageStyle(
key='COMM',
index=0, desc='iTunNORM',
id3_lang='eng'
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_track_gain',
float_places=2, suffix=b' dB'
),
MP4SoundCheckStorageStyle(
'----:com.apple.iTunes:iTunNORM',
index=0
),
StorageStyle(
'REPLAYGAIN_TRACK_GAIN',
float_places=2, suffix=' dB'
),
ASFStorageStyle(
'replaygain_track_gain',
float_places=2, suffix=' dB'
),
out_type=float
)
rg_album_gain = MediaField(
MP3DescStorageStyle(
'REPLAYGAIN_ALBUM_GAIN',
float_places=2, suffix=' dB'
),
MP3DescStorageStyle(
'replaygain_album_gain',
float_places=2, suffix=' dB'
),
MP4SoundCheckStorageStyle(
'----:com.apple.iTunes:iTunNORM',
index=1
),
StorageStyle(
'REPLAYGAIN_ALBUM_GAIN',
float_places=2, suffix=' dB'
),
ASFStorageStyle(
'replaygain_album_gain',
float_places=2, suffix=' dB'
),
out_type=float
)
rg_track_peak = MediaField(
MP3DescStorageStyle(
'REPLAYGAIN_TRACK_PEAK',
float_places=6
),
MP3DescStorageStyle(
'replaygain_track_peak',
float_places=6
),
MP3SoundCheckStorageStyle(
key='COMM',
index=1, desc='iTunNORM',
id3_lang='eng'
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_track_peak',
float_places=6
),
MP4SoundCheckStorageStyle(
'----:com.apple.iTunes:iTunNORM',
index=1
),
StorageStyle('REPLAYGAIN_TRACK_PEAK', float_places=6),
ASFStorageStyle('replaygain_track_peak', float_places=6),
out_type=float,
)
rg_album_peak = MediaField(
MP3DescStorageStyle(
'REPLAYGAIN_ALBUM_PEAK',
float_places=6
),
MP3DescStorageStyle(
'replaygain_album_peak',
float_places=6
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_album_peak',
float_places=6
),
StorageStyle('REPLAYGAIN_ALBUM_PEAK', float_places=6),
ASFStorageStyle('replaygain_album_peak', float_places=6),
out_type=float,
)
initial_key = MediaField(
MP3StorageStyle('TKEY'),
MP4StorageStyle('----:com.apple.iTunes:initialkey'),
StorageStyle('INITIALKEY'),
ASFStorageStyle('INITIALKEY'),
)
    @property
    def length(self):
        """The duration of the audio in seconds (a float)."""
        return self.mgfile.info.length

    @property
    def samplerate(self):
        """The audio's sample rate (an int).

        Zero when the underlying format does not report one.
        """
        if hasattr(self.mgfile.info, 'sample_rate'):
            return self.mgfile.info.sample_rate
        elif self.type == 'opus':
            # Opus is always 48kHz internally.
            return 48000
        return 0

    @property
    def bitdepth(self):
        """The number of bits per sample in the audio encoding (an int).
        Only available for certain file formats (zero where
        unavailable).
        """
        if hasattr(self.mgfile.info, 'bits_per_sample'):
            return self.mgfile.info.bits_per_sample
        return 0

    @property
    def channels(self):
        """The number of channels in the audio (an int)."""
        if isinstance(self.mgfile.info, mutagen.mp3.MPEGInfo):
            # MP3 reports a channel *mode* rather than a count; map it.
            return {
                mutagen.mp3.STEREO: 2,
                mutagen.mp3.JOINTSTEREO: 2,
                mutagen.mp3.DUALCHANNEL: 2,
                mutagen.mp3.MONO: 1,
            }[self.mgfile.info.mode]
        if hasattr(self.mgfile.info, 'channels'):
            return self.mgfile.info.channels
        return 0

    @property
    def bitrate(self):
        """The number of bits per seconds used in the audio coding (an
        int). If this is provided explicitly by the compressed file
        format, this is a precise reflection of the encoding. Otherwise,
        it is estimated from the on-disk file size. In this case, some
        imprecision is possible because the file header is incorporated
        in the file size.
        """
        if hasattr(self.mgfile.info, 'bitrate') and self.mgfile.info.bitrate:
            # Many formats provide it explicitly.
            return self.mgfile.info.bitrate
        else:
            # Otherwise, we calculate bitrate from the file size. (This
            # is the case for all of the lossless formats.)
            if not self.length:
                # Avoid division by zero if length is not available.
                return 0
            size = os.path.getsize(self.path)
            return int(size * 8 / self.length)

    @property
    def format(self):
        """A string describing the file format/codec."""
        # TYPES maps the short `type` key to a human-readable name.
        return TYPES[self.type]
| en | 0.8037 | # This file is part of beets. # Copyright 2015, <NAME>. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. Handles low-level interfacing for files' tags. Wraps Mutagen to automatically detect file types and provide a unified interface for a useful subset of music files' tags. Usage: >>> f = MediaFile('Lucy.mp3') >>> f.title 'Lucy in the Sky with Diamonds' >>> f.artist = 'The Beatles' >>> f.save() A field will always return a reasonable value of the correct type, even if no tag is present. If no value is available, the value will be false (e.g., zero or the empty string). Internally ``MediaFile`` uses ``MediaField`` descriptors to access the data from the tags. In turn ``MediaField`` uses a number of ``StorageStyle`` strategies to handle format specific logic. # Human-readable type names. # Exceptions. Mutagen is not able to extract information from the file. Reading this type of file is not supported. If passed the `mutagen_type` argument this indicates that the mutagen type is not supported by `Mediafile`. Raised when Mutagen fails unexpectedly---probably due to a bug. # Utility. Try to covert val to out_type but never raise an exception. If the value can't be converted, then a sensible default value is returned. out_type should be bool, int, or str; otherwise, the value is just passed through. # Just a number. # Process any other type as a string. # Get a number from the front of the string. 
# Should work for strings, bools, ints: # Image coding for ASF/WMA. Unpack image data from a WM/Picture tag. Return a tuple containing the MIME type, the raw image data, a type indicator, and the image's description. This function is treated as "untrusted" and could throw all manner of exceptions (out-of-bounds, etc.). We should clean this up sometime so that the failure modes are well-defined. Pack image data for a WM/Picture tag. # iTunes Sound Check encoding. Convert a Sound Check string value to a (gain, peak) tuple as used by ReplayGain. # SoundCheck tags consist of 10 numbers, each represented by 8 # characters of ASCII hex preceded by a space. # SoundCheck isn't in the format we expect, so return default # values. # SoundCheck stores absolute calculated/measured RMS value in an # unknown unit. We need to find the ratio of this measurement # compared to a reference value of 1000 to get our gain in dB. We # play it safe by using the larger of the two values (i.e., the most # attenuation). # Invalid gain value found. # SoundCheck stores peak values as the actual value of the sample, # and again separately for the left and right channels. We need to # convert this to a percentage of full scale, which is 32768 for a # 16 bit sample. Once again, we play it safe by using the larger of # the two values. Encode ReplayGain gain/peak values as a Sound Check string. # SoundCheck stores the peak value as the actual value of the # sample, rather than the percentage of full scale that RG uses, so # we do a simple conversion assuming 16 bit samples. # SoundCheck stores absolute RMS values in some unknown units rather # than the dB values RG uses. We can calculate these absolute values # from the gain ratio using a reference value of 1000 units. We also # enforce the maximum value here, which is equivalent to about # -18.2dB. # Same as above, except our reference level is 2500 units. 
# The purpose of these values are unknown, but they also seem to be # unused so we just use zero. # Cover art and other images. Return the MIME type of the image data (a bytestring). Indicates the kind of an `Image` stored in a file's tag. Structure representing image data and metadata that can be stored and retrieved from tags. The structure has four properties. * ``data`` The binary data of the image * ``desc`` An optional description of the image * ``type`` An instance of `ImageType` indicating the kind of image * ``mime_type`` Read-only property that contains the mime type of the binary data # This method is used when a tag format requires the type # index to be set, so we return "other" as the default value. # StorageStyle classes describe strategies for accessing values in # Mutagen file objects. A strategy for storing a value for a certain tag format (or set of tag formats). This basic StorageStyle describes simple 1:1 mapping from raw values to keys in a Mutagen file object; subclasses describe more sophisticated translations or format-specific access strategies. MediaFile uses a StorageStyle via three methods: ``get()``, ``set()``, and ``delete()``. It passes a Mutagen file object to each. Internally, the StorageStyle implements ``get()`` and ``set()`` using two steps that may be overridden by subtypes. To get a value, the StorageStyle first calls ``fetch()`` to retrieve the value corresponding to a key and then ``deserialize()`` to convert the raw Mutagen value to a consumable Python value. Similarly, to set a field, we call ``serialize()`` to encode the value and then ``store()`` to assign the result into the Mutagen object. Each StorageStyle type has a class-level `formats` attribute that is a list of strings indicating the formats that the style applies to. MediaFile only uses StorageStyles that apply to the correct type for a given audio file. List of mutagen classes the StorageStyle can handle. Create a basic storage strategy. 
Parameters: - `key`: The key on the Mutagen file object used to access the field's data. - `as_type`: The Python type that the value is stored as internally (`str`, `int`, `bool`, or `bytes`). - `suffix`: When `as_type` is a string type, append this before storing the value. - `float_places`: When the value is a floating-point number and encoded as a string, the number of digits to store after the decimal point. # Convert suffix to correct string type. # Getter. Get the value for the field using this style. Retrieve the raw value of for this tag from the Mutagen file object. Given a raw value stored on a Mutagen object, decode and return the represented value. # Setter. Assign the value for the field using this style. Store a serialized value in the Mutagen file object. Convert the external Python value to a type that is suitable for storing in a Mutagen file object. # Store bools as 1/0 instead of True/False. Remove the tag from the file. Abstract storage style that provides access to lists. The ListMediaField descriptor uses a ListStorageStyle via two methods: ``get_list()`` and ``set_list()``. It passes a Mutagen file object to each. Subclasses may overwrite ``fetch`` and ``store``. ``fetch`` must return a (possibly empty) list and ``store`` receives a serialized list of values as the second argument. The `serialize` and `deserialize` methods (from the base `StorageStyle`) are still called with individual values. This class handles packing and unpacking the values into lists. Get the first value in the field's value list. Get a list of all values for the field using this style. Get the list of raw (serialized) values. Set an individual value as the only value for the field using this style. Set all values for the field using this style. `values` should be an iterable. Set the list of all raw (serialized) values for this field. A mixin for storage styles that read and write iTunes SoundCheck analysis values. 
The object must have an `index` field that indicates which half of the gain/peak pair---0 or 1---the field represents. A general storage style for Windows Media/ASF files. A general storage style for MPEG-4 tags. A style for storing values as part of a pair of numbers in an MPEG-4 file. # The values are always present and saved as integers. So we # assume that "0" indicates it is not set. A style for booleans in MPEG-4 files. (MPEG-4 has an atom type specifically for representing booleans.) Store images as MPEG-4 image atoms. Values are `Image` objects. Store data in ID3 frames. Create a new ID3 storage style. `id3_lang` is the value for the language field of newly created frames. Store lists of data in multiple ID3 frames. Store data in a UFID ID3 frame with a particular owner. # Replace existing frame data. # New frame. Store data in a TXXX (or similar) ID3 frame. The frame is selected based its ``desc`` field. # try modifying in place # need to make a new frame? Store value as part of pair that is serialized as a slash- separated string. # Do not store last value Converts between APIC frames and ``Image`` instances. The `get_list` method inherited from ``ListStorageStyle`` returns a list of ``Image``s. Similarly, the `set_list` method accepts a list of ``Image``s as its ``values`` argument. Convert APIC frame into Image. Return an APIC frame populated with data from ``image``. # UTF-8 encoding of desc Store images packed into Windows Media/ASF byte array attributes. Values are `Image` objects. Store images in Vorbis comments. Both legacy COVERART fields and modern METADATA_BLOCK_PICTURE tags are supported. Data is base64-encoded. Values are `Image` objects. # Try legacy COVERART tags. # Strip all art, including legacy COVERART. Turn a Image into a base64 encoded FLAC picture block. Converts between ``mutagen.flac.Picture`` and ``Image`` instances. ``pictures`` is a list of mutagen.flac.Picture instances. Turn a Image into a mutagen.flac.Picture. 
Remove all images from the file. Store images in APEv2 tags. Values are `Image` objects. Remove all images from the file. # MediaField is a descriptor that represents a single logical field. It # aggregates several StorageStyles describing how to access the data for # each file type. A descriptor providing access to a particular (abstract) metadata field. Creates a new MediaField. :param styles: `StorageStyle` instances that describe the strategy for reading and writing the field in particular formats. There must be at least one style for each possible file format. :param out_type: the type of the value that should be returned when getting this property. Yields the list of storage styles of this field that can handle the MediaFile's format. Get an appropriate "null" value for this field's type. This is used internally when setting the field to None. Property descriptor that retrieves a list of multiple values from a tag. Uses ``get_list`` and set_list`` methods of its ``StorageStyle`` strategies to do the actual work. Returns a ``MediaField`` descriptor that gets and sets the first item. Descriptor that handles serializing and deserializing dates The getter parses value from tags into a ``datetime.date`` instance and setter serializes such an instance into a string. For granular access to year, month, and day, use the ``*_field`` methods to create corresponding `DateItemField`s. ``date_styles`` is a list of ``StorageStyle``s to store and retrieve the whole date from. The ``year`` option is an additional list of fallback styles for the year. The year is always set on this style, but is only retrieved if the main storage styles do not return a value. # Out of range values. Get a 3-item sequence representing the date consisting of a year, month, and day number. Each number is either an integer or None. # Get the underlying data and split on hyphens and slashes. # Ensure that we have exactly 3 components, possibly by # truncating or padding. 
# Use year field if year is missing. # Convert each component to an integer if possible. Set the value of the field given a year, month, and day number. Each number can be an integer or None to indicate an unset component. Descriptor that gets and sets constituent parts of a `DateField`: the month, day, or year. A descriptor that provides access to the *raw image data* for the cover image on a file. This is used for backwards compatibility: the full `ImageListField` provides richer `Image` objects. When there are multiple images we try to pick the most likely to be a front cover. Descriptor to access the list of images embedded in tags. The getter returns a list of `Image` instances obtained from the tags. The setter accepts a list of `Image` instances to be written to the tags. # The storage styles used here must implement the # `ListStorageStyle` interface and get and set lists of # `Image`s. # MediaFile is a collection of fields. Represents a multimedia file on disk and provides access to its metadata. Constructs a new `MediaFile` reflecting the file at path. May throw `UnreadableFileError`. By default, MP3 files are saved with ID3v2.4 tags. You can use the older ID3v2.3 standard by specifying the `id3v23` option. # This is a base IOError, not a subclass from Mutagen or # anywhere else. # Isolate bugs in Mutagen. # Mutagen couldn't guess the type # This hack differentiates AAC and ALAC on versions of # Mutagen < 1.26. Once Mutagen > 1.26 is out and # required by beets, we can remove this. # Add a set of tags if it's missing. # Set the ID3v2.3 flag only for MP3s. Write the object's tags back to the file. # Possibly save the tags to ID3v2.3. # In case this is an MP3 object, not an ID3 object. # Isolate bugs in Mutagen. # Propagate these through: they don't represent Mutagen bugs. Remove the current metadata tag from the file. # For Mutagen types that don't support deletion (notably, # ASF), just delete each tag individually. 
# Convenient access to the set of available fields. Get the names of all writable properties that reflect metadata tags (i.e., those that are instances of :class:`MediaField`). Get all metadata fields: the writable ones from :meth:`fields` and also other audio properties. Add a field to store custom tags. :param name: the name of the property the field is accessed through. It must not already exist on this class. :param descriptor: an instance of :class:`MediaField`. Set all field values from a dictionary. For any key in `dict` that is also a field to store tags the method retrieves the corresponding value from `dict` and updates the `MediaFile`. If a key has the value `None`, the corresponding property is deleted from the `MediaFile`. # Field definitions. # Traktor # This tag mapping was invented for beets (not used by Picard, etc). # Release date. # *Original* release date. # Nonstandard metadata. # Legacy album art field # Image list # MusicBrainz IDs. # Acoustid fields. # ReplayGain fields. The duration of the audio in seconds (a float). The audio's sample rate (an int). # Opus is always 48kHz internally. The number of bits per sample in the audio encoding (an int). Only available for certain file formats (zero where unavailable). The number of channels in the audio (an int). The number of bits per seconds used in the audio coding (an int). If this is provided explicitly by the compressed file format, this is a precise reflection of the encoding. Otherwise, it is estimated from the on-disk file size. In this case, some imprecision is possible because the file header is incorporated in the file size. # Many formats provide it explicitly. # Otherwise, we calculate bitrate from the file size. (This # is the case for all of the lossless formats.) # Avoid division by zero if length is not available. A string describing the file format/codec. | 2.221781 | 2 |
test/container.py | fjudith/microservices-demo-orders | 1 | 6625222 | <reponame>fjudith/microservices-demo-orders
import argparse
import sys
import unittest
import os
import urllib
from util.Api import Api
from time import sleep
from util.Docker import Docker
from util.Dredd import Dredd
class ServiceMock:
    """A stand-in for one of the orders service's dependencies.

    Runs a `weaveworksdemos/json-server` container that serves canned JSON
    responses, so the orders service can be integration-tested without the
    real users/payment/shipping services.
    """

    container_name = ''
    hostname = ''

    def __init__(self, container_name, hostname):
        self.container_name = container_name
        self.hostname = hostname

    def start_container(self):
        """Launch the mock as a detached docker container.

        The current working directory is mounted at /data so the json-server
        can pick up the canned responses from test/json-server/server.py.
        A short sleep gives the container time to come up.
        """
        data_mount = "{0}:{1}".format(os.getcwd(), "/data/")
        Docker().execute([
            'docker', 'run', '-d',
            '--name', self.container_name,
            '-h', self.container_name,
            '-v', data_mount,
            '-e', 'FLASK_APP=/data/test/json-server/server.py',
            'weaveworksdemos/json-server',
            '--port', '80',
        ])
        sleep(2)

    def cleanup(self):
        """Kill and remove the mock's container."""
        Docker().kill_and_remove(self.container_name)
class OrdersContainerTest(unittest.TestCase):
    """Integration test for the orders microservice container.

    Boots a MongoDB container plus json-server mocks for the users, payment
    and shipping services, links the orders image against them, and then
    validates the running HTTP API against its Dredd API-blueprint contract.
    """

    # Image coordinates; overwritten from CLI args / environment in __main__
    # (GROUP is only ever assigned there — running this module any other way
    # would raise AttributeError in setUp).
    TAG = "latest"
    COMMIT = ""
    # Randomised names avoid collisions with leftovers from earlier runs.
    container_name = Docker().random_container_name('orders')
    mongo_container_name = Docker().random_container_name('orders-db')

    def __init__(self, methodName='runTest'):
        super(OrdersContainerTest, self).__init__(methodName)
        # One mock per downstream service the orders service talks to.
        self.users_mock = ServiceMock("users-orders-mock", "users-orders-mock")
        self.payment_mock = ServiceMock("payment", "payment")
        self.shipping_mock = ServiceMock("shipping", "shipping")
        # Filled in by setUp once the orders container is running.
        self.ip = ""

    def setUp(self):
        """Start the mocks, the database and the orders container under test."""
        self.users_mock.start_container()
        self.payment_mock.start_container()
        self.shipping_mock.start_container()
        # The orders service expects its MongoDB at host "orders-db".
        Docker().start_container(container_name=self.mongo_container_name, image="mongo", host="orders-db")
        # Link the orders container against the database and all three mocks
        # so their container names resolve as hostnames inside it.
        command = ['docker', 'run',
                   '-d',
                   '--name', OrdersContainerTest.container_name,
                   '-h', OrdersContainerTest.container_name,
                   '--link',
                   OrdersContainerTest.mongo_container_name,
                   '--link',
                   self.users_mock.container_name,
                   '--link',
                   self.payment_mock.container_name,
                   '--link',
                   self.shipping_mock.container_name,
                   OrdersContainerTest.GROUP + '/orders:' + self.COMMIT]
        Docker().execute(command, dump_streams=True)
        self.ip = Docker().get_container_ip(OrdersContainerTest.container_name)

    def tearDown(self):
        """Remove every container started in setUp, test outcome regardless."""
        Docker().kill_and_remove(OrdersContainerTest.container_name)
        Docker().kill_and_remove(OrdersContainerTest.mongo_container_name)
        self.users_mock.cleanup()
        self.payment_mock.cleanup()
        self.shipping_mock.cleanup()

    def test_api_validated(self):
        """Wait for the API to come up, then run the Dredd contract check."""
        # Poll /orders for up to ~30 seconds before giving up.
        limit = 30
        while Api().noResponse('http://' + self.ip + ':80/orders'):
            if limit == 0:
                self.fail("Couldn't get the API running")
            limit = limit - 1
            sleep(1)
        out = Dredd().test_against_endpoint(
            "orders", 'http://' + self.ip + ':80/',
            links=[self.mongo_container_name, self.container_name],
            env=[("MONGO_ENDPOINT", "mongodb://orders-db:27017/data")],
            dump_streams=True)
        # Dredd reports pass/fail counts on stdout; require a clean run.
        self.assertGreater(out.find("0 failing"), -1)
        self.assertGreater(out.find("0 errors"), -1)
        print(out)
if __name__ == '__main__':
    # Parse our own options first; everything left over is handed to unittest.
    default_tag = "latest"
    cli = argparse.ArgumentParser()
    cli.add_argument('--tag', default=default_tag, help='The tag of the image to use. (default: latest)')
    cli.add_argument('unittest_args', nargs='*')
    options = cli.parse_args()
    # An explicitly empty --tag falls back to the default tag.
    OrdersContainerTest.TAG = options.tag if options.tag != "" else default_tag
    # CI supplies the image coordinates through the environment.
    OrdersContainerTest.COMMIT = os.environ["COMMIT"]
    OrdersContainerTest.GROUP = os.environ["GROUP"]
    # Forward the remaining CLI arguments to unittest (sys.argv[0] untouched).
    sys.argv[1:] = options.unittest_args
    unittest.main()
| import argparse
import sys
import unittest
import os
import urllib
from util.Api import Api
from time import sleep
from util.Docker import Docker
from util.Dredd import Dredd
class ServiceMock:
container_name = ''
hostname = ''
def start_container(self):
command = ['docker', 'run', '-d',
'--name', self.container_name,
'-h', self.container_name,
'-v', "{0}:{1}".format(os.getcwd(), "/data/"),
'-e', 'FLASK_APP=/data/test/json-server/server.py',
'weaveworksdemos/json-server',
'--port', '80']
Docker().execute(command)
sleep(2)
def cleanup(self):
Docker().kill_and_remove(self.container_name)
def __init__(self, container_name, hostname):
self.container_name = container_name
self.hostname = hostname
class OrdersContainerTest(unittest.TestCase):
TAG = "latest"
COMMIT = ""
container_name = Docker().random_container_name('orders')
mongo_container_name = Docker().random_container_name('orders-db')
def __init__(self, methodName='runTest'):
super(OrdersContainerTest, self).__init__(methodName)
self.users_mock = ServiceMock("users-orders-mock", "users-orders-mock")
self.payment_mock = ServiceMock("payment", "payment")
self.shipping_mock = ServiceMock("shipping", "shipping")
self.ip = ""
def setUp(self):
self.users_mock.start_container()
self.payment_mock.start_container()
self.shipping_mock.start_container()
Docker().start_container(container_name=self.mongo_container_name, image="mongo", host="orders-db")
command = ['docker', 'run',
'-d',
'--name', OrdersContainerTest.container_name,
'-h', OrdersContainerTest.container_name,
'--link',
OrdersContainerTest.mongo_container_name,
'--link',
self.users_mock.container_name,
'--link',
self.payment_mock.container_name,
'--link',
self.shipping_mock.container_name,
OrdersContainerTest.GROUP + '/orders:' + self.COMMIT]
Docker().execute(command, dump_streams=True)
self.ip = Docker().get_container_ip(OrdersContainerTest.container_name)
def tearDown(self):
Docker().kill_and_remove(OrdersContainerTest.container_name)
Docker().kill_and_remove(OrdersContainerTest.mongo_container_name)
self.users_mock.cleanup()
self.payment_mock.cleanup()
self.shipping_mock.cleanup()
def test_api_validated(self):
limit = 30
while Api().noResponse('http://' + self.ip + ':80/orders'):
if limit == 0:
self.fail("Couldn't get the API running")
limit = limit - 1
sleep(1)
out = Dredd().test_against_endpoint(
"orders", 'http://' + self.ip + ':80/',
links=[self.mongo_container_name, self.container_name],
env=[("MONGO_ENDPOINT", "mongodb://orders-db:27017/data")],
dump_streams=True)
self.assertGreater(out.find("0 failing"), -1)
self.assertGreater(out.find("0 errors"), -1)
print(out)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
default_tag = "latest"
parser.add_argument('--tag', default=default_tag, help='The tag of the image to use. (default: latest)')
parser.add_argument('unittest_args', nargs='*')
args = parser.parse_args()
OrdersContainerTest.TAG = args.tag
if OrdersContainerTest.TAG == "":
OrdersContainerTest.TAG = default_tag
OrdersContainerTest.COMMIT = os.environ["COMMIT"]
OrdersContainerTest.GROUP = os.environ["GROUP"]
# Now set the sys.argv to the unittest_args (leaving sys.argv[0] alone)
sys.argv[1:] = args.unittest_args
unittest.main() | en | 0.44491 | # Now set the sys.argv to the unittest_args (leaving sys.argv[0] alone) | 2.19538 | 2 |
webapp/api/api/forms.py | susheel/MedCATtrainer | 0 | 6625223 | from django.db.models.signals import post_save
from django.dispatch import receiver
from .data_utils import *
# Extract text from the uploaded dataset
@receiver(post_save, sender=Dataset)
def save_dataset(sender, instance, **kwargs):
    """Extract the text from a Dataset's uploaded CSV whenever it is saved.

    Connected to Django's ``post_save`` signal for the ``Dataset`` model:
    after a Dataset row is written, delegate to ``text_from_csv`` (from
    ``data_utils``) to pull the text content out of the uploaded file.

    NOTE(review): post_save fires on *every* save (creation and update),
    so this presumably relies on ``text_from_csv`` being safe to run
    repeatedly on the same dataset — confirm in data_utils.
    """
    text_from_csv(instance)
| from django.db.models.signals import post_save
from django.dispatch import receiver
from .data_utils import *
# Extract text from the uploaded dataset
@receiver(post_save, sender=Dataset)
def save_dataset(sender, instance, **kwargs):
text_from_csv(instance)
| en | 0.985072 | # Extract text from the uploaded dataset | 1.877985 | 2 |
evogtk/gui/shortcuts.py | R3v1L/evogtk | 0 | 6625224 | # -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2008 EVO Sistemas Libres <<EMAIL>>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
# shortcuts
# Shortcuts helper class
###############################################################################
# TODO: Add methods for deleting a shortcut, clear shortcuts for a widget or all, and disconnnect a widget
# GTK Imports
import gtk
class ShortcutsHelper(object):
    """
    Shortcuts helper class

    Keeps a per-widget table mapping a (keyval, shift, ctrl, alt)
    combination to a callback, and dispatches the callback from the
    widget's ``key-press-event`` signal.
    """
    def __init__(self, gui_instance):
        """
        Class constructor

        gui_instance must expose get_gui_task(); it is consulted whenever a
        shortcut was bound with a task_list restriction.
        """
        self.__gui_instance = gui_instance
        # widget -> {(keyval, shift, ctrl, alt):
        #            (callback, task_list, pass_shortcut, user_params)}
        self.__shortcutlist = {}
        # widget -> handler id returned by connect(), so each widget is
        # connected to the dispatcher exactly once
        self.__connected_widgets = {}

    def __shortcuts_handler(self, widget, event):
        """
        Handles the shortcuts: dispatch a key press to the bound callback
        """
        # Check widget
        if widget not in self.__shortcutlist:
            return
        shift = bool(event.state & gtk.gdk.SHIFT_MASK)
        ctrl = bool(event.state & gtk.gdk.CONTROL_MASK)
        alt = bool(event.state & gtk.gdk.MOD1_MASK)
        shortcut = (event.keyval, shift, ctrl, alt)
        # Check shortcut
        if shortcut not in self.__shortcutlist[widget]:
            return
        # Use the unpacked binding directly instead of re-indexing the table
        callback, task_list, pass_shortcut, user_params = self.__shortcutlist[widget][shortcut]
        # Check task list: fire only when unrestricted or the current GUI
        # task is one of the allowed ones
        if not task_list or (self.__gui_instance.get_gui_task() in task_list):
            # Check if we have to pass the shortcut description to the callback
            if pass_shortcut:
                return callback(widget, event, (event.keyval, shift, ctrl, alt, task_list), **user_params)
            return callback(widget, event, **user_params)

    def bind_shortcut(self, widget, callback, key, shift=False, ctrl=False, alt=False, task_list=None, pass_shortcut=True, **user_params):
        """
        Binds a shortcut to a given widget
        """
        # Create widget shortcut dict lazily
        self.__shortcutlist.setdefault(widget, {})
        # Connect widget key-press-event to shortcuts handler (once per widget)
        if widget not in self.__connected_widgets:
            self.__connected_widgets[widget] = widget.connect('key-press-event', self.__shortcuts_handler)
        # Add shortcut to widget
        self.__shortcutlist[widget][(key, shift, ctrl, alt)] = (callback, task_list, pass_shortcut, user_params)

    def unbind_shortcut(self, widget, key, shift=False, ctrl=False, alt=False):
        """
        Removes a single shortcut previously bound to a widget
        (no-op when the shortcut is not bound)
        """
        if widget in self.__shortcutlist:
            self.__shortcutlist[widget].pop((key, shift, ctrl, alt), None)

    def clear_shortcuts(self, widget=None):
        """
        Clears all shortcuts for a widget, or for every widget when no
        widget is given
        """
        if widget is None:
            self.__shortcutlist.clear()
        else:
            self.__shortcutlist.pop(widget, None)

    def disconnect_widget(self, widget):
        """
        Disconnects the key-press-event handler from a widget and forgets
        its shortcuts
        """
        handler_id = self.__connected_widgets.pop(widget, None)
        if handler_id is not None:
            widget.disconnect(handler_id)
        self.__shortcutlist.pop(widget, None)
| # -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2008 EVO Sistemas Libres <<EMAIL>>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
# shortcuts
# Shortcuts helper class
###############################################################################
# TODO: Add methods for deleting a shortcut, clear shortcuts for a widget or all, and disconnnect a widget
# GTK Imports
import gtk
class ShortcutsHelper(object):
"""
Shortcuts helper class
"""
def __init__(self,gui_instance):
"""
Class constructor
"""
self.__gui_instance=gui_instance
self.__shortcutlist={}
self.__connected_widgets={}
def __shortcuts_handler(self,widget,event):
"""
Handles the shortcuts
"""
# Check widget
if self.__shortcutlist.has_key(widget):
shift=bool(event.state & gtk.gdk.SHIFT_MASK)
ctrl=bool(event.state & gtk.gdk.CONTROL_MASK)
alt=bool(event.state & gtk.gdk.MOD1_MASK)
# Check shortcut
if self.__shortcutlist[widget].has_key((event.keyval,shift,ctrl,alt)):
callback,task_list,pass_shortcut,user_params=self.__shortcutlist[widget][(event.keyval,shift,ctrl,alt)]
# Check task list
if not task_list or (self.__gui_instance.get_gui_task() in task_list):
# Check if we have to pass any params to callback and call it
if pass_shortcut:
return self.__shortcutlist[widget][(event.keyval,shift,ctrl,alt)][0](widget,event,(event.keyval,shift,ctrl,alt,task_list),**user_params)
else:
return self.__shortcutlist[widget][(event.keyval,shift,ctrl,alt)][0](widget,event,**user_params)
def bind_shortcut(self,widget,callback,key,shift=False,ctrl=False,alt=False,task_list=None,pass_shortcut=True,**user_params):
"""
Binds a shortcut to a given widget
"""
# Create widget shortcut dict
if not self.__shortcutlist.has_key(widget):
self.__shortcutlist[widget]={}
# Connect window key-press-event to shortcuts handler
if not self.__connected_widgets.has_key(widget):
self.__connected_widgets[widget]=widget.connect('key-press-event',self.__shortcuts_handler)
# Add shortcut to widget
self.__shortcutlist[widget][(key,shift,ctrl,alt)]=(callback,task_list,pass_shortcut,user_params)
| en | 0.540113 | # -*- coding: utf-8 -*- ############################################################################### # Copyright (C) 2008 EVO Sistemas Libres <<EMAIL>> # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ############################################################################### # shortcuts # Shortcuts helper class ############################################################################### # TODO: Add methods for deleting a shortcut, clear shortcuts for a widget or all, and disconnnect a widget # GTK Imports Shortcuts helper class Class constructor Handles the shortcuts # Check widget # Check shortcut # Check task list # Check if we have to pass any params to callback and call it Binds a shortcut to a given widget # Create widget shortcut dict # Connect window key-press-event to shortcuts handler # Add shortcut to widget | 1.598224 | 2 |
sdk/python/pulumi_aws/ec2/network_acl_rule.py | dixler/pulumi-aws | 0 | 6625225 | <gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class NetworkAclRule(pulumi.CustomResource):
    # NOTE: this class is emitted by the Pulumi Terraform Bridge (tfgen);
    # the annotations below are resource *outputs* whose concrete values are
    # resolved by the Pulumi engine after the cloud resource is created.
    cidr_block: pulumi.Output[str]
    """
    The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24 ).
    """
    egress: pulumi.Output[bool]
    """
    Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet). Default `false`.
    """
    from_port: pulumi.Output[float]
    """
    The from port to match.
    """
    icmp_code: pulumi.Output[str]
    """
    ICMP protocol: The ICMP code. Required if specifying ICMP for the protocol. e.g. -1
    """
    icmp_type: pulumi.Output[str]
    """
    ICMP protocol: The ICMP type. Required if specifying ICMP for the protocol. e.g. -1
    """
    ipv6_cidr_block: pulumi.Output[str]
    """
    The IPv6 CIDR block to allow or deny.
    """
    network_acl_id: pulumi.Output[str]
    """
    The ID of the network ACL.
    """
    protocol: pulumi.Output[str]
    """
    The protocol. A value of -1 means all protocols.
    """
    rule_action: pulumi.Output[str]
    """
    Indicates whether to allow or deny the traffic that matches the rule. Accepted values: `allow` | `deny`
    """
    rule_number: pulumi.Output[float]
    """
    The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number.
    """
    to_port: pulumi.Output[float]
    """
    The to port to match.
    """
    def __init__(__self__, resource_name, opts=None, cidr_block=None, egress=None, from_port=None, icmp_code=None, icmp_type=None, ipv6_cidr_block=None, network_acl_id=None, protocol=None, rule_action=None, rule_number=None, to_port=None, __props__=None, __name__=None, __opts__=None):
        """
        Creates an entry (a rule) in a network ACL with the specified rule number.

        > **NOTE on Network ACLs and Network ACL Rules:** This provider currently
        provides both a standalone Network ACL Rule resource and a Network ACL resource with rules
        defined in-line. At this time you cannot use a Network ACL with in-line rules
        in conjunction with any Network ACL Rule resources. Doing so will cause
        a conflict of rule settings and will overwrite rules.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] cidr_block: The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24 ).
        :param pulumi.Input[bool] egress: Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet). Default `false`.
        :param pulumi.Input[float] from_port: The from port to match.
        :param pulumi.Input[str] icmp_code: ICMP protocol: The ICMP code. Required if specifying ICMP for the protocol. e.g. -1
        :param pulumi.Input[str] icmp_type: ICMP protocol: The ICMP type. Required if specifying ICMP for the protocol. e.g. -1
        :param pulumi.Input[str] ipv6_cidr_block: The IPv6 CIDR block to allow or deny.
        :param pulumi.Input[str] network_acl_id: The ID of the network ACL.
        :param pulumi.Input[str] protocol: The protocol. A value of -1 means all protocols.
        :param pulumi.Input[str] rule_action: Indicates whether to allow or deny the traffic that matches the rule. Accepted values: `allow` | `deny`
        :param pulumi.Input[float] rule_number: The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number.
        :param pulumi.Input[float] to_port: The to port to match.

        > This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/network_acl_rule.html.markdown.
        """
        # Backwards-compatibility shims for the deprecated __name__/__opts__
        # keyword arguments.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # opts.id set means "adopt an existing resource" (see get() below);
        # unset means we are creating a new one and must build the input bag.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['cidr_block'] = cidr_block
            __props__['egress'] = egress
            __props__['from_port'] = from_port
            __props__['icmp_code'] = icmp_code
            __props__['icmp_type'] = icmp_type
            __props__['ipv6_cidr_block'] = ipv6_cidr_block
            # The following inputs are required by the provider schema.
            if network_acl_id is None:
                raise TypeError("Missing required property 'network_acl_id'")
            __props__['network_acl_id'] = network_acl_id
            if protocol is None:
                raise TypeError("Missing required property 'protocol'")
            __props__['protocol'] = protocol
            if rule_action is None:
                raise TypeError("Missing required property 'rule_action'")
            __props__['rule_action'] = rule_action
            if rule_number is None:
                raise TypeError("Missing required property 'rule_number'")
            __props__['rule_number'] = rule_number
            __props__['to_port'] = to_port
        # Register the resource with the Pulumi engine under its type token.
        super(NetworkAclRule, __self__).__init__(
            'aws:ec2/networkAclRule:NetworkAclRule',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, cidr_block=None, egress=None, from_port=None, icmp_code=None, icmp_type=None, ipv6_cidr_block=None, network_acl_id=None, protocol=None, rule_action=None, rule_number=None, to_port=None):
        """
        Get an existing NetworkAclRule resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] cidr_block: The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24 ).
        :param pulumi.Input[bool] egress: Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet). Default `false`.
        :param pulumi.Input[float] from_port: The from port to match.
        :param pulumi.Input[str] icmp_code: ICMP protocol: The ICMP code. Required if specifying ICMP for the protocol. e.g. -1
        :param pulumi.Input[str] icmp_type: ICMP protocol: The ICMP type. Required if specifying ICMP for the protocol. e.g. -1
        :param pulumi.Input[str] ipv6_cidr_block: The IPv6 CIDR block to allow or deny.
        :param pulumi.Input[str] network_acl_id: The ID of the network ACL.
        :param pulumi.Input[str] protocol: The protocol. A value of -1 means all protocols.
        :param pulumi.Input[str] rule_action: Indicates whether to allow or deny the traffic that matches the rule. Accepted values: `allow` | `deny`
        :param pulumi.Input[float] rule_number: The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number.
        :param pulumi.Input[float] to_port: The to port to match.

        > This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/network_acl_rule.html.markdown.
        """
        # Setting opts.id makes the constructor adopt the existing resource
        # instead of creating a new one; property args act as state hints.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["cidr_block"] = cidr_block
        __props__["egress"] = egress
        __props__["from_port"] = from_port
        __props__["icmp_code"] = icmp_code
        __props__["icmp_type"] = icmp_type
        __props__["ipv6_cidr_block"] = ipv6_cidr_block
        __props__["network_acl_id"] = network_acl_id
        __props__["protocol"] = protocol
        __props__["rule_action"] = rule_action
        __props__["rule_number"] = rule_number
        __props__["to_port"] = to_port
        return NetworkAclRule(resource_name, opts=opts, __props__=__props__)

    def translate_output_property(self, prop):
        # Map provider camelCase property names to Python snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Map Python snake_case property names back to provider camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class NetworkAclRule(pulumi.CustomResource):
cidr_block: pulumi.Output[str]
"""
The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24 ).
"""
egress: pulumi.Output[bool]
"""
Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet). Default `false`.
"""
from_port: pulumi.Output[float]
"""
The from port to match.
"""
icmp_code: pulumi.Output[str]
"""
ICMP protocol: The ICMP code. Required if specifying ICMP for the protocol. e.g. -1
"""
icmp_type: pulumi.Output[str]
"""
ICMP protocol: The ICMP type. Required if specifying ICMP for the protocol. e.g. -1
"""
ipv6_cidr_block: pulumi.Output[str]
"""
The IPv6 CIDR block to allow or deny.
"""
network_acl_id: pulumi.Output[str]
"""
The ID of the network ACL.
"""
protocol: pulumi.Output[str]
"""
The protocol. A value of -1 means all protocols.
"""
rule_action: pulumi.Output[str]
"""
Indicates whether to allow or deny the traffic that matches the rule. Accepted values: `allow` | `deny`
"""
rule_number: pulumi.Output[float]
"""
The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number.
"""
to_port: pulumi.Output[float]
"""
The to port to match.
"""
def __init__(__self__, resource_name, opts=None, cidr_block=None, egress=None, from_port=None, icmp_code=None, icmp_type=None, ipv6_cidr_block=None, network_acl_id=None, protocol=None, rule_action=None, rule_number=None, to_port=None, __props__=None, __name__=None, __opts__=None):
"""
Creates an entry (a rule) in a network ACL with the specified rule number.
> **NOTE on Network ACLs and Network ACL Rules:** This provider currently
provides both a standalone Network ACL Rule resource and a Network ACL resource with rules
defined in-line. At this time you cannot use a Network ACL with in-line rules
in conjunction with any Network ACL Rule resources. Doing so will cause
a conflict of rule settings and will overwrite rules.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cidr_block: The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24 ).
:param pulumi.Input[bool] egress: Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet). Default `false`.
:param pulumi.Input[float] from_port: The from port to match.
:param pulumi.Input[str] icmp_code: ICMP protocol: The ICMP code. Required if specifying ICMP for the protocol. e.g. -1
:param pulumi.Input[str] icmp_type: ICMP protocol: The ICMP type. Required if specifying ICMP for the protocol. e.g. -1
:param pulumi.Input[str] ipv6_cidr_block: The IPv6 CIDR block to allow or deny.
:param pulumi.Input[str] network_acl_id: The ID of the network ACL.
:param pulumi.Input[str] protocol: The protocol. A value of -1 means all protocols.
:param pulumi.Input[str] rule_action: Indicates whether to allow or deny the traffic that matches the rule. Accepted values: `allow` | `deny`
:param pulumi.Input[float] rule_number: The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number.
:param pulumi.Input[float] to_port: The to port to match.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/network_acl_rule.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['cidr_block'] = cidr_block
__props__['egress'] = egress
__props__['from_port'] = from_port
__props__['icmp_code'] = icmp_code
__props__['icmp_type'] = icmp_type
__props__['ipv6_cidr_block'] = ipv6_cidr_block
if network_acl_id is None:
raise TypeError("Missing required property 'network_acl_id'")
__props__['network_acl_id'] = network_acl_id
if protocol is None:
raise TypeError("Missing required property 'protocol'")
__props__['protocol'] = protocol
if rule_action is None:
raise TypeError("Missing required property 'rule_action'")
__props__['rule_action'] = rule_action
if rule_number is None:
raise TypeError("Missing required property 'rule_number'")
__props__['rule_number'] = rule_number
__props__['to_port'] = to_port
super(NetworkAclRule, __self__).__init__(
'aws:ec2/networkAclRule:NetworkAclRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, cidr_block=None, egress=None, from_port=None, icmp_code=None, icmp_type=None, ipv6_cidr_block=None, network_acl_id=None, protocol=None, rule_action=None, rule_number=None, to_port=None):
"""
Get an existing NetworkAclRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cidr_block: The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24 ).
:param pulumi.Input[bool] egress: Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet). Default `false`.
:param pulumi.Input[float] from_port: The from port to match.
:param pulumi.Input[str] icmp_code: ICMP protocol: The ICMP code. Required if specifying ICMP for the protocol. e.g. -1
:param pulumi.Input[str] icmp_type: ICMP protocol: The ICMP type. Required if specifying ICMP for the protocol. e.g. -1
:param pulumi.Input[str] ipv6_cidr_block: The IPv6 CIDR block to allow or deny.
:param pulumi.Input[str] network_acl_id: The ID of the network ACL.
:param pulumi.Input[str] protocol: The protocol. A value of -1 means all protocols.
:param pulumi.Input[str] rule_action: Indicates whether to allow or deny the traffic that matches the rule. Accepted values: `allow` | `deny`
:param pulumi.Input[float] rule_number: The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number.
:param pulumi.Input[float] to_port: The to port to match.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/network_acl_rule.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["cidr_block"] = cidr_block
__props__["egress"] = egress
__props__["from_port"] = from_port
__props__["icmp_code"] = icmp_code
__props__["icmp_type"] = icmp_type
__props__["ipv6_cidr_block"] = ipv6_cidr_block
__props__["network_acl_id"] = network_acl_id
__props__["protocol"] = protocol
__props__["rule_action"] = rule_action
__props__["rule_number"] = rule_number
__props__["to_port"] = to_port
return NetworkAclRule(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop | en | 0.680281 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24 ). Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet). Default `false`. The from port to match. ICMP protocol: The ICMP code. Required if specifying ICMP for the protocol. e.g. -1 ICMP protocol: The ICMP type. Required if specifying ICMP for the protocol. e.g. -1 The IPv6 CIDR block to allow or deny. The ID of the network ACL. The protocol. A value of -1 means all protocols. Indicates whether to allow or deny the traffic that matches the rule. Accepted values: `allow` | `deny` The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number. The to port to match. Creates an entry (a rule) in a network ACL with the specified rule number. > **NOTE on Network ACLs and Network ACL Rules:** This provider currently provides both a standalone Network ACL Rule resource and a Network ACL resource with rules defined in-line. At this time you cannot use a Network ACL with in-line rules in conjunction with any Network ACL Rule resources. Doing so will cause a conflict of rule settings and will overwrite rules. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] cidr_block: The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24 ). :param pulumi.Input[bool] egress: Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet). Default `false`. :param pulumi.Input[float] from_port: The from port to match. :param pulumi.Input[str] icmp_code: ICMP protocol: The ICMP code. Required if specifying ICMP for the protocol. e.g. 
-1 :param pulumi.Input[str] icmp_type: ICMP protocol: The ICMP type. Required if specifying ICMP for the protocol. e.g. -1 :param pulumi.Input[str] ipv6_cidr_block: The IPv6 CIDR block to allow or deny. :param pulumi.Input[str] network_acl_id: The ID of the network ACL. :param pulumi.Input[str] protocol: The protocol. A value of -1 means all protocols. :param pulumi.Input[str] rule_action: Indicates whether to allow or deny the traffic that matches the rule. Accepted values: `allow` | `deny` :param pulumi.Input[float] rule_number: The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number. :param pulumi.Input[float] to_port: The to port to match. > This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/network_acl_rule.html.markdown. Get an existing NetworkAclRule resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param str id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] cidr_block: The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24 ). :param pulumi.Input[bool] egress: Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet). Default `false`. :param pulumi.Input[float] from_port: The from port to match. :param pulumi.Input[str] icmp_code: ICMP protocol: The ICMP code. Required if specifying ICMP for the protocol. e.g. -1 :param pulumi.Input[str] icmp_type: ICMP protocol: The ICMP type. Required if specifying ICMP for the protocol. e.g. -1 :param pulumi.Input[str] ipv6_cidr_block: The IPv6 CIDR block to allow or deny. :param pulumi.Input[str] network_acl_id: The ID of the network ACL. :param pulumi.Input[str] protocol: The protocol. A value of -1 means all protocols. 
:param pulumi.Input[str] rule_action: Indicates whether to allow or deny the traffic that matches the rule. Accepted values: `allow` | `deny` :param pulumi.Input[float] rule_number: The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number. :param pulumi.Input[float] to_port: The to port to match. > This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/network_acl_rule.html.markdown. | 1.857607 | 2 |
app/email.py | itsuprun/db_coursework | 0 | 6625226 | <gh_stars>0
from flask_mail import Message
from app import app,mail
from threading import Thread
from flask import render_template, flash, redirect, url_for, request
from flask_login import current_user, login_user, logout_user, login_required
def send_async_email(app, msg):
    """Deliver *msg* via Flask-Mail inside *app*'s application context.

    Intended to run on a worker thread (see send_email) so the request
    thread is not blocked by SMTP I/O.
    """
    context = app.app_context()
    with context:
        mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
    """Compose a multipart e-mail and hand it off to a background thread.

    :param subject: message subject line.
    :param sender: "from" address.
    :param recipients: list of recipient addresses.
    :param text_body: plain-text body.
    :param html_body: HTML body.
    """
    message = Message(subject, sender=sender, recipients=recipients)
    message.body = text_body
    message.html = html_body
    worker = Thread(target=send_async_email, args=(app, message))
    worker.start()
def send_password_reset_email(user):
    """E-mail *user* a password-reset link.

    A reset token is generated for the user and rendered into both the
    plain-text and HTML bodies of the message.
    """
    # NOTE(review): this line was corrupted (anonymized) in the published
    # source. The conventional Flask pattern matching the reset_password
    # templates is a User-model method returning a short-lived signed
    # token — confirm the exact method name on the User model.
    token = user.get_reset_password_token()
    send_email('[Moorodool] Reset Your Password',
               sender=app.config['ADMINS'][0],
               recipients=[user.email],
               text_body=render_template('email/reset_password.txt',
                                         user=user, token=token),
               html_body=render_template('email/reset_password.html',
                                         user=user, token=token))
def send_contact_form_email(name, email, text):
    """Forward a contact-form submission to the site operators.

    :param name: name entered in the contact form.
    :param email: reply-to address entered by the visitor.
    :param text: free-form message body.
    """
    # NOTE(review): the recipient below is an anonymized placeholder from
    # the published source ('<EMAIL>'); the deployed address should come
    # from configuration (e.g. app.config['ADMINS']) rather than a literal.
    send_email('Need help in smth',
               sender=app.config['ADMINS'][0],
               recipients=['<EMAIL>'],
               text_body=render_template('email/contact.txt',
                                         name = name,email = email, message = text),
               html_body=render_template('email/contact.html',
                                         name = name,email = email, message = text))
from app import app,mail
from threading import Thread
from flask import render_template, flash, redirect, url_for, request
from flask_login import current_user, login_user, logout_user, login_required
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(app, msg)).start()
def send_password_reset_email(user):
token = <PASSWORD>password_<PASSWORD>()
send_email('[Moorodool] Reset Your Password',
sender=app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/reset_password.txt',
user=user, token=token),
html_body=render_template('email/reset_password.html',
user=user, token=token))
def send_contact_form_email(name, email, text):
send_email('Need help in smth',
sender=app.config['ADMINS'][0],
recipients=['<EMAIL>'],
text_body=render_template('email/contact.txt',
name = name,email = email, message = text),
html_body=render_template('email/contact.html',
name = name,email = email, message = text)) | none | 1 | 2.606474 | 3 | |
pyscripts/preprocess/stanford_scenes_down_sample.py | Twofyw/Adversarial_Structure_Matching | 11 | 6625227 | """Helper scripts to down-sample Stanford 2D3DS dataset.
"""
import os
import argparse
import PIL.Image as Image
import numpy as np
import cv2
def parse_args():
    """Parse command-line arguments for the down-sampling helper.

    Returns:
        argparse.Namespace with ``data_dir`` and ``new_dir`` attributes.
    """
    parser = argparse.ArgumentParser(
        description='Helper scripts to down-sample Stanford 2D3DS')
    options = (
        ('--data_dir', '/path/to/Stanford/2D3DS/dir.'),
        ('--new_dir', '/path/to/down-sampled/Stanford/2D3DS/dir.'),
    )
    for flag, help_text in options:
        parser.add_argument(flag, type=str, help=help_text)
    return parser.parse_args()
def main():
    """Down-sample RGB and surface-normal images by a factor of two.

    Walks the ``rgb`` and ``normal`` sub-directories of each area under
    ``--data_dir``, halves every image (bilinear for RGB, nearest-neighbour
    for normals so values are not blended) and writes the result under the
    mirrored path in ``--new_dir``.
    """
    args = parse_args()
    dir_names = ['area_1', 'area_2', 'area_3', 'area_4',
                 'area_5a', 'area_5b', 'area_6']
    for root_dir_name in dir_names:
        for sub_dir_name in ['rgb', 'normal']:
            dir_name = os.path.join(args.data_dir,
                                    root_dir_name,
                                    'data',
                                    sub_dir_name)
            for dirpath, _, filenames in os.walk(dir_name):
                for file_name in filenames:
                    # endswith() avoids matching names that merely contain
                    # '.png'/'.jpg' in the middle (the old substring test did).
                    if not file_name.endswith(('.png', '.jpg')):
                        continue
                    arr = np.array(Image.open(os.path.join(dirpath, file_name)))
                    h, w = arr.shape[:2]
                    new_h, new_w = h // 2, w // 2
                    if sub_dir_name == 'rgb':
                        # Bilinear keeps smooth photographic content.
                        arr = cv2.resize(arr,
                                         (new_w, new_h),
                                         interpolation=cv2.INTER_LINEAR)
                    else:
                        # Nearest-neighbour avoids averaging normal vectors.
                        arr = cv2.resize(arr,
                                         (new_w, new_h),
                                         interpolation=cv2.INTER_NEAREST)
                    new_dir = dirpath.replace(args.data_dir, args.new_dir)
                    if not os.path.isdir(new_dir):
                        os.makedirs(new_dir)
                    new_name = os.path.join(new_dir, file_name)
                    # NOTE(review): mode='RGB' assumes a 3-channel uint8
                    # array; 4-channel PNGs would raise here — confirm the
                    # dataset's channel count.
                    Image.fromarray(arr, mode='RGB').save(new_name)
if __name__ == '__main__':
main()
| """Helper scripts to down-sample Stanford 2D3DS dataset.
"""
import os
import argparse
import PIL.Image as Image
import numpy as np
import cv2
def parse_args():
"""Parsse Command Line Arguments.
"""
parser = argparse.ArgumentParser(
description='Helper scripts to down-sample Stanford 2D3DS')
parser.add_argument('--data_dir', type=str,
help='/path/to/Stanford/2D3DS/dir.')
parser.add_argument('--new_dir', type=str,
help='/path/to/down-sampled/Stanford/2D3DS/dir.')
return parser.parse_args()
def main():
"""Down-sample RGB and Surface Normal.
"""
args = parse_args()
dir_names = ['area_1', 'area_2', 'area_3', 'area_4',
'area_5a', 'area_5b', 'area_6']
for root_dir_name in dir_names:
for sub_dir_name in ['rgb', 'normal']:
dir_name = os.path.join(args.data_dir,
root_dir_name,
'data',
sub_dir_name)
for dirpath, dirnames, filenames in os.walk(dir_name):
for file_name in filenames:
if '.png' not in file_name and '.jpg' not in file_name:
continue
arr = np.array(Image.open(os.path.join(dirpath, file_name)))
h, w = arr.shape[:2]
new_h, new_w = int(h/2), int(w/2)
if 'rgb' == sub_dir_name:
arr = cv2.resize(arr,
(new_w,new_h),
interpolation=cv2.INTER_LINEAR)
else:
arr = cv2.resize(arr,
(new_w,new_h),
interpolation=cv2.INTER_NEAREST)
new_dir = dirpath.replace(args.data_dir, args.new_dir)
if not os.path.isdir(new_dir):
os.makedirs(new_dir)
new_name = os.path.join(new_dir, file_name)
Image.fromarray(arr, mode='RGB').save(new_name)
if __name__ == '__main__':
main()
| en | 0.615878 | Helper scripts to down-sample Stanford 2D3DS dataset. Parsse Command Line Arguments. Down-sample RGB and Surface Normal. | 2.784556 | 3 |
Incident-Response/Tools/cyphon/cyphon/contexts/serializers.py | sn0b4ll/Incident-Playbook | 1 | 6625228 | <reponame>sn0b4ll/Incident-Playbook<gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Serializers for Contexts.
"""
# third party
from rest_framework import serializers
# local
from .models import Context, ContextFilter
from distilleries.models import Distillery
class ContextFilterSerializer(serializers.ModelSerializer):
    """
    REST serializer for ContextFilter objects.

    Exposes the filter's parent context, the field being searched, the
    comparison operator (``operator`` and its ``operator_text``
    counterpart), the field supplying the comparison value, and a
    hyperlinked ``url``.
    """
    class Meta:
        model = ContextFilter
        fields = (
            'id',
            'context',
            'search_field',
            'operator',
            'operator_text',
            'value_field',
            'url',
        )
class ContextDistillerySerializer(serializers.ModelSerializer):
    """Serializer for a |Distillery| list.

    Provides a compact (id, name, url) representation of a Distillery for
    nesting inside ContextSerializer.
    """

    # Expose the primary key under the conventional name `id`.
    id = serializers.ReadOnlyField(source='pk') # pylint: disable=C0103
    # Human-readable name taken from the model's __str__ representation.
    name = serializers.CharField(source='__str__')

    class Meta(object):
        """Metadata options."""
        model = Distillery
        depth = 1
        fields = (
            'id',
            'name',
            'url',
        )
class ContextSerializer(serializers.ModelSerializer):
    """
    REST serializer for Context objects.

    Nests compact representations of the primary and related distilleries
    and all attached filters, alongside the before/after time-window
    settings and the filter-combination logic.
    """
    # Nested read-only views of the two distilleries tied to this context.
    primary_distillery = ContextDistillerySerializer()
    related_distillery = ContextDistillerySerializer()
    # All filters attached to this context.
    filters = ContextFilterSerializer(many=True)

    class Meta:
        model = Context
        fields = (
            'id',
            'name',
            'primary_distillery',
            'related_distillery',
            'before_time_interval',
            'before_time_unit',
            'after_time_interval',
            'after_time_unit',
            'filters',
            'filter_logic',
            'url',
        )
| # -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Serializers for Contexts.
"""
# third party
from rest_framework import serializers
# local
from .models import Context, ContextFilter
from distilleries.models import Distillery
class ContextFilterSerializer(serializers.ModelSerializer):
"""
Serializer for ContextFilters.
"""
class Meta:
model = ContextFilter
fields = (
'id',
'context',
'search_field',
'operator',
'operator_text',
'value_field',
'url',
)
class ContextDistillerySerializer(serializers.ModelSerializer):
"""Serializer for a |Distillery| list."""
id = serializers.ReadOnlyField(source='pk') # pylint: disable=C0103
name = serializers.CharField(source='__str__')
class Meta(object):
"""Metadata options."""
model = Distillery
depth = 1
fields = (
'id',
'name',
'url',
)
class ContextSerializer(serializers.ModelSerializer):
"""
Serializer for Contexts.
"""
primary_distillery = ContextDistillerySerializer()
related_distillery = ContextDistillerySerializer()
filters = ContextFilterSerializer(many=True)
class Meta:
model = Context
fields = (
'id',
'name',
'primary_distillery',
'related_distillery',
'before_time_interval',
'before_time_unit',
'after_time_interval',
'after_time_unit',
'filters',
'filter_logic',
'url',
) | en | 0.830032 | # -*- coding: utf-8 -*- # Copyright 2017-2019 ControlScan, Inc. # # This file is part of Cyphon Engine. # # Cyphon Engine is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, version 3 of the License. # # Cyphon Engine is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>. Serializers for Contexts. # third party # local Serializer for ContextFilters. Serializer for a |Distillery| list. # pylint: disable=C0103 Metadata options. Serializer for Contexts. | 1.85911 | 2 |
authentik/core/tests/test_applications_views.py | BeryJu/passbook | 15 | 6625229 | <reponame>BeryJu/passbook
"""Test Applications API"""
from unittest.mock import MagicMock, patch
from django.urls import reverse
from authentik.core.models import Application
from authentik.core.tests.utils import create_test_admin_user, create_test_tenant
from authentik.flows.models import Flow, FlowDesignation
from authentik.flows.tests import FlowTestCase
from authentik.tenants.models import Tenant
class TestApplicationsViews(FlowTestCase):
    """Tests for the application launch view and its redirect behaviour."""

    def setUp(self) -> None:
        self.user = create_test_admin_user()
        self.allowed = Application.objects.create(
            name="allowed", slug="allowed", meta_launch_url="https://goauthentik.io/%(username)s"
        )

    def _setup_authentication_flow(self) -> Flow:
        """Create an empty authentication flow and make it the tenant default.

        Shared by both tests; previously this setup was duplicated verbatim.
        """
        empty_flow = Flow.objects.create(
            name="foo",
            slug="foo",
            designation=FlowDesignation.AUTHENTICATION,
        )
        tenant: Tenant = create_test_tenant()
        tenant.flow_authentication = empty_flow
        tenant.save()
        return empty_flow

    def test_check_redirect(self):
        """Unauthenticated launch runs the flow, then redirects to the app."""
        empty_flow = self._setup_authentication_flow()
        response = self.client.get(
            reverse(
                "authentik_core:application-launch",
                kwargs={"application_slug": self.allowed.slug},
            ),
            follow=True,
        )
        self.assertEqual(response.status_code, 200)
        with patch(
            "authentik.flows.stage.StageView.get_pending_user", MagicMock(return_value=self.user)
        ):
            response = self.client.post(
                reverse("authentik_api:flow-executor", kwargs={"flow_slug": empty_flow.slug})
            )
            self.assertEqual(response.status_code, 200)
            self.assertStageRedirects(response, f"https://goauthentik.io/{self.user.username}")

    def test_check_redirect_auth(self):
        """Authenticated launch redirects immediately without running the flow."""
        self.client.force_login(self.user)
        self._setup_authentication_flow()
        response = self.client.get(
            reverse(
                "authentik_core:application-launch",
                kwargs={"application_slug": self.allowed.slug},
            ),
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, f"https://goauthentik.io/{self.user.username}")
| """Test Applications API"""
from unittest.mock import MagicMock, patch
from django.urls import reverse
from authentik.core.models import Application
from authentik.core.tests.utils import create_test_admin_user, create_test_tenant
from authentik.flows.models import Flow, FlowDesignation
from authentik.flows.tests import FlowTestCase
from authentik.tenants.models import Tenant
class TestApplicationsViews(FlowTestCase):
"""Test applications Views"""
def setUp(self) -> None:
self.user = create_test_admin_user()
self.allowed = Application.objects.create(
name="allowed", slug="allowed", meta_launch_url="https://goauthentik.io/%(username)s"
)
def test_check_redirect(self):
"""Test redirect"""
empty_flow = Flow.objects.create(
name="foo",
slug="foo",
designation=FlowDesignation.AUTHENTICATION,
)
tenant: Tenant = create_test_tenant()
tenant.flow_authentication = empty_flow
tenant.save()
response = self.client.get(
reverse(
"authentik_core:application-launch",
kwargs={"application_slug": self.allowed.slug},
),
follow=True,
)
self.assertEqual(response.status_code, 200)
with patch(
"authentik.flows.stage.StageView.get_pending_user", MagicMock(return_value=self.user)
):
response = self.client.post(
reverse("authentik_api:flow-executor", kwargs={"flow_slug": empty_flow.slug})
)
self.assertEqual(response.status_code, 200)
self.assertStageRedirects(response, f"https://goauthentik.io/{self.user.username}")
def test_check_redirect_auth(self):
"""Test redirect"""
self.client.force_login(self.user)
empty_flow = Flow.objects.create(
name="foo",
slug="foo",
designation=FlowDesignation.AUTHENTICATION,
)
tenant: Tenant = create_test_tenant()
tenant.flow_authentication = empty_flow
tenant.save()
response = self.client.get(
reverse(
"authentik_core:application-launch",
kwargs={"application_slug": self.allowed.slug},
),
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, f"https://goauthentik.io/{self.user.username}") | en | 0.668988 | Test Applications API Test applications Views Test redirect Test redirect | 2.257434 | 2 |
bin/cpu_monitor.py | lavarock1234/ros-system-monitor | 0 | 6625230 | #!/usr/bin/env python3
############################################################################
# Copyright (C) 2009, <NAME>, Inc. #
# Copyright (C) 2013 by <NAME> #
# <EMAIL> #
# Copyright (C) 2013 by <NAME> #
# <EMAIL> #
# #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# 1. Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# 2. Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# 3. The name of the copyright holders may be used to endorse or #
# promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,     #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE          #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
from __future__ import with_statement
import rospy
import traceback
import threading
from threading import Timer
import sys, os, time
from time import sleep
import subprocess
import string
import multiprocessing
import socket
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
# Default thresholds; each can be overridden via a ROS private parameter
# of the same name (see CPUMonitor.__init__).
cpu_load_warn = 0.9    # per-core usage fraction -> warning
cpu_load_error = 1.1   # per-core usage fraction -> error
cpu_load1_warn = 0.9   # 1-minute load average per core -> warning
cpu_load5_warn = 0.8   # 5-minute load average per core -> warning
cpu_temp_warn = 85.0   # degrees Celsius -> warning
cpu_temp_error = 90.0  # degrees Celsius -> error

# Maps diagnostic levels (OK/WARN/ERROR) to display strings.
stat_dict = { 0: 'OK', 1: 'Warning', 2: 'Error' }
def update_status_stale(stat, last_update_time):
    """Annotate *stat* with staleness information in place.

    Marks the status 'Lagging' (WARN) when the last update is more than
    20 s old and 'Stale' (ERROR) beyond 35 s, appending the label to the
    status message and raising ``stat.level`` accordingly.  The first two
    entries of ``stat.values`` are always rewritten with the current
    update status and the elapsed time.

    :param stat: DiagnosticStatus to annotate (mutated).
    :param last_update_time: rospy time of the last successful update.
    """
    time_since_update = rospy.get_time() - last_update_time

    stale_status = 'OK'
    # Tiers checked from most to least severe; at most one applies
    # (previously the same message/level logic was duplicated per tier).
    for threshold, label, level in ((35, 'Stale', DiagnosticStatus.ERROR),
                                    (20, 'Lagging', DiagnosticStatus.WARN)):
        if time_since_update > threshold:
            stale_status = label
            if stat.level == DiagnosticStatus.OK:
                stat.message = label
            elif stat.message.find(label) < 0:
                stat.message = ', '.join([stat.message, label])
            stat.level = max(stat.level, level)
            break

    # Replace the two leading bookkeeping entries in place.
    stat.values.pop(0)
    stat.values.pop(0)
    stat.values.insert(0, KeyValue(key = 'Update Status', value = stale_status))
    stat.values.insert(1, KeyValue(key = 'Time Since Update', value = str(time_since_update)))
class CPUMonitor():
    def __init__(self, hostname, diag_hostname):
        """Initialize the monitor and start the periodic checks.

        :param hostname: machine hostname, used as the hardware_id on the
            published DiagnosticStatus messages.
        :param diag_hostname: display hostname for diagnostics.
            NOTE(review): not referenced in this constructor — confirm
            whether it is used elsewhere in the class.
        """
        self._diag_pub = rospy.Publisher('/diagnostics', DiagnosticArray, queue_size = 100)

        # Protects the timer handles against concurrent restart/cancel.
        self._mutex = threading.Lock()

        # Thresholds come from ROS private parameters, falling back to the
        # module-level defaults.
        self._check_core_temps = rospy.get_param('~check_core_temps', True)

        self._cpu_load_warn = rospy.get_param('~cpu_load_warn', cpu_load_warn)
        self._cpu_load_error = rospy.get_param('~cpu_load_error', cpu_load_error)
        self._cpu_load1_warn = rospy.get_param('~cpu_load1_warn', cpu_load1_warn)
        self._cpu_load5_warn = rospy.get_param('~cpu_load5_warn', cpu_load5_warn)
        self._cpu_temp_warn = rospy.get_param('~cpu_temp_warn', cpu_temp_warn)
        self._cpu_temp_error = rospy.get_param('~cpu_temp_error', cpu_temp_error)

        # Used to normalise load averages and per-core usage.
        self._num_cores = multiprocessing.cpu_count()

        # Periodic check timer handles; expected to be (re)armed by the
        # check routines (see _restart_temp_check / cancel_timers).
        self._temps_timer = None
        self._usage_timer = None

        # Get temp_input files
        self._temp_vals = self.get_core_temp_names()

        # CPU stats
        # level 1 (= Warning) with 'No Data' until the first check completes.
        self._temp_stat = DiagnosticStatus()
        self._temp_stat.name = 'CPU Temperature'
        self._temp_stat.level = 1
        self._temp_stat.hardware_id = hostname
        self._temp_stat.message = 'No Data'
        self._temp_stat.values = [ KeyValue(key = 'Update Status', value = 'No Data' ),
                                   KeyValue(key = 'Time Since Last Update', value = 'N/A') ]

        self._usage_stat = DiagnosticStatus()
        self._usage_stat.name = 'CPU Usage'
        self._usage_stat.level = 1
        self._usage_stat.hardware_id = hostname
        self._usage_stat.message = 'No Data'
        self._usage_stat.values = [ KeyValue(key = 'Update Status', value = 'No Data' ),
                                    KeyValue(key = 'Time Since Last Update', value = 'N/A') ]

        self._last_temp_time = 0
        self._last_usage_time = 0
        self._last_publish_time = 0

        # Fallback value when mpstat returns an implausible usage reading.
        self._usage_old = 0
        # One-shot flags so repeated failures do not spam the log.
        self._has_warned_mpstat = False
        self._has_error_core_count = False

        # Start checking everything
        self.check_temps()
        self.check_usage()
# Restart temperature checking
def _restart_temp_check(self):
rospy.logerr('Restarting temperature check thread in cpu_monitor. This should not happen')
try:
with self._mutex:
if self._temps_timer:
self._temps_timer.cancel()
self.check_temps()
except Exception as e:
rospy.logerr('Unable to restart temp thread. Error: %s' % traceback.format_exc())
## Must have the lock to cancel everything
def cancel_timers(self):
if self._temps_timer:
self._temps_timer.cancel()
if self._usage_timer:
self._usage_timer.cancel()
##\brief Check CPU core temps
##
## Use 'find /sys -name temp1_input' to find cores
## Read from every core, divide by 1000
def check_core_temps(self, sys_temp_strings):
diag_vals = []
diag_level = 0
diag_msgs = []
for index, temp_str in enumerate(sys_temp_strings):
if len(temp_str) < 5:
continue
cmd = 'cat %s' % temp_str
p = subprocess.Popen(cmd, stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
diag_level = DiagnosticStatus.ERROR
diag_msg = [ 'Core Temperature Error' ]
diag_vals = [ KeyValue(key = 'Core Temperature Error', value = stderr),
KeyValue(key = 'Output', value = stdout) ]
return diag_vals, diag_msgs, diag_level
tmp = stdout.strip()
if unicode(tmp).isnumeric():
temp = float(tmp) / 1000
diag_vals.append(KeyValue(key = 'Core %d Temperature' % index, value = str(temp)+"DegC"))
if temp >= self._cpu_temp_warn:
diag_level = max(diag_level, DiagnosticStatus.WARN)
diag_msgs.append('Warm')
elif temp >= self._cpu_temp_error:
diag_level = max(diag_level, DiagnosticStatus.ERROR)
diag_msgs.append('Hot')
else:
diag_level = max(diag_level, DiagnosticStatus.ERROR) # Error if not numeric value
diag_vals.append(KeyValue(key = 'Core %s Temperature' % index, value = tmp))
return diag_vals, diag_msgs, diag_level
## Checks clock speed from reading from CPU info
def check_clock_speed(self):
vals = []
msgs = []
lvl = DiagnosticStatus.OK
try:
p = subprocess.Popen('cat /proc/cpuinfo | grep MHz',
stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
lvl = DiagnosticStatus.ERROR
msgs = [ 'Clock speed error' ]
vals = [ KeyValue(key = 'Clock speed error', value = stderr),
KeyValue(key = 'Output', value = stdout) ]
return (vals, msgs, lvl)
for index, ln in enumerate(stdout.split(b'\n')):
words = ln.split(b':')
if len(words) < 2:
continue
speed = words[1].strip().split(b'.')[0] # Conversion to float doesn't work with decimal
vals.append(KeyValue(key = '%d Clock Speed (MHz)' % index, value = str(speed)))
except Exception as e:
rospy.logerr(traceback.format_exc())
lvl = DiagnosticStatus.ERROR
msgs.append('Exception')
vals.append(KeyValue(key = 'Exception', value = traceback.format_exc()))
return vals, msgs, lvl
# Add msgs output, too
##\brief Uses 'uptime' to see load average
def check_uptime(self):
level = DiagnosticStatus.OK
vals = []
load_dict = { 0: 'OK', 1: 'High Load', 2: 'Very High Load' }
try:
p = subprocess.Popen('uptime', stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
vals.append(KeyValue(key = 'uptime Failed', value = stderr))
return DiagnosticStatus.ERROR, vals
upvals = stdout.split()
load1 = float(upvals[-3].rstrip(b','))/self._num_cores
load5 = float(upvals[-2].rstrip(b','))/self._num_cores
load15 = float(upvals[-1])/self._num_cores
# Give warning if we go over load limit
if load1 > self._cpu_load1_warn or load5 > self._cpu_load5_warn:
level = DiagnosticStatus.WARN
vals.append(KeyValue(key = 'Load Average Status', value = load_dict[level]))
vals.append(KeyValue(key = 'Load Average (1min)', value = str(load1*1e2)+"%"))
vals.append(KeyValue(key = 'Load Average (5min)', value = str(load5*1e2)+"%"))
vals.append(KeyValue(key = 'Load Average (15min)', value = str(load15*1e2)+"%"))
except Exception as e:
rospy.logerr(traceback.format_exc())
level = DiagnosticStatus.ERROR
vals.append(KeyValue(key = 'Load Average Status', value = traceback.format_exc()))
return level, load_dict[level], vals
##\brief Use mpstat to find CPU usage
##
def check_mpstat(self):
vals = []
mp_level = DiagnosticStatus.OK
load_dict = { 0: 'OK', 1: 'High Load', 2: 'Error' }
try:
p = subprocess.Popen('mpstat -P ALL 1 1',
stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
if not self._has_warned_mpstat:
rospy.logerr("mpstat failed to run for cpu_monitor. Return code %d.", retcode)
self._has_warned_mpstat = True
mp_level = DiagnosticStatus.ERROR
vals.append(KeyValue(key = '\"mpstat\" Call Error', value = str(retcode)))
return mp_level, 'Unable to Check CPU Usage', vals
# Check which column '%idle' is, #4539
# mpstat output changed between 8.06 and 8.1
rows = stdout.split('\n')
col_names = rows[2].split()
idle_col = -1 if (len(col_names) > 2 and col_names[-1] == '%idle') else -2
num_cores = 0
cores_loaded = 0
for index, row in enumerate(stdout.split('\n')):
if index < 3:
continue
# Skip row containing 'all' data
if row.find('all') > -1:
continue
lst = row.split()
if len(lst) < 8:
continue
## Ignore 'Average: ...' data
if lst[0].startswith('Average'):
continue
cpu_name = '%d' % (num_cores)
idle = lst[idle_col]
user = lst[3]
nice = lst[4]
system = lst[5]
core_level = 0
usage = (float(user)+float(nice))*1e-2
if usage > 10.0: # wrong reading, use old reading instead
rospy.logwarn('Read CPU usage of %f percent. Reverting to previous reading of %f percent'%(usage, self._usage_old))
usage = self._usage_old
self._usage_old = usage
if usage >= self._cpu_load_warn:
cores_loaded += 1
core_level = DiagnosticStatus.WARN
elif usage >= self._cpu_load_error:
core_level = DiagnosticStatus.ERROR
vals.append(KeyValue(key = 'Core %s Status' % cpu_name, value = load_dict[core_level]))
vals.append(KeyValue(key = 'Core %s User' % cpu_name, value = user+"%"))
vals.append(KeyValue(key = 'Core %s Nice' % cpu_name, value = nice+"%"))
vals.append(KeyValue(key = 'Core %s System' % cpu_name, value = system+"%"))
vals.append(KeyValue(key = 'Core %s Idle' % cpu_name, value = idle+"%"))
num_cores += 1
# Warn for high load only if we have <= 2 cores that aren't loaded
if num_cores - cores_loaded <= 2 and num_cores > 2:
mp_level = DiagnosticStatus.WARN
if not self._num_cores:
self._num_cores = num_cores
# Check the number of cores if self._num_cores > 0, #4850
if self._num_cores != num_cores:
mp_level = DiagnosticStatus.ERROR
if not self._has_error_core_count:
rospy.logerr('Error checking number of cores. Expected %d, got %d. Computer may have not booted properly.',
self._num_cores, num_cores)
self._has_error_core_count = True
return DiagnosticStatus.ERROR, 'Incorrect number of CPU cores', vals
except Exception as e:
mp_level = DiagnosticStatus.ERROR
vals.append(KeyValue(key = 'mpstat Exception', value = str(e)))
return mp_level, load_dict[mp_level], vals
## Returns names for core temperature files
## Returns list of names as each name can be read like file
def get_core_temp_names(self):
temp_vals = []
try:
p = subprocess.Popen('find /sys/devices -name temp1_input',
stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
rospy.logerr('Error find core temp locations: %s' % stderr)
return []
for ln in stdout.split(b'\n'):
temp_vals.append(ln.strip())
return temp_vals
except:
rospy.logerr('Exception finding temp vals: %s' % traceback.format_exc())
return []
    ## Periodic temperature check; re-arms itself on a 5 s threading.Timer.
    ## Call every 10sec at minimum
    def check_temps(self):
        # Tear everything down once ROS is shutting down.
        if rospy.is_shutdown():
            with self._mutex:
                self.cancel_timers()
            return
        diag_vals = [ KeyValue(key = 'Update Status', value = 'OK' ),
                      KeyValue(key = 'Time Since Last Update', value = str(0) ) ]
        diag_msgs = []
        diag_level = 0
        if self._check_core_temps:
            # Read every temp1_input file discovered at start-up.
            core_vals, core_msgs, core_level = self.check_core_temps(self._temp_vals)
            diag_vals.extend(core_vals)
            diag_msgs.extend(core_msgs)
            diag_level = max(diag_level, core_level)
        # Deduplicate messages ('Warm'/'Hot' can appear once per core).
        diag_log = set(diag_msgs)
        if len(diag_log) > 0:
            message = ', '.join(diag_log)
        else:
            message = stat_dict[diag_level]
        # The *_stat fields are shared with publish_stats: mutate under the lock.
        with self._mutex:
            self._last_temp_time = rospy.get_time()
            self._temp_stat.level = diag_level
            self._temp_stat.message = message
            self._temp_stat.values = diag_vals
            # Re-arm while running; otherwise cancel the timers.
            if not rospy.is_shutdown():
                self._temps_timer = threading.Timer(5.0, self.check_temps)
                self._temps_timer.start()
            else:
                self.cancel_timers()
def check_usage(self):
if rospy.is_shutdown():
with self._mutex:
self.cancel_timers()
return
diag_level = 0
diag_vals = [ KeyValue(key = 'Update Status', value = 'OK' ),
KeyValue(key = 'Time Since Last Update', value = 0 )]
diag_msgs = []
# Check clock speed
clock_vals, clock_msgs, clock_level = self.check_clock_speed()
diag_vals.extend(clock_vals)
diag_msgs.extend(clock_msgs)
diag_level = max(diag_level, clock_level)
# Check mpstat
mp_level, mp_msg, mp_vals = self.check_mpstat()
diag_vals.extend(mp_vals)
if mp_level > 0:
diag_msgs.append(mp_msg)
diag_level = max(diag_level, mp_level)
# Check uptime
uptime_level, up_msg, up_vals = self.check_uptime()
diag_vals.extend(up_vals)
if uptime_level > 0:
diag_msgs.append(up_msg)
diag_level = max(diag_level, uptime_level)
if diag_msgs and diag_level > 0:
usage_msg = ', '.join(set(diag_msgs))
else:
usage_msg = stat_dict[diag_level]
# Update status
with self._mutex:
self._last_usage_time = rospy.get_time()
self._usage_stat.level = diag_level
self._usage_stat.values = diag_vals
self._usage_stat.message = usage_msg
if not rospy.is_shutdown():
self._usage_timer = threading.Timer(5.0, self.check_usage)
self._usage_timer.start()
else:
self.cancel_timers()
    def publish_stats(self):
        """Publish both DiagnosticStatus entries; called from the main loop."""
        with self._mutex:
            # Update everything with last update times
            update_status_stale(self._temp_stat, self._last_temp_time)
            update_status_stale(self._usage_stat, self._last_usage_time)
            msg = DiagnosticArray()
            msg.header.stamp = rospy.get_rostime()
            msg.status.append(self._temp_stat)
            msg.status.append(self._usage_stat)
            # Throttle publication to at most once per 0.5 s.
            if rospy.get_time() - self._last_publish_time > 0.5:
                self._diag_pub.publish(msg)
                self._last_publish_time = rospy.get_time()
        # Restart temperature checking if it goes stale, #4171
        # Need to run this without mutex
        # (_restart_temp_check calls check_temps, which takes the lock itself).
        if rospy.get_time() - self._last_temp_time > 90:
            self._restart_temp_check()
if __name__ == '__main__':
    # ROS node names cannot contain '-', so normalize the hostname.
    hostname = socket.gethostname()
    hostname = hostname.replace('-', '_')
    import optparse
    parser = optparse.OptionParser(usage="usage: cpu_monitor.py [--diag-hostname=cX]")
    parser.add_option("--diag-hostname", dest="diag_hostname",
                      help="Computer name in diagnostics output (ex: 'c1')",
                      metavar="DIAG_HOSTNAME",
                      action="store", default = hostname)
    options, args = parser.parse_args(rospy.myargv())
    try:
        rospy.init_node('cpu_monitor_%s' % hostname)
    except rospy.exceptions.ROSInitException:
        # Bug fix: "print >> sys.stderr, ..." is Python 2 syntax and is a
        # SyntaxError under the python3 shebang.
        print('CPU monitor is unable to initialize node. Master may not be running.', file=sys.stderr)
        sys.exit(0)
    cpu_node = CPUMonitor(hostname, options.diag_hostname)
    # Publish at ~4 s intervals; the check_* methods run on their own timers.
    rate = rospy.Rate(0.25)
    try:
        while not rospy.is_shutdown():
            rate.sleep()
            cpu_node.publish_stats()
    except KeyboardInterrupt:
        pass
    except Exception as e:
        traceback.print_exc()
        rospy.logerr(traceback.format_exc())
    cpu_node.cancel_timers()
    sys.exit(0)
| #!/usr/bin/env python3
############################################################################
# Copyright (C) 2009, <NAME>, Inc. #
# Copyright (C) 2013 by <NAME> #
# <EMAIL> #
# Copyright (C) 2013 by <NAME> #
# <EMAIL> #
# #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# 1. Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# 2. Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# 3. The name of the copyright holders may be used to endorse or #
# promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,     #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE          #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
from __future__ import with_statement
import rospy
import traceback
import threading
from threading import Timer
import sys, os, time
from time import sleep
import subprocess
import string
import multiprocessing
import socket
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
# Default thresholds; each can be overridden via a private ROS parameter
# of the same name (see CPUMonitor.__init__).
cpu_load_warn = 0.9    # per-core usage fraction that triggers WARN
cpu_load_error = 1.1   # per-core usage fraction that triggers ERROR
cpu_load1_warn = 0.9   # 1-minute load average WARN threshold (per core)
cpu_load5_warn = 0.8   # 5-minute load average WARN threshold (per core)
cpu_temp_warn = 85.0   # core temperature WARN threshold (deg C)
cpu_temp_error = 90.0  # core temperature ERROR threshold (deg C)
# Maps DiagnosticStatus levels to human-readable summaries.
stat_dict = { 0: 'OK', 1: 'Warning', 2: 'Error' }
def update_status_stale(stat, last_update_time):
    """Escalate *stat* in place when it has not been refreshed recently.

    20-35 s without an update marks the status 'Lagging' (WARN); more than
    35 s marks it 'Stale' (ERROR).  The first two entries of stat.values
    are replaced with the freshness summary.
    """
    elapsed = rospy.get_time() - last_update_time

    def _escalate(label, level):
        # Append the label to the message (once) and raise the level.
        if stat.level == DiagnosticStatus.OK:
            stat.message = label
        elif stat.message.find(label) < 0:
            stat.message = ', '.join([stat.message, label])
        stat.level = max(stat.level, level)

    stale_status = 'OK'
    if 20 < elapsed <= 35:
        stale_status = 'Lagging'
        _escalate(stale_status, DiagnosticStatus.WARN)
    if elapsed > 35:
        stale_status = 'Stale'
        _escalate(stale_status, DiagnosticStatus.ERROR)

    # Replace the first two key/value entries with the freshness summary.
    del stat.values[0:2]
    stat.values.insert(0, KeyValue(key = 'Update Status', value = stale_status))
    stat.values.insert(1, KeyValue(key = 'Time Since Update', value = str(elapsed)))
class CPUMonitor():
    def __init__(self, hostname, diag_hostname):
        """Set up the publisher, read thresholds and start the periodic checks.

        hostname      -- machine name, used as the diagnostics hardware_id
        diag_hostname -- display name for diagnostics output
                         NOTE(review): accepted but unused in this method --
                         confirm whether it should feed the status names.
        """
        self._diag_pub = rospy.Publisher('/diagnostics', DiagnosticArray, queue_size = 100)
        # Guards the *_stat fields and timers shared between the timer
        # callbacks and publish_stats().
        self._mutex = threading.Lock()
        # Thresholds are private ROS params falling back to module defaults.
        self._check_core_temps = rospy.get_param('~check_core_temps', True)
        self._cpu_load_warn = rospy.get_param('~cpu_load_warn', cpu_load_warn)
        self._cpu_load_error = rospy.get_param('~cpu_load_error', cpu_load_error)
        self._cpu_load1_warn = rospy.get_param('~cpu_load1_warn', cpu_load1_warn)
        self._cpu_load5_warn = rospy.get_param('~cpu_load5_warn', cpu_load5_warn)
        self._cpu_temp_warn = rospy.get_param('~cpu_temp_warn', cpu_temp_warn)
        self._cpu_temp_error = rospy.get_param('~cpu_temp_error', cpu_temp_error)
        self._num_cores = multiprocessing.cpu_count()
        self._temps_timer = None
        self._usage_timer = None
        # Get temp_input files
        self._temp_vals = self.get_core_temp_names()
        # CPU stats (level 1 = Warning until the first real update arrives)
        self._temp_stat = DiagnosticStatus()
        self._temp_stat.name = 'CPU Temperature'
        self._temp_stat.level = 1
        self._temp_stat.hardware_id = hostname
        self._temp_stat.message = 'No Data'
        self._temp_stat.values = [ KeyValue(key = 'Update Status', value = 'No Data' ),
                                   KeyValue(key = 'Time Since Last Update', value = 'N/A') ]
        self._usage_stat = DiagnosticStatus()
        self._usage_stat.name = 'CPU Usage'
        self._usage_stat.level = 1
        self._usage_stat.hardware_id = hostname
        self._usage_stat.message = 'No Data'
        self._usage_stat.values = [ KeyValue(key = 'Update Status', value = 'No Data' ),
                                    KeyValue(key = 'Time Since Last Update', value = 'N/A') ]
        self._last_temp_time = 0
        self._last_usage_time = 0
        self._last_publish_time = 0
        # Last accepted mpstat reading, used to reject outliers in check_mpstat.
        self._usage_old = 0
        self._has_warned_mpstat = False
        self._has_error_core_count = False
        # Start checking everything
        self.check_temps()
        self.check_usage()
    # Restart temperature checking
    def _restart_temp_check(self):
        """Recovery path used by publish_stats when temps go stale (#4171)."""
        rospy.logerr('Restarting temperature check thread in cpu_monitor. This should not happen')
        try:
            with self._mutex:
                if self._temps_timer:
                    self._temps_timer.cancel()
            # check_temps takes the mutex itself, so it runs after the lock
            # above is released.
            self.check_temps()
        except Exception as e:
            rospy.logerr('Unable to restart temp thread. Error: %s' % traceback.format_exc())
## Must have the lock to cancel everything
def cancel_timers(self):
if self._temps_timer:
self._temps_timer.cancel()
if self._usage_timer:
self._usage_timer.cancel()
##\brief Check CPU core temps
##
## Use 'find /sys -name temp1_input' to find cores
## Read from every core, divide by 1000
def check_core_temps(self, sys_temp_strings):
diag_vals = []
diag_level = 0
diag_msgs = []
for index, temp_str in enumerate(sys_temp_strings):
if len(temp_str) < 5:
continue
cmd = 'cat %s' % temp_str
p = subprocess.Popen(cmd, stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
diag_level = DiagnosticStatus.ERROR
diag_msg = [ 'Core Temperature Error' ]
diag_vals = [ KeyValue(key = 'Core Temperature Error', value = stderr),
KeyValue(key = 'Output', value = stdout) ]
return diag_vals, diag_msgs, diag_level
tmp = stdout.strip()
if unicode(tmp).isnumeric():
temp = float(tmp) / 1000
diag_vals.append(KeyValue(key = 'Core %d Temperature' % index, value = str(temp)+"DegC"))
if temp >= self._cpu_temp_warn:
diag_level = max(diag_level, DiagnosticStatus.WARN)
diag_msgs.append('Warm')
elif temp >= self._cpu_temp_error:
diag_level = max(diag_level, DiagnosticStatus.ERROR)
diag_msgs.append('Hot')
else:
diag_level = max(diag_level, DiagnosticStatus.ERROR) # Error if not numeric value
diag_vals.append(KeyValue(key = 'Core %s Temperature' % index, value = tmp))
return diag_vals, diag_msgs, diag_level
## Checks clock speed from reading from CPU info
def check_clock_speed(self):
vals = []
msgs = []
lvl = DiagnosticStatus.OK
try:
p = subprocess.Popen('cat /proc/cpuinfo | grep MHz',
stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
lvl = DiagnosticStatus.ERROR
msgs = [ 'Clock speed error' ]
vals = [ KeyValue(key = 'Clock speed error', value = stderr),
KeyValue(key = 'Output', value = stdout) ]
return (vals, msgs, lvl)
for index, ln in enumerate(stdout.split(b'\n')):
words = ln.split(b':')
if len(words) < 2:
continue
speed = words[1].strip().split(b'.')[0] # Conversion to float doesn't work with decimal
vals.append(KeyValue(key = '%d Clock Speed (MHz)' % index, value = str(speed)))
except Exception as e:
rospy.logerr(traceback.format_exc())
lvl = DiagnosticStatus.ERROR
msgs.append('Exception')
vals.append(KeyValue(key = 'Exception', value = traceback.format_exc()))
return vals, msgs, lvl
# Add msgs output, too
##\brief Uses 'uptime' to see load average
def check_uptime(self):
level = DiagnosticStatus.OK
vals = []
load_dict = { 0: 'OK', 1: 'High Load', 2: 'Very High Load' }
try:
p = subprocess.Popen('uptime', stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
vals.append(KeyValue(key = 'uptime Failed', value = stderr))
return DiagnosticStatus.ERROR, vals
upvals = stdout.split()
load1 = float(upvals[-3].rstrip(b','))/self._num_cores
load5 = float(upvals[-2].rstrip(b','))/self._num_cores
load15 = float(upvals[-1])/self._num_cores
# Give warning if we go over load limit
if load1 > self._cpu_load1_warn or load5 > self._cpu_load5_warn:
level = DiagnosticStatus.WARN
vals.append(KeyValue(key = 'Load Average Status', value = load_dict[level]))
vals.append(KeyValue(key = 'Load Average (1min)', value = str(load1*1e2)+"%"))
vals.append(KeyValue(key = 'Load Average (5min)', value = str(load5*1e2)+"%"))
vals.append(KeyValue(key = 'Load Average (15min)', value = str(load15*1e2)+"%"))
except Exception as e:
rospy.logerr(traceback.format_exc())
level = DiagnosticStatus.ERROR
vals.append(KeyValue(key = 'Load Average Status', value = traceback.format_exc()))
return level, load_dict[level], vals
##\brief Use mpstat to find CPU usage
##
def check_mpstat(self):
vals = []
mp_level = DiagnosticStatus.OK
load_dict = { 0: 'OK', 1: 'High Load', 2: 'Error' }
try:
p = subprocess.Popen('mpstat -P ALL 1 1',
stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
if not self._has_warned_mpstat:
rospy.logerr("mpstat failed to run for cpu_monitor. Return code %d.", retcode)
self._has_warned_mpstat = True
mp_level = DiagnosticStatus.ERROR
vals.append(KeyValue(key = '\"mpstat\" Call Error', value = str(retcode)))
return mp_level, 'Unable to Check CPU Usage', vals
# Check which column '%idle' is, #4539
# mpstat output changed between 8.06 and 8.1
rows = stdout.split('\n')
col_names = rows[2].split()
idle_col = -1 if (len(col_names) > 2 and col_names[-1] == '%idle') else -2
num_cores = 0
cores_loaded = 0
for index, row in enumerate(stdout.split('\n')):
if index < 3:
continue
# Skip row containing 'all' data
if row.find('all') > -1:
continue
lst = row.split()
if len(lst) < 8:
continue
## Ignore 'Average: ...' data
if lst[0].startswith('Average'):
continue
cpu_name = '%d' % (num_cores)
idle = lst[idle_col]
user = lst[3]
nice = lst[4]
system = lst[5]
core_level = 0
usage = (float(user)+float(nice))*1e-2
if usage > 10.0: # wrong reading, use old reading instead
rospy.logwarn('Read CPU usage of %f percent. Reverting to previous reading of %f percent'%(usage, self._usage_old))
usage = self._usage_old
self._usage_old = usage
if usage >= self._cpu_load_warn:
cores_loaded += 1
core_level = DiagnosticStatus.WARN
elif usage >= self._cpu_load_error:
core_level = DiagnosticStatus.ERROR
vals.append(KeyValue(key = 'Core %s Status' % cpu_name, value = load_dict[core_level]))
vals.append(KeyValue(key = 'Core %s User' % cpu_name, value = user+"%"))
vals.append(KeyValue(key = 'Core %s Nice' % cpu_name, value = nice+"%"))
vals.append(KeyValue(key = 'Core %s System' % cpu_name, value = system+"%"))
vals.append(KeyValue(key = 'Core %s Idle' % cpu_name, value = idle+"%"))
num_cores += 1
# Warn for high load only if we have <= 2 cores that aren't loaded
if num_cores - cores_loaded <= 2 and num_cores > 2:
mp_level = DiagnosticStatus.WARN
if not self._num_cores:
self._num_cores = num_cores
# Check the number of cores if self._num_cores > 0, #4850
if self._num_cores != num_cores:
mp_level = DiagnosticStatus.ERROR
if not self._has_error_core_count:
rospy.logerr('Error checking number of cores. Expected %d, got %d. Computer may have not booted properly.',
self._num_cores, num_cores)
self._has_error_core_count = True
return DiagnosticStatus.ERROR, 'Incorrect number of CPU cores', vals
except Exception as e:
mp_level = DiagnosticStatus.ERROR
vals.append(KeyValue(key = 'mpstat Exception', value = str(e)))
return mp_level, load_dict[mp_level], vals
## Returns names for core temperature files
## Returns list of names as each name can be read like file
def get_core_temp_names(self):
temp_vals = []
try:
p = subprocess.Popen('find /sys/devices -name temp1_input',
stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
rospy.logerr('Error find core temp locations: %s' % stderr)
return []
for ln in stdout.split(b'\n'):
temp_vals.append(ln.strip())
return temp_vals
except:
rospy.logerr('Exception finding temp vals: %s' % traceback.format_exc())
return []
    ## Periodic temperature check; re-arms itself on a 5 s threading.Timer.
    ## Call every 10sec at minimum
    def check_temps(self):
        # Tear everything down once ROS is shutting down.
        if rospy.is_shutdown():
            with self._mutex:
                self.cancel_timers()
            return
        diag_vals = [ KeyValue(key = 'Update Status', value = 'OK' ),
                      KeyValue(key = 'Time Since Last Update', value = str(0) ) ]
        diag_msgs = []
        diag_level = 0
        if self._check_core_temps:
            # Read every temp1_input file discovered at start-up.
            core_vals, core_msgs, core_level = self.check_core_temps(self._temp_vals)
            diag_vals.extend(core_vals)
            diag_msgs.extend(core_msgs)
            diag_level = max(diag_level, core_level)
        # Deduplicate messages ('Warm'/'Hot' can appear once per core).
        diag_log = set(diag_msgs)
        if len(diag_log) > 0:
            message = ', '.join(diag_log)
        else:
            message = stat_dict[diag_level]
        # The *_stat fields are shared with publish_stats: mutate under the lock.
        with self._mutex:
            self._last_temp_time = rospy.get_time()
            self._temp_stat.level = diag_level
            self._temp_stat.message = message
            self._temp_stat.values = diag_vals
            # Re-arm while running; otherwise cancel the timers.
            if not rospy.is_shutdown():
                self._temps_timer = threading.Timer(5.0, self.check_temps)
                self._temps_timer.start()
            else:
                self.cancel_timers()
def check_usage(self):
if rospy.is_shutdown():
with self._mutex:
self.cancel_timers()
return
diag_level = 0
diag_vals = [ KeyValue(key = 'Update Status', value = 'OK' ),
KeyValue(key = 'Time Since Last Update', value = 0 )]
diag_msgs = []
# Check clock speed
clock_vals, clock_msgs, clock_level = self.check_clock_speed()
diag_vals.extend(clock_vals)
diag_msgs.extend(clock_msgs)
diag_level = max(diag_level, clock_level)
# Check mpstat
mp_level, mp_msg, mp_vals = self.check_mpstat()
diag_vals.extend(mp_vals)
if mp_level > 0:
diag_msgs.append(mp_msg)
diag_level = max(diag_level, mp_level)
# Check uptime
uptime_level, up_msg, up_vals = self.check_uptime()
diag_vals.extend(up_vals)
if uptime_level > 0:
diag_msgs.append(up_msg)
diag_level = max(diag_level, uptime_level)
if diag_msgs and diag_level > 0:
usage_msg = ', '.join(set(diag_msgs))
else:
usage_msg = stat_dict[diag_level]
# Update status
with self._mutex:
self._last_usage_time = rospy.get_time()
self._usage_stat.level = diag_level
self._usage_stat.values = diag_vals
self._usage_stat.message = usage_msg
if not rospy.is_shutdown():
self._usage_timer = threading.Timer(5.0, self.check_usage)
self._usage_timer.start()
else:
self.cancel_timers()
    def publish_stats(self):
        """Publish both DiagnosticStatus entries; called from the main loop."""
        with self._mutex:
            # Update everything with last update times
            update_status_stale(self._temp_stat, self._last_temp_time)
            update_status_stale(self._usage_stat, self._last_usage_time)
            msg = DiagnosticArray()
            msg.header.stamp = rospy.get_rostime()
            msg.status.append(self._temp_stat)
            msg.status.append(self._usage_stat)
            # Throttle publication to at most once per 0.5 s.
            if rospy.get_time() - self._last_publish_time > 0.5:
                self._diag_pub.publish(msg)
                self._last_publish_time = rospy.get_time()
        # Restart temperature checking if it goes stale, #4171
        # Need to run this without mutex
        # (_restart_temp_check calls check_temps, which takes the lock itself).
        if rospy.get_time() - self._last_temp_time > 90:
            self._restart_temp_check()
if __name__ == '__main__':
    # ROS node names cannot contain '-', so normalize the hostname.
    hostname = socket.gethostname()
    hostname = hostname.replace('-', '_')
    import optparse
    parser = optparse.OptionParser(usage="usage: cpu_monitor.py [--diag-hostname=cX]")
    parser.add_option("--diag-hostname", dest="diag_hostname",
                      help="Computer name in diagnostics output (ex: 'c1')",
                      metavar="DIAG_HOSTNAME",
                      action="store", default = hostname)
    options, args = parser.parse_args(rospy.myargv())
    try:
        rospy.init_node('cpu_monitor_%s' % hostname)
    except rospy.exceptions.ROSInitException:
        # Bug fix: "print >> sys.stderr, ..." is Python 2 syntax and is a
        # SyntaxError under the python3 shebang.
        print('CPU monitor is unable to initialize node. Master may not be running.', file=sys.stderr)
        sys.exit(0)
    cpu_node = CPUMonitor(hostname, options.diag_hostname)
    # Publish at ~4 s intervals; the check_* methods run on their own timers.
    rate = rospy.Rate(0.25)
    try:
        while not rospy.is_shutdown():
            rate.sleep()
            cpu_node.publish_stats()
    except KeyboardInterrupt:
        pass
    except Exception as e:
        traceback.print_exc()
        rospy.logerr(traceback.format_exc())
    cpu_node.cancel_timers()
    sys.exit(0)
| en | 0.672495 | #!/usr/bin/env python3 ############################################################################ # Copyright (C) 2009, <NAME>, Inc. # # Copyright (C) 2013 by <NAME> # # <EMAIL> # # Copyright (C) 2013 by <NAME> # # <EMAIL> # # # # All rights reserved. # # # # Redistribution and use in source and binary forms, with or without # # modification, are permitted provided that the following conditions # # are met: # # # # 1. Redistributions of source code must retain the above copyright # # notice, this list of conditions and the following disclaimer. # # # # 2. Redistributions in binary form must reproduce the above copyright # # notice, this list of conditions and the following disclaimer in # # the documentation and/or other materials provided with the # # distribution. # # # # 3. The name of the copyright holders may be used to endorse or # # promote products derived from this software without specific # # prior written permission. # # # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # # INCIDENTAL, SPECIAL as eXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # # ANY WAY OUT OF THE USE OF THIS SOFTWARE as eVEN IF ADVISED OF THE # # POSSIBILITY OF SUCH DAMAGE. 
# ############################################################################ # Get temp_input files # CPU stats # Start checking everything # Restart temperature checking ## Must have the lock to cancel everything ##\brief Check CPU core temps ## ## Use 'find /sys -name temp1_input' to find cores ## Read from every core, divide by 1000 # Error if not numeric value ## Checks clock speed from reading from CPU info # Conversion to float doesn't work with decimal # Add msgs output, too ##\brief Uses 'uptime' to see load average # Give warning if we go over load limit ##\brief Use mpstat to find CPU usage ## # Check which column '%idle' is, #4539 # mpstat output changed between 8.06 and 8.1 # Skip row containing 'all' data ## Ignore 'Average: ...' data # wrong reading, use old reading instead # Warn for high load only if we have <= 2 cores that aren't loaded # Check the number of cores if self._num_cores > 0, #4850 ## Returns names for core temperature files ## Returns list of names as each name can be read like file ## Call every 10sec at minimum # Check clock speed # Check mpstat # Check uptime # Update status # Update everything with last update times # Restart temperature checking if it goes stale, #4171 # Need to run this without mutex | 1.11579 | 1 |
notebooks/_solutions/pandas_03_selecting_data49.py | rprops/Python_DS-WS | 65 | 6625231 | len(titles[(titles['year'] >= 1950) & (titles['year'] <= 1959)]) | len(titles[(titles['year'] >= 1950) & (titles['year'] <= 1959)]) | none | 1 | 2.118747 | 2 | |
setup.py | helmholtz-analytics/heat | 105 | 6625232 | from setuptools import setup, find_packages
import codecs
# Long description for PyPI is taken verbatim from the README.
with codecs.open("README.md", "r", "utf-8") as handle:
    long_description = handle.read()
__version__ = None  # appeases flake, assignment in exec() below
# Single-source the version: heat/core/version.py defines __version__.
with open("./heat/core/version.py") as handle:
    exec(handle.read())
setup(
    name="heat",
    # Ship the library only; tests and benchmarks stay out of the wheel.
    packages=find_packages(exclude=("*tests*", "*benchmarks*")),
    data_files=["README.md", "LICENSE"],
    version=__version__,
    description="A framework for high-performance data analytics and machine learning.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Helmholtz Association",
    author_email="<EMAIL>",
    url="https://github.com/helmholtz-analytics/heat",
    keywords=["data", "analytics", "tensors", "distributed", "gpu"],
    python_requires=">=3.7",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: MIT License",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering",
    ],
    # Core runtime dependencies.
    install_requires=[
        "mpi4py>=3.0.0",
        "numpy>=1.13.0",
        "torch>=1.7.0",
        "scipy>=0.14.0",
        "pillow>=6.0.0",
        "torchvision>=0.8.0",
    ],
    # Optional feature sets, installable as heat[name].
    extras_require={
        "docutils": ["docutils>=0.16"],
        "hdf5": ["h5py>=2.8.0"],
        "netcdf": ["netCDF4>=1.5.6"],
        "dev": ["pre-commit>=1.18.3"],
        "examples": ["scikit-learn>=0.24.0", "matplotlib>=3.1.0"],
    },
)
| from setuptools import setup, find_packages
import codecs
# Long description for PyPI is taken verbatim from the README.
with codecs.open("README.md", "r", "utf-8") as handle:
    long_description = handle.read()
__version__ = None  # appeases flake, assignment in exec() below
# Single-source the version: heat/core/version.py defines __version__.
with open("./heat/core/version.py") as handle:
    exec(handle.read())
setup(
    name="heat",
    # Ship the library only; tests and benchmarks stay out of the wheel.
    packages=find_packages(exclude=("*tests*", "*benchmarks*")),
    data_files=["README.md", "LICENSE"],
    version=__version__,
    description="A framework for high-performance data analytics and machine learning.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Helmholtz Association",
    author_email="<EMAIL>",
    url="https://github.com/helmholtz-analytics/heat",
    keywords=["data", "analytics", "tensors", "distributed", "gpu"],
    python_requires=">=3.7",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: MIT License",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering",
    ],
    # Core runtime dependencies.
    install_requires=[
        "mpi4py>=3.0.0",
        "numpy>=1.13.0",
        "torch>=1.7.0",
        "scipy>=0.14.0",
        "pillow>=6.0.0",
        "torchvision>=0.8.0",
    ],
    # Optional feature sets, installable as heat[name].
    extras_require={
        "docutils": ["docutils>=0.16"],
        "hdf5": ["h5py>=2.8.0"],
        "netcdf": ["netCDF4>=1.5.6"],
        "dev": ["pre-commit>=1.18.3"],
        "examples": ["scikit-learn>=0.24.0", "matplotlib>=3.1.0"],
    },
)
python_raster_functions/PyTorch/FeatureClassifier.py | ArcGIS/raster-deep-learning | 154 | 6625233 | <filename>python_raster_functions/PyTorch/FeatureClassifier.py
from __future__ import division
import os
import sys
import json
import warnings
from fastai.vision import *
from torchvision import models as torchvision_models
import arcgis
from arcgis.learn import FeatureClassifier
import arcpy
import torch
from fastai.metrics import accuracy
import tempfile
from pathlib import Path
prf_root_dir = os.path.join(os.path.dirname(__file__), os.pardir)
sys.path.append(prf_root_dir)
import numpy as np
# ImageNet channel statistics (mean, std) in 0-1 range, scaled to 0-255 pixels.
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
imagenet_mean = 255 * np.array(imagenet_stats[0], dtype=np.float32)
imagenet_std = 255 * np.array(imagenet_stats[1], dtype=np.float32)


def norm(x, mean=imagenet_mean, std=imagenet_std):
    """Standardize 0-255 pixel data with the ImageNet statistics."""
    shifted = x - mean
    return shifted / std


def denorm(x, mean=imagenet_mean, std=imagenet_std):
    """Invert norm(): map standardized values back to 0-255 pixels."""
    scaled = x * std
    return scaled + mean
class ChildObjectDetector:
def initialize(self, model, model_as_file):
if model_as_file:
with open(model, 'r') as f:
self.emd = json.load(f)
else:
self.emd = json.loads(model)
if arcpy.env.processorType == "GPU" and torch.cuda.is_available():
self.device = torch.device('cuda')
arcgis.env._processorType = "GPU"
else:
self.device = torch.device('cpu')
arcgis.env._processorType = "CPU"
# Using arcgis.learn FeatureClassifer from_model function.
self.cf = FeatureClassifier.from_model(emd_path=model)
self.model = self.cf.learn.model
self.model.eval()
def getParameterInfo(self, required_parameters):
return required_parameters
def getConfiguration(self, **scalars):
if 'BatchSize' not in self.emd and 'batch_size' not in scalars:
self.batch_size = 1
elif 'BatchSize' not in self.emd and 'batch_size' in scalars:
self.batch_size = int(scalars['batch_size'])
else:
self.batch_size = int(self.emd['BatchSize'])
return {
# CropSizeFixed is a boolean value parameter (1 or 0) in the emd file, representing whether the size of
# tile cropped around the feature is fixed or not.
# 1 -- fixed tile size, crop fixed size tiles centered on the feature. The tile can be bigger or smaller
# than the feature;
# 0 -- Variable tile size, crop out the feature using the smallest fitting rectangle. This results in tiles
# of varying size, both in x and y. the ImageWidth and ImageHeight in the emd file are still passed and used
# as a maximum size. If the feature is bigger than the defined ImageWidth/ImageHeight, the tiles are cropped
# the same way as in the fixed tile size option using the maximum size.
'CropSizeFixed': int(self.emd['CropSizeFixed']),
# BlackenAroundFeature is a boolean value paramater (1 or 0) in the emd file, representing whether blacken
# the pixels outside the feature in each image tile.
# 1 -- Blacken
# 0 -- Not blacken
'BlackenAroundFeature': int(self.emd['BlackenAroundFeature']),
'extractBands': tuple(self.emd['ExtractBands']),
'tx': self.emd['ImageWidth'],
'ty': self.emd['ImageHeight'],
'batch_size': self.batch_size
}
def vectorize(self, **pixelBlocks):
    """Classify each cropped feature tile; return (rings, confidences, labels).

    Each ring is the tile's bounding rectangle in pixel coordinates; the
    confidence is the softmax maximum and the label its class name per the EMD.
    """
    # Get pixel blocks - tuple of 3-d rasters: ([bands,height,width],[bands,height.width],...)
    # Convert tuple to 4-d numpy array
    batch_images = np.asarray(pixelBlocks['rasters_pixels'])
    # Get the shape of the 4-d numpy array
    batch, bands, height, width = batch_images.shape
    # Transpose the image dimensions to [batch, height, width, bands]
    batch_images = np.transpose(batch_images, [0, 2, 3, 1])
    rings = []
    labels, confidences = [], []
    # Convert to torch tensor and transpose the dimensions to [batch, bands, height, width]
    batch_images = torch.tensor(norm(batch_images).transpose(0, 3, 1, 2)).to(self.device)
    # the second element in the passed tuple is hardcoded to make fastai's pred_batch work
    predictions = self.cf.learn.pred_batch(batch=(batch_images, torch.tensor([40]).to(self.device)))
    # torch.max returns the max value and the index of the max as a tuple
    confidences, class_idxs = torch.max(predictions, dim=1)
    # Using emd to map the class
    class_map = [c['Name'] for c in self.emd["Classes"]]
    labels = [class_map[c] for c in class_idxs]
    # Appending this ring for all the features in the batch
    # NOTE(review): the ring list is sized with self.batch_size rather than the
    # actual `batch` from the input shape — confirm the two always match (e.g.
    # for a final, smaller batch).
    rings = [[[[0, 0], [0, width - 1], [height - 1, width - 1], [height - 1, 0]]] for i in range(self.batch_size)]
    return rings, confidences.tolist(), labels
| <filename>python_raster_functions/PyTorch/FeatureClassifier.py
from __future__ import division
import os
import sys
import json
import warnings
from fastai.vision import *
from torchvision import models as torchvision_models
import arcgis
from arcgis.learn import FeatureClassifier
import arcpy
import torch
from fastai.metrics import accuracy
import tempfile
from pathlib import Path
prf_root_dir = os.path.join(os.path.dirname(__file__), os.pardir)
sys.path.append(prf_root_dir)
import numpy as np
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
imagenet_mean = 255 * np.array(imagenet_stats[0], dtype=np.float32)
imagenet_std = 255 * np.array(imagenet_stats[1], dtype=np.float32)
def norm(x, mean=imagenet_mean, std=imagenet_std):
    """Normalize pixel values: subtract the per-channel mean, divide by the std."""
    centered = x - mean
    return centered / std
def denorm(x, mean=imagenet_mean, std=imagenet_std):
    """Invert `norm`: scale by the per-channel std, then add back the mean."""
    scaled = x * std
    return scaled + mean
class ChildObjectDetector:
def initialize(self, model, model_as_file):
if model_as_file:
with open(model, 'r') as f:
self.emd = json.load(f)
else:
self.emd = json.loads(model)
if arcpy.env.processorType == "GPU" and torch.cuda.is_available():
self.device = torch.device('cuda')
arcgis.env._processorType = "GPU"
else:
self.device = torch.device('cpu')
arcgis.env._processorType = "CPU"
# Using arcgis.learn FeatureClassifer from_model function.
self.cf = FeatureClassifier.from_model(emd_path=model)
self.model = self.cf.learn.model
self.model.eval()
def getParameterInfo(self, required_parameters):
return required_parameters
def getConfiguration(self, **scalars):
if 'BatchSize' not in self.emd and 'batch_size' not in scalars:
self.batch_size = 1
elif 'BatchSize' not in self.emd and 'batch_size' in scalars:
self.batch_size = int(scalars['batch_size'])
else:
self.batch_size = int(self.emd['BatchSize'])
return {
# CropSizeFixed is a boolean value parameter (1 or 0) in the emd file, representing whether the size of
# tile cropped around the feature is fixed or not.
# 1 -- fixed tile size, crop fixed size tiles centered on the feature. The tile can be bigger or smaller
# than the feature;
# 0 -- Variable tile size, crop out the feature using the smallest fitting rectangle. This results in tiles
# of varying size, both in x and y. the ImageWidth and ImageHeight in the emd file are still passed and used
# as a maximum size. If the feature is bigger than the defined ImageWidth/ImageHeight, the tiles are cropped
# the same way as in the fixed tile size option using the maximum size.
'CropSizeFixed': int(self.emd['CropSizeFixed']),
# BlackenAroundFeature is a boolean value paramater (1 or 0) in the emd file, representing whether blacken
# the pixels outside the feature in each image tile.
# 1 -- Blacken
# 0 -- Not blacken
'BlackenAroundFeature': int(self.emd['BlackenAroundFeature']),
'extractBands': tuple(self.emd['ExtractBands']),
'tx': self.emd['ImageWidth'],
'ty': self.emd['ImageHeight'],
'batch_size': self.batch_size
}
def vectorize(self, **pixelBlocks):
# Get pixel blocks - tuple of 3-d rasters: ([bands,height,width],[bands,height.width],...)
# Convert tuple to 4-d numpy array
batch_images = np.asarray(pixelBlocks['rasters_pixels'])
# Get the shape of the 4-d numpy array
batch, bands, height, width = batch_images.shape
# Transpose the image dimensions to [batch, height, width, bands]
batch_images = np.transpose(batch_images, [0, 2, 3, 1])
rings = []
labels, confidences = [], []
# Convert to torch tensor and transpose the dimensions to [batch, bands, height, width]
batch_images = torch.tensor(norm(batch_images).transpose(0, 3, 1, 2)).to(self.device)
# the second element in the passed tuple is hardcoded to make fastai's pred_batch work
predictions = self.cf.learn.pred_batch(batch=(batch_images, torch.tensor([40]).to(self.device)))
# torch.max returns the max value and the index of the max as a tuple
confidences, class_idxs = torch.max(predictions, dim=1)
# Using emd to map the class
class_map = [c['Name'] for c in self.emd["Classes"]]
labels = [class_map[c] for c in class_idxs]
# Appending this ring for all the features in the batch
rings = [[[[0, 0], [0, width - 1], [height - 1, width - 1], [height - 1, 0]]] for i in range(self.batch_size)]
return rings, confidences.tolist(), labels
| en | 0.77833 | # Using arcgis.learn FeatureClassifer from_model function. # CropSizeFixed is a boolean value parameter (1 or 0) in the emd file, representing whether the size of # tile cropped around the feature is fixed or not. # 1 -- fixed tile size, crop fixed size tiles centered on the feature. The tile can be bigger or smaller # than the feature; # 0 -- Variable tile size, crop out the feature using the smallest fitting rectangle. This results in tiles # of varying size, both in x and y. the ImageWidth and ImageHeight in the emd file are still passed and used # as a maximum size. If the feature is bigger than the defined ImageWidth/ImageHeight, the tiles are cropped # the same way as in the fixed tile size option using the maximum size. # BlackenAroundFeature is a boolean value paramater (1 or 0) in the emd file, representing whether blacken # the pixels outside the feature in each image tile. # 1 -- Blacken # 0 -- Not blacken # Get pixel blocks - tuple of 3-d rasters: ([bands,height,width],[bands,height.width],...) # Convert tuple to 4-d numpy array # Get the shape of the 4-d numpy array # Transpose the image dimensions to [batch, height, width, bands] # Convert to torch tensor and transpose the dimensions to [batch, bands, height, width] # the second element in the passed tuple is hardcoded to make fastai's pred_batch work # torch.max returns the max value and the index of the max as a tuple # Using emd to map the class # Appending this ring for all the features in the batch | 2.579615 | 3 |
vsts/vsts/work_item_tracking_process_definitions/v4_0/models/work_item_state_input_model.py | kenkuo/azure-devops-python-api | 0 | 6625234 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class WorkItemStateInputModel(Model):
    """WorkItemStateInputModel.

    :param color:
    :type color: str
    :param name:
    :type name: str
    :param order:
    :type order: int
    :param state_category:
    :type state_category: str
    """

    # Maps Python attribute names to their REST field names and msrest
    # serialization types (note the camelCase 'stateCategory' on the wire).
    _attribute_map = {
        'color': {'key': 'color', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'order': {'key': 'order', 'type': 'int'},
        'state_category': {'key': 'stateCategory', 'type': 'str'}
    }

    def __init__(self, color=None, name=None, order=None, state_category=None):
        super(WorkItemStateInputModel, self).__init__()
        self.color = color
        self.name = name
        self.order = order
        self.state_category = state_category
| # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class WorkItemStateInputModel(Model):
"""WorkItemStateInputModel.
:param color:
:type color: str
:param name:
:type name: str
:param order:
:type order: int
:param state_category:
:type state_category: str
"""
_attribute_map = {
'color': {'key': 'color', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'state_category': {'key': 'stateCategory', 'type': 'str'}
}
def __init__(self, color=None, name=None, order=None, state_category=None):
super(WorkItemStateInputModel, self).__init__()
self.color = color
self.name = name
self.order = order
self.state_category = state_category
| en | 0.439933 | # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------------------------- WorkItemStateInputModel. :param color: :type color: str :param name: :type name: str :param order: :type order: int :param state_category: :type state_category: str | 1.926133 | 2 |
PyTrinamic/referencedesigns/TMC4671_LEV/TMC4671_LEV_REF.py | bmoneke/PyTrinamic | 37 | 6625235 | <reponame>bmoneke/PyTrinamic
'''
Created on 08.01.2021
@author: ED
'''
import PyTrinamic
" interfaces "
from PyTrinamic.modules.tmcl_module_interface import tmcl_module_interface
from PyTrinamic.modules.tmcl_motor_interface import tmcl_motor_interface
" features "
from PyTrinamic.modules.features.open_loop_ap_feature import open_loop_ap_feature
from PyTrinamic.modules.features.digital_hall_weasel_ap_feature import digital_hall_weasel_ap_feature
from PyTrinamic.modules.features.linear_ramp_ap_feature import linear_ramp_ap_feature
from PyTrinamic.modules.features.pid_ap_feature import pid_ap_feature
from PyTrinamic.modules.features.commutation_selection_ap_feature import commutation_selection_ap_feature
class TMC4671_LEV_REF(tmcl_module_interface):
    """TMCL module wrapper for the TMC4671-LEV-REF reference design.

    Registers a single BLDC motor (axis 0) with this board's axis-parameter
    map and enum constants.
    """

    def __init__(self, connection, moduleID=1):
        tmcl_module_interface.__init__(self, connection, moduleID)
        self.GP = _GP
        " add the motor with available features "
        self._motors.append(TMC4671_LEV_REF_motor_interface(self, 0, PyTrinamic.MotorTypes.BLDC, _AP_MOTOR_0, _ENUM_MOTOR_0))

    def moduleName(self):
        return "TMC4671-LEV-REF"

    def moduleDescription(self):
        return "The TMC4671-LEV-REF is a highly compact controller/driver module for brushless DC (BLDC) motors with up to 30A coil current and hall sensor feedback. Supply voltage is 24-48V."
class _AP_MOTOR_0():
    """Axis-parameter numbers of motor 0 on the TMC4671-LEV-REF.

    Values are the TMCL axis-parameter indices used with GAP/SAP commands.
    """
    # ADC / current measurement
    AdcPhaseA = 3
    AdcPhaseB = 4
    AdcOffsetPhaseA = 5
    AdcOffsetPhaseB = 6
    CurrentPhaseA = 7
    CurrentPhaseB = 8
    CurrentPhaseC = 9
    DualShuntFactor = 10
    OpenLoopCurrent = 12
    " only for compatibility => "
    StartCurrent = 12
    " <= only for compatibility "
    MotorType = 14
    CommutationMode = 15
    ActualOpenLoopAngle = 16
    ActualHallAngle = 18
    # PI controller gains
    TorqueP = 20
    TorqueI = 21
    VelocityP = 22
    VelocityI = 23
    # Torque / velocity control
    TargetTorque = 30
    ActualTorque = 31
    TargetVelocity = 40
    RampVelocity = 41
    ActualVelocity = 42
    MaxVelocity = 43
    Acceleration = 44
    EnableRamp = 45
    # Pedal assist
    PedalPulsesPerRotation = 50
    PedalSenseDelay = 52
    TorqueSensorGain = 53
    TorqueSensorOffset = 54
    TorqueDeadband = 55
    AssistCutOutDistance = 56
    InitialRightTorque = 57
    InitialRightTorqueSpeed = 58
    LeftRightRatio = 60
    AverageSportMode = 61
    PedalDirection = 65
    PedalMotorEnable = 66
    AverageTorque = 67
    PositiveMotoringRampTime = 70
    NegativeMotoringRampTime = 71
    # Speed/torque assist map (9 support points)
    Speed_0 = 73
    Speed_1 = 74
    Speed_2 = 75
    Speed_3 = 76
    Speed_4 = 77
    Speed_5 = 78
    Speed_6 = 79
    Speed_7 = 80
    Speed_8 = 81
    Torque_0 = 82
    Torque_1 = 83
    Torque_2 = 84
    Torque_3 = 85
    Torque_4 = 86
    Torque_5 = 87
    Torque_6 = 88
    Torque_7 = 89
    Torque_8 = 90
    MaximumSpeed = 91
    ActualMapSpeedTorque = 92
    ActualGain = 93
    ActualTorqueLimit = 94
    # Motor / drivetrain configuration
    MaxTorque = 100
    MotorPolePairs = 101
    GearRatio = 102
    WheelDiameter = 103
    WheelPulsesPerRotation = 104
    HallSensorOffset = 105
    HallSensorPolarity = 106
    HallSensorInterpolation = 107
    HallSensorDirection = 108
    CurrentRegulatorBandwidth = 110
    MinimumMotorCurrent = 111
    SwapMotorAAndCPhase = 114
    MotorTestModes = 115
    ActualSpeedRPM = 116
    ActualSpeedMS = 117
    ActualSpeedKMH = 118
    # Battery management
    MinBatteryVoltage = 130
    MaxBatteryVoltage = 131
    CutOffVoltage = 132
    BatterySavingTimer = 133
    # Telemetry / diagnostics
    SupplyVoltage = 220
    DriverTemperature = 221
    StatusFlags = 222
    Supply12V = 223
    Supply6V = 224
    Supply5V = 225
    PedalTorqueActual = 226
    LeftPedalTorque = 227
    RightPedalTorque = 228
    TargetPedalTorque = 229
    MainLoopsPerSecond = 230
    TorqueLoopsPerSecond = 231
    VelocityLoopsPerSecond = 232
    PedalCounter = 233
    PedalPosition = 234
    PedalCountsPerSecond = 235
    PedalVelocity = 236
    FilteredPedalVelocity = 237
    FilteredPedalVelocityFast = 238
    DebugValue0 = 240
    DebugValue1 = 241
    DebugValue2 = 242
    DebugValue3 = 243
    DebugValue4 = 244
    DebugValue5 = 245
    DebugValue6 = 246
    DebugValue7 = 247
    DebugValue8 = 248
    DebugValue9 = 249
    DriverEnabled = 255
class _ENUM_MOTOR_0():
    """Values for the `CommutationMode` axis parameter of motor 0."""
    COMM_MODE_DISABLED = 0
    COMM_MODE_OPENLOOP = 1
    COMM_MODE_HALL = 2
    COMM_MODE_HALL_PEDAL_CONTROLLED = 3
class _GP():
    """Global parameter numbers (serial/CAN communication settings)."""
    SerialBaudRate = 65
    SerialAddress = 66
    CANBitRate = 69
    CANsendID = 70
    CANreceiveID = 71
    SerialHostAddress = 76
class TMC4671_LEV_REF_motor_interface(tmcl_motor_interface):
    """Single-axis motor interface for the TMC4671-LEV-REF.

    Wires up the feature objects (open loop, digital hall, linear ramp, PID,
    commutation selection) exposed through `self.feature`.
    """

    def __init__(self, parent, axisID, motorType, axisParameter, constants):
        tmcl_motor_interface.__init__(self, parent, axisID, motorType, axisParameter, constants)
        " add features "
        self.openLoop = open_loop_ap_feature(self)
        self.feature.update({"open_loop" : self.openLoop})
        self.digitalHall = digital_hall_weasel_ap_feature(self)
        self.feature.update({"digital_hall" : self.digitalHall})
        self.linearRamp = linear_ramp_ap_feature(self)
        # NOTE(review): the ramp's halted/target-reached sub-features are
        # disabled for this board — reason not documented here.
        self.linearRamp.disableMotorHaltedVelocity()
        self.linearRamp.disableTargetReachedVelocity()
        self.linearRamp.disableTargetReachedDistance()
        self.feature.update({"linear_ramp" : self.linearRamp})
        self.pid = pid_ap_feature(self)
        self.feature.update({"pid" : self.pid})
        self.commutationSelection = commutation_selection_ap_feature(self)
        self.feature.update({"commutation_selection" : self.commutationSelection})

    " motor type (BLDC only) "
    def setMotorType(self, motorType):
        """No-op: this module supports BLDC motors only."""
        pass

    def motorType(self):
        """Always reports BLDC."""
        return PyTrinamic.MotorTypes.BLDC

    " motor pole pairs "
    def setMotorPolePairs(self, polePairs):
        """Write the pole-pair count to the MotorPolePairs axis parameter."""
        self.setAxisParameter(self.AP.MotorPolePairs, polePairs)

    def motorPolePairs(self):
        """Read the pole-pair count from the MotorPolePairs axis parameter."""
        return self.axisParameter(self.AP.MotorPolePairs)
| '''
Created on 08.01.2021
@author: ED
'''
import PyTrinamic
" interfaces "
from PyTrinamic.modules.tmcl_module_interface import tmcl_module_interface
from PyTrinamic.modules.tmcl_motor_interface import tmcl_motor_interface
" features "
from PyTrinamic.modules.features.open_loop_ap_feature import open_loop_ap_feature
from PyTrinamic.modules.features.digital_hall_weasel_ap_feature import digital_hall_weasel_ap_feature
from PyTrinamic.modules.features.linear_ramp_ap_feature import linear_ramp_ap_feature
from PyTrinamic.modules.features.pid_ap_feature import pid_ap_feature
from PyTrinamic.modules.features.commutation_selection_ap_feature import commutation_selection_ap_feature
class TMC4671_LEV_REF(tmcl_module_interface):
def __init__(self, connection, moduleID=1):
tmcl_module_interface.__init__(self, connection, moduleID)
self.GP = _GP
" add the motor with available features "
self._motors.append(TMC4671_LEV_REF_motor_interface(self, 0, PyTrinamic.MotorTypes.BLDC, _AP_MOTOR_0, _ENUM_MOTOR_0))
def moduleName(self):
return "TMC4671-LEV-REF"
def moduleDescription(self):
return "The TMC4671-LEV-REF is a highly compact controller/driver module for brushless DC (BLDC) motors with up to 30A coil current and hall sensor feedback. Supply voltage is 24-48V."
class _AP_MOTOR_0():
AdcPhaseA = 3
AdcPhaseB = 4
AdcOffsetPhaseA = 5
AdcOffsetPhaseB = 6
CurrentPhaseA = 7
CurrentPhaseB = 8
CurrentPhaseC = 9
DualShuntFactor = 10
OpenLoopCurrent = 12
" only for compatibility => "
StartCurrent = 12
" <= only for compatibility "
MotorType = 14
CommutationMode = 15
ActualOpenLoopAngle = 16
ActualHallAngle = 18
TorqueP = 20
TorqueI = 21
VelocityP = 22
VelocityI = 23
TargetTorque = 30
ActualTorque = 31
TargetVelocity = 40
RampVelocity = 41
ActualVelocity = 42
MaxVelocity = 43
Acceleration = 44
EnableRamp = 45
PedalPulsesPerRotation = 50
PedalSenseDelay = 52
TorqueSensorGain = 53
TorqueSensorOffset = 54
TorqueDeadband = 55
AssistCutOutDistance = 56
InitialRightTorque = 57
InitialRightTorqueSpeed = 58
LeftRightRatio = 60
AverageSportMode = 61
PedalDirection = 65
PedalMotorEnable = 66
AverageTorque = 67
PositiveMotoringRampTime = 70
NegativeMotoringRampTime = 71
Speed_0 = 73
Speed_1 = 74
Speed_2 = 75
Speed_3 = 76
Speed_4 = 77
Speed_5 = 78
Speed_6 = 79
Speed_7 = 80
Speed_8 = 81
Torque_0 = 82
Torque_1 = 83
Torque_2 = 84
Torque_3 = 85
Torque_4 = 86
Torque_5 = 87
Torque_6 = 88
Torque_7 = 89
Torque_8 = 90
MaximumSpeed = 91
ActualMapSpeedTorque = 92
ActualGain = 93
ActualTorqueLimit = 94
MaxTorque = 100
MotorPolePairs = 101
GearRatio = 102
WheelDiameter = 103
WheelPulsesPerRotation = 104
HallSensorOffset = 105
HallSensorPolarity = 106
HallSensorInterpolation = 107
HallSensorDirection = 108
CurrentRegulatorBandwidth = 110
MinimumMotorCurrent = 111
SwapMotorAAndCPhase = 114
MotorTestModes = 115
ActualSpeedRPM = 116
ActualSpeedMS = 117
ActualSpeedKMH = 118
MinBatteryVoltage = 130
MaxBatteryVoltage = 131
CutOffVoltage = 132
BatterySavingTimer = 133
SupplyVoltage = 220
DriverTemperature = 221
StatusFlags = 222
Supply12V = 223
Supply6V = 224
Supply5V = 225
PedalTorqueActual = 226
LeftPedalTorque = 227
RightPedalTorque = 228
TargetPedalTorque = 229
MainLoopsPerSecond = 230
TorqueLoopsPerSecond = 231
VelocityLoopsPerSecond = 232
PedalCounter = 233
PedalPosition = 234
PedalCountsPerSecond = 235
PedalVelocity = 236
FilteredPedalVelocity = 237
FilteredPedalVelocityFast = 238
DebugValue0 = 240
DebugValue1 = 241
DebugValue2 = 242
DebugValue3 = 243
DebugValue4 = 244
DebugValue5 = 245
DebugValue6 = 246
DebugValue7 = 247
DebugValue8 = 248
DebugValue9 = 249
DriverEnabled = 255
class _ENUM_MOTOR_0():
COMM_MODE_DISABLED = 0
COMM_MODE_OPENLOOP = 1
COMM_MODE_HALL = 2
COMM_MODE_HALL_PEDAL_CONTROLLED = 3
class _GP():
SerialBaudRate = 65
SerialAddress = 66
CANBitRate = 69
CANsendID = 70
CANreceiveID = 71
SerialHostAddress = 76
class TMC4671_LEV_REF_motor_interface(tmcl_motor_interface):
def __init__(self, parent, axisID, motorType, axisParameter, constants):
tmcl_motor_interface.__init__(self, parent, axisID, motorType, axisParameter, constants)
" add features "
self.openLoop = open_loop_ap_feature(self)
self.feature.update({"open_loop" : self.openLoop})
self.digitalHall = digital_hall_weasel_ap_feature(self)
self.feature.update({"digital_hall" : self.digitalHall})
self.linearRamp = linear_ramp_ap_feature(self)
self.linearRamp.disableMotorHaltedVelocity()
self.linearRamp.disableTargetReachedVelocity()
self.linearRamp.disableTargetReachedDistance()
self.feature.update({"linear_ramp" : self.linearRamp})
self.pid = pid_ap_feature(self)
self.feature.update({"pid" : self.pid})
self.commutationSelection = commutation_selection_ap_feature(self)
self.feature.update({"commutation_selection" : self.commutationSelection})
" motor type (BLDC only) "
def setMotorType(self, motorType):
pass
def motorType(self):
return PyTrinamic.MotorTypes.BLDC
" motor pole pairs "
def setMotorPolePairs(self, polePairs):
self.setAxisParameter(self.AP.MotorPolePairs, polePairs)
def motorPolePairs(self):
return self.axisParameter(self.AP.MotorPolePairs) | en | 0.394564 | Created on 08.01.2021 @author: ED | 2.440171 | 2 |
idb/common/types.py | isabella232/idb | 0 | 6625236 | <gh_stars>0
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import json
from abc import ABC, abstractmethod, abstractproperty
from dataclasses import asdict, dataclass, field
from datetime import timedelta
from enum import Enum
from io import StringIO
from typing import (
IO,
AsyncContextManager,
AsyncGenerator,
AsyncIterable,
AsyncIterator,
Dict,
List,
Mapping,
Optional,
Set,
Tuple,
Union,
)
LoggingMetadata = Dict[str, Optional[Union[str, List[str], int, float]]]
class IdbException(Exception):
    """Generic failure raised by idb operations."""
    pass
class IdbConnectionException(Exception):
    """Failure establishing or using a connection to a companion/daemon."""
    pass
@dataclass(frozen=True)
class ExitWithCodeException(Exception):
    """Signals that the process should terminate with `exit_code`."""
    # Exit status to propagate to the caller/shell.
    exit_code: int
class Permission(Enum):
PHOTOS = 0
CAMERA = 1
CONTACTS = 2
URL = 3
LOCATION = 4
NOTIFICATION = 5
class TargetType(Enum):
DEVICE = 1
SIMULATOR = 2
@dataclass(frozen=True)
class ECIDFilter:
ecid: int
OnlyFilter = Union[TargetType, ECIDFilter]
class VideoFormat(Enum):
H264 = "h264"
RBGA = "rbga"
MJPEG = "mjpeg"
MINICAP = "minicap"
@dataclass(frozen=True)
class TCPAddress:
    """A host/port pair for TCP connections (see the `Address` union)."""
    host: str
    port: int
@dataclass(frozen=True)
class DomainSocketAddress:
    """A Unix domain socket path (see the `Address` union)."""
    path: str
Address = Union[TCPAddress, DomainSocketAddress]
class AppProcessState(Enum):
    """Running state of an installed app's process."""
    UNKNOWN = 0
    NOT_RUNNING = 1
    RUNNING = 2
@dataclass(frozen=True)
class InstalledAppInfo:
bundle_id: str
name: str
architectures: Set[str]
install_type: str
process_state: AppProcessState
debuggable: bool
@dataclass(frozen=True)
class InstrumentsTimings:
launch_error_timeout: Optional[float] = None
launch_retry_timeout: Optional[float] = None
terminate_timeout: Optional[float] = None
operation_duration: Optional[float] = None
class HIDButtonType(Enum):
APPLE_PAY = 1
HOME = 2
LOCK = 3
SIDE_BUTTON = 4
SIRI = 5
ConnectionDestination = Union[str, Address]
@dataclass(frozen=True)
class CompanionInfo:
    """Connection details for a companion serving one target."""
    # Unique device identifier of the target the companion serves.
    udid: str
    # True when the companion runs on the same host as the client.
    is_local: bool
    # TCP or Unix-domain address the companion is reachable at.
    address: Address
    # Extra key/value context — presumably attached to logging; confirm usage.
    metadata: LoggingMetadata = field(default_factory=dict)
@dataclass(frozen=True)
class ScreenDimensions:
width: int
height: int
density: Optional[float]
width_points: Optional[int]
height_points: Optional[int]
DeviceDetails = Mapping[str, Union[int, str]]
@dataclass(frozen=True)
class TargetDescription:
    """Description of a single target (device or simulator)."""
    udid: str
    name: str
    state: Optional[str]
    target_type: Optional[str]
    os_version: Optional[str]
    architecture: Optional[str]
    # Present only when a companion is connected for this target.
    companion_info: Optional[CompanionInfo]
    screen_dimensions: Optional[ScreenDimensions]
    model: Optional[str] = None
    device: Optional[DeviceDetails] = None
    extended: Optional[DeviceDetails] = None
    diagnostics: Optional[DeviceDetails] = None
    metadata: LoggingMetadata = field(default_factory=dict)

    @property
    def as_json(self) -> str:
        """JSON serialization; asdict() recurses into nested dataclasses."""
        return json.dumps(asdict(self))
@dataclass(frozen=True)
class DaemonInfo:
host: str
port: int
targets: List[TargetDescription]
ConnectResponse = Union[CompanionInfo, DaemonInfo]
@dataclass(frozen=True)
class FileEntryInfo:
path: str
@dataclass(frozen=True)
class FileListing:
parent: str
entries: List[FileEntryInfo]
@dataclass(frozen=True)
class AccessibilityInfo:
json: Optional[str]
@dataclass(frozen=True)
class CrashLogInfo:
name: Optional[str]
bundle_id: Optional[str]
process_name: Optional[str]
parent_process_name: Optional[str]
process_identifier: Optional[int]
parent_process_identifier: Optional[int]
timestamp: Optional[int]
@dataclass(frozen=True)
class CrashLog:
info: Optional[CrashLogInfo]
contents: Optional[str]
@dataclass(frozen=True)
class CrashLogQuery:
since: Optional[int] = None
before: Optional[int] = None
bundle_id: Optional[str] = None
name: Optional[str] = None
@dataclass(frozen=True)
class TestRunFailureInfo:
message: str
file: str
line: int
@dataclass(frozen=True)
class TestAttachment:
payload: bytes
timestamp: float
name: str
uniform_type_identifier: str
@dataclass(frozen=True)
class TestActivity:
title: str
duration: float
uuid: str
activity_type: str
start: float
finish: float
name: str
attachments: List[TestAttachment]
sub_activities: List["TestActivity"]
@dataclass(frozen=True)
class TestRunInfo:
    """Result of running a single xctest method."""
    bundle_name: str
    class_name: str
    method_name: str
    logs: List[str]
    duration: float
    passed: bool
    # Populated only when the test failed.
    failure_info: Optional[TestRunFailureInfo]
    activityLogs: Optional[List[TestActivity]]
    crashed: bool

    @property
    def crashed_outside_test_case(self) -> bool:
        # A crash with no class/method attribution occurred outside any
        # individual test case.
        return self.crashed and self.class_name == "" and self.method_name == ""
@dataclass(frozen=True)
class InstalledTestInfo:
bundle_id: str
name: Optional[str]
architectures: Optional[Set[str]]
class HIDDirection(Enum):
DOWN = 0
UP = 1
@dataclass(frozen=True)
class Point:
x: float
y: float
@dataclass(frozen=True)
class HIDTouch:
point: Point
@dataclass(frozen=True)
class HIDButton:
button: HIDButtonType
@dataclass(frozen=True)
class HIDKey:
keycode: int
HIDPressAction = Union[HIDTouch, HIDButton, HIDKey]
@dataclass(frozen=True)
class HIDPress:
action: HIDPressAction
direction: HIDDirection
@dataclass(frozen=True)
class HIDSwipe:
start: Point
end: Point
delta: Optional[float]
@dataclass(frozen=True)
class HIDDelay:
duration: float
HIDEvent = Union[HIDPress, HIDSwipe, HIDDelay]
@dataclass(frozen=True)
class InstalledArtifact:
name: str
uuid: Optional[str]
progress: Optional[float]
class FileContainerType(Enum):
    """Named file containers addressable by file commands (see FileContainer)."""
    ROOT = "root"
    MEDIA = "media"
    CRASHES = "crashes"
    PROVISIONING_PROFILES = "provisioning_profiles"
    MDM_PROFILES = "mdm_profiles"
    SPRINGBOARD_ICONS = "springboard_icons"
    WALLPAPER = "wallpaper"
    DISK_IMAGES = "disk_images"
FileContainer = Optional[Union[str, FileContainerType]]
class Companion(ABC):
    """Abstract interface for target lifecycle management: creating, booting,
    erasing, cloning and enumerating targets."""

    @abstractmethod
    async def create(
        self, device_type: str, os_version: str, timeout: Optional[timedelta] = None
    ) -> TargetDescription:
        """Create a new target with the given device type and OS version."""
        pass

    @abstractmethod
    async def boot(
        self, udid: str, verify: bool = True, timeout: Optional[timedelta] = None
    ) -> None:
        pass

    @abstractmethod
    async def boot_headless(  # pyre-fixme
        self, udid: str, verify: bool = True, timeout: Optional[timedelta] = None
    ) -> AsyncContextManager[None]:
        # Async context manager: the boot lasts for the lifetime of the context.
        yield

    @abstractmethod
    async def shutdown(self, udid: str, timeout: Optional[timedelta] = None) -> None:
        pass

    @abstractmethod
    async def erase(self, udid: str, timeout: Optional[timedelta] = None) -> None:
        pass

    @abstractmethod
    async def clone(
        self,
        udid: str,
        destination_device_set: Optional[str] = None,
        timeout: Optional[timedelta] = None,
    ) -> TargetDescription:
        pass

    @abstractmethod
    async def delete(
        self, udid: Optional[str], timeout: Optional[timedelta] = None
    ) -> None:
        # NOTE(review): udid is Optional here, unlike sibling methods —
        # presumably None means "delete all"; confirm with implementations.
        pass

    @abstractmethod
    async def clean(self, udid: str, timeout: Optional[timedelta] = None) -> None:
        pass

    @abstractmethod
    async def list_targets(
        self, only: Optional[OnlyFilter] = None, timeout: Optional[timedelta] = None
    ) -> List[TargetDescription]:
        pass

    @abstractmethod
    async def tail_targets(
        self, only: Optional[OnlyFilter] = None
    ) -> AsyncGenerator[List[TargetDescription], None]:
        # Streams successive snapshots of the target list.
        yield

    @abstractmethod
    async def target_description(
        self,
        udid: Optional[str] = None,
        only: Optional[OnlyFilter] = None,
        timeout: Optional[timedelta] = None,
    ) -> TargetDescription:
        pass

    @abstractmethod
    async def unix_domain_server(  # pyre-fixme
        self, udid: str, path: str, only: Optional[OnlyFilter] = None
    ) -> AsyncContextManager[str]:
        yield
# Exposes the resource-specific commands that imply a connected companion
class Client(ABC):
@abstractmethod
async def list_apps(
self, fetch_process_state: bool = True
) -> List[InstalledAppInfo]:
pass
@abstractmethod
async def launch(
self,
bundle_id: str,
env: Optional[Dict[str, str]] = None,
args: Optional[List[str]] = None,
foreground_if_running: bool = False,
wait_for_debugger: bool = False,
stop: Optional[asyncio.Event] = None,
) -> None:
pass
@abstractmethod
async def run_xctest(
self,
test_bundle_id: str,
app_bundle_id: str,
test_host_app_bundle_id: Optional[str] = None,
is_ui_test: bool = False,
is_logic_test: bool = False,
tests_to_run: Optional[Set[str]] = None,
tests_to_skip: Optional[Set[str]] = None,
env: Optional[Dict[str, str]] = None,
args: Optional[List[str]] = None,
result_bundle_path: Optional[str] = None,
idb_log_buffer: Optional[StringIO] = None,
timeout: Optional[int] = None,
poll_interval_sec: float = 0.5,
report_activities: bool = False,
report_attachments: bool = False,
activities_output_path: Optional[str] = None,
coverage_output_path: Optional[str] = None,
log_directory_path: Optional[str] = None,
) -> AsyncIterator[TestRunInfo]:
yield
@abstractmethod
async def install(
self, bundle: Union[str, IO[bytes]]
) -> AsyncIterator[InstalledArtifact]:
yield
@abstractmethod
async def install_dylib(
self, dylib: Union[str, IO[bytes]]
) -> AsyncIterator[InstalledArtifact]:
yield
@abstractmethod
async def install_dsym(
self, dsym: Union[str, IO[bytes]]
) -> AsyncIterator[InstalledArtifact]:
yield
@abstractmethod
async def install_xctest(
self, xctest: Union[str, IO[bytes]]
) -> AsyncIterator[InstalledArtifact]:
yield
@abstractmethod
async def install_framework(
self, framework_path: Union[str, IO[bytes]]
) -> AsyncIterator[InstalledArtifact]:
yield
@abstractmethod
async def uninstall(self, bundle_id: str) -> None:
pass
@abstractmethod
async def list_xctests(self) -> List[InstalledTestInfo]:
pass
@abstractmethod
async def terminate(self, bundle_id: str) -> None:
pass
@abstractmethod
async def list_test_bundle(self, test_bundle_id: str, app_path: str) -> List[str]:
pass
@abstractmethod
async def tail_logs(
self, stop: asyncio.Event, arguments: Optional[List[str]] = None
) -> AsyncIterator[str]:
yield
@abstractmethod
async def tail_companion_logs(self, stop: asyncio.Event) -> AsyncIterator[str]:
yield
@abstractmethod
async def clear_keychain(self) -> None:
pass
@abstractmethod
async def set_hardware_keyboard(self, enabled: bool) -> None:
pass
@abstractmethod
async def set_locale(self, locale_identifier: str) -> None:
pass
@abstractmethod
async def get_locale(self) -> str:
pass
@abstractmethod
async def list_locale_identifiers(self) -> List[str]:
pass
@abstractmethod
async def open_url(self, url: str) -> None:
pass
@abstractmethod
async def set_location(self, latitude: float, longitude: float) -> None:
pass
@abstractmethod
async def approve(
self, bundle_id: str, permissions: Set[Permission], scheme: Optional[str] = None
) -> None:
pass
@abstractmethod
async def record_video(self, stop: asyncio.Event, output_file: str) -> None:
pass
@abstractmethod
async def stream_video(
self,
output_file: Optional[str],
fps: Optional[int],
format: VideoFormat,
compression_quality: float,
scale_factor: float = 1,
) -> AsyncGenerator[bytes, None]:
yield
@abstractmethod
async def screenshot(self) -> bytes:
pass
@abstractmethod
async def tap(self, x: float, y: float, duration: Optional[float] = None) -> None:
pass
@abstractmethod
async def button(
self, button_type: HIDButtonType, duration: Optional[float] = None
) -> None:
pass
@abstractmethod
async def key(self, keycode: int, duration: Optional[float] = None) -> None:
return
@abstractmethod
async def key_sequence(self, key_sequence: List[int]) -> None:
pass
@abstractmethod
async def swipe(
self,
p_start: Tuple[int, int],
p_end: Tuple[int, int],
duration: Optional[float] = None,
delta: Optional[int] = None,
) -> None:
pass
@abstractmethod
async def crash_show(self, name: str) -> CrashLog:
pass
@abstractmethod
async def contacts_update(self, contacts_path: str) -> None:
pass
@abstractmethod
async def describe(self, fetch_diagnostics: bool = False) -> TargetDescription:
pass
@abstractmethod
async def accessibility_info(
self, point: Optional[Tuple[int, int]], nested: bool
) -> AccessibilityInfo:
pass
@abstractmethod
async def run_instruments(
self,
stop: asyncio.Event,
trace_basename: str,
template_name: str,
app_bundle_id: str,
app_environment: Optional[Dict[str, str]] = None,
app_arguments: Optional[List[str]] = None,
tool_arguments: Optional[List[str]] = None,
started: Optional[asyncio.Event] = None,
timings: Optional[InstrumentsTimings] = None,
post_process_arguments: Optional[List[str]] = None,
) -> List[str]:
pass
@abstractmethod
async def xctrace_record(
self,
stop: asyncio.Event,
output: str,
template_name: str,
all_processes: bool = False,
time_limit: Optional[float] = None,
package: Optional[str] = None,
process_to_attach: Optional[str] = None,
process_to_launch: Optional[str] = None,
process_env: Optional[Dict[str, str]] = None,
launch_args: Optional[List[str]] = None,
target_stdin: Optional[str] = None,
target_stdout: Optional[str] = None,
post_args: Optional[List[str]] = None,
stop_timeout: Optional[float] = None,
started: Optional[asyncio.Event] = None,
) -> List[str]:
pass
@abstractmethod
async def crash_list(self, query: CrashLogQuery) -> List[CrashLogInfo]:
pass
@abstractmethod
async def crash_delete(self, query: CrashLogQuery) -> List[CrashLogInfo]:
pass
@abstractmethod
async def add_media(self, file_paths: List[str]) -> None:
pass
@abstractmethod
async def focus(self) -> None:
pass
@abstractmethod
async def debugserver_start(self, bundle_id: str) -> List[str]:
pass
@abstractmethod
async def debugserver_stop(self) -> None:
pass
@abstractmethod
async def debugserver_status(self) -> Optional[List[str]]:
pass
@abstractmethod
async def text(self, text: str) -> None:
return
@abstractmethod
async def hid(self, event_iterator: AsyncIterable[HIDEvent]) -> None:
pass
@abstractmethod
async def ls_single(
self, container: FileContainer, path: str
) -> List[FileEntryInfo]:
pass
@abstractmethod
async def ls(self, container: FileContainer, paths: List[str]) -> List[FileListing]:
pass
@abstractmethod
async def mv(
self, container: FileContainer, src_paths: List[str], dest_path: str
) -> None:
pass
@abstractmethod
async def rm(self, container: FileContainer, paths: List[str]) -> None:
pass
@abstractmethod
async def mkdir(self, container: FileContainer, path: str) -> None:
pass
@abstractmethod
async def pull(
self, container: FileContainer, src_path: str, dest_path: str
) -> None:
pass
@abstractmethod
async def push(
self, src_paths: List[str], container: FileContainer, dest_path: str
) -> None:
pass
class ClientManager:
@abstractmethod
async def connect(
self,
destination: ConnectionDestination,
metadata: Optional[Dict[str, str]] = None,
) -> CompanionInfo:
pass
@abstractmethod
async def disconnect(self, destination: Union[Address, str]) -> None:
pass
@abstractmethod
async def list_targets(self) -> List[TargetDescription]:
pass
@abstractmethod
async def kill(self) -> None:
pass
class Server(ABC):
@abstractmethod
def close(self) -> None:
pass
@abstractmethod
async def wait_closed(self) -> None:
pass
@abstractproperty
def ports(self) -> Dict[str, str]:
pass
| #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import json
from abc import ABC, abstractmethod, abstractproperty
from dataclasses import asdict, dataclass, field
from datetime import timedelta
from enum import Enum
from io import StringIO
from typing import (
IO,
AsyncContextManager,
AsyncGenerator,
AsyncIterable,
AsyncIterator,
Dict,
List,
Mapping,
Optional,
Set,
Tuple,
Union,
)
LoggingMetadata = Dict[str, Optional[Union[str, List[str], int, float]]]
class IdbException(Exception):
pass
class IdbConnectionException(Exception):
pass
@dataclass(frozen=True)
class ExitWithCodeException(Exception):
exit_code: int
class Permission(Enum):
PHOTOS = 0
CAMERA = 1
CONTACTS = 2
URL = 3
LOCATION = 4
NOTIFICATION = 5
class TargetType(Enum):
DEVICE = 1
SIMULATOR = 2
@dataclass(frozen=True)
class ECIDFilter:
ecid: int
OnlyFilter = Union[TargetType, ECIDFilter]
class VideoFormat(Enum):
H264 = "h264"
RBGA = "rbga"
MJPEG = "mjpeg"
MINICAP = "minicap"
@dataclass(frozen=True)
class TCPAddress:
host: str
port: int
@dataclass(frozen=True)
class DomainSocketAddress:
path: str
Address = Union[TCPAddress, DomainSocketAddress]
class AppProcessState(Enum):
UNKNOWN = 0
NOT_RUNNING = 1
RUNNING = 2
@dataclass(frozen=True)
class InstalledAppInfo:
bundle_id: str
name: str
architectures: Set[str]
install_type: str
process_state: AppProcessState
debuggable: bool
@dataclass(frozen=True)
class InstrumentsTimings:
launch_error_timeout: Optional[float] = None
launch_retry_timeout: Optional[float] = None
terminate_timeout: Optional[float] = None
operation_duration: Optional[float] = None
class HIDButtonType(Enum):
APPLE_PAY = 1
HOME = 2
LOCK = 3
SIDE_BUTTON = 4
SIRI = 5
ConnectionDestination = Union[str, Address]
@dataclass(frozen=True)
class CompanionInfo:
udid: str
is_local: bool
address: Address
metadata: LoggingMetadata = field(default_factory=dict)
@dataclass(frozen=True)
class ScreenDimensions:
width: int
height: int
density: Optional[float]
width_points: Optional[int]
height_points: Optional[int]
DeviceDetails = Mapping[str, Union[int, str]]
@dataclass(frozen=True)
class TargetDescription:
udid: str
name: str
state: Optional[str]
target_type: Optional[str]
os_version: Optional[str]
architecture: Optional[str]
companion_info: Optional[CompanionInfo]
screen_dimensions: Optional[ScreenDimensions]
model: Optional[str] = None
device: Optional[DeviceDetails] = None
extended: Optional[DeviceDetails] = None
diagnostics: Optional[DeviceDetails] = None
metadata: LoggingMetadata = field(default_factory=dict)
@property
def as_json(self) -> str:
return json.dumps(asdict(self))
@dataclass(frozen=True)
class DaemonInfo:
host: str
port: int
targets: List[TargetDescription]
ConnectResponse = Union[CompanionInfo, DaemonInfo]
@dataclass(frozen=True)
class FileEntryInfo:
path: str
@dataclass(frozen=True)
class FileListing:
parent: str
entries: List[FileEntryInfo]
@dataclass(frozen=True)
class AccessibilityInfo:
json: Optional[str]
@dataclass(frozen=True)
class CrashLogInfo:
name: Optional[str]
bundle_id: Optional[str]
process_name: Optional[str]
parent_process_name: Optional[str]
process_identifier: Optional[int]
parent_process_identifier: Optional[int]
timestamp: Optional[int]
@dataclass(frozen=True)
class CrashLog:
info: Optional[CrashLogInfo]
contents: Optional[str]
@dataclass(frozen=True)
class CrashLogQuery:
since: Optional[int] = None
before: Optional[int] = None
bundle_id: Optional[str] = None
name: Optional[str] = None
@dataclass(frozen=True)
class TestRunFailureInfo:
message: str
file: str
line: int
@dataclass(frozen=True)
class TestAttachment:
payload: bytes
timestamp: float
name: str
uniform_type_identifier: str
@dataclass(frozen=True)
class TestActivity:
title: str
duration: float
uuid: str
activity_type: str
start: float
finish: float
name: str
attachments: List[TestAttachment]
sub_activities: List["TestActivity"]
@dataclass(frozen=True)
class TestRunInfo:
bundle_name: str
class_name: str
method_name: str
logs: List[str]
duration: float
passed: bool
failure_info: Optional[TestRunFailureInfo]
activityLogs: Optional[List[TestActivity]]
crashed: bool
@property
def crashed_outside_test_case(self) -> bool:
return self.crashed and self.class_name == "" and self.method_name == ""
@dataclass(frozen=True)
class InstalledTestInfo:
bundle_id: str
name: Optional[str]
architectures: Optional[Set[str]]
class HIDDirection(Enum):
DOWN = 0
UP = 1
@dataclass(frozen=True)
class Point:
x: float
y: float
@dataclass(frozen=True)
class HIDTouch:
point: Point
@dataclass(frozen=True)
class HIDButton:
button: HIDButtonType
@dataclass(frozen=True)
class HIDKey:
keycode: int
HIDPressAction = Union[HIDTouch, HIDButton, HIDKey]
@dataclass(frozen=True)
class HIDPress:
action: HIDPressAction
direction: HIDDirection
@dataclass(frozen=True)
class HIDSwipe:
start: Point
end: Point
delta: Optional[float]
@dataclass(frozen=True)
class HIDDelay:
duration: float
HIDEvent = Union[HIDPress, HIDSwipe, HIDDelay]
@dataclass(frozen=True)
class InstalledArtifact:
name: str
uuid: Optional[str]
progress: Optional[float]
class FileContainerType(Enum):
ROOT = "root"
MEDIA = "media"
CRASHES = "crashes"
PROVISIONING_PROFILES = "provisioning_profiles"
MDM_PROFILES = "mdm_profiles"
SPRINGBOARD_ICONS = "springboard_icons"
WALLPAPER = "wallpaper"
DISK_IMAGES = "disk_images"
FileContainer = Optional[Union[str, FileContainerType]]
class Companion(ABC):
@abstractmethod
async def create(
self, device_type: str, os_version: str, timeout: Optional[timedelta] = None
) -> TargetDescription:
pass
@abstractmethod
async def boot(
self, udid: str, verify: bool = True, timeout: Optional[timedelta] = None
) -> None:
pass
@abstractmethod
async def boot_headless( # pyre-fixme
self, udid: str, verify: bool = True, timeout: Optional[timedelta] = None
) -> AsyncContextManager[None]:
yield
@abstractmethod
async def shutdown(self, udid: str, timeout: Optional[timedelta] = None) -> None:
pass
@abstractmethod
async def erase(self, udid: str, timeout: Optional[timedelta] = None) -> None:
pass
@abstractmethod
async def clone(
self,
udid: str,
destination_device_set: Optional[str] = None,
timeout: Optional[timedelta] = None,
) -> TargetDescription:
pass
@abstractmethod
async def delete(
self, udid: Optional[str], timeout: Optional[timedelta] = None
) -> None:
pass
@abstractmethod
async def clean(self, udid: str, timeout: Optional[timedelta] = None) -> None:
pass
@abstractmethod
async def list_targets(
self, only: Optional[OnlyFilter] = None, timeout: Optional[timedelta] = None
) -> List[TargetDescription]:
pass
@abstractmethod
async def tail_targets(
self, only: Optional[OnlyFilter] = None
) -> AsyncGenerator[List[TargetDescription], None]:
yield
@abstractmethod
async def target_description(
self,
udid: Optional[str] = None,
only: Optional[OnlyFilter] = None,
timeout: Optional[timedelta] = None,
) -> TargetDescription:
pass
@abstractmethod
async def unix_domain_server( # pyre-fixme
self, udid: str, path: str, only: Optional[OnlyFilter] = None
) -> AsyncContextManager[str]:
yield
# Exposes the resource-specific commands that imply a connected companion
class Client(ABC):
@abstractmethod
async def list_apps(
self, fetch_process_state: bool = True
) -> List[InstalledAppInfo]:
pass
@abstractmethod
async def launch(
self,
bundle_id: str,
env: Optional[Dict[str, str]] = None,
args: Optional[List[str]] = None,
foreground_if_running: bool = False,
wait_for_debugger: bool = False,
stop: Optional[asyncio.Event] = None,
) -> None:
pass
@abstractmethod
async def run_xctest(
self,
test_bundle_id: str,
app_bundle_id: str,
test_host_app_bundle_id: Optional[str] = None,
is_ui_test: bool = False,
is_logic_test: bool = False,
tests_to_run: Optional[Set[str]] = None,
tests_to_skip: Optional[Set[str]] = None,
env: Optional[Dict[str, str]] = None,
args: Optional[List[str]] = None,
result_bundle_path: Optional[str] = None,
idb_log_buffer: Optional[StringIO] = None,
timeout: Optional[int] = None,
poll_interval_sec: float = 0.5,
report_activities: bool = False,
report_attachments: bool = False,
activities_output_path: Optional[str] = None,
coverage_output_path: Optional[str] = None,
log_directory_path: Optional[str] = None,
) -> AsyncIterator[TestRunInfo]:
yield
@abstractmethod
async def install(
self, bundle: Union[str, IO[bytes]]
) -> AsyncIterator[InstalledArtifact]:
yield
@abstractmethod
async def install_dylib(
self, dylib: Union[str, IO[bytes]]
) -> AsyncIterator[InstalledArtifact]:
yield
@abstractmethod
async def install_dsym(
self, dsym: Union[str, IO[bytes]]
) -> AsyncIterator[InstalledArtifact]:
yield
@abstractmethod
async def install_xctest(
self, xctest: Union[str, IO[bytes]]
) -> AsyncIterator[InstalledArtifact]:
yield
@abstractmethod
async def install_framework(
self, framework_path: Union[str, IO[bytes]]
) -> AsyncIterator[InstalledArtifact]:
yield
@abstractmethod
async def uninstall(self, bundle_id: str) -> None:
pass
@abstractmethod
async def list_xctests(self) -> List[InstalledTestInfo]:
pass
@abstractmethod
async def terminate(self, bundle_id: str) -> None:
pass
@abstractmethod
async def list_test_bundle(self, test_bundle_id: str, app_path: str) -> List[str]:
pass
@abstractmethod
async def tail_logs(
self, stop: asyncio.Event, arguments: Optional[List[str]] = None
) -> AsyncIterator[str]:
yield
@abstractmethod
async def tail_companion_logs(self, stop: asyncio.Event) -> AsyncIterator[str]:
yield
@abstractmethod
async def clear_keychain(self) -> None:
pass
@abstractmethod
async def set_hardware_keyboard(self, enabled: bool) -> None:
pass
@abstractmethod
async def set_locale(self, locale_identifier: str) -> None:
pass
@abstractmethod
async def get_locale(self) -> str:
pass
@abstractmethod
async def list_locale_identifiers(self) -> List[str]:
pass
@abstractmethod
async def open_url(self, url: str) -> None:
pass
@abstractmethod
async def set_location(self, latitude: float, longitude: float) -> None:
pass
@abstractmethod
async def approve(
self, bundle_id: str, permissions: Set[Permission], scheme: Optional[str] = None
) -> None:
pass
@abstractmethod
async def record_video(self, stop: asyncio.Event, output_file: str) -> None:
pass
@abstractmethod
async def stream_video(
self,
output_file: Optional[str],
fps: Optional[int],
format: VideoFormat,
compression_quality: float,
scale_factor: float = 1,
) -> AsyncGenerator[bytes, None]:
yield
@abstractmethod
async def screenshot(self) -> bytes:
pass
@abstractmethod
async def tap(self, x: float, y: float, duration: Optional[float] = None) -> None:
pass
@abstractmethod
async def button(
self, button_type: HIDButtonType, duration: Optional[float] = None
) -> None:
pass
@abstractmethod
async def key(self, keycode: int, duration: Optional[float] = None) -> None:
return
@abstractmethod
async def key_sequence(self, key_sequence: List[int]) -> None:
pass
@abstractmethod
async def swipe(
self,
p_start: Tuple[int, int],
p_end: Tuple[int, int],
duration: Optional[float] = None,
delta: Optional[int] = None,
) -> None:
pass
@abstractmethod
async def crash_show(self, name: str) -> CrashLog:
pass
@abstractmethod
async def contacts_update(self, contacts_path: str) -> None:
pass
@abstractmethod
async def describe(self, fetch_diagnostics: bool = False) -> TargetDescription:
pass
@abstractmethod
async def accessibility_info(
self, point: Optional[Tuple[int, int]], nested: bool
) -> AccessibilityInfo:
pass
@abstractmethod
async def run_instruments(
self,
stop: asyncio.Event,
trace_basename: str,
template_name: str,
app_bundle_id: str,
app_environment: Optional[Dict[str, str]] = None,
app_arguments: Optional[List[str]] = None,
tool_arguments: Optional[List[str]] = None,
started: Optional[asyncio.Event] = None,
timings: Optional[InstrumentsTimings] = None,
post_process_arguments: Optional[List[str]] = None,
) -> List[str]:
pass
@abstractmethod
async def xctrace_record(
self,
stop: asyncio.Event,
output: str,
template_name: str,
all_processes: bool = False,
time_limit: Optional[float] = None,
package: Optional[str] = None,
process_to_attach: Optional[str] = None,
process_to_launch: Optional[str] = None,
process_env: Optional[Dict[str, str]] = None,
launch_args: Optional[List[str]] = None,
target_stdin: Optional[str] = None,
target_stdout: Optional[str] = None,
post_args: Optional[List[str]] = None,
stop_timeout: Optional[float] = None,
started: Optional[asyncio.Event] = None,
) -> List[str]:
pass
@abstractmethod
async def crash_list(self, query: CrashLogQuery) -> List[CrashLogInfo]:
pass
@abstractmethod
async def crash_delete(self, query: CrashLogQuery) -> List[CrashLogInfo]:
pass
@abstractmethod
async def add_media(self, file_paths: List[str]) -> None:
pass
@abstractmethod
async def focus(self) -> None:
pass
@abstractmethod
async def debugserver_start(self, bundle_id: str) -> List[str]:
pass
@abstractmethod
async def debugserver_stop(self) -> None:
pass
@abstractmethod
async def debugserver_status(self) -> Optional[List[str]]:
pass
@abstractmethod
async def text(self, text: str) -> None:
return
@abstractmethod
async def hid(self, event_iterator: AsyncIterable[HIDEvent]) -> None:
pass
@abstractmethod
async def ls_single(
self, container: FileContainer, path: str
) -> List[FileEntryInfo]:
pass
@abstractmethod
async def ls(self, container: FileContainer, paths: List[str]) -> List[FileListing]:
pass
@abstractmethod
async def mv(
self, container: FileContainer, src_paths: List[str], dest_path: str
) -> None:
pass
@abstractmethod
async def rm(self, container: FileContainer, paths: List[str]) -> None:
pass
@abstractmethod
async def mkdir(self, container: FileContainer, path: str) -> None:
pass
@abstractmethod
async def pull(
self, container: FileContainer, src_path: str, dest_path: str
) -> None:
pass
@abstractmethod
async def push(
self, src_paths: List[str], container: FileContainer, dest_path: str
) -> None:
pass
class ClientManager:
@abstractmethod
async def connect(
self,
destination: ConnectionDestination,
metadata: Optional[Dict[str, str]] = None,
) -> CompanionInfo:
pass
@abstractmethod
async def disconnect(self, destination: Union[Address, str]) -> None:
pass
@abstractmethod
async def list_targets(self) -> List[TargetDescription]:
pass
@abstractmethod
async def kill(self) -> None:
pass
class Server(ABC):
@abstractmethod
def close(self) -> None:
pass
@abstractmethod
async def wait_closed(self) -> None:
pass
@abstractproperty
def ports(self) -> Dict[str, str]:
pass | en | 0.860955 | #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-fixme # pyre-fixme # Exposes the resource-specific commands that imply a connected companion | 1.891842 | 2 |
lando_util/organize_project/tests/test_organizer.py | Duke-GCB/lando-util | 0 | 6625237 | from unittest import TestCase
from unittest.mock import patch, Mock, call, mock_open, create_autospec
from lando_util.organize_project.organizer import write_data_to_file, Settings, ProjectData, Organizer
import json
import os
class TestOrganizerFuncs(TestCase):
def test_write_data_to_file(self):
mocked_open = mock_open()
with patch('builtins.open', mocked_open, create=True):
write_data_to_file(data='somedata', filepath='/tmp/somepath.txt')
mocked_open.assert_called_with('/tmp/somepath.txt', 'w')
mocked_open.return_value.write.assert_called_with('somedata')
class TestSettings(TestCase):
def setUp(self):
self.settings_packed_dict = {
"bespin_job_id": "1",
"destination_dir": 'somedir',
"downloaded_workflow_path": '/workflow/sort.cwl',
"workflow_to_read": '/workflow/read/sort.cwl',
"workflow_type": "packed",
"job_order_path": '/output/job_order.json',
"bespin_workflow_stdout_path": '/output/workflow-output.json',
"bespin_workflow_stderr_path": '/output/workflow-output.log',
"bespin_workflow_started": "2019-02-07T12:30",
"bespin_workflow_finished": "2019-02-07T12:45",
"methods_template": '#replace stuff',
"additional_log_files": [
"/bespin/output-data/job-51-bob-resource-usage.json",
]
}
self.settings_zipped_dict = {
"bespin_job_id": "1",
"destination_dir": 'somedir',
"downloaded_workflow_path": '/workflow/sort.cwl',
"workflow_to_read": '/workflow/read/sort.zip',
"workflow_type": "zipped",
"job_order_path": '/output/job_order.json',
"bespin_workflow_stdout_path": '/output/workflow-output.json',
"bespin_workflow_stderr_path": '/output/workflow-output.log',
"bespin_workflow_started": "2019-02-07T12:30",
"bespin_workflow_finished": "2019-02-07T12:45",
"methods_template": '#replace stuff',
"additional_log_files": [
"/bespin/output-data/job-51-bob-resource-usage.json",
]
}
@patch('lando_util.organize_project.organizer.json')
def test_packed_properties(self, mock_json):
mock_json.load.return_value = self.settings_packed_dict
mock_cmdfile = Mock()
settings = Settings(mock_cmdfile)
self.assertEqual(settings.docs_dir, 'somedir/docs')
self.assertEqual(settings.readme_md_dest_path, 'somedir/docs/README.md')
self.assertEqual(settings.readme_html_dest_path, 'somedir/docs/README.html')
self.assertEqual(settings.logs_dir, 'somedir/docs/logs')
self.assertEqual(settings.bespin_workflow_stdout_dest_path, 'somedir/docs/logs/bespin-workflow-output.json')
self.assertEqual(settings.bespin_workflow_stderr_dest_path, 'somedir/docs/logs/bespin-workflow-output.log')
self.assertEqual(settings.job_data_dest_path, 'somedir/docs/logs/job-data.json')
self.assertEqual(settings.scripts_dir, 'somedir/docs/scripts')
self.assertEqual(settings.workflow_dest_path, 'somedir/docs/scripts/sort.cwl')
self.assertEqual(settings.job_order_dest_path, 'somedir/docs/scripts/job_order.json')
self.assertEqual(settings.bespin_workflow_elapsed_minutes, 15.0)
self.assertEqual(settings.additional_log_files, ["/bespin/output-data/job-51-bob-resource-usage.json"])
self.assertEqual(settings.downloaded_workflow_path, '/workflow/sort.cwl')
self.assertEqual(settings.workflow_to_read, '/workflow/read/sort.cwl')
self.assertEqual(settings.workflow_type, 'packed')
@patch('lando_util.organize_project.organizer.json')
def test_zipped_properties(self, mock_json):
mock_json.load.return_value = self.settings_zipped_dict
mock_cmdfile = Mock()
settings = Settings(mock_cmdfile)
self.assertEqual(settings.docs_dir, 'somedir/docs')
self.assertEqual(settings.readme_md_dest_path, 'somedir/docs/README.md')
self.assertEqual(settings.readme_html_dest_path, 'somedir/docs/README.html')
self.assertEqual(settings.logs_dir, 'somedir/docs/logs')
self.assertEqual(settings.bespin_workflow_stdout_dest_path, 'somedir/docs/logs/bespin-workflow-output.json')
self.assertEqual(settings.bespin_workflow_stderr_dest_path, 'somedir/docs/logs/bespin-workflow-output.log')
self.assertEqual(settings.job_data_dest_path, 'somedir/docs/logs/job-data.json')
self.assertEqual(settings.scripts_dir, 'somedir/docs/scripts')
self.assertEqual(settings.workflow_dest_path, 'somedir/docs/scripts/sort.cwl')
self.assertEqual(settings.job_order_dest_path, 'somedir/docs/scripts/job_order.json')
self.assertEqual(settings.bespin_workflow_elapsed_minutes, 15.0)
self.assertEqual(settings.additional_log_files, ["/bespin/output-data/job-51-bob-resource-usage.json"])
self.assertEqual(settings.downloaded_workflow_path, '/workflow/sort.cwl')
self.assertEqual(settings.workflow_to_read, '/workflow/read/sort.zip')
self.assertEqual(settings.workflow_type, 'zipped')
@patch('lando_util.organize_project.organizer.json')
def test_bespin_workflow_elapsed_minutes(self, mock_json):
self.settings_packed_dict['bespin_workflow_started'] = '2019-02-07T12:30'
self.settings_packed_dict['bespin_workflow_finished'] = '2019-02-09T12:30'
mock_json.load.return_value = self.settings_packed_dict
mock_cmdfile = Mock()
settings = Settings(mock_cmdfile)
self.assertEqual(settings.bespin_workflow_elapsed_minutes, 2 * 24 * 60)
@patch('lando_util.organize_project.organizer.json')
def test_bespin_workflow_elapsed_minutes_is_optional(self, mock_json):
del self.settings_packed_dict['bespin_workflow_started']
del self.settings_packed_dict['bespin_workflow_finished']
mock_json.load.return_value = self.settings_packed_dict
mock_cmdfile = Mock()
settings = Settings(mock_cmdfile)
self.assertEqual(settings.bespin_workflow_elapsed_minutes, 0)
class TestProjectData(TestCase):
@patch('lando_util.organize_project.organizer.create_workflow_info')
@patch('lando_util.organize_project.organizer.ReadmeReport')
def test_constructor(self, mock_readme_report, mock_create_workflow_info):
mock_settings = Mock(
bespin_job_id='92',
bespin_workflow_started='2019-02-07T12:30',
bespin_workflow_finished='2019-02-09T12:45',
bespin_workflow_elapsed_minutes='120',
workflow_to_read='/input/read/workflow.cwl',
downloaded_workflow_path='/input/sort.cwl',
job_order_path='/data/job_order.json',
bespin_workflow_stdout_path='/output/workflow_stdout.json',
methods_template='#Markdown'
)
mock_create_workflow_info.return_value.count_output_files.return_value = 13
mock_create_workflow_info.return_value.total_file_size_str.return_value = '20 GiB'
project_data = ProjectData(mock_settings)
mock_create_workflow_info.assert_called_with('/input/read/workflow.cwl')
self.assertEqual(project_data.workflow_info, mock_create_workflow_info.return_value)
mock_workflow_info = mock_create_workflow_info.return_value
mock_workflow_info.update_with_job_order.assert_called_with(job_order_path='/data/job_order.json')
mock_workflow_info.update_with_job_output.assert_called_with(job_output_path='/output/workflow_stdout.json')
self.assertEqual(project_data.readme_report, mock_readme_report.return_value)
expected_job_data = {
'id': '92',
'started': '2019-02-07T12:30',
'finished': '2019-02-09T12:45',
'run_time': '120 minutes',
'num_output_files': 13,
'total_file_size_str': '20 GiB',
}
mock_readme_report.assert_called_with(project_data.workflow_info, expected_job_data)
self.assertEqual(project_data.job_data, expected_job_data)
class TestOrganizer(TestCase):
@patch('lando_util.organize_project.organizer.os')
@patch('lando_util.organize_project.organizer.shutil')
@patch('lando_util.organize_project.organizer.ProjectData')
@patch('lando_util.organize_project.organizer.write_data_to_file')
def test_run_packed(self, mock_write_data_to_file, mock_project_data, mock_shutil, mock_os):
mock_settings = Mock()
mock_settings.workflow_type = 'packed'
mock_settings.bespin_job_id = '42'
mock_settings.bespin_workflow_started = '2019-02-07T12:30'
mock_settings.bespin_workflow_finished = '2019-02-09T12:45'
mock_settings.bespin_workflow_elapsed_minutes = '120'
mock_settings.logs_dir = '/results/docs/logs/'
mock_settings.additional_log_files = ['/tmp/extra/usage-report.txt', '/data/log2.txt']
mock_settings.job_data = {}
mock_project_data.return_value = Mock(
methods_template='#Markdown',
job_data={
'id': '42',
}
)
mock_os.path = os.path
organizer = Organizer(mock_settings)
organizer.run()
mock_os.makedirs.assert_has_calls([
call(exist_ok=True, name=mock_settings.docs_dir),
call(exist_ok=True, name=mock_settings.scripts_dir),
call(exist_ok=True, name=mock_settings.logs_dir),
])
mock_shutil.copy.assert_has_calls([
call(mock_settings.downloaded_workflow_path, mock_settings.workflow_dest_path),
call(mock_settings.job_order_path, mock_settings.job_order_dest_path),
call(mock_settings.bespin_workflow_stdout_path, mock_settings.bespin_workflow_stdout_dest_path),
call(mock_settings.bespin_workflow_stderr_path, mock_settings.bespin_workflow_stderr_dest_path),
call('/tmp/extra/usage-report.txt', '/results/docs/logs/usage-report.txt'),
call('/data/log2.txt', '/results/docs/logs/log2.txt'),
])
project_data = mock_project_data.return_value
mock_write_data_to_file.assert_has_calls([
call(data=project_data.readme_report.render_markdown.return_value,
filepath=mock_settings.readme_md_dest_path),
call(data=project_data.readme_report.render_html.return_value,
filepath=mock_settings.readme_html_dest_path),
call(data=json.dumps({"id": "42"}),
filepath=mock_settings.job_data_dest_path),
])
@patch('lando_util.organize_project.organizer.os')
@patch('lando_util.organize_project.organizer.shutil')
@patch('lando_util.organize_project.organizer.ProjectData')
@patch('lando_util.organize_project.organizer.write_data_to_file')
@patch('lando_util.organize_project.organizer.zipfile')
def test_run_zipped(self, mock_zipfile, mock_write_data_to_file, mock_project_data, mock_shutil, mock_os):
mock_settings = Mock()
mock_settings.workflow_type = 'zipped'
mock_settings.bespin_job_id = '42'
mock_settings.bespin_workflow_started = '2019-02-07T12:30'
mock_settings.bespin_workflow_finished = '2019-02-09T12:45'
mock_settings.bespin_workflow_elapsed_minutes = '120'
mock_settings.downloaded_workflow_path = '/workflow/workflow.zip'
mock_settings.workflow_dest_path = '/workflow/outdir'
mock_settings.logs_dir = '/results/docs/logs/'
mock_settings.additional_log_files = ['/tmp/extra/usage-report.txt', '/data/log2.txt']
mock_settings.job_data = {}
mock_project_data.return_value = Mock(
methods_template='#Markdown',
job_data={
'id': '42',
}
)
mock_os.path = os.path
organizer = Organizer(mock_settings)
organizer.run()
mock_os.makedirs.assert_has_calls([
call(exist_ok=True, name=mock_settings.docs_dir),
call(exist_ok=True, name=mock_settings.scripts_dir),
call(exist_ok=True, name=mock_settings.logs_dir),
])
mock_shutil.copy.assert_has_calls([
call(mock_settings.job_order_path, mock_settings.job_order_dest_path),
call(mock_settings.bespin_workflow_stdout_path, mock_settings.bespin_workflow_stdout_dest_path),
call(mock_settings.bespin_workflow_stderr_path, mock_settings.bespin_workflow_stderr_dest_path),
call('/tmp/extra/usage-report.txt', '/results/docs/logs/usage-report.txt'),
call('/data/log2.txt', '/results/docs/logs/log2.txt'),
])
project_data = mock_project_data.return_value
mock_write_data_to_file.assert_has_calls([
call(data=project_data.readme_report.render_markdown.return_value,
filepath=mock_settings.readme_md_dest_path),
call(data=project_data.readme_report.render_html.return_value,
filepath=mock_settings.readme_html_dest_path),
call(data=json.dumps({"id": "42"}),
filepath=mock_settings.job_data_dest_path),
])
mock_zipfile.ZipFile.assert_called_with('/workflow/workflow.zip')
mock_zipfile.ZipFile.return_value.__enter__.return_value.extractall.assert_called_with('/workflow/outdir')
| from unittest import TestCase
from unittest.mock import patch, Mock, call, mock_open, create_autospec
from lando_util.organize_project.organizer import write_data_to_file, Settings, ProjectData, Organizer
import json
import os
class TestOrganizerFuncs(TestCase):
    """Tests for module-level helper functions in the organizer module."""

    def test_write_data_to_file(self):
        """write_data_to_file should open the path in write mode and write the data."""
        patched_open = mock_open()
        with patch('builtins.open', patched_open, create=True):
            write_data_to_file(data='somedata', filepath='/tmp/somepath.txt')
        # The file must be opened for (text) writing at the requested location.
        patched_open.assert_called_with('/tmp/somepath.txt', 'w')
        # The payload must be written to the handle returned by open().
        file_handle = patched_open.return_value
        file_handle.write.assert_called_with('somedata')
class TestSettings(TestCase):
def setUp(self):
self.settings_packed_dict = {
"bespin_job_id": "1",
"destination_dir": 'somedir',
"downloaded_workflow_path": '/workflow/sort.cwl',
"workflow_to_read": '/workflow/read/sort.cwl',
"workflow_type": "packed",
"job_order_path": '/output/job_order.json',
"bespin_workflow_stdout_path": '/output/workflow-output.json',
"bespin_workflow_stderr_path": '/output/workflow-output.log',
"bespin_workflow_started": "2019-02-07T12:30",
"bespin_workflow_finished": "2019-02-07T12:45",
"methods_template": '#replace stuff',
"additional_log_files": [
"/bespin/output-data/job-51-bob-resource-usage.json",
]
}
self.settings_zipped_dict = {
"bespin_job_id": "1",
"destination_dir": 'somedir',
"downloaded_workflow_path": '/workflow/sort.cwl',
"workflow_to_read": '/workflow/read/sort.zip',
"workflow_type": "zipped",
"job_order_path": '/output/job_order.json',
"bespin_workflow_stdout_path": '/output/workflow-output.json',
"bespin_workflow_stderr_path": '/output/workflow-output.log',
"bespin_workflow_started": "2019-02-07T12:30",
"bespin_workflow_finished": "2019-02-07T12:45",
"methods_template": '#replace stuff',
"additional_log_files": [
"/bespin/output-data/job-51-bob-resource-usage.json",
]
}
@patch('lando_util.organize_project.organizer.json')
def test_packed_properties(self, mock_json):
mock_json.load.return_value = self.settings_packed_dict
mock_cmdfile = Mock()
settings = Settings(mock_cmdfile)
self.assertEqual(settings.docs_dir, 'somedir/docs')
self.assertEqual(settings.readme_md_dest_path, 'somedir/docs/README.md')
self.assertEqual(settings.readme_html_dest_path, 'somedir/docs/README.html')
self.assertEqual(settings.logs_dir, 'somedir/docs/logs')
self.assertEqual(settings.bespin_workflow_stdout_dest_path, 'somedir/docs/logs/bespin-workflow-output.json')
self.assertEqual(settings.bespin_workflow_stderr_dest_path, 'somedir/docs/logs/bespin-workflow-output.log')
self.assertEqual(settings.job_data_dest_path, 'somedir/docs/logs/job-data.json')
self.assertEqual(settings.scripts_dir, 'somedir/docs/scripts')
self.assertEqual(settings.workflow_dest_path, 'somedir/docs/scripts/sort.cwl')
self.assertEqual(settings.job_order_dest_path, 'somedir/docs/scripts/job_order.json')
self.assertEqual(settings.bespin_workflow_elapsed_minutes, 15.0)
self.assertEqual(settings.additional_log_files, ["/bespin/output-data/job-51-bob-resource-usage.json"])
self.assertEqual(settings.downloaded_workflow_path, '/workflow/sort.cwl')
self.assertEqual(settings.workflow_to_read, '/workflow/read/sort.cwl')
self.assertEqual(settings.workflow_type, 'packed')
@patch('lando_util.organize_project.organizer.json')
def test_zipped_properties(self, mock_json):
    """Settings built from a 'zipped' payload expose the expected derived paths."""
    mock_json.load.return_value = self.settings_zipped_dict
    settings = Settings(Mock())
    # (attribute, expected value) pairs, checked in the same order as before.
    expected_attrs = [
        ('docs_dir', 'somedir/docs'),
        ('readme_md_dest_path', 'somedir/docs/README.md'),
        ('readme_html_dest_path', 'somedir/docs/README.html'),
        ('logs_dir', 'somedir/docs/logs'),
        ('bespin_workflow_stdout_dest_path', 'somedir/docs/logs/bespin-workflow-output.json'),
        ('bespin_workflow_stderr_dest_path', 'somedir/docs/logs/bespin-workflow-output.log'),
        ('job_data_dest_path', 'somedir/docs/logs/job-data.json'),
        ('scripts_dir', 'somedir/docs/scripts'),
        ('workflow_dest_path', 'somedir/docs/scripts/sort.cwl'),
        ('job_order_dest_path', 'somedir/docs/scripts/job_order.json'),
        ('bespin_workflow_elapsed_minutes', 15.0),
        ('additional_log_files', ['/bespin/output-data/job-51-bob-resource-usage.json']),
        ('downloaded_workflow_path', '/workflow/sort.cwl'),
        ('workflow_to_read', '/workflow/read/sort.zip'),
        ('workflow_type', 'zipped'),
    ]
    for attr_name, expected in expected_attrs:
        self.assertEqual(getattr(settings, attr_name), expected)
@patch('lando_util.organize_project.organizer.json')
def test_bespin_workflow_elapsed_minutes(self, mock_json):
    """Elapsed minutes are derived from the started/finished timestamps."""
    payload = self.settings_packed_dict
    payload['bespin_workflow_started'] = '2019-02-07T12:30'
    payload['bespin_workflow_finished'] = '2019-02-09T12:30'
    mock_json.load.return_value = payload
    settings = Settings(Mock())
    two_days_in_minutes = 2 * 24 * 60
    self.assertEqual(settings.bespin_workflow_elapsed_minutes, two_days_in_minutes)
@patch('lando_util.organize_project.organizer.json')
def test_bespin_workflow_elapsed_minutes_is_optional(self, mock_json):
    """Without start/finish timestamps, elapsed time defaults to 0."""
    payload = self.settings_packed_dict
    del payload['bespin_workflow_started']
    del payload['bespin_workflow_finished']
    mock_json.load.return_value = payload
    settings = Settings(Mock())
    self.assertEqual(settings.bespin_workflow_elapsed_minutes, 0)
class TestProjectData(TestCase):
    """Tests ProjectData's wiring of workflow info, readme report, and job data."""

    @patch('lando_util.organize_project.organizer.create_workflow_info')
    @patch('lando_util.organize_project.organizer.ReadmeReport')
    def test_constructor(self, mock_readme_report, mock_create_workflow_info):
        # Settings stub carrying only the fields the constructor reads.
        mock_settings = Mock(
            bespin_job_id='92',
            bespin_workflow_started='2019-02-07T12:30',
            bespin_workflow_finished='2019-02-09T12:45',
            bespin_workflow_elapsed_minutes='120',
            workflow_to_read='/input/read/workflow.cwl',
            downloaded_workflow_path='/input/sort.cwl',
            job_order_path='/data/job_order.json',
            bespin_workflow_stdout_path='/output/workflow_stdout.json',
            methods_template='#Markdown'
        )
        # Canned output-file stats that should flow into job_data below.
        mock_create_workflow_info.return_value.count_output_files.return_value = 13
        mock_create_workflow_info.return_value.total_file_size_str.return_value = '20 GiB'
        project_data = ProjectData(mock_settings)
        # Workflow info must be built from the readable workflow path
        # (workflow_to_read), not the downloaded one.
        mock_create_workflow_info.assert_called_with('/input/read/workflow.cwl')
        self.assertEqual(project_data.workflow_info, mock_create_workflow_info.return_value)
        mock_workflow_info = mock_create_workflow_info.return_value
        # The workflow info is enriched with both the job order and the job output.
        mock_workflow_info.update_with_job_order.assert_called_with(job_order_path='/data/job_order.json')
        mock_workflow_info.update_with_job_output.assert_called_with(job_output_path='/output/workflow_stdout.json')
        self.assertEqual(project_data.readme_report, mock_readme_report.return_value)
        # job_data summarizes job id/timing plus counts pulled from workflow_info.
        expected_job_data = {
            'id': '92',
            'started': '2019-02-07T12:30',
            'finished': '2019-02-09T12:45',
            'run_time': '120 minutes',
            'num_output_files': 13,
            'total_file_size_str': '20 GiB',
        }
        mock_readme_report.assert_called_with(project_data.workflow_info, expected_job_data)
        self.assertEqual(project_data.job_data, expected_job_data)
class TestOrganizer(TestCase):
    """Tests Organizer.run() for both 'packed' and 'zipped' workflow types."""

    @patch('lando_util.organize_project.organizer.os')
    @patch('lando_util.organize_project.organizer.shutil')
    @patch('lando_util.organize_project.organizer.ProjectData')
    @patch('lando_util.organize_project.organizer.write_data_to_file')
    def test_run_packed(self, mock_write_data_to_file, mock_project_data, mock_shutil, mock_os):
        mock_settings = Mock()
        mock_settings.workflow_type = 'packed'
        mock_settings.bespin_job_id = '42'
        mock_settings.bespin_workflow_started = '2019-02-07T12:30'
        mock_settings.bespin_workflow_finished = '2019-02-09T12:45'
        mock_settings.bespin_workflow_elapsed_minutes = '120'
        mock_settings.logs_dir = '/results/docs/logs/'
        mock_settings.additional_log_files = ['/tmp/extra/usage-report.txt', '/data/log2.txt']
        mock_settings.job_data = {}
        mock_project_data.return_value = Mock(
            methods_template='#Markdown',
            job_data={
                'id': '42',
            }
        )
        # Keep real path joining while os.makedirs etc. stay mocked.
        mock_os.path = os.path
        organizer = Organizer(mock_settings)
        organizer.run()
        # Output layout: docs/, docs/scripts/, docs/logs/ are all created.
        mock_os.makedirs.assert_has_calls([
            call(exist_ok=True, name=mock_settings.docs_dir),
            call(exist_ok=True, name=mock_settings.scripts_dir),
            call(exist_ok=True, name=mock_settings.logs_dir),
        ])
        # Packed workflows copy the downloaded .cwl file directly, plus the
        # job order, workflow stdout/stderr, and any additional log files.
        mock_shutil.copy.assert_has_calls([
            call(mock_settings.downloaded_workflow_path, mock_settings.workflow_dest_path),
            call(mock_settings.job_order_path, mock_settings.job_order_dest_path),
            call(mock_settings.bespin_workflow_stdout_path, mock_settings.bespin_workflow_stdout_dest_path),
            call(mock_settings.bespin_workflow_stderr_path, mock_settings.bespin_workflow_stderr_dest_path),
            call('/tmp/extra/usage-report.txt', '/results/docs/logs/usage-report.txt'),
            call('/data/log2.txt', '/results/docs/logs/log2.txt'),
        ])
        project_data = mock_project_data.return_value
        # README renders (markdown + html) and the job-data JSON are written out.
        mock_write_data_to_file.assert_has_calls([
            call(data=project_data.readme_report.render_markdown.return_value,
                 filepath=mock_settings.readme_md_dest_path),
            call(data=project_data.readme_report.render_html.return_value,
                 filepath=mock_settings.readme_html_dest_path),
            call(data=json.dumps({"id": "42"}),
                 filepath=mock_settings.job_data_dest_path),
        ])

    @patch('lando_util.organize_project.organizer.os')
    @patch('lando_util.organize_project.organizer.shutil')
    @patch('lando_util.organize_project.organizer.ProjectData')
    @patch('lando_util.organize_project.organizer.write_data_to_file')
    @patch('lando_util.organize_project.organizer.zipfile')
    def test_run_zipped(self, mock_zipfile, mock_write_data_to_file, mock_project_data, mock_shutil, mock_os):
        mock_settings = Mock()
        mock_settings.workflow_type = 'zipped'
        mock_settings.bespin_job_id = '42'
        mock_settings.bespin_workflow_started = '2019-02-07T12:30'
        mock_settings.bespin_workflow_finished = '2019-02-09T12:45'
        mock_settings.bespin_workflow_elapsed_minutes = '120'
        mock_settings.downloaded_workflow_path = '/workflow/workflow.zip'
        mock_settings.workflow_dest_path = '/workflow/outdir'
        mock_settings.logs_dir = '/results/docs/logs/'
        mock_settings.additional_log_files = ['/tmp/extra/usage-report.txt', '/data/log2.txt']
        mock_settings.job_data = {}
        mock_project_data.return_value = Mock(
            methods_template='#Markdown',
            job_data={
                'id': '42',
            }
        )
        # Keep real path joining while os.makedirs etc. stay mocked.
        mock_os.path = os.path
        organizer = Organizer(mock_settings)
        organizer.run()
        mock_os.makedirs.assert_has_calls([
            call(exist_ok=True, name=mock_settings.docs_dir),
            call(exist_ok=True, name=mock_settings.scripts_dir),
            call(exist_ok=True, name=mock_settings.logs_dir),
        ])
        # Note: unlike the packed case, the workflow file itself is NOT
        # shutil.copy'd; it is extracted from the zip (asserted below).
        mock_shutil.copy.assert_has_calls([
            call(mock_settings.job_order_path, mock_settings.job_order_dest_path),
            call(mock_settings.bespin_workflow_stdout_path, mock_settings.bespin_workflow_stdout_dest_path),
            call(mock_settings.bespin_workflow_stderr_path, mock_settings.bespin_workflow_stderr_dest_path),
            call('/tmp/extra/usage-report.txt', '/results/docs/logs/usage-report.txt'),
            call('/data/log2.txt', '/results/docs/logs/log2.txt'),
        ])
        project_data = mock_project_data.return_value
        mock_write_data_to_file.assert_has_calls([
            call(data=project_data.readme_report.render_markdown.return_value,
                 filepath=mock_settings.readme_md_dest_path),
            call(data=project_data.readme_report.render_html.return_value,
                 filepath=mock_settings.readme_html_dest_path),
            call(data=json.dumps({"id": "42"}),
                 filepath=mock_settings.job_data_dest_path),
        ])
        # Zipped workflows are opened (as a context manager) and extracted
        # into the workflow destination directory.
        mock_zipfile.ZipFile.assert_called_with('/workflow/workflow.zip')
        mock_zipfile.ZipFile.return_value.__enter__.return_value.extractall.assert_called_with('/workflow/outdir')
| none | 1 | 2.589783 | 3 | |
drl/agents/heads/action_value_heads.py | lucaslingle/pytorch_drl | 0 | 6625238 | <filename>drl/agents/heads/action_value_heads.py<gh_stars>0
"""
Action-value prediction heads.
"""
from typing import Mapping, Any, Type, Callable, Optional
import abc
import torch as tc
from drl.agents.heads.abstract import Head
from drl.agents.architectures.stateless.abstract import HeadEligibleArchitecture
class ActionValueHead(Head, metaclass=abc.ABCMeta):
    """Common abstract base for every head that predicts action values."""
class SimpleActionValueHead(ActionValueHead, metaclass=abc.ABCMeta):
    """Abstract base for plain, scalar action-value heads (the alternative
    being the distributional heads below)."""
class DistributionalActionValueHead(ActionValueHead, metaclass=abc.ABCMeta):
    """
    Abstract class for distributional action-value prediction heads.

    Reference:
        Bellemare et al., 2017 -
        'A Distributional Perspective on Reinforcement Learning'.
    """
    def __init__(self, vmin: float, vmax: float, num_bins: int):
        """
        Args:
            vmin (float): Minimum return value.
            vmax (float): Maximum return value.
            num_bins (int): Number of bins for distributional value learning.
        """
        ActionValueHead.__init__(self)
        self._vmin = vmin
        self._vmax = vmax
        self._num_bins = num_bins

    def returns_to_bin_ids(self, returns: tc.Tensor) -> tc.Tensor:
        """
        Map return values to histogram bin indices in ``[0, num_bins - 1]``.

        Args:
            returns (torch.Tensor): Tensor of return values; values outside
                [vmin, vmax] are clipped to the support first.

        Returns:
            torch.Tensor: Integer tensor of bin ids, same shape as ``returns``.
        """
        returns = tc.clip(returns, self._vmin, self._vmax)
        bin_width = (self._vmax - self._vmin) / self._num_bins
        bin_edges = self._vmin + bin_width * tc.arange(
            self._num_bins + 1).float()
        # torch.bucketize (right=False) returns i such that
        # bin_edges[i-1] < value <= bin_edges[i], i.e. values in 0..num_bins:
        # one id too many for num_bins bins, and returns in the top bin would
        # index out of range. Shift down by one and clamp so that vmin itself
        # lands in bin 0 and vmax lands in bin num_bins - 1.
        indices = tc.clamp(tc.bucketize(returns, bin_edges) - 1, min=0)
        return indices
class DiscreteActionValueHead(ActionValueHead, metaclass=abc.ABCMeta):
    """
    Abstract class for discrete-action action-value prediction heads.
    """
    def __init__(self, num_features: int, num_actions: int):
        """
        Args:
            num_features (int): Number of input features.
            num_actions (int): Number of actions.
        """
        ActionValueHead.__init__(self)
        # Stored privately; concrete subclasses size their output layers
        # from these values.
        self._num_features = num_features
        self._num_actions = num_actions

    @property
    def num_features(self):
        # Read-only view of the input feature dimensionality.
        return self._num_features

    @property
    def num_actions(self):
        # Read-only view of the action-space size.
        return self._num_actions
class ContinuousActionValueHead(ActionValueHead, metaclass=abc.ABCMeta):
    """Abstract base for action-value heads over continuous action spaces."""
class SimpleDiscreteActionValueHead(SimpleActionValueHead,
                                    DiscreteActionValueHead):
    """
    Non-distributional Q-value head for a discrete action space: one
    architecture maps features to a scalar estimate per action.

    References:
        Mnih et al., 2015 -
        'Human Level Control through Deep Reinforcement Learning'
        Wang et al., 2016 -
        'Dueling Network Architectures for Deep Reinforcement Learning'
    """

    def __init__(
            self,
            num_features: int,
            num_actions: int,
            head_architecture_cls: Type[HeadEligibleArchitecture],
            head_architecture_cls_args: Mapping[str, Any],
            w_init: Optional[Callable[[tc.Tensor], None]],
            b_init: Optional[Callable[[tc.Tensor], None]],
            **kwargs: Mapping[str, Any]):
        """
        Args:
            num_features (int): Number of input features.
            num_actions (int): Number of actions.
            head_architecture_cls (Type[HeadEligibleArchitecture]): Class
                object for the head architecture; must derive from
                HeadEligibleArchitecture.
            head_architecture_cls_args (Mapping[str, Any]): Keyword arguments
                for the head architecture.
            w_init (Optional[Callable[[torch.Tensor], None]]): Weight initializer.
            b_init (Optional[Callable[[torch.Tensor], None]]): Bias initializer.
            **kwargs (Mapping[str, Any]): Extra keyword arguments (unused here).
        """
        SimpleActionValueHead.__init__(self)
        DiscreteActionValueHead.__init__(self, num_features, num_actions)
        self._action_value_head = head_architecture_cls(
            input_dim=num_features,
            output_dim=num_actions,
            w_init=w_init,
            b_init=b_init,
            **head_architecture_cls_args)

    def forward(
            self, features: tc.Tensor,
            **kwargs: Mapping[str, Any]) -> tc.Tensor:
        """
        Estimate Q(s, a) for every action.

        Args:
            features (torch.Tensor): Tensor of shape [batch_size, num_features].
            **kwargs (Mapping[str, Any]): Extra keyword arguments (unused here).

        Returns:
            torch.Tensor: Tensor of shape [batch_size, num_actions] containing
            the estimated state-action-conditional values.
        """
        return self._action_value_head(features)
class SimpleContinuousActionValueHead(SimpleActionValueHead,
                                      ContinuousActionValueHead):
    """
    Non-distributional action-value head for continuous control: maps the
    incoming features to a single scalar value estimate per batch element.

    Reference:
        Lillicrap et al., 2015 -
        'Continuous Control with Deep Reinforcement Learning'.
    """

    def __init__(
            self,
            num_features: int,
            head_architecture_cls: Type[HeadEligibleArchitecture],
            head_architecture_cls_args: Mapping[str, Any],
            w_init: Optional[Callable[[tc.Tensor], None]],
            b_init: Optional[Callable[[tc.Tensor], None]],
            **kwargs: Mapping[str, Any]):
        """
        Args:
            num_features (int): Number of input features.
            head_architecture_cls (Type[HeadEligibleArchitecture]): Class
                object for the head architecture; must derive from
                HeadEligibleArchitecture.
            head_architecture_cls_args (Mapping[str, Any]): Keyword arguments
                for the head architecture.
            w_init (Optional[Callable[[torch.Tensor], None]]): Weight initializer.
            b_init (Optional[Callable[[torch.Tensor], None]]): Bias initializer.
            **kwargs (Mapping[str, Any]): Extra keyword arguments (unused here).
        """
        SimpleActionValueHead.__init__(self)
        ContinuousActionValueHead.__init__(self)
        self._action_value_head = head_architecture_cls(
            input_dim=num_features,
            output_dim=1,
            w_init=w_init,
            b_init=b_init,
            **head_architecture_cls_args)

    def forward(
            self, features: tc.Tensor,
            **kwargs: Mapping[str, Any]) -> tc.Tensor:
        """
        Args:
            features (torch.Tensor): Tensor of shape [batch_size, num_features].
            **kwargs (Mapping[str, Any]): Extra keyword arguments (unused here).

        Returns:
            torch.Tensor: Tensor of shape [batch_size] containing the
            estimated state-action-conditional values.
        """
        return self._action_value_head(features).squeeze(-1)
class DistributionalDiscreteActionValueHead(DistributionalActionValueHead,
                                            DiscreteActionValueHead):
    """
    Distributional discrete-action action-value prediction head.

    Reference:
        Bellemare et al., 2017 -
        'A Distributional Perspective on Reinforcement Learning'.
    """
    def __init__(
            self,
            num_features: int,
            num_actions: int,
            head_architecture_cls: Type[HeadEligibleArchitecture],
            head_architecture_cls_args: Mapping[str, Any],
            w_init: Optional[Callable[[tc.Tensor], None]],
            b_init: Optional[Callable[[tc.Tensor], None]],
            vmin: float,
            vmax: float,
            num_bins: int,
            **kwargs: Mapping[str, Any]):
        """
        Args:
            num_features (int): Number of input features.
            num_actions (int): Number of actions.
            head_architecture_cls (Type[HeadEligibleArchitecture]): Class object
                for the head architecture; must derive from
                HeadEligibleArchitecture.
            head_architecture_cls_args (Mapping[str, Any]): Keyword arguments
                for the head architecture.
            w_init (Optional[Callable[[torch.Tensor], None]]): Weight initializer.
            b_init (Optional[Callable[[torch.Tensor], None]]): Bias initializer.
            vmin (float): Minimum return value.
            vmax (float): Maximum return value.
            num_bins (int): Number of bins for distributional value learning.
            **kwargs (Mapping[str, Any]): Keyword arguments.
        """
        DistributionalActionValueHead.__init__(self, vmin, vmax, num_bins)
        DiscreteActionValueHead.__init__(self, num_features, num_actions)
        # One map produces all num_actions * num_bins logits at once;
        # forward() reshapes them back into per-action distributions.
        self._action_value_head = head_architecture_cls(
            input_dim=num_features,
            output_dim=num_actions * num_bins,
            w_init=w_init,
            b_init=b_init,
            **head_architecture_cls_args)

    def logits_to_mean(self, q_value_logits: tc.Tensor) -> tc.Tensor:
        """
        Collapse per-action value distributions to their expected values.

        Args:
            q_value_logits (torch.Tensor): Tensor of shape
                [batch_size, num_actions, num_bins] containing action-value
                logits.

        Returns:
            torch.Tensor: Tensor of shape [batch_size, num_actions] containing
            the mean q-value predicted for each action.
        """
        bin_width = (self._vmax - self._vmin) / self._num_bins
        # Create the bin midpoints on the same device as the logits; the
        # previous code always allocated them on the default device, which
        # breaks when the head runs on GPU. CPU behavior is unchanged.
        bin_midpoints = self._vmin + 0.5 * bin_width + \
            bin_width * tc.arange(
                self._num_bins, device=q_value_logits.device).float()
        bin_midpoints = bin_midpoints.view(1, 1, self._num_bins)
        value_dists = tc.nn.functional.softmax(input=q_value_logits, dim=-1)
        q_value_means = (value_dists * bin_midpoints).sum(dim=-1)
        return q_value_means

    def forward(
            self, features: tc.Tensor,
            **kwargs: Mapping[str, Any]) -> tc.Tensor:
        """
        Args:
            features (torch.Tensor): Tensor of shape [batch_size, num_features].
            **kwargs (Mapping[str, Any]): Keyword arguments.

        Returns:
            torch.Tensor: Tensor of shape [batch_size, num_actions, num_bins]
            containing the logits of the estimated state-action-conditional
            value distribution.
        """
        q_value_logits_flat = self._action_value_head(features)
        q_value_logits = q_value_logits_flat.reshape(
            -1, self._num_actions, self._num_bins)
        return q_value_logits
| <filename>drl/agents/heads/action_value_heads.py<gh_stars>0
"""
Action-value prediction heads.
"""
from typing import Mapping, Any, Type, Callable, Optional
import abc
import torch as tc
from drl.agents.heads.abstract import Head
from drl.agents.architectures.stateless.abstract import HeadEligibleArchitecture
class ActionValueHead(Head, metaclass=abc.ABCMeta):
    """Common abstract base for every head that predicts action values."""
class SimpleActionValueHead(ActionValueHead, metaclass=abc.ABCMeta):
    """Abstract base for plain, scalar action-value heads (the alternative
    being the distributional heads below)."""
class DistributionalActionValueHead(ActionValueHead, metaclass=abc.ABCMeta):
    """
    Abstract class for distributional action-value prediction heads.

    Reference:
        Bellemare et al., 2017 -
        'A Distributional Perspective on Reinforcement Learning'.
    """
    def __init__(self, vmin: float, vmax: float, num_bins: int):
        """
        Args:
            vmin (float): Minimum return value.
            vmax (float): Maximum return value.
            num_bins (int): Number of bins for distributional value learning.
        """
        ActionValueHead.__init__(self)
        self._vmin = vmin
        self._vmax = vmax
        self._num_bins = num_bins

    def returns_to_bin_ids(self, returns: tc.Tensor) -> tc.Tensor:
        """
        Map return values to histogram bin indices in ``[0, num_bins - 1]``.

        Args:
            returns (torch.Tensor): Tensor of return values; values outside
                [vmin, vmax] are clipped to the support first.

        Returns:
            torch.Tensor: Integer tensor of bin ids, same shape as ``returns``.
        """
        returns = tc.clip(returns, self._vmin, self._vmax)
        bin_width = (self._vmax - self._vmin) / self._num_bins
        bin_edges = self._vmin + bin_width * tc.arange(
            self._num_bins + 1).float()
        # torch.bucketize (right=False) returns i such that
        # bin_edges[i-1] < value <= bin_edges[i], i.e. values in 0..num_bins:
        # one id too many for num_bins bins, and returns in the top bin would
        # index out of range. Shift down by one and clamp so that vmin itself
        # lands in bin 0 and vmax lands in bin num_bins - 1.
        indices = tc.clamp(tc.bucketize(returns, bin_edges) - 1, min=0)
        return indices
class DiscreteActionValueHead(ActionValueHead, metaclass=abc.ABCMeta):
    """
    Abstract class for discrete-action action-value prediction heads.
    """
    def __init__(self, num_features: int, num_actions: int):
        """
        Args:
            num_features (int): Number of input features.
            num_actions (int): Number of actions.
        """
        ActionValueHead.__init__(self)
        # Stored privately; concrete subclasses size their output layers
        # from these values.
        self._num_features = num_features
        self._num_actions = num_actions

    @property
    def num_features(self):
        # Read-only view of the input feature dimensionality.
        return self._num_features

    @property
    def num_actions(self):
        # Read-only view of the action-space size.
        return self._num_actions
class ContinuousActionValueHead(ActionValueHead, metaclass=abc.ABCMeta):
    """Abstract base for action-value heads over continuous action spaces."""
class SimpleDiscreteActionValueHead(SimpleActionValueHead,
                                    DiscreteActionValueHead):
    """
    Non-distributional Q-value head for a discrete action space: one
    architecture maps features to a scalar estimate per action.

    References:
        Mnih et al., 2015 -
        'Human Level Control through Deep Reinforcement Learning'
        Wang et al., 2016 -
        'Dueling Network Architectures for Deep Reinforcement Learning'
    """

    def __init__(
            self,
            num_features: int,
            num_actions: int,
            head_architecture_cls: Type[HeadEligibleArchitecture],
            head_architecture_cls_args: Mapping[str, Any],
            w_init: Optional[Callable[[tc.Tensor], None]],
            b_init: Optional[Callable[[tc.Tensor], None]],
            **kwargs: Mapping[str, Any]):
        """
        Args:
            num_features (int): Number of input features.
            num_actions (int): Number of actions.
            head_architecture_cls (Type[HeadEligibleArchitecture]): Class
                object for the head architecture; must derive from
                HeadEligibleArchitecture.
            head_architecture_cls_args (Mapping[str, Any]): Keyword arguments
                for the head architecture.
            w_init (Optional[Callable[[torch.Tensor], None]]): Weight initializer.
            b_init (Optional[Callable[[torch.Tensor], None]]): Bias initializer.
            **kwargs (Mapping[str, Any]): Extra keyword arguments (unused here).
        """
        SimpleActionValueHead.__init__(self)
        DiscreteActionValueHead.__init__(self, num_features, num_actions)
        self._action_value_head = head_architecture_cls(
            input_dim=num_features,
            output_dim=num_actions,
            w_init=w_init,
            b_init=b_init,
            **head_architecture_cls_args)

    def forward(
            self, features: tc.Tensor,
            **kwargs: Mapping[str, Any]) -> tc.Tensor:
        """
        Estimate Q(s, a) for every action.

        Args:
            features (torch.Tensor): Tensor of shape [batch_size, num_features].
            **kwargs (Mapping[str, Any]): Extra keyword arguments (unused here).

        Returns:
            torch.Tensor: Tensor of shape [batch_size, num_actions] containing
            the estimated state-action-conditional values.
        """
        return self._action_value_head(features)
class SimpleContinuousActionValueHead(SimpleActionValueHead,
                                      ContinuousActionValueHead):
    """
    Non-distributional action-value head for continuous control: maps the
    incoming features to a single scalar value estimate per batch element.

    Reference:
        Lillicrap et al., 2015 -
        'Continuous Control with Deep Reinforcement Learning'.
    """

    def __init__(
            self,
            num_features: int,
            head_architecture_cls: Type[HeadEligibleArchitecture],
            head_architecture_cls_args: Mapping[str, Any],
            w_init: Optional[Callable[[tc.Tensor], None]],
            b_init: Optional[Callable[[tc.Tensor], None]],
            **kwargs: Mapping[str, Any]):
        """
        Args:
            num_features (int): Number of input features.
            head_architecture_cls (Type[HeadEligibleArchitecture]): Class
                object for the head architecture; must derive from
                HeadEligibleArchitecture.
            head_architecture_cls_args (Mapping[str, Any]): Keyword arguments
                for the head architecture.
            w_init (Optional[Callable[[torch.Tensor], None]]): Weight initializer.
            b_init (Optional[Callable[[torch.Tensor], None]]): Bias initializer.
            **kwargs (Mapping[str, Any]): Extra keyword arguments (unused here).
        """
        SimpleActionValueHead.__init__(self)
        ContinuousActionValueHead.__init__(self)
        self._action_value_head = head_architecture_cls(
            input_dim=num_features,
            output_dim=1,
            w_init=w_init,
            b_init=b_init,
            **head_architecture_cls_args)

    def forward(
            self, features: tc.Tensor,
            **kwargs: Mapping[str, Any]) -> tc.Tensor:
        """
        Args:
            features (torch.Tensor): Tensor of shape [batch_size, num_features].
            **kwargs (Mapping[str, Any]): Extra keyword arguments (unused here).

        Returns:
            torch.Tensor: Tensor of shape [batch_size] containing the
            estimated state-action-conditional values.
        """
        return self._action_value_head(features).squeeze(-1)
class DistributionalDiscreteActionValueHead(DistributionalActionValueHead,
                                            DiscreteActionValueHead):
    """
    Distributional discrete-action action-value prediction head.

    Reference:
        Bellemare et al., 2017 -
        'A Distributional Perspective on Reinforcement Learning'.
    """
    def __init__(
            self,
            num_features: int,
            num_actions: int,
            head_architecture_cls: Type[HeadEligibleArchitecture],
            head_architecture_cls_args: Mapping[str, Any],
            w_init: Optional[Callable[[tc.Tensor], None]],
            b_init: Optional[Callable[[tc.Tensor], None]],
            vmin: float,
            vmax: float,
            num_bins: int,
            **kwargs: Mapping[str, Any]):
        """
        Args:
            num_features (int): Number of input features.
            num_actions (int): Number of actions.
            head_architecture_cls (Type[HeadEligibleArchitecture]): Class object
                for the head architecture; must derive from
                HeadEligibleArchitecture.
            head_architecture_cls_args (Mapping[str, Any]): Keyword arguments
                for the head architecture.
            w_init (Optional[Callable[[torch.Tensor], None]]): Weight initializer.
            b_init (Optional[Callable[[torch.Tensor], None]]): Bias initializer.
            vmin (float): Minimum return value.
            vmax (float): Maximum return value.
            num_bins (int): Number of bins for distributional value learning.
            **kwargs (Mapping[str, Any]): Keyword arguments.
        """
        DistributionalActionValueHead.__init__(self, vmin, vmax, num_bins)
        DiscreteActionValueHead.__init__(self, num_features, num_actions)
        # One map produces all num_actions * num_bins logits at once;
        # forward() reshapes them back into per-action distributions.
        self._action_value_head = head_architecture_cls(
            input_dim=num_features,
            output_dim=num_actions * num_bins,
            w_init=w_init,
            b_init=b_init,
            **head_architecture_cls_args)

    def logits_to_mean(self, q_value_logits: tc.Tensor) -> tc.Tensor:
        """
        Collapse per-action value distributions to their expected values.

        Args:
            q_value_logits (torch.Tensor): Tensor of shape
                [batch_size, num_actions, num_bins] containing action-value
                logits.

        Returns:
            torch.Tensor: Tensor of shape [batch_size, num_actions] containing
            the mean q-value predicted for each action.
        """
        bin_width = (self._vmax - self._vmin) / self._num_bins
        # Create the bin midpoints on the same device as the logits; the
        # previous code always allocated them on the default device, which
        # breaks when the head runs on GPU. CPU behavior is unchanged.
        bin_midpoints = self._vmin + 0.5 * bin_width + \
            bin_width * tc.arange(
                self._num_bins, device=q_value_logits.device).float()
        bin_midpoints = bin_midpoints.view(1, 1, self._num_bins)
        value_dists = tc.nn.functional.softmax(input=q_value_logits, dim=-1)
        q_value_means = (value_dists * bin_midpoints).sum(dim=-1)
        return q_value_means

    def forward(
            self, features: tc.Tensor,
            **kwargs: Mapping[str, Any]) -> tc.Tensor:
        """
        Args:
            features (torch.Tensor): Tensor of shape [batch_size, num_features].
            **kwargs (Mapping[str, Any]): Keyword arguments.

        Returns:
            torch.Tensor: Tensor of shape [batch_size, num_actions, num_bins]
            containing the logits of the estimated state-action-conditional
            value distribution.
        """
        q_value_logits_flat = self._action_value_head(features)
        q_value_logits = q_value_logits_flat.reshape(
            -1, self._num_actions, self._num_bins)
        return q_value_logits
| en | 0.66272 | Action-value prediction heads. Abstract class for action-value prediction heads. Abstract class for simple action-value prediction heads (as opposed to distributional). Abstract class for distributional action-value prediction heads. Reference: <NAME> et al., 2017 - 'A Distributional Perspective on Reinforcement Learning'. Args: vmin (float): Minimum return value. vmax (float): Maximum return value. num_bins (int): Number of bins for distributional value learning. Abstract class for discrete-action action-value prediction heads. Args: num_features (int): Number of input features. num_actions (int): Number of actions. Abstract class for continuous-action action-value prediction heads. Simple discrete-action action-value prediction head. References: <NAME> et al., 2015 - 'Human Level Control through Deep Reinforcement Learning' <NAME> et al., 2016 - 'Dueling Network Architectures for Deep Reinforcement Learning' Args: num_features (int): Number of input features. num_actions (int): Number of actions. head_architecture_cls (Type[HeadEligibleArchitecture]): Class object for policy head architecture. Must be a derived class of HeadEligibleArchitecture. head_architecture_cls_args (Mapping[str, Any]): Keyword arguments for head architecture. w_init (Optional[Callable[[torch.Tensor], None]]): Weight initializer. b_init (Optional[Callable[[torch.Tensor], None]]): Bias initializer. **kwargs (Mapping[str, Any]): Keyword arguments. Args: features (torch.Tensor): Torch tensor with shape [batch_size, num_features]. **kwargs (Mapping[str, Any]): Keyword arguments. Returns: torch.Tensor: Torch tensor of shape [batch_size, num_actions], containing the estimated state-action-conditional values. Simple continuous-action action-value prediction head. Reference: <NAME> et al., 2015 - 'Continuous Control with Deep Reinforcement Learning'. Args: num_features (int): Number of input features. 
head_architecture_cls (Type[HeadEligibleArchitecture]): Class object for policy head architecture. Must be a derived class of HeadEligibleArchitecture. head_architecture_cls_args (Mapping[str, Any]): Keyword arguments for head architecture. w_init (Optional[Callable[[torch.Tensor], None]]): Weight initializer. b_init (Optional[Callable[[torch.Tensor], None]]): Bias initializer. **kwargs (Mapping[str, Any]): Keyword arguments. Args: features (torch.Tensor): Torch tensor with shape [batch_size, num_features]. **kwargs (Mapping[str, Any]): Keyword arguments. Returns: torch.Tensor: Torch tensor of shape [batch_size], containing the estimated state-action-conditional values. Distributional discrete-action action-value prediction head. Reference: <NAME> et al., 2017 - 'A Distributional Perspective on Reinforcement Learning'. Args: num_features (int): Number of input features. num_actions (int): Number of actions. head_architecture_cls (Type[HeadEligibleArchitecture]): Class object for policy head architecture. Must be a derived class of HeadEligibleArchitecture. head_architecture_cls_args (Mapping[str, Any]): Keyword arguments for head architecture. w_init (Optional[Callable[[torch.Tensor], None]]): Weight initializer. b_init (Optional[Callable[[torch.Tensor], None]]): Bias initializer. vmin (float): Minimum return value. vmax (float): Maximum return value. num_bins (int): Number of bins for distributional value learning. **kwargs (Mapping[str, Any]): Keyword arguments. Args: q_value_logits (torch.Tensor): Torch tensor of shape [batch_size, num_actions, num_bins], containing action-value logits. Returns: torch.Tensor: Torch tensor of shape [batch_size, num_actions] containing the mean q-value predicted for each action. Args: features (torch.Tensor): Torch tensor with shape [batch_size, num_features]. **kwargs (Mapping[str, Any]): Keyword arguments. 
Returns: torch.Tensor: Torch tensor with shape [batch_size, num_actions, num_bins], containing the logits of the estimated state-action-conditional value distribution. | 2.411965 | 2 |
appCore/apps/replica/contrib/micro/serializers.py | jadedgamer/alifewellplayed.com | 4 | 6625239 | from rest_framework import serializers
from .models import Timeline, Note
class TimelineSerializer(serializers.ModelSerializer):
    """Serializes a Timeline plus a hyperlink to its nested note list."""
    # Owner is exposed by username and cannot be set through the API.
    user = serializers.ReadOnlyField(source='user.username')
    # Timelines are addressed by slug in the nested note-list endpoint.
    api_url = serializers.HyperlinkedIdentityField(view_name='rest_replica:micro-timeline-note-list', lookup_field='slug')

    class Meta:
        model = Timeline
        # NOTE(review): 'id' was listed twice here; deduplicated (output is
        # unchanged since DRF keys fields by name).
        fields = ('id', 'user', 'date_created', 'date_updated', 'name', 'slug', 'rev_order', 'is_public', 'api_url')
class NoteSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes a Note, exposing the rendered body_html (read side)."""
    # Owner is exposed by username and cannot be set through the API.
    user = serializers.ReadOnlyField(source='user.username')
    # Nested timeline output is currently disabled (see NoteCreateSerializer).
    #timeline = TimelineSerializer(many=False, required=False)
    api_url = serializers.HyperlinkedIdentityField( view_name='rest_replica:micro-note-detail', lookup_field='id')

    class Meta:
        model = Note
        # Exposes rendered 'body_html'; raw 'body' is handled by NoteCreateSerializer.
        fields = ('id', 'user', 'date_created', 'date_updated', 'is_private', 'body_html', 'api_url')
class NoteCreateSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes a Note for creation: accepts raw 'body' and a nested timeline."""
    # Owner is exposed by username and cannot be set through the API.
    user = serializers.ReadOnlyField(source='user.username')
    timeline = TimelineSerializer(many=False, required=False)
    api_url = serializers.HyperlinkedIdentityField( view_name='rest_replica:micro-note-detail', lookup_field='id')

    class Meta:
        model = Note
        # Accepts raw 'body' (NoteSerializer exposes rendered 'body_html').
        fields = ('id', 'user', 'date_created', 'date_updated', 'timeline', 'is_private', 'body', 'api_url')
| from rest_framework import serializers
from .models import Timeline, Note
class TimelineSerializer(serializers.ModelSerializer):
    """Serializes a Timeline plus a hyperlink to its nested note list."""
    # Owner is exposed by username and cannot be set through the API.
    user = serializers.ReadOnlyField(source='user.username')
    # Timelines are addressed by slug in the nested note-list endpoint.
    api_url = serializers.HyperlinkedIdentityField(view_name='rest_replica:micro-timeline-note-list', lookup_field='slug')

    class Meta:
        model = Timeline
        # NOTE(review): 'id' was listed twice here; deduplicated (output is
        # unchanged since DRF keys fields by name).
        fields = ('id', 'user', 'date_created', 'date_updated', 'name', 'slug', 'rev_order', 'is_public', 'api_url')
class NoteSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes a Note, exposing the rendered body_html (read side)."""
    # Owner is exposed by username and cannot be set through the API.
    user = serializers.ReadOnlyField(source='user.username')
    # Nested timeline output is currently disabled (see NoteCreateSerializer).
    #timeline = TimelineSerializer(many=False, required=False)
    api_url = serializers.HyperlinkedIdentityField( view_name='rest_replica:micro-note-detail', lookup_field='id')

    class Meta:
        model = Note
        # Exposes rendered 'body_html'; raw 'body' is handled by NoteCreateSerializer.
        fields = ('id', 'user', 'date_created', 'date_updated', 'is_private', 'body_html', 'api_url')
class NoteCreateSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes a Note for creation: accepts raw 'body' and a nested timeline."""
    # Owner is exposed by username and cannot be set through the API.
    user = serializers.ReadOnlyField(source='user.username')
    timeline = TimelineSerializer(many=False, required=False)
    api_url = serializers.HyperlinkedIdentityField( view_name='rest_replica:micro-note-detail', lookup_field='id')

    class Meta:
        model = Note
        # Accepts raw 'body' (NoteSerializer exposes rendered 'body_html').
        fields = ('id', 'user', 'date_created', 'date_updated', 'timeline', 'is_private', 'body', 'api_url')
| en | 0.360055 | #timeline = TimelineSerializer(many=False, required=False) | 2.048605 | 2 |
cm/chef_api.py | tombh/deis | 1 | 6625240 | <reponame>tombh/deis
"""
Classes and functions for interacting with OpsCode Chef.
This file derives from pyChef: https://github.com/coderanger/pychef
"""
import base64
import datetime
import hashlib
import httplib
import json
import re
import time
import urlparse
from chef_rsa import Key
def ruby_b64encode(value):
"""The Ruby function Base64.encode64 automatically breaks things up
into 60-character chunks.
"""
b64 = base64.b64encode(value)
for i in xrange(0, len(b64), 60):
yield b64[i:i + 60]
class UTC(datetime.tzinfo):
"""UTC timezone stub."""
ZERO = datetime.timedelta(0)
def utcoffset(self, dt):
return self.ZERO
def tzname(self, dt):
return 'UTC'
def dst(self, dt):
return self.ZERO
utc = UTC()
def canonical_time(timestamp):
if timestamp.tzinfo is not None:
timestamp = timestamp.astimezone(utc).replace(tzinfo=None)
return timestamp.replace(microsecond=0).isoformat() + 'Z'
canonical_path_regex = re.compile(r'/+')
def canonical_path(path):
path = canonical_path_regex.sub('/', path)
if len(path) > 1:
path = path.rstrip('/')
return path
def canonical_request(http_method, path, hashed_body, timestamp, user_id):
# Canonicalize request parameters
http_method = http_method.upper()
path = canonical_path(path)
if isinstance(timestamp, datetime.datetime):
timestamp = canonical_time(timestamp)
hashed_path = sha1_base64(path)
return """\
Method:{}
Hashed Path:{}
X-Ops-Content-Hash:{}
X-Ops-Timestamp:{}
X-Ops-UserId:{}""".format(http_method, hashed_path, hashed_body, timestamp,
user_id)
def sha1_base64(value):
return '\n'.join(ruby_b64encode(hashlib.sha1(value).digest()))
def create_authorization(blank_headers, verb, url, priv_key, user, body=''):
headers = blank_headers.copy()
rsa_key = Key(fp=priv_key)
timestamp = canonical_time(datetime.datetime.utcnow())
hashed_body = sha1_base64(body)
canon = canonical_request(verb, url, hashed_body, timestamp, user)
b64_priv = ruby_b64encode(rsa_key.private_encrypt(canon))
for i, line in enumerate(b64_priv):
headers['X-Ops-Authorization-' + str(i + 1)] = line
headers['X-Ops-Timestamp'] = timestamp
headers['X-Ops-Content-Hash'] = hashed_body
headers['X-Ops-UserId'] = user
return headers
class ChefAPI(object):
"""The ChefAPI object is a wrapper for a single Chef server.
.. admonition:: The API stack
PyChef maintains a stack of :class:`ChefAPI` objects to be use with
other methods if an API object isn't given explicitly. The first
ChefAPI created will become the default, though you can set a specific
default using :meth:`ChefAPI.set_default`. You can also use a ChefAPI
as a context manager to create a scoped default::
with ChefAPI('http://localhost:4000', 'client.pem', 'admin'):
n = Node('web1')
"""
headers = {
'Accept': 'application/json',
'X-Chef-Version': '11.0.4.x',
'X-Ops-Sign': 'version=1.0',
'Content-Type': 'application/json'
}
def __init__(self, server_url, client_name, client_key):
self.server_url = server_url
self.client_name = client_name
self.client_key = client_key
self.hostname = urlparse.urlsplit(self.server_url).netloc
self.path = urlparse.urlsplit(self.server_url).path
self.headers.update({'Host': self.hostname})
self.conn = httplib.HTTPSConnection(self.hostname)
self.conn.connect()
def request(self, verb, path, body='', attempts=5, interval=5):
url = self.path + path
headers = create_authorization(
self.headers, verb, url, self.client_key, self.client_name, body)
# retry all chef api requests
for _ in range(attempts):
self.conn.request(verb, url, body=body, headers=headers)
resp = self.conn.getresponse()
if resp.status != 500:
break
time.sleep(interval)
else:
errmsg = 'Chef API requests failed: {}'.format(path)
raise RuntimeError(errmsg)
return resp.read(), resp.status
def create_databag(self, name):
body = json.dumps({'name': name, 'id': name})
resp = self.request('POST', '/data', body)
return resp
def create_databag_item(self, name, item_name, item_value):
item_dict = {'id': item_name}
item_dict.update(item_value)
body = json.dumps(item_dict)
resp = self.request('POST', '/data/%s' % name, body)
return resp
def get_databag(self, bag_name):
return self.request('GET', '/data/%s' % bag_name)
def delete_databag(self, bag_name):
return self.request('DELETE', '/data/%s' % bag_name)
def delete_databag_item(self, bag_name, item_name):
return self.request('DELETE', '/data/%s/%s' % (bag_name, item_name))
def update_databag_item(self, bag_name, item_name, item_value):
body = json.dumps(item_value)
return self.request('PUT', '/data/%s/%s' % (bag_name, item_name), body)
def get_all_databag_items(self, bag_name):
return self.request('GET', '/data/%s' % bag_name)
def get_databag_item(self, bag_name, item_name):
return self.request('GET', '/data/%s/%s' % (bag_name, item_name))
def get_all_cookbooks(self):
return self.request('GET', '/cookbooks')
def get_node(self, node_id):
return self.request('GET', '/nodes/%s' % node_id)
def delete_node(self, node_id):
return self.request('DELETE', '/nodes/%s' % node_id)
def delete_client(self, client_id):
return self.request('DELETE', '/clients/%s' % client_id)
| """
Classes and functions for interacting with OpsCode Chef.
This file derives from pyChef: https://github.com/coderanger/pychef
"""
import base64
import datetime
import hashlib
import httplib
import json
import re
import time
import urlparse
from chef_rsa import Key
def ruby_b64encode(value):
"""The Ruby function Base64.encode64 automatically breaks things up
into 60-character chunks.
"""
b64 = base64.b64encode(value)
for i in xrange(0, len(b64), 60):
yield b64[i:i + 60]
class UTC(datetime.tzinfo):
"""UTC timezone stub."""
ZERO = datetime.timedelta(0)
def utcoffset(self, dt):
return self.ZERO
def tzname(self, dt):
return 'UTC'
def dst(self, dt):
return self.ZERO
utc = UTC()
def canonical_time(timestamp):
if timestamp.tzinfo is not None:
timestamp = timestamp.astimezone(utc).replace(tzinfo=None)
return timestamp.replace(microsecond=0).isoformat() + 'Z'
canonical_path_regex = re.compile(r'/+')
def canonical_path(path):
path = canonical_path_regex.sub('/', path)
if len(path) > 1:
path = path.rstrip('/')
return path
def canonical_request(http_method, path, hashed_body, timestamp, user_id):
# Canonicalize request parameters
http_method = http_method.upper()
path = canonical_path(path)
if isinstance(timestamp, datetime.datetime):
timestamp = canonical_time(timestamp)
hashed_path = sha1_base64(path)
return """\
Method:{}
Hashed Path:{}
X-Ops-Content-Hash:{}
X-Ops-Timestamp:{}
X-Ops-UserId:{}""".format(http_method, hashed_path, hashed_body, timestamp,
user_id)
def sha1_base64(value):
return '\n'.join(ruby_b64encode(hashlib.sha1(value).digest()))
def create_authorization(blank_headers, verb, url, priv_key, user, body=''):
headers = blank_headers.copy()
rsa_key = Key(fp=priv_key)
timestamp = canonical_time(datetime.datetime.utcnow())
hashed_body = sha1_base64(body)
canon = canonical_request(verb, url, hashed_body, timestamp, user)
b64_priv = ruby_b64encode(rsa_key.private_encrypt(canon))
for i, line in enumerate(b64_priv):
headers['X-Ops-Authorization-' + str(i + 1)] = line
headers['X-Ops-Timestamp'] = timestamp
headers['X-Ops-Content-Hash'] = hashed_body
headers['X-Ops-UserId'] = user
return headers
class ChefAPI(object):
"""The ChefAPI object is a wrapper for a single Chef server.
.. admonition:: The API stack
PyChef maintains a stack of :class:`ChefAPI` objects to be use with
other methods if an API object isn't given explicitly. The first
ChefAPI created will become the default, though you can set a specific
default using :meth:`ChefAPI.set_default`. You can also use a ChefAPI
as a context manager to create a scoped default::
with ChefAPI('http://localhost:4000', 'client.pem', 'admin'):
n = Node('web1')
"""
headers = {
'Accept': 'application/json',
'X-Chef-Version': '11.0.4.x',
'X-Ops-Sign': 'version=1.0',
'Content-Type': 'application/json'
}
def __init__(self, server_url, client_name, client_key):
self.server_url = server_url
self.client_name = client_name
self.client_key = client_key
self.hostname = urlparse.urlsplit(self.server_url).netloc
self.path = urlparse.urlsplit(self.server_url).path
self.headers.update({'Host': self.hostname})
self.conn = httplib.HTTPSConnection(self.hostname)
self.conn.connect()
def request(self, verb, path, body='', attempts=5, interval=5):
url = self.path + path
headers = create_authorization(
self.headers, verb, url, self.client_key, self.client_name, body)
# retry all chef api requests
for _ in range(attempts):
self.conn.request(verb, url, body=body, headers=headers)
resp = self.conn.getresponse()
if resp.status != 500:
break
time.sleep(interval)
else:
errmsg = 'Chef API requests failed: {}'.format(path)
raise RuntimeError(errmsg)
return resp.read(), resp.status
def create_databag(self, name):
body = json.dumps({'name': name, 'id': name})
resp = self.request('POST', '/data', body)
return resp
def create_databag_item(self, name, item_name, item_value):
item_dict = {'id': item_name}
item_dict.update(item_value)
body = json.dumps(item_dict)
resp = self.request('POST', '/data/%s' % name, body)
return resp
def get_databag(self, bag_name):
return self.request('GET', '/data/%s' % bag_name)
def delete_databag(self, bag_name):
return self.request('DELETE', '/data/%s' % bag_name)
def delete_databag_item(self, bag_name, item_name):
return self.request('DELETE', '/data/%s/%s' % (bag_name, item_name))
def update_databag_item(self, bag_name, item_name, item_value):
body = json.dumps(item_value)
return self.request('PUT', '/data/%s/%s' % (bag_name, item_name), body)
def get_all_databag_items(self, bag_name):
return self.request('GET', '/data/%s' % bag_name)
def get_databag_item(self, bag_name, item_name):
return self.request('GET', '/data/%s/%s' % (bag_name, item_name))
def get_all_cookbooks(self):
return self.request('GET', '/cookbooks')
def get_node(self, node_id):
return self.request('GET', '/nodes/%s' % node_id)
def delete_node(self, node_id):
return self.request('DELETE', '/nodes/%s' % node_id)
def delete_client(self, client_id):
return self.request('DELETE', '/clients/%s' % client_id) | en | 0.585471 | Classes and functions for interacting with OpsCode Chef. This file derives from pyChef: https://github.com/coderanger/pychef The Ruby function Base64.encode64 automatically breaks things up into 60-character chunks. UTC timezone stub. # Canonicalize request parameters \ Method:{} Hashed Path:{} X-Ops-Content-Hash:{} X-Ops-Timestamp:{} X-Ops-UserId:{} The ChefAPI object is a wrapper for a single Chef server. .. admonition:: The API stack PyChef maintains a stack of :class:`ChefAPI` objects to be use with other methods if an API object isn't given explicitly. The first ChefAPI created will become the default, though you can set a specific default using :meth:`ChefAPI.set_default`. You can also use a ChefAPI as a context manager to create a scoped default:: with ChefAPI('http://localhost:4000', 'client.pem', 'admin'): n = Node('web1') # retry all chef api requests | 2.512526 | 3 |
packages/python/plotly/plotly/validators/layout/slider/transition/_easing.py | mastermind88/plotly.py | 0 | 6625241 | <filename>packages/python/plotly/plotly/validators/layout/slider/transition/_easing.py
import _plotly_utils.basevalidators
class EasingValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="easing", parent_name="layout.slider.transition", **kwargs
):
super(EasingValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
values=kwargs.pop(
"values",
[
"linear",
"quad",
"cubic",
"sin",
"exp",
"circle",
"elastic",
"back",
"bounce",
"linear-in",
"quad-in",
"cubic-in",
"sin-in",
"exp-in",
"circle-in",
"elastic-in",
"back-in",
"bounce-in",
"linear-out",
"quad-out",
"cubic-out",
"sin-out",
"exp-out",
"circle-out",
"elastic-out",
"back-out",
"bounce-out",
"linear-in-out",
"quad-in-out",
"cubic-in-out",
"sin-in-out",
"exp-in-out",
"circle-in-out",
"elastic-in-out",
"back-in-out",
"bounce-in-out",
],
),
**kwargs,
)
| <filename>packages/python/plotly/plotly/validators/layout/slider/transition/_easing.py
import _plotly_utils.basevalidators
class EasingValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="easing", parent_name="layout.slider.transition", **kwargs
):
super(EasingValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
values=kwargs.pop(
"values",
[
"linear",
"quad",
"cubic",
"sin",
"exp",
"circle",
"elastic",
"back",
"bounce",
"linear-in",
"quad-in",
"cubic-in",
"sin-in",
"exp-in",
"circle-in",
"elastic-in",
"back-in",
"bounce-in",
"linear-out",
"quad-out",
"cubic-out",
"sin-out",
"exp-out",
"circle-out",
"elastic-out",
"back-out",
"bounce-out",
"linear-in-out",
"quad-in-out",
"cubic-in-out",
"sin-in-out",
"exp-in-out",
"circle-in-out",
"elastic-in-out",
"back-in-out",
"bounce-in-out",
],
),
**kwargs,
)
| none | 1 | 2.390076 | 2 | |
tests/test_app.py | krish-adi/streamlit-barfi | 6 | 6625242 | import sys
sys.path.append('../')
from matplotlib import pyplot as plt
from barfi import st_barfi, barfi_schemas
import streamlit as st
from test_blocks import base_blocks
barfi_schema_name = st.selectbox(
'Select a saved schema to load:', barfi_schemas())
compute_engine = st.checkbox('Activate barfi compute engine', value=False)
barfi_result = st_barfi(base_blocks=base_blocks, compute_engine=compute_engine,
load_schema=barfi_schema_name)
if barfi_result:
st.write(barfi_result)
| import sys
sys.path.append('../')
from matplotlib import pyplot as plt
from barfi import st_barfi, barfi_schemas
import streamlit as st
from test_blocks import base_blocks
barfi_schema_name = st.selectbox(
'Select a saved schema to load:', barfi_schemas())
compute_engine = st.checkbox('Activate barfi compute engine', value=False)
barfi_result = st_barfi(base_blocks=base_blocks, compute_engine=compute_engine,
load_schema=barfi_schema_name)
if barfi_result:
st.write(barfi_result)
| none | 1 | 2.262938 | 2 | |
examples/snippets/data_io/df_connect/export_complex.py | nguyentr17/tamr-toolbox | 6 | 6625243 | """
An example script to demonstrate how to export datasets from Tamr using df_connect
sending multiple datasets to multiple different databases with multiple different
parameters/behaviors
"""
import tamr_toolbox as tbox
# load example multi config
my_config = tbox.utils.config.from_yaml("examples/resources/conf/connect_multi_export.yaml")
# stream dataset A to Oracle with default export values from config file
my_connect_oracle = tbox.data_io.df_connect.client.from_config(my_config, jdbc_key="oracle")
tbox.data_io.df_connect.client.export_dataset(
my_connect_oracle, dataset_name="dataset_A", target_table_name="target_A", jdbc_key="oracle"
)
# stream dataset A to Oracle target table B, while truncating before loading and only 1k records
tbox.data_io.df_connect.client.export_dataset(
my_connect_oracle,
dataset_name="dataset_A",
target_table_name="target_B",
truncate_before_load=True,
limit_records=1000,
)
# stream dataset A to Postgres, keeping all Tamr-generated columns
my_connect_postgres = tbox.data_io.df_connect.client.from_config(my_config, jdbc_key="postgres")
tbox.data_io.df_connect.client.export_dataset(
my_connect_postgres,
dataset_name="dataset_A",
target_table_name="target_postgres_A",
columns_exclude_regex="",
)
# stream dataset A to Postgres, flattening arrays into single string with comma separation
tbox.data_io.df_connect.client.export_dataset(
my_connect_postgres,
dataset_name="dataset_A",
target_table_name="target_postgres_B",
multi_value_delimiter=",",
)
| """
An example script to demonstrate how to export datasets from Tamr using df_connect
sending multiple datasets to multiple different databases with multiple different
parameters/behaviors
"""
import tamr_toolbox as tbox
# load example multi config
my_config = tbox.utils.config.from_yaml("examples/resources/conf/connect_multi_export.yaml")
# stream dataset A to Oracle with default export values from config file
my_connect_oracle = tbox.data_io.df_connect.client.from_config(my_config, jdbc_key="oracle")
tbox.data_io.df_connect.client.export_dataset(
my_connect_oracle, dataset_name="dataset_A", target_table_name="target_A", jdbc_key="oracle"
)
# stream dataset A to Oracle target table B, while truncating before loading and only 1k records
tbox.data_io.df_connect.client.export_dataset(
my_connect_oracle,
dataset_name="dataset_A",
target_table_name="target_B",
truncate_before_load=True,
limit_records=1000,
)
# stream dataset A to Postgres, keeping all Tamr-generated columns
my_connect_postgres = tbox.data_io.df_connect.client.from_config(my_config, jdbc_key="postgres")
tbox.data_io.df_connect.client.export_dataset(
my_connect_postgres,
dataset_name="dataset_A",
target_table_name="target_postgres_A",
columns_exclude_regex="",
)
# stream dataset A to Postgres, flattening arrays into single string with comma separation
tbox.data_io.df_connect.client.export_dataset(
my_connect_postgres,
dataset_name="dataset_A",
target_table_name="target_postgres_B",
multi_value_delimiter=",",
)
| en | 0.656404 | An example script to demonstrate how to export datasets from Tamr using df_connect sending multiple datasets to multiple different databases with multiple different parameters/behaviors # load example multi config # stream dataset A to Oracle with default export values from config file # stream dataset A to Oracle target table B, while truncating before loading and only 1k records # stream dataset A to Postgres, keeping all Tamr-generated columns # stream dataset A to Postgres, flattening arrays into single string with comma separation | 2.609313 | 3 |
rubikscube_solver/normalizer.py | sonalimahajan12/Automation-scripts | 496 | 6625244 | """Normalizer module."""
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
from sys import exit as Die
try:
import json
except ImportError as err:
Die(err)
class Normalizer:
"""Normalizer class."""
def algorithm(self, alg, language):
"""Normalize an algorithm from the json-written manual.
:param alg: The algorithm itself
:returns: list
"""
with open('solve-manual.json') as f:
manual = json.load(f)
solution = []
for notation in alg.split(' '):
solution.append(manual[language][notation])
return solution
normalize = Normalizer()
| """Normalizer module."""
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
from sys import exit as Die
try:
import json
except ImportError as err:
Die(err)
class Normalizer:
"""Normalizer class."""
def algorithm(self, alg, language):
"""Normalize an algorithm from the json-written manual.
:param alg: The algorithm itself
:returns: list
"""
with open('solve-manual.json') as f:
manual = json.load(f)
solution = []
for notation in alg.split(' '):
solution.append(manual[language][notation])
return solution
normalize = Normalizer()
| en | 0.573652 | Normalizer module. # !/usr/bin/env python3 # -*- coding: utf-8 -*- Normalizer class. Normalize an algorithm from the json-written manual. :param alg: The algorithm itself :returns: list | 3.202833 | 3 |
nn_dataflow/tools/nn_dataflow_search.py | Jrebort/nn_dataflow | 0 | 6625245 | <gh_stars>0
""" $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import argparse
import json
import multiprocessing
import sys
import time
from collections import OrderedDict
from nn_dataflow.core import NNDataflow
from nn_dataflow.core import Cost
from nn_dataflow.core import DataCategoryEnum as de
from nn_dataflow.core import MapStrategyEyeriss
from nn_dataflow.core import MemHierEnum as me
from nn_dataflow.core import NodeRegion
from nn_dataflow.core import Option
from nn_dataflow.core import PhyDim2
from nn_dataflow.core import Resource
from nn_dataflow.nns import all_networks
from nn_dataflow.nns import import_network
from nn_dataflow.version import get_version
def stats_dict(dfsch, cost):
    '''
    Collect scheduling statistics from the NNDataflowScheme `dfsch` under
    the cost model `cost`, returned as an OrderedDict so the report keeps
    a stable key order.
    '''
    out = OrderedDict()

    # Headline totals reported by the scheme itself.
    out['total_cost'] = dfsch.total_cost
    out['total_time'] = dfsch.total_time
    out['total_ops'] = dfsch.total_ops
    out['total_accesses'] = dfsch.total_accesses
    out['total_noc_hops'] = dfsch.total_noc_hops

    # Break the total cost down into its four components.
    op_cost = dfsch.total_ops * cost.mac_op
    access_cost = sum(acc * unit
                      for acc, unit
                      in zip(dfsch.total_accesses, cost.mem_hier))
    noc_cost = dfsch.total_noc_hops * cost.noc_hop
    static_cost = dfsch.total_time * cost.idl_unit

    # Sanity check: the components must add back up to the reported total
    # (within 0.1% to tolerate floating-point rounding).
    breakdown = op_cost + access_cost + noc_cost + static_cost
    assert abs(breakdown / dfsch.total_cost - 1) < 0.001

    out['total_op_cost'] = op_cost
    out['total_access_cost'] = access_cost
    out['total_noc_cost'] = noc_cost
    out['total_static_cost'] = static_cost

    # Per-layer / per-segment details plus the concrete schedules.
    out['active_node_pes'] = dfsch.perlayer_stats('active_node_pes')
    out['dram_bandwidth'] = dfsch.perlayer_stats('dram_bandwidth')
    out['segment_time'] = dfsch.segment_time_list()
    out['segment_dram_time'] = dfsch.segment_dram_time_list()
    out['input_layout'] = dfsch.input_layout
    out['ext_layout_dict'] = dfsch.ext_layout_dict
    out['schedules'] = dfsch.res_dict

    return out
def do_scheduling(args):
    '''
    Get optimal scheduling for given problem. Return a result schedule.

    Builds the network, Resource, Cost, and Option descriptions from the
    parsed command-line `args`, runs the NNDataflow schedule search, and
    returns the results as an OrderedDict. Returns None (after printing to
    stderr) if no valid dataflow is found.

    Side effect: appends "<total_time>,<total_cost>," to "<net>.csv".
    '''
    ## Network.
    network = import_network(args.net)
    batch_size = args.batch
    ## Resource.
    dim_nodes = PhyDim2(*args.nodes)
    dim_array = PhyDim2(*args.array)
    # Sizes of gbuf and regf are in words.
    # Round the word size up to whole bytes before converting byte sizes.
    word = (args.word + 7) // 8
    size_gbuf = args.gbuf // word
    size_regf = args.regf // word
    # Bus width is given in bits; 0 (falsy) means "unconstrained".
    array_bus_width = args.bus_width // args.word
    if not array_bus_width:
        array_bus_width = float('inf')
    # DRAM bandwidth is given in bytes/cycle; convert to words/cycle.
    dram_bandwidth = args.dram_bw / word
    # Processing region covers the full node grid.
    proc_region = NodeRegion(dim=dim_nodes,
                             origin=PhyDim2(0, 0),
                             type=NodeRegion.PROC)
    if args.mem_type == '2D':
        # Memory nodes are on two sides.
        # 2x2 region whose nodes are spread to the grid corners via `dist`.
        data_region = NodeRegion(dim=PhyDim2(2, 2),
                                 origin=PhyDim2(0, 0),
                                 dist=dim_nodes - PhyDim2(1, 1),
                                 type=NodeRegion.DRAM)
        # The far corner memory node must coincide with the last proc node.
        assert data_region.rel2abs(PhyDim2(1, 1)) + PhyDim2(1, 1) \
                == proc_region.dim
    elif args.mem_type == '3D':
        # Memory nodes are on the top.
        # One memory node stacked vertically above each processing node.
        data_region = NodeRegion(dim=dim_nodes,
                                 origin=PhyDim2(0, 0),
                                 type=NodeRegion.DRAM)
    # The same memory region serves as source and destination of data.
    resource = Resource(proc_region=proc_region,
                        dram_region=data_region,
                        src_data_region=data_region,
                        dst_data_region=data_region,
                        dim_array=dim_array,
                        size_gbuf=size_gbuf,
                        size_regf=size_regf,
                        array_bus_width=array_bus_width,
                        dram_bandwidth=dram_bandwidth,
                        no_time_mux=False)
    ## Cost.
    # Map the 4 CLI-ordered hierarchy costs onto the MemHierEnum indices.
    hier_cost = [0] * me.NUM
    hier_cost[me.DRAM] = args.hier_cost[0]
    hier_cost[me.GBUF] = args.hier_cost[1]
    hier_cost[me.ITCN] = args.hier_cost[2]
    hier_cost[me.REGF] = args.hier_cost[3]
    cost = Cost(mac_op=args.op_cost,
                mem_hier=tuple(hier_cost),
                noc_hop=args.hop_cost,
                idl_unit=args.unit_idle_cost)
    ## Options.
    # Bypass defaults to enabled for every data category; the CLI flags
    # selectively disable it per category.
    bypass = [True] * de.NUM
    bypass[de.IFM] = 'i' not in args.disable_bypass
    bypass[de.OFM] = 'o' not in args.disable_bypass
    bypass[de.FIL] = 'f' not in args.disable_bypass
    options = Option(sw_gbuf_bypass=tuple(bypass),
                     sw_solve_loopblocking=args.solve_loopblocking,
                     hw_access_forwarding=args.enable_access_forwarding,
                     hw_gbuf_sharing=args.enable_gbuf_sharing,
                     hw_gbuf_save_writeback=args.enable_save_writeback,
                     partition_hybrid=args.hybrid_partition,
                     partition_batch=args.batch_partition,
                     partition_ifmaps=args.ifmaps_partition,
                     partition_interlayer=args.interlayer_partition,
                     layer_pipeline_time_ovhd=args.layer_pipeline_time_overhead,
                     layer_pipeline_max_degree=args.layer_pipeline_max_degree,
                     layer_pipeline_opt=not args.disable_interlayer_opt,
                     opt_goal=args.goal.lower(),
                     ntops=args.top,
                     nprocesses=args.processes,
                     verbose=args.verbose)
    ## Search schedules.
    nnd = NNDataflow(network, batch_size, resource, cost, MapStrategyEyeriss)
    tbeg = time.time()
    tops, cache_stats = nnd.schedule_search(options)
    tend = time.time()
    telapsed = tend - tbeg
    if not tops:
        sys.stderr.write('No valid dataflow found.\n')
        return None
    # Keep only the best schedule for reporting.
    top = tops[0]
    ## Write results.
    res_map = OrderedDict()
    res_map['version'] = get_version(with_local=True)
    res_map['net'] = args.net
    res_map['batch'] = args.batch
    res_map['resource'] = resource._asdict()
    res_map['cost'] = cost._asdict()
    res_map['options'] = options._asdict()
    res_map['cache_stats'] = cache_stats
    res_map['elapsed'] = telapsed
    stats = stats_dict(top, cost)
    # for key, val in stats.items():
    #     res_map[key] = val
    # NOTE(review): appends "time,cost," with no trailing newline, so
    # successive calls keep extending a single CSV row -- confirm intended.
    with open(args.net+'.csv','a+') as file:
        file.write(str(stats['total_time'])+','+str(stats['total_cost'])+',')
    #print("total_cost:{}".format(stats['total_cost']))
    #print("total_time:{}".format(stats['total_time']))
    return res_map
def argparser():
    '''
    Build and return the command-line argument parser for the search tool.
    '''
    parser = argparse.ArgumentParser()

    # Workload.
    parser.add_argument(
        'net',
        help=f'network name, should be a .py file under "nns". '
             f'Choices: {", ".join(all_networks())}.')
    parser.add_argument(
        '--batch', type=int, required=True,
        help='batch size')
    parser.add_argument(
        '--word', type=int, default=16,
        help='word size in bits')

    # Hardware dimensions and buffer sizes.
    parser.add_argument(
        '--nodes', type=int, nargs=2, required=True, metavar=('H', 'W'),
        help='Parallel node partitioning dimensions')
    parser.add_argument(
        '--array', type=int, nargs=2, required=True, metavar=('H', 'W'),
        help='PE array dimensions')
    parser.add_argument(
        '--regf', type=int, required=True,
        help='register file size in bytes per PE')
    parser.add_argument(
        '--gbuf', type=int, required=True,
        help='global buffer size in bytes')
    parser.add_argument(
        '--bus-width', type=int, default=0,
        help='array bus width in bits. set 0 to ignore')
    parser.add_argument(
        '--dram-bw', type=float, default='inf',
        help='total DRAM bandwidth in bytes per cycle.')

    # Cost model.
    parser.add_argument(
        '--op-cost', type=float, default=1,
        help='cost of arithmetic operation')
    parser.add_argument(
        '--hier-cost', type=float, nargs=4, default=[200, 6, 2, 1],
        metavar=('DRAM_COST', 'GBUF_COST', 'ITCN_COST', 'REGF_COST'),
        help='cost of access to memory hierarchy')
    parser.add_argument(
        '--hop-cost', type=float, default=10,
        help='cost of access through one NoC hop')
    parser.add_argument(
        '--unit-idle-cost', type=float, default=0,
        help='static cost over all nodes for unit execution time')

    # Memory topology and bypass behavior.
    parser.add_argument(
        '--mem-type', default='2D', choices=['2D', '3D'],
        help='memory type. "2D" has memory only on edge nodes; '
             '"3D" has memory vertially on top of all nodes.')
    parser.add_argument(
        '--disable-bypass', nargs='*', default=[], choices=['i', 'o', 'f'],
        help='whether disallowing gbuf bypass for i (input), o '
             '(output), or f (filter)')

    # Software/hardware search features.
    parser.add_argument(
        '--solve-loopblocking', action='store_true',
        help='Use analytical solver to choose loop blocking. '
             'Otherwise use exhaustive search.')
    parser.add_argument(
        '--enable-access-forwarding', action='store_true',
        help='Each node fetches a subset of data and forwards to '
             'other nodes.')
    parser.add_argument(
        '--enable-gbuf-sharing', action='store_true',
        help='Share gbuf capacity across nodes through NoC.')
    parser.add_argument(
        '--enable-save-writeback', action='store_true',
        help='Allow to save the writeback to memory for the '
             'intermediate data between layers if able to '
             'store the entire data set in on-chip buffers.')
    parser.add_argument(
        '--disable-interlayer-opt',
        '--basic-interlayer-partition',
        action='store_true',
        help='Disable optimizations and only allow basic '
             'inter-layer pipeline.')

    # Partitioning strategies.
    parser.add_argument(
        '--hybrid-partition',
        '--hybrid-partition2d',  # deprecated old name
        action='store_true',
        help='Use hybrid partition for layer for node mapping. '
             'Otherwise use naive method based on layer type.')
    parser.add_argument(
        '--batch-partition', action='store_true',
        help='Allow partitioning batch, i.e., consider data '
             'parallelism.')
    parser.add_argument(
        '--ifmaps-partition', '--ifmap-partition', action='store_true',
        help='Allow partitioning ifmap channel dimension, which '
             'requires extra data synchronization.')
    parser.add_argument(
        '--interlayer-partition', '--inter-layer-partition',
        action='store_true',
        help='Allow partitioning resources across multiple layers '
             'and process them simultaneously as an inter-layer '
             'pipeline.')
    parser.add_argument(
        '--layer-pipeline-time-overhead', type=float, default=float('inf'),
        help='maximum allowed execution time overhead due to '
             'layer pipelining.')
    parser.add_argument(
        '--layer-pipeline-max-degree', type=float, default=float('inf'),
        help='maximum allowed layer pipelining degree, i.e., '
             'number of vertices in a pipeline segment.')

    # Search control.
    parser.add_argument(
        '-g', '--goal', default='e',
        choices=['e', 'd', 'ed', 'E', 'D', 'ED'],
        help='Goal of optimization: E(nergy), D(elay), or ED.')
    parser.add_argument(
        '-t', '--top', type=int, default=1,
        help='Number of top schedules to keep during search.')
    parser.add_argument(
        '-p', '--processes', type=int,
        default=multiprocessing.cpu_count()//2,
        help='Number of parallel processes to use for search.')
    parser.add_argument(
        '-v', '--verbose', action='store_true',
        help='Show progress and details.')

    return parser
def main():
    '''
    Entry point: sweep batch sizes 1 through 5 and run the dataflow
    search for each.

    NOTE(review): the sweep overwrites the required ``--batch``
    command-line value on every iteration; presumably intentional for
    batch-size experiments -- confirm before relying on ``--batch``.

    Returns 0 if every batch size yielded a valid schedule, else 2.
    '''
    args = argparser().parse_args()

    all_ok = True
    for batch in range(1, 6):
        args.batch = batch
        res = do_scheduling(args)
        # Previously only the final iteration's result determined the exit
        # status, silently masking failures at earlier batch sizes.
        all_ok = all_ok and bool(res)
    return 0 if all_ok else 2


if __name__ == '__main__':
    sys.exit(main())
| """ $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import argparse
import json
import multiprocessing
import sys
import time
from collections import OrderedDict
from nn_dataflow.core import NNDataflow
from nn_dataflow.core import Cost
from nn_dataflow.core import DataCategoryEnum as de
from nn_dataflow.core import MapStrategyEyeriss
from nn_dataflow.core import MemHierEnum as me
from nn_dataflow.core import NodeRegion
from nn_dataflow.core import Option
from nn_dataflow.core import PhyDim2
from nn_dataflow.core import Resource
from nn_dataflow.nns import all_networks
from nn_dataflow.nns import import_network
from nn_dataflow.version import get_version
def stats_dict(dfsch, cost):
    '''
    Collect scheduling statistics from the NNDataflowScheme `dfsch` under
    the cost model `cost`, returned as an OrderedDict so the report keeps
    a stable key order.
    '''
    out = OrderedDict()

    # Headline totals reported by the scheme itself.
    out['total_cost'] = dfsch.total_cost
    out['total_time'] = dfsch.total_time
    out['total_ops'] = dfsch.total_ops
    out['total_accesses'] = dfsch.total_accesses
    out['total_noc_hops'] = dfsch.total_noc_hops

    # Break the total cost down into its four components.
    op_cost = dfsch.total_ops * cost.mac_op
    access_cost = sum(acc * unit
                      for acc, unit
                      in zip(dfsch.total_accesses, cost.mem_hier))
    noc_cost = dfsch.total_noc_hops * cost.noc_hop
    static_cost = dfsch.total_time * cost.idl_unit

    # Sanity check: the components must add back up to the reported total
    # (within 0.1% to tolerate floating-point rounding).
    breakdown = op_cost + access_cost + noc_cost + static_cost
    assert abs(breakdown / dfsch.total_cost - 1) < 0.001

    out['total_op_cost'] = op_cost
    out['total_access_cost'] = access_cost
    out['total_noc_cost'] = noc_cost
    out['total_static_cost'] = static_cost

    # Per-layer / per-segment details plus the concrete schedules.
    out['active_node_pes'] = dfsch.perlayer_stats('active_node_pes')
    out['dram_bandwidth'] = dfsch.perlayer_stats('dram_bandwidth')
    out['segment_time'] = dfsch.segment_time_list()
    out['segment_dram_time'] = dfsch.segment_dram_time_list()
    out['input_layout'] = dfsch.input_layout
    out['ext_layout_dict'] = dfsch.ext_layout_dict
    out['schedules'] = dfsch.res_dict

    return out
def do_scheduling(args):
'''
Get optimal scheduling for given problem. Return a result schedule.
'''
## Network.
network = import_network(args.net)
batch_size = args.batch
## Resource.
dim_nodes = PhyDim2(*args.nodes)
dim_array = PhyDim2(*args.array)
# Sizes of gbuf and regf are in words.
word = (args.word + 7) // 8
size_gbuf = args.gbuf // word
size_regf = args.regf // word
array_bus_width = args.bus_width // args.word
if not array_bus_width:
array_bus_width = float('inf')
dram_bandwidth = args.dram_bw / word
proc_region = NodeRegion(dim=dim_nodes,
origin=PhyDim2(0, 0),
type=NodeRegion.PROC)
if args.mem_type == '2D':
# Memory nodes are on two sides.
data_region = NodeRegion(dim=PhyDim2(2, 2),
origin=PhyDim2(0, 0),
dist=dim_nodes - PhyDim2(1, 1),
type=NodeRegion.DRAM)
assert data_region.rel2abs(PhyDim2(1, 1)) + PhyDim2(1, 1) \
== proc_region.dim
elif args.mem_type == '3D':
# Memory nodes are on the top.
data_region = NodeRegion(dim=dim_nodes,
origin=PhyDim2(0, 0),
type=NodeRegion.DRAM)
resource = Resource(proc_region=proc_region,
dram_region=data_region,
src_data_region=data_region,
dst_data_region=data_region,
dim_array=dim_array,
size_gbuf=size_gbuf,
size_regf=size_regf,
array_bus_width=array_bus_width,
dram_bandwidth=dram_bandwidth,
no_time_mux=False)
## Cost.
hier_cost = [0] * me.NUM
hier_cost[me.DRAM] = args.hier_cost[0]
hier_cost[me.GBUF] = args.hier_cost[1]
hier_cost[me.ITCN] = args.hier_cost[2]
hier_cost[me.REGF] = args.hier_cost[3]
cost = Cost(mac_op=args.op_cost,
mem_hier=tuple(hier_cost),
noc_hop=args.hop_cost,
idl_unit=args.unit_idle_cost)
## Options.
bypass = [True] * de.NUM
bypass[de.IFM] = 'i' not in args.disable_bypass
bypass[de.OFM] = 'o' not in args.disable_bypass
bypass[de.FIL] = 'f' not in args.disable_bypass
options = Option(sw_gbuf_bypass=tuple(bypass),
sw_solve_loopblocking=args.solve_loopblocking,
hw_access_forwarding=args.enable_access_forwarding,
hw_gbuf_sharing=args.enable_gbuf_sharing,
hw_gbuf_save_writeback=args.enable_save_writeback,
partition_hybrid=args.hybrid_partition,
partition_batch=args.batch_partition,
partition_ifmaps=args.ifmaps_partition,
partition_interlayer=args.interlayer_partition,
layer_pipeline_time_ovhd=args.layer_pipeline_time_overhead,
layer_pipeline_max_degree=args.layer_pipeline_max_degree,
layer_pipeline_opt=not args.disable_interlayer_opt,
opt_goal=args.goal.lower(),
ntops=args.top,
nprocesses=args.processes,
verbose=args.verbose)
## Search schedules.
nnd = NNDataflow(network, batch_size, resource, cost, MapStrategyEyeriss)
tbeg = time.time()
tops, cache_stats = nnd.schedule_search(options)
tend = time.time()
telapsed = tend - tbeg
if not tops:
sys.stderr.write('No valid dataflow found.\n')
return None
top = tops[0]
## Write results.
res_map = OrderedDict()
res_map['version'] = get_version(with_local=True)
res_map['net'] = args.net
res_map['batch'] = args.batch
res_map['resource'] = resource._asdict()
res_map['cost'] = cost._asdict()
res_map['options'] = options._asdict()
res_map['cache_stats'] = cache_stats
res_map['elapsed'] = telapsed
stats = stats_dict(top, cost)
# for key, val in stats.items():
# res_map[key] = val
with open(args.net+'.csv','a+') as file:
file.write(str(stats['total_time'])+','+str(stats['total_cost'])+',')
#print("total_cost:{}".format(stats['total_cost']))
#print("total_time:{}".format(stats['total_time']))
return res_map
def argparser():
''' Argument parser. '''
ap = argparse.ArgumentParser()
ap.add_argument('net',
help='network name, should be a .py file under "nns". '
'Choices: {}.'.format(', '.join(all_networks())))
ap.add_argument('--batch', type=int, required=True,
help='batch size')
ap.add_argument('--word', type=int, default=16,
help='word size in bits')
ap.add_argument('--nodes', type=int, nargs=2, required=True,
metavar=('H', 'W'),
help='Parallel node partitioning dimensions')
ap.add_argument('--array', type=int, nargs=2, required=True,
metavar=('H', 'W'),
help='PE array dimensions')
ap.add_argument('--regf', type=int, required=True,
help='register file size in bytes per PE')
ap.add_argument('--gbuf', type=int, required=True,
help='global buffer size in bytes')
ap.add_argument('--bus-width', type=int, default=0,
help='array bus width in bits. set 0 to ignore')
ap.add_argument('--dram-bw', type=float, default='inf',
help='total DRAM bandwidth in bytes per cycle.')
ap.add_argument('--op-cost', type=float, default=1,
help='cost of arithmetic operation')
ap.add_argument('--hier-cost', type=float, nargs=4, default=[200, 6, 2, 1],
metavar=('DRAM_COST', 'GBUF_COST', 'ITCN_COST',
'REGF_COST'),
help='cost of access to memory hierarchy')
ap.add_argument('--hop-cost', type=float, default=10,
help='cost of access through one NoC hop')
ap.add_argument('--unit-idle-cost', type=float, default=0,
help='static cost over all nodes for unit execution time')
ap.add_argument('--mem-type', default='2D', choices=['2D', '3D'],
help='memory type. "2D" has memory only on edge nodes; '
'"3D" has memory vertially on top of all nodes.')
ap.add_argument('--disable-bypass', nargs='*', default=[],
choices=['i', 'o', 'f'],
help='whether disallowing gbuf bypass for i (input), o '
'(output), or f (filter)')
ap.add_argument('--solve-loopblocking', action='store_true',
help='Use analytical solver to choose loop blocking. '
'Otherwise use exhaustive search.')
ap.add_argument('--enable-access-forwarding', action='store_true',
help='Each node fetches a subset of data and forwards to '
'other nodes.')
ap.add_argument('--enable-gbuf-sharing', action='store_true',
help='Share gbuf capacity across nodes through NoC.')
ap.add_argument('--enable-save-writeback', action='store_true',
help='Allow to save the writeback to memory for the '
'intermediate data between layers if able to '
'store the entire data set in on-chip buffers.')
ap.add_argument('--disable-interlayer-opt',
'--basic-interlayer-partition',
action='store_true',
help='Disable optimizations and only allow basic '
'inter-layer pipeline.')
ap.add_argument('--hybrid-partition',
'--hybrid-partition2d', # deprecated old name
action='store_true',
help='Use hybrid partition for layer for node mapping. '
'Otherwise use naive method based on layer type.')
ap.add_argument('--batch-partition', action='store_true',
help='Allow partitioning batch, i.e., consider data '
'parallelism.')
ap.add_argument('--ifmaps-partition', '--ifmap-partition',
action='store_true',
help='Allow partitioning ifmap channel dimension, which '
'requires extra data synchronization.')
ap.add_argument('--interlayer-partition', '--inter-layer-partition',
action='store_true',
help='Allow partitioning resources across multiple layers '
'and process them simultaneously as an inter-layer '
'pipeline.')
ap.add_argument('--layer-pipeline-time-overhead',
type=float, default=float('inf'),
help='maximum allowed execution time overhead due to '
'layer pipelining.')
ap.add_argument('--layer-pipeline-max-degree',
type=float, default=float('inf'),
help='maximum allowed layer pipelining degree, i.e., '
'number of vertices in a pipeline segment.')
ap.add_argument('-g', '--goal', default='e',
choices=['e', 'd', 'ed', 'E', 'D', 'ED'],
help='Goal of optimization: E(nergy), D(elay), or ED.')
ap.add_argument('-t', '--top', type=int, default=1,
help='Number of top schedules to keep during search.')
ap.add_argument('-p', '--processes', type=int,
default=multiprocessing.cpu_count()//2,
help='Number of parallel processes to use for search.')
ap.add_argument('-v', '--verbose', action='store_true',
help='Show progress and details.')
return ap
def main():
''' Main function. '''
args = argparser().parse_args()
for i in range(1,6,1):
#if not i == 1:
# i = i-1
args.batch = i
# print("batch size:{}".format(args.batch))
res = do_scheduling(args)
# res = do_scheduling(args)
#json.dump(res, sys.stdout, indent=2, default=lambda _: None)
#sys.stdout.write('\n')
return 0 if res else 2
if __name__ == '__main__':
sys.exit(main()) | en | 0.752276 | $lic$ Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of Stanford University This program is free software: you can redistribute it and/or modify it under the terms of the Modified BSD-3 License as published by the Open Source Initiative. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the BSD-3 License for more details. You should have received a copy of the Modified BSD-3 License along with this program. If not, see <https://opensource.org/licenses/BSD-3-Clause>. Get the stats as an OrderedDict from the NNDataflowScheme. ## Basic stats. ## Cost breakdown. ## Other stats. Get optimal scheduling for given problem. Return a result schedule. ## Network. ## Resource. # Sizes of gbuf and regf are in words. # Memory nodes are on two sides. # Memory nodes are on the top. ## Cost. ## Options. ## Search schedules. ## Write results. # for key, val in stats.items(): # res_map[key] = val #print("total_cost:{}".format(stats['total_cost'])) #print("total_time:{}".format(stats['total_time'])) Argument parser. # deprecated old name Main function. #if not i == 1: # i = i-1 # print("batch size:{}".format(args.batch)) # res = do_scheduling(args) #json.dump(res, sys.stdout, indent=2, default=lambda _: None) #sys.stdout.write('\n') | 1.705099 | 2 |
script.py | OsiriX-Foundation/DockerEnvironmentVariable | 0 | 6625246 | import dockerfile
import requests
dockerfile_url = {}
dockerfile_url["KheopsAuthorization"] = "https://raw.githubusercontent.com/OsiriX-Foundation/KheopsAuthorization/dev_env_var/docker/Dockerfile"
dockerfile_url["KheopsNginx"] = "https://raw.githubusercontent.com/OsiriX-Foundation/KheopsNginx/master/Dockerfile"
dockerfile_url["KheopsUI"] = "https://raw.githubusercontent.com/OsiriX-Foundation/KheopsUI/master/Dockerfile"
dockerfile_url["KheopsDICOMwebProxy"] = "https://raw.githubusercontent.com/OsiriX-Foundation/KheopsDICOMwebProxy/master/docker/Dockerfile"
dockerfile_url["KheopsZipper"] = "https://raw.githubusercontent.com/OsiriX-Foundation/KheopsZipper/master/docker/Dockerfile"
dockerfile_url["PACS_PEP"] = "https://raw.githubusercontent.com/OsiriX-Foundation/PACSProxyAuthorization/master/hosts/proxy/Dockerfile"
file='README.md'
with open(file, 'w') as filetowrite:
for repo in dockerfile_url:
response = requests.get(dockerfile_url[repo])
print(repo)
filetowrite.write("## "+repo+"\n\n")
for command in dockerfile.parse_string(response.content.decode("utf-8")):
if command.cmd == 'env':
for i in range(0, len(command.value), 2):
if not str(command.value[i+1]):
print(str(command.value[i]) + " this env var is mandatory")
filetowrite.write("`" + str(command.value[i]) + "` this env var is mandatory"+"<br>\n")
elif str(command.value[i+1]) == "\"\"":
print(str(command.value[i]) + " this env var is optional")
filetowrite.write("`" + str(command.value[i]) + "` this env var is optional"+"<br>\n")
else:
print(str(command.value[i]) + " value : " + str(command.value[i+1]))
filetowrite.write("`" + str(command.value[i]) + "` default value : " + str(command.value[i+1])+"<br>\n")
| import dockerfile
import requests
dockerfile_url = {}
dockerfile_url["KheopsAuthorization"] = "https://raw.githubusercontent.com/OsiriX-Foundation/KheopsAuthorization/dev_env_var/docker/Dockerfile"
dockerfile_url["KheopsNginx"] = "https://raw.githubusercontent.com/OsiriX-Foundation/KheopsNginx/master/Dockerfile"
dockerfile_url["KheopsUI"] = "https://raw.githubusercontent.com/OsiriX-Foundation/KheopsUI/master/Dockerfile"
dockerfile_url["KheopsDICOMwebProxy"] = "https://raw.githubusercontent.com/OsiriX-Foundation/KheopsDICOMwebProxy/master/docker/Dockerfile"
dockerfile_url["KheopsZipper"] = "https://raw.githubusercontent.com/OsiriX-Foundation/KheopsZipper/master/docker/Dockerfile"
dockerfile_url["PACS_PEP"] = "https://raw.githubusercontent.com/OsiriX-Foundation/PACSProxyAuthorization/master/hosts/proxy/Dockerfile"
file='README.md'
with open(file, 'w') as filetowrite:
for repo in dockerfile_url:
response = requests.get(dockerfile_url[repo])
print(repo)
filetowrite.write("## "+repo+"\n\n")
for command in dockerfile.parse_string(response.content.decode("utf-8")):
if command.cmd == 'env':
for i in range(0, len(command.value), 2):
if not str(command.value[i+1]):
print(str(command.value[i]) + " this env var is mandatory")
filetowrite.write("`" + str(command.value[i]) + "` this env var is mandatory"+"<br>\n")
elif str(command.value[i+1]) == "\"\"":
print(str(command.value[i]) + " this env var is optional")
filetowrite.write("`" + str(command.value[i]) + "` this env var is optional"+"<br>\n")
else:
print(str(command.value[i]) + " value : " + str(command.value[i+1]))
filetowrite.write("`" + str(command.value[i]) + "` default value : " + str(command.value[i+1])+"<br>\n")
| it | 0.622076 | # "+repo+"\n\n") | 2.363677 | 2 |
Source/boost_1_33_1/libs/python/pyste/tests/smart_ptrUT.py | spxuw/RFIM | 0 | 6625247 | # Copyright <NAME> 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http:#www.boost.org/LICENSE_1_0.txt)
import unittest
from _smart_ptr import *
class BasicExampleTest(unittest.TestCase):
def testIt(self):
c = NewC()
d = NewD()
c.value = 3
d.Set(c)
c1 = d.Get()
c1.value = 6
self.assertEqual(c.value, 6)
a = NewA()
self.assertEqual(GetA(a), 1)
if __name__ == '__main__':
unittest.main()
| # Copyright <NAME> 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http:#www.boost.org/LICENSE_1_0.txt)
import unittest
from _smart_ptr import *
class BasicExampleTest(unittest.TestCase):
def testIt(self):
c = NewC()
d = NewD()
c.value = 3
d.Set(c)
c1 = d.Get()
c1.value = 6
self.assertEqual(c.value, 6)
a = NewA()
self.assertEqual(GetA(a), 1)
if __name__ == '__main__':
unittest.main()
| en | 0.750315 | # Copyright <NAME> 2003. Use, modification and # distribution is subject to the Boost Software License, Version 1.0. # (See accompanying file LICENSE_1_0.txt or copy at # http:#www.boost.org/LICENSE_1_0.txt) | 2.4069 | 2 |
azext_iot/sdk/digitaltwins/controlplane/models/group_id_information_properties_py3.py | v-andreaco/azure-iot-cli-extension | 0 | 6625248 | <reponame>v-andreaco/azure-iot-cli-extension
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class GroupIdInformationProperties(Model):
"""The properties for a group information object.
:param group_id: The group id.
:type group_id: str
:param required_members: The required members for a specific group id.
:type required_members: list[str]
:param required_zone_names: The required DNS zones for a specific group
id.
:type required_zone_names: list[str]
"""
_attribute_map = {
'group_id': {'key': 'groupId', 'type': 'str'},
'required_members': {'key': 'requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'requiredZoneNames', 'type': '[str]'},
}
def __init__(self, *, group_id: str=None, required_members=None, required_zone_names=None, **kwargs) -> None:
super(GroupIdInformationProperties, self).__init__(**kwargs)
self.group_id = group_id
self.required_members = required_members
self.required_zone_names = required_zone_names
| # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class GroupIdInformationProperties(Model):
"""The properties for a group information object.
:param group_id: The group id.
:type group_id: str
:param required_members: The required members for a specific group id.
:type required_members: list[str]
:param required_zone_names: The required DNS zones for a specific group
id.
:type required_zone_names: list[str]
"""
_attribute_map = {
'group_id': {'key': 'groupId', 'type': 'str'},
'required_members': {'key': 'requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'requiredZoneNames', 'type': '[str]'},
}
def __init__(self, *, group_id: str=None, required_members=None, required_zone_names=None, **kwargs) -> None:
super(GroupIdInformationProperties, self).__init__(**kwargs)
self.group_id = group_id
self.required_members = required_members
self.required_zone_names = required_zone_names | en | 0.589551 | # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- The properties for a group information object. :param group_id: The group id. :type group_id: str :param required_members: The required members for a specific group id. :type required_members: list[str] :param required_zone_names: The required DNS zones for a specific group id. :type required_zone_names: list[str] | 2.113503 | 2 |
tests/location_factory.py | eddieantonio/ad-hoc-miner | 17 | 6625249 | from sensibility.lexical_analysis import Position, Location
class LocationFactory:
"""
Creates locations, incrementally.
"""
def __init__(self, start: Position) -> None:
self.current = start
def across(self, width: int) -> Location:
start = self.current
self.current = Position(line=start.line, column=start.column + width)
return Location(start=start, end=self.current)
def until(self, end: Position) -> Location:
start = self.current
self.current = end
return Location(start=start, end=end)
def single(self) -> Location:
return self.across(1)
def newline(self) -> Location:
result = self.single()
self.next_line()
return result
def next_line(self, n: int=1) -> 'LocationFactory':
self.current = Position(line=self.current.line + n, column=0)
return self
def space(self, n: int=1) -> 'LocationFactory':
self.current = Position(line=self.current.line,
column=self.current.column + n)
return self
| from sensibility.lexical_analysis import Position, Location
class LocationFactory:
"""
Creates locations, incrementally.
"""
def __init__(self, start: Position) -> None:
self.current = start
def across(self, width: int) -> Location:
start = self.current
self.current = Position(line=start.line, column=start.column + width)
return Location(start=start, end=self.current)
def until(self, end: Position) -> Location:
start = self.current
self.current = end
return Location(start=start, end=end)
def single(self) -> Location:
return self.across(1)
def newline(self) -> Location:
result = self.single()
self.next_line()
return result
def next_line(self, n: int=1) -> 'LocationFactory':
self.current = Position(line=self.current.line + n, column=0)
return self
def space(self, n: int=1) -> 'LocationFactory':
self.current = Position(line=self.current.line,
column=self.current.column + n)
return self
| en | 0.871115 | Creates locations, incrementally. | 3.548568 | 4 |
mergify_engine/tests/functional/test_attributes.py | Divine-D/mergify-engine | 1 | 6625250 | <gh_stars>1-10
# -*- encoding: utf-8 -*-
#
# Copyright © 2020 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import pytest
import yaml
from mergify_engine import context
from mergify_engine.tests.functional import base
LOG = logging.getLogger(__name__)
class TestAttributes(base.FunctionalTestBase):
def test_draft(self):
rules = {
"pull_request_rules": [
{
"name": "no-draft",
"conditions": ["draft"],
"actions": {"comment": {"message": "draft pr"}},
}
]
}
self.setup_repo(yaml.dump(rules))
pr, _ = self.create_pr()
ctxt = context.Context(self.cli_integration, pr.raw_data, {})
assert not ctxt.pull_request.draft
pr, _ = self.create_pr(draft=True)
self.run_engine()
self.wait_for("issue_comment", {"action": "created"})
ctxt = context.Context(
self.cli_integration,
{
"number": pr.number,
"base": {
"user": {"login": pr.base.user.login},
"repo": {
"name": pr.base.repo.name,
},
},
},
{},
)
assert ctxt.pull_request.draft
pr.update()
comments = list(pr.get_issue_comments())
self.assertEqual("draft pr", comments[-1].body)
# Test underscore/dash attributes
assert ctxt.pull_request.review_requested == []
with pytest.raises(AttributeError):
assert ctxt.pull_request.foobar
# Test items
assert list(ctxt.pull_request) == list(
context.PullRequest.ATTRIBUTES | context.PullRequest.LIST_ATTRIBUTES
)
assert dict(ctxt.pull_request.items()) == {
"number": pr.number,
"closed": False,
"locked": False,
"assignee": [],
"approved-reviews-by": [],
"files": ["test2"],
"check-neutral": [],
"status-neutral": [],
"commented-reviews-by": [],
"milestone": "",
"label": [],
"body": "Pull request n2 from fork",
"base": self.master_branch_name,
"review-requested": [],
"check-success": ["Summary"],
"status-success": ["Summary"],
"changes-requested-reviews-by": [],
"merged": False,
"head": self.get_full_branch_name("fork/pr2"),
"author": "mergify-test2",
"dismissed-reviews-by": [],
"merged-by": "",
"check-failure": [],
"status-failure": [],
"title": "Pull request n2 from fork",
"conflict": False,
}
| # -*- encoding: utf-8 -*-
#
# Copyright © 2020 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import pytest
import yaml
from mergify_engine import context
from mergify_engine.tests.functional import base
LOG = logging.getLogger(__name__)
class TestAttributes(base.FunctionalTestBase):
def test_draft(self):
rules = {
"pull_request_rules": [
{
"name": "no-draft",
"conditions": ["draft"],
"actions": {"comment": {"message": "draft pr"}},
}
]
}
self.setup_repo(yaml.dump(rules))
pr, _ = self.create_pr()
ctxt = context.Context(self.cli_integration, pr.raw_data, {})
assert not ctxt.pull_request.draft
pr, _ = self.create_pr(draft=True)
self.run_engine()
self.wait_for("issue_comment", {"action": "created"})
ctxt = context.Context(
self.cli_integration,
{
"number": pr.number,
"base": {
"user": {"login": pr.base.user.login},
"repo": {
"name": pr.base.repo.name,
},
},
},
{},
)
assert ctxt.pull_request.draft
pr.update()
comments = list(pr.get_issue_comments())
self.assertEqual("draft pr", comments[-1].body)
# Test underscore/dash attributes
assert ctxt.pull_request.review_requested == []
with pytest.raises(AttributeError):
assert ctxt.pull_request.foobar
# Test items
assert list(ctxt.pull_request) == list(
context.PullRequest.ATTRIBUTES | context.PullRequest.LIST_ATTRIBUTES
)
assert dict(ctxt.pull_request.items()) == {
"number": pr.number,
"closed": False,
"locked": False,
"assignee": [],
"approved-reviews-by": [],
"files": ["test2"],
"check-neutral": [],
"status-neutral": [],
"commented-reviews-by": [],
"milestone": "",
"label": [],
"body": "Pull request n2 from fork",
"base": self.master_branch_name,
"review-requested": [],
"check-success": ["Summary"],
"status-success": ["Summary"],
"changes-requested-reviews-by": [],
"merged": False,
"head": self.get_full_branch_name("fork/pr2"),
"author": "mergify-test2",
"dismissed-reviews-by": [],
"merged-by": "",
"check-failure": [],
"status-failure": [],
"title": "Pull request n2 from fork",
"conflict": False,
} | en | 0.826129 | # -*- encoding: utf-8 -*- # # Copyright © 2020 <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Test underscore/dash attributes # Test items | 2.002074 | 2 |