| id | content |
|---|---|
11452935
|
from c2nl.inputters.vocabulary import EOS_WORD, BOS_WORD
class Summary(object):
"""
Summary containing annotated text, original text, a list of
candidate documents, answers and well formed answers.
"""
def __init__(self, _id=None):
self._id = _id
self._text = None
self._tokens = []
self._type = None # summary, comment etc
@property
def id(self) -> str:
return self._id
@property
def text(self) -> str:
return self._text
@text.setter
def text(self, param: str) -> None:
self._text = param
@property
def tokens(self) -> list:
return self._tokens
@tokens.setter
def tokens(self, param: list) -> None:
assert isinstance(param, list)
self._tokens = param
def append_token(self, tok=EOS_WORD):
assert isinstance(tok, str)
self._tokens.append(tok)
def prepend_token(self, tok=BOS_WORD):
assert isinstance(tok, str)
self._tokens.insert(0, tok)
@property
def type(self) -> str:
return self._type
@type.setter
def type(self, param: str) -> None:
assert isinstance(param, str)
self._type = param
def vectorize(self, word_dict, _type='word') -> list:
if _type == 'word':
return [word_dict[w] for w in self.tokens]
elif _type == 'char':
return [word_dict.word_to_char_ids(w).tolist() for w in self.tokens]
        else:
            raise ValueError("Unknown vectorization type: %s" % _type)
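# Minimal usage sketch; `toy_vocab` is a stand-in dict, not the project's c2nl
# Vocabulary, and is used here only to illustrate vectorize():
if __name__ == '__main__':
    summ = Summary(_id='42')
    summ.text = 'returns the sum of two numbers'
    summ.tokens = summ.text.split()
    summ.prepend_token()  # BOS_WORD
    summ.append_token()   # EOS_WORD
    toy_vocab = {tok: i for i, tok in enumerate(summ.tokens)}
    print(summ.vectorize(toy_vocab))  # list of integer token ids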
|
11452967
|
import typing
from urllib.parse import urlencode
class VkException(Exception):
pass
class VkAuthError(VkException):
    def __init__(
        self, error, description, url: str = "", params: typing.Optional[typing.Mapping] = None
    ):
        self.error = error
        self.description = description
        self.url = "{}?{}".format(url, urlencode(params or {}))
def __str__(self):
return self.description
class VkCaptchaNeeded(VkException):
def __init__(self, url, sid):
self.url = url
self.sid = sid
def __str__(self):
return "You must enter the captcha"
class VkTwoFactorCodeNeeded(VkException):
def __str__(self):
return (
"In order to confirm that you are the owner of this page "
"please enter the code provided by the code generating app."
)
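# Hedged usage sketch (illustrative values only):
if __name__ == '__main__':
    try:
        raise VkAuthError("invalid_client", "client id is incorrect",
                          url="https://oauth.vk.com/authorize",
                          params={"client_id": 1234})
    except VkAuthError as e:
        print(e)      # client id is incorrect
        print(e.url)  # https://oauth.vk.com/authorize?client_id=1234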
|
11453051
|
from __future__ import absolute_import, division, print_function
from joblib import delayed, Parallel
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
import multiprocessing
import threading
import warnings
warnings.simplefilter('ignore')
# Package imports
from externals.six.moves import range
from feature_selectors import (permutation_test_mc, permutation_test_mi,
permutation_test_dcor, permutation_test_pcor,
permutation_test_rdc)
from feature_selectors import mc_fast, mi, pcor, py_dcor
from scorers import gini_index, mse
from utils import bayes_boot_probs, logger
###################
"""SINGLE MODELS"""
###################
class Node(object):
"""Decision node in tree
Parameters
----------
col : int
Integer indexing the location of feature or column
col_pval : float
Probability value from permutation test for feature selection
threshold : float
Best split found in feature
impurity : float
Impurity measuring quality of split
value : 1d array-like or float
For classification trees, estimate of each class probability
For regression trees, central tendency estimate
left_child : tuple
For left child node, two element tuple with first element a 2d array of
features and second element a 1d array of labels
right_child : tuple
For right child node, two element tuple with first element a 2d array of
features and second element a 1d array of labels
"""
def __init__(self, col=None, col_pval=None, threshold=None, impurity=None,
value=None, left_child=None, right_child=None):
self.col = col
self.col_pval = col_pval
self.threshold = threshold
self.impurity = impurity
self.value = value
self.left_child = left_child
self.right_child = right_child
class CITreeBase(object):
"""Base class for conditional inference tree
Parameters
----------
min_samples_split : int
Minimum samples required for a split
alpha : float
Threshold value for selecting feature with permutation tests. Smaller
values correspond to shallower trees
max_depth : int
Maximum depth to grow tree
max_feats : str or int
Maximum feats to select at each split. String arguments include 'sqrt',
'log', and 'all'
n_permutations : int
Number of permutations during feature selection
early_stopping : bool
Whether to implement early stopping during feature selection. If True,
then as soon as the first permutation test returns a p-value less than
alpha, this feature will be chosen as the splitting variable
muting : bool
Whether to perform variable muting
verbose : bool or int
Controls verbosity of training and testing
n_jobs : int
Number of jobs for permutation testing
random_state : int
Sets seed for random number generator
"""
def __init__(self, min_samples_split=2, alpha=.05, max_depth=-1,
max_feats=-1, n_permutations=100, early_stopping=False,
muting=True, verbose=0, n_jobs=-1, random_state=None):
# Error checking
if alpha <= 0 or alpha > 1:
raise ValueError("Alpha (%.2f) should be in (0, 1]" % alpha)
        if n_permutations <= 0:
            raise ValueError("n_permutations (%d) should be > 0" % \
                n_permutations)
if not isinstance(max_feats, int) and max_feats not in ['sqrt', 'log', 'all', -1]:
raise ValueError("%s not a valid argument for max_feats" % \
str(max_feats))
# Define attributes
self.alpha = float(alpha)
self.min_samples_split = max(1, int(min_samples_split))
self.n_permutations = int(n_permutations)
self.max_feats = max_feats
self.early_stopping = early_stopping
self.muting = muting
self.verbose = verbose
self.n_jobs = n_jobs
self.root = None
self.splitter_counter_ = 0
if max_depth == -1:
self.max_depth = np.inf
else:
self.max_depth = int(max(1, max_depth))
if random_state is None:
self.random_state = np.random.randint(1, 9999)
else:
# TODO: ADD CHECK FOR CRAZY LARGE INTEGER?
self.random_state = int(random_state)
def _mute_feature(self, col_to_mute):
"""Removes variable from being selected
Parameters
----------
col_to_mute : int
Integer index of column to remove
"""
        # Do not mute features in the protected set
        if col_to_mute in self.protected_features_:
            return
        # Locate and remove the feature from the available features array
        idx = np.where(self.available_features_ == col_to_mute)[0]
        self.available_features_ = np.delete(self.available_features_, idx)
# Recalculate actual number for max_feats before fitting
p = self.available_features_.shape[0]
if self.max_feats == 'sqrt':
self.max_feats = int(np.sqrt(p))
elif self.max_feats == 'log':
self.max_feats = int(np.log(p+1))
elif self.max_feats in ['all', -1]:
self.max_feats = p
else:
self.max_feats = int(self.max_feats)
# Check to make sure max_feats is not larger than the number of remaining
# features
if self.max_feats > len(self.available_features_):
self.max_feats = len(self.available_features_)
def _selector(self, X, y, col_idx):
"""Find feature most correlated with label"""
        raise NotImplementedError("_selector method not callable from base class")
def _splitter(self, *args, **kwargs):
"""Finds best split for feature"""
raise NotImplementedError("_splitter method not callable from base class")
def _build_tree(self, X, y, depth=0):
"""Recursively builds tree
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
depth : int
Depth of current recursive call
Returns
-------
Node : object
Child node or terminal node in recursive splitting
"""
n, p = X.shape
# Check for stopping criteria
if n > self.min_samples_split and \
depth < self.max_depth and \
not np.all(y == y[0]):
# Controls randomness of column sampling
self.splitter_counter_ += 1
np.random.seed(self.random_state*self.splitter_counter_)
# Find column with strongest association with outcome
try:
col_idx = np.random.choice(self.available_features_,
size=self.max_feats, replace=False)
            except ValueError:
                # Fewer available features than max_feats; sample all of them
col_idx = np.random.choice(self.available_features_,
size=len(self.available_features_),
replace=False)
col, col_pval = self._selector(X, y, col_idx)
# Add selected feature to protected features
if col not in self.protected_features_:
self.protected_features_.append(col)
if self.verbose > 1:
logger("tree", "Added feature %d to protected set, size "
"= %d" % (col, len(self.protected_features_)))
if col_pval <= self.alpha:
# Find best split among selected variable
impurity, threshold, left, right = self._splitter(X, y, n, col)
if left and right and len(left[0]) > 0 and len(right[0]) > 0:
# Build subtrees for the right and left branches
if self.verbose:
logger("tree", "Building left subtree with "
"%d samples at depth %d" % \
(len(left[0]), depth+1))
left_child = self._build_tree(*left, depth=depth+1)
if self.verbose:
logger("tree", "Building right subtree with "
"%d samples at depth %d" % \
(len(right[0]), depth+1))
right_child = self._build_tree(*right, depth=depth+1)
# Return all arguments to constructor except value
return Node(col=col, col_pval=col_pval, threshold=threshold,
left_child=left_child, right_child=right_child,
impurity=impurity)
# Calculate terminal node value
        if self.verbose: logger("tree", "Terminal node reached at depth %d" % depth)
value = self.node_estimate(y)
# Terminal node, no other values to pass to constructor
return Node(value=value)
def fit(self, X, y=None):
"""Trains model
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
Returns
-------
self : CITreeBase
Instance of CITreeBase class
"""
if self.verbose:
logger("tree", "Building root node with %d samples" % X.shape[0])
# Calculate actual number for max_feats before fitting
p = X.shape[1]
if self.max_feats == 'sqrt':
self.max_feats = int(np.sqrt(p))
elif self.max_feats == 'log':
self.max_feats = int(np.log(p+1))
elif self.max_feats in ['all', -1]:
self.max_feats = p
else:
self.max_feats = int(self.max_feats)
# Begin recursive build
self.protected_features_ = []
self.available_features_ = np.arange(p, dtype=int)
self.feature_importances_ = np.zeros(p)
self.root = self._build_tree(X, y)
sum_fi = np.sum(self.feature_importances_)
if sum_fi > 0: self.feature_importances_ /= sum_fi
return self
def predict_label(self, X, tree=None):
"""Predicts label
Parameters
----------
X : 2d array-like
Array of features for single sample
tree : CITreeBase
Trained tree
Returns
-------
label : int or float
Predicted label
"""
# If we have a value => return value as the prediction
if tree is None: tree = self.root
if tree.value is not None: return tree.value
# Determine if we will follow left or right branch
feature_value = X[tree.col]
branch = tree.left_child if feature_value <= tree.threshold \
else tree.right_child
# Test subtree
return self.predict_label(X, branch)
def predict(self, *args, **kwargs):
"""Predicts labels on test data"""
raise NotImplementedError("predict method not callable from base class")
def print_tree(self, tree=None, indent=" ", child=None):
"""Prints tree structure
Parameters
----------
tree : CITreeBase
Trained tree model
indent : str
Indent spacing
child : Node
Left or right child node
"""
# If we're at leaf => print the label
if not tree: tree = self.root
if tree.value is not None: print("label:", tree.value)
# Go deeper down the tree
else:
# Print splitting rule
print("X[:,%s] %s %s " % (tree.col,
'<=' if child in [None, 'left'] else '>',
tree.threshold))
# Print the left child
print("%sL: " % (indent), end="")
self.print_tree(tree.left_child, indent + indent, 'left')
# Print the right
print("%sR: " % (indent), end="")
self.print_tree(tree.right_child, indent + indent, 'right')
class CITreeClassifier(CITreeBase, BaseEstimator, ClassifierMixin):
"""Conditional inference tree classifier
Parameters
----------
selector : str
Variable selector for finding strongest association between a feature
and the label
Derived from CITreeBase class; see constructor for parameter definitions
"""
def __init__(self,
min_samples_split=2,
alpha=.05,
selector='mc',
max_depth=-1,
max_feats=-1,
n_permutations=100,
early_stopping=False,
muting=True,
verbose=0,
n_jobs=-1,
random_state=None):
# Define node estimate
self.node_estimate = self._estimate_proba
# Define selector
if selector not in ['mc', 'mi', 'hybrid']:
            raise ValueError("%s not a valid selector, valid selectors are " \
                "mc, mi, and hybrid" % selector)
self.selector = selector
if self.selector != 'hybrid':
# Wrapper correlation selector
self._selector = self._cor_selector
# Permutation test based on correlation measure
if self.selector == 'mc':
self._perm_test = permutation_test_mc
else:
self._perm_test = permutation_test_mi
else:
self._perm_test = None
self._selector = self._hybrid_selector
super(CITreeClassifier, self).__init__(
min_samples_split=min_samples_split,
alpha=alpha,
max_depth=max_depth,
max_feats=max_feats,
n_permutations=n_permutations,
early_stopping=early_stopping,
muting=muting,
verbose=verbose,
n_jobs=n_jobs,
random_state=random_state)
def _hybrid_selector(self, X, y, col_idx):
"""Selects feature most correlated with y using permutation tests with
a hybrid of multiple correlation and mutual information measures
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
col_idx : list
Columns of X to examine for feature selection
Returns
-------
best_col : int
Best column from feature selection. Note, if early_stopping is
enabled then this may not be the absolute best column
best_pval : float
Probability value from permutation test
"""
# Select random column from start and update
best_col, best_pval = np.random.choice(col_idx), np.inf
# Iterate over columns
for col in col_idx:
if mc_fast(X[:, col], y, self.n_classes_) >= mi(X[:, col], y):
pval = permutation_test_mc(x=X[:, col],
y=y,
n_classes=self.n_classes_,
B=self.n_permutations,
random_state=self.random_state)
else:
pval = permutation_test_mi(x=X[:, col],
y=y,
B=self.n_permutations,
random_state=self.random_state)
# If variable muting
if self.muting and \
pval == 1.0 and \
self.available_features_.shape[0] > 1:
self._mute_feature(col)
if self.verbose: logger("tree", "ASL = 1.0, muting feature %d" % col)
if pval < best_pval:
best_col, best_pval = col, pval
# If early stopping
if self.early_stopping and best_pval < self.alpha:
if self.verbose: logger("tree", "Early stopping")
return best_col, best_pval
return best_col, best_pval
def _splitter(self, X, y, n, col):
"""Splits data set into two child nodes based on optimized weighted
gini index
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
n : int
Number of samples
col : list
Column of X to search for best split
Returns
-------
best_impurity : float
Gini index associated with best split
best_threshold : float
X value associated with splitting of data set into two child nodes
left : tuple
Left child node data consisting of two elements: (features, labels)
right : tuple
            Right child node data consisting of two elements: (features, labels)
"""
if self.verbose > 1:
logger("splitter", "Testing splits on feature %d" % col)
# Initialize variables for splitting
impurity, threshold = 0.0, None
left, right = None, None
# Call sklearn's optimized implementation of decision tree classifiers
# to make split using Gini index
base = DecisionTreeClassifier(
max_depth=1, min_samples_split=self.min_samples_split
).fit(X[:, col].reshape(-1, 1), y).tree_
# Make split based on best threshold
threshold = base.threshold[0]
idx = np.where(X[:, col] <= threshold, 1, 0)
X_left, y_left = X[idx==1], y[idx==1]
X_right, y_right = X[idx==0], y[idx==0]
n_left, n_right = X_left.shape[0], X_right.shape[0]
# Skip small splits
if n_left < self.min_samples_split or n_right < self.min_samples_split:
return impurity, threshold, left, right
# Calculate parent and weighted children impurities
if len(base.impurity) == 3:
node_impurity = base.impurity[0]
left_impurity = base.impurity[1]*(n_left/float(n))
right_impurity = base.impurity[2]*(n_right/float(n))
else:
node_impurity = gini_index(y, self.labels_)
left_impurity = gini_index(y_left, self.labels_)*(n_left/float(n))
right_impurity = gini_index(y_right, self.labels_)*(n_right/float(n))
# Define groups and calculate impurity decrease
left, right = (X_left, y_left), (X_right, y_right)
impurity = node_impurity - (left_impurity + right_impurity)
# Update feature importance (mean decrease impurity)
self.feature_importances_[col] += impurity
return impurity, threshold, left, right
def _cor_selector(self, X, y, col_idx):
"""Selects feature most correlated with y using permutation tests with
a correlation measure
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
col_idx : list
Columns of X to examine for feature selection
Returns
-------
best_col : int
Best column from feature selection. Note, if early_stopping is
enabled then this may not be the absolute best column
best_pval : float
Probability value from permutation test
"""
# Select random column from start and update
best_col, best_pval = np.random.choice(col_idx), np.inf
# Iterate over columns
for col in col_idx:
# Mute feature and continue since constant
if np.all(X[:, col] == X[0, col]) and len(self.available_features_) > 1:
self._mute_feature(col)
if self.verbose: logger("tree", "Constant values, muting feature %d" \
% col)
continue
pval = self._perm_test(x=X[:, col],
y=y,
n_classes=self.n_classes_,
B=self.n_permutations,
random_state=self.random_state)
# If variable muting
if self.muting and \
pval == 1.0 and \
self.available_features_.shape[0] > 1:
self._mute_feature(col)
if self.verbose: logger("tree", "ASL = 1.0, muting feature %d" % col)
if pval < best_pval:
best_col, best_pval = col, pval
# If early stopping
if self.early_stopping and best_pval < self.alpha:
if self.verbose: logger("tree", "Early stopping")
return best_col, best_pval
return best_col, best_pval
def _estimate_proba(self, y):
"""Estimates class distribution in node
Parameters
----------
y : 1d array-like
Array of labels
Returns
-------
class_probs : 1d array-like
Array of class probabilities
"""
return np.array([np.mean(y == label) for label in self.labels_])
def fit(self, X, y, labels=None):
"""Trains conditional inference tree classifier
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
labels : 1d array-like
Array of unique class labels
Returns
-------
self : CITreeClassifier
Instance of CITreeClassifier class
"""
self.labels_ = labels if labels is not None else np.unique(y)
self.n_classes_ = len(self.labels_)
super(CITreeClassifier, self).fit(X, y)
return self
def predict_proba(self, X):
"""Predicts class probabilities for feature vectors X
Parameters
----------
X : 2d array-like
Array of features
Returns
-------
class_probs : 2d array-like
Array of predicted class probabilities
"""
if self.verbose:
logger("test", "Predicting labels for %d samples" % X.shape[0])
return np.array([self.predict_label(sample) for sample in X])
def predict(self, X):
"""Predicts class labels for feature vectors X
Parameters
----------
X : 2d array-like
Array of features
Returns
-------
y : 1d array-like
Array of predicted classes
"""
y_proba = self.predict_proba(X)
return np.argmax(y_proba, axis=1)
class CITreeRegressor(CITreeBase, BaseEstimator, RegressorMixin):
"""Conditional inference tree regressor
Parameters
----------
selector : str
Variable selector for finding strongest association between a feature
and the label
Derived from CITreeBase class; see constructor for rest of parameter definitions
"""
def __init__(self,
min_samples_split=2,
alpha=.05,
selector='pearson',
max_depth=-1,
max_feats=-1,
n_permutations=100,
early_stopping=False,
muting=True,
verbose=0,
n_jobs=-1,
random_state=None):
# Define node estimate
self.node_estimate = self._estimate_mean
# Define selector
if selector not in ['pearson', 'distance', 'rdc', 'hybrid']:
            raise ValueError("%s not a valid selector, valid selectors are " \
                "pearson, distance, rdc, and hybrid" % selector)
self.selector = selector
if self.selector != 'hybrid':
# Wrapper correlation selector
self._selector = self._cor_selector
# Permutation test based on correlation measure
if self.selector == 'pearson':
self._perm_test = permutation_test_pcor
elif self.selector == 'distance':
self._perm_test = permutation_test_dcor
else:
self._perm_test = permutation_test_rdc
else:
self._perm_test = None
self._selector = self._hybrid_selector
super(CITreeRegressor, self).__init__(
min_samples_split=min_samples_split,
alpha=alpha,
max_depth=max_depth,
max_feats=max_feats,
n_permutations=n_permutations,
early_stopping=early_stopping,
muting=muting,
verbose=verbose,
n_jobs=n_jobs,
random_state=random_state)
def _hybrid_selector(self, X, y, col_idx):
"""Selects feature most correlated with y using permutation tests with
a hybrid of pearson and distance correlation measures
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
col_idx : list
Columns of X to examine for feature selection
Returns
-------
best_col : int
Best column from feature selection. Note, if early_stopping is
enabled then this may not be the absolute best column
best_pval : float
Probability value from permutation test
"""
# Select random column from start and update
best_col, best_pval = np.random.choice(col_idx), np.inf
# Iterate over columns
for col in col_idx:
if abs(pcor(X[:, col], y)) >= abs(py_dcor(X[:, col], y)):
pval = permutation_test_pcor(x=X[:, col],
y=y,
B=self.n_permutations,
random_state=self.random_state)
else:
pval = permutation_test_dcor(x=X[:, col],
y=y,
B=self.n_permutations,
random_state=self.random_state)
# If variable muting
if self.muting and \
pval == 1.0 and \
self.available_features_.shape[0] > 1:
self._mute_feature(col)
if self.verbose: logger("tree", "ASL = 1.0, muting feature %d" % col)
if pval < best_pval:
best_col, best_pval = col, pval
# If early stopping
if self.early_stopping and best_pval < self.alpha:
if self.verbose: logger("tree", "Early stopping")
return best_col, best_pval
return best_col, best_pval
def _cor_selector(self, X, y, col_idx):
"""Selects feature most correlated with y using permutation tests with
a correlation measure
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
col_idx : list
Columns of X to examine for feature selection
Returns
-------
best_col : int
Best column from feature selection. Note, if early_stopping is
enabled then this may not be the absolute best column
best_pval : float
Probability value from permutation test
"""
# Select random column from start and update
best_col, best_pval = np.random.choice(col_idx), np.inf
# Iterate over columns
for col in col_idx:
# Mute feature and continue since constant
if np.all(X[:, col] == X[0, col]) and len(self.available_features_) > 1:
self._mute_feature(col)
if self.verbose: logger("tree", "Constant values, muting feature %d" \
% col)
continue
pval = self._perm_test(x=X[:, col],
y=y,
B=self.n_permutations,
random_state=self.random_state)
# If variable muting
if self.muting and \
pval == 1.0 and \
self.available_features_.shape[0] > 1:
self._mute_feature(col)
if self.verbose: logger("tree", "ASL = 1.0, muting feature %d" % col)
if pval < best_pval:
best_col, best_pval = col, pval
# If early stopping
if self.early_stopping and best_pval < self.alpha:
if self.verbose: logger("tree", "Early stopping")
return best_col, best_pval
return best_col, best_pval
def _splitter(self, X, y, n, col):
"""Splits data set into two child nodes based on optimized weighted
mean squared error
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
n : int
Number of samples
col : list
Column of X to search for best split
Returns
-------
best_impurity : float
Mean squared error associated with best split
best_threshold : float
X value associated with splitting of data set into two child nodes
left : tuple
Left child node data consisting of two elements: (features, labels)
right : tuple
            Right child node data consisting of two elements: (features, labels)
"""
if self.verbose > 1:
logger("splitter", "Testing splits on feature %d" % col)
# Initialize variables for splitting
impurity, threshold = 0.0, None
left, right = None, None
# Call sklearn's optimized implementation of decision tree regressors
# to make split using mean squared error
base = DecisionTreeRegressor(
max_depth=1, min_samples_split=self.min_samples_split
).fit(X[:, col].reshape(-1, 1), y).tree_
# Make split based on best threshold
threshold = base.threshold[0]
idx = np.where(X[:, col] <= threshold, 1, 0)
X_left, y_left = X[idx==1], y[idx==1]
X_right, y_right = X[idx==0], y[idx==0]
n_left, n_right = X_left.shape[0], X_right.shape[0]
# Skip small splits
if n_left < self.min_samples_split or n_right < self.min_samples_split:
return impurity, threshold, left, right
# Calculate parent and weighted children impurities
if len(base.impurity) == 3:
node_impurity = base.impurity[0]
left_impurity = base.impurity[1]*(n_left/float(n))
right_impurity = base.impurity[2]*(n_right/float(n))
else:
node_impurity = mse(y)
left_impurity = mse(y_left)*(n_left/float(n))
right_impurity = mse(y_right)*(n_right/float(n))
# Define groups and calculate impurity decrease
left, right = (X_left, y_left), (X_right, y_right)
impurity = node_impurity - (left_impurity + right_impurity)
# Update feature importance (mean decrease impurity)
self.feature_importances_[col] += impurity
return impurity, threshold, left, right
def _estimate_mean(self, y):
"""Estimates mean in node
Parameters
----------
y : 1d array-like
Array of labels
Returns
-------
mu : float
Node mean estimate
"""
return np.mean(y)
def fit(self, X, y):
"""Trains conditional inference tree regressor
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
Returns
-------
self : CITreeRegressor
Instance of CITreeRegressor class
"""
super(CITreeRegressor, self).fit(X, y)
return self
def predict(self, X):
"""Predicts labels for feature vectors in X
Parameters
----------
X : 2d array-like
Array of features
Returns
-------
y_hat : 1d array-like
Array of predicted labels
"""
if self.verbose:
logger("test", "Predicting labels for %d samples" % X.shape[0])
return np.array([self.predict_label(sample) for sample in X])
#####################
"""ENSEMBLE MODELS"""
#####################
def stratify_sampled_idx(random_state, y, bayes):
"""Indices for stratified bootstrap sampling in classification
Parameters
----------
random_state : int
Sets seed for random number generator
y : 1d array-like
Array of labels
bayes : bool
If True, performs Bayesian bootstrap sampling
Returns
-------
idx : list
Stratified sampled indices for each class
"""
np.random.seed(random_state)
idx = []
for label in np.unique(y):
# Grab indices for class
tmp = np.where(y==label)[0]
# Bayesian bootstrapping if specified
p = bayes_boot_probs(n=len(tmp)) if bayes else None
idx.append(np.random.choice(tmp, size=len(tmp), replace=True, p=p))
return idx
def stratify_unsampled_idx(random_state, y, bayes):
"""Unsampled indices for stratified bootstrap sampling in classification
Parameters
----------
random_state : int
Sets seed for random number generator
y : 1d array-like
Array of labels
bayes : bool
If True, performs Bayesian bootstrap sampling
Returns
-------
idx : list
Stratified unsampled indices for each class
"""
np.random.seed(random_state)
sampled = stratify_sampled_idx(random_state, y, bayes)
idx = []
for i, label in enumerate(np.unique(y)):
idx.append(np.setdiff1d(np.where(y==label)[0], sampled[i]))
return idx
def balanced_sampled_idx(random_state, y, bayes, min_class_p):
"""Indices for balanced bootstrap sampling in classification
Parameters
----------
random_state : int
Sets seed for random number generator
y : 1d array-like
Array of labels
bayes : bool
If True, performs Bayesian bootstrap sampling
min_class_p : float
Minimum proportion of class labels
Returns
-------
idx : list
Balanced sampled indices for each class
"""
np.random.seed(random_state)
idx, n = [], int(np.floor(min_class_p*len(y)))
for i, label in enumerate(np.unique(y)):
# Grab indices for class
tmp = np.where(y==label)[0]
# Bayesian bootstrapping if specified
p = bayes_boot_probs(n=len(tmp)) if bayes else None
idx.append(np.random.choice(tmp, size=n, replace=True, p=p))
return idx
def balanced_unsampled_idx(random_state, y, bayes, min_class_p):
"""Unsampled indices for balanced bootstrap sampling in classification
Parameters
----------
random_state : int
Sets seed for random number generator
y : 1d array-like
Array of labels
bayes : bool
If True, performs Bayesian bootstrap sampling
min_class_p : float
Minimum proportion of class labels
Returns
-------
idx : list
Balanced unsampled indices for each class
"""
np.random.seed(random_state)
sampled = balanced_sampled_idx(random_state, y, bayes, min_class_p)
idx = []
for i, label in enumerate(np.unique(y)):
idx.append(np.setdiff1d(np.where(y==label)[0], sampled[i]))
return idx
def normal_sampled_idx(random_state, n, bayes):
"""Indices for bootstrap sampling
Parameters
----------
random_state : int
Sets seed for random number generator
n : int
Sample size
bayes : bool
If True, performs Bayesian bootstrap sampling
Returns
-------
idx : list
Sampled indices
"""
np.random.seed(random_state)
# Bayesian bootstrapping if specified
p = bayes_boot_probs(n=n) if bayes else None
return np.random.choice(np.arange(n, dtype=int), size=n, replace=True, p=p)
def normal_unsampled_idx(random_state, n, bayes):
"""Unsampled indices for bootstrap sampling
Parameters
----------
random_state : int
Sets seed for random number generator
n : int
Sample size
bayes : bool
If True, performs Bayesian bootstrap sampling
Returns
-------
idx : list
Unsampled indices
"""
sampled = normal_sampled_idx(random_state, n, bayes)
counts = np.bincount(sampled, minlength=n)
return np.arange(n, dtype=int)[counts==0]
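# Worked illustration (toy values): with n = 5 and a bootstrap draw of
# [0, 0, 2, 4, 4], np.bincount(sampled, minlength=5) gives [2, 0, 1, 0, 2],
# so normal_unsampled_idx returns the out-of-bag indices [1, 3], i.e. exactly
# the samples never drawn.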
def _parallel_fit_classifier(tree, X, y, n, tree_idx, n_estimators, bootstrap,
                             bayes, verbose, random_state, class_weight=None,
                             min_class_p=None):
"""Utility function for building trees in parallel
Note: This function can't go locally in a class, because joblib complains
that it cannot pickle it when placed there
Parameters
----------
tree : CITreeClassifier
Instantiated conditional inference tree
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
n : int
Number of samples
tree_idx : int
Index of tree in forest
n_estimators : int
Number of total estimators
bootstrap : bool
Whether to perform bootstrap sampling
bayes : bool
If True, performs Bayesian bootstrap sampling
verbose : bool or int
Controls verbosity of training process
random_state : int
Sets seed for random number generator
class_weight : str
Type of sampling during bootstrap, None for regular bootstrapping,
'balanced' for balanced bootstrap sampling, and 'stratify' for
stratified bootstrap sampling
min_class_p : float
Minimum proportion of class labels
Returns
-------
tree : CITreeClassifier
Fitted conditional inference tree
"""
# Print status if conditions met
if verbose and n_estimators >= 10:
denom = n_estimators if verbose > 1 else 10
if (tree_idx+1) % int(n_estimators/denom) == 0:
logger("tree", "Building tree %d/%d" % (tree_idx+1, n_estimators))
# Bootstrap sample if specified
if bootstrap:
random_state = random_state*(tree_idx+1)
if class_weight == 'balanced':
idx = np.concatenate(
                balanced_sampled_idx(random_state, y, bayes, min_class_p)
)
elif class_weight == 'stratify':
idx = np.concatenate(
stratify_sampled_idx(random_state, y, bayes)
)
else:
idx = normal_sampled_idx(random_state, n, bayes)
        # Note: we need to pass the classes in the case of the bootstrap,
        # because not all classes may be sampled; otherwise, at prediction
        # time, each tree would have learned a different number of classes
        # across different bootstrap samples
tree.fit(X[idx], y[idx], np.unique(y))
else:
tree.fit(X, y)
return tree
def _parallel_fit_regressor(tree, X, y, n, tree_idx, n_estimators, bootstrap,
bayes, verbose, random_state):
"""Utility function for building trees in parallel
Note: This function can't go locally in a class, because joblib complains
that it cannot pickle it when placed there
Parameters
----------
tree : CITreeRegressor
Instantiated conditional inference tree
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
n : int
Number of samples
tree_idx : int
Index of tree in forest
n_estimators : int
Number of total estimators
bootstrap : bool
Whether to perform bootstrap sampling
bayes : bool
If True, performs Bayesian bootstrap sampling
verbose : bool or int
Controls verbosity of training process
random_state : int
Sets seed for random number generator
Returns
-------
tree : CITreeRegressor
Fitted conditional inference tree
"""
# Print status if conditions met
if verbose and n_estimators >= 10:
denom = n_estimators if verbose > 1 else 10
if (tree_idx+1) % int(n_estimators/denom) == 0:
logger("tree", "Building tree %d/%d" % (tree_idx+1, n_estimators))
# Bootstrap sample if specified
if bootstrap:
random_state = random_state*(tree_idx+1)
idx = normal_sampled_idx(random_state, n, bayes)
# Train
tree.fit(X[idx], y[idx])
else:
tree.fit(X, y)
return tree
def _accumulate_prediction(predict, X, out, lock):
"""Utility function to aggregate predictions in parallel
Parameters
----------
predict : function handle
Alias to prediction method of class
X : 2d array-like
Array of features
out : 1d or 2d array-like
Array of labels
lock : threading lock
A lock that controls worker access to data structures for aggregating
predictions
Returns
-------
None
"""
prediction = predict(X)
with lock:
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)): out[i] += prediction[i]
class CIForestClassifier(BaseEstimator, ClassifierMixin):
"""Conditional forest classifier
Parameters
----------
min_samples_split : int
Minimum samples required for a split
alpha : float
Threshold value for selecting feature with permutation tests. Smaller
values correspond to shallower trees
selector : str
Variable selector for finding strongest association between a feature
and the label
max_depth : int
Maximum depth to grow tree
max_feats : str or int
Maximum feats to select at each split. String arguments include 'sqrt',
'log', and 'all'
n_permutations : int
Number of permutations during feature selection
early_stopping : bool
Whether to implement early stopping during feature selection. If True,
then as soon as the first permutation test returns a p-value less than
alpha, this feature will be chosen as the splitting variable
muting : bool
Whether to perform variable muting
verbose : bool or int
Controls verbosity of training and testing
bootstrap : bool
Whether to perform bootstrap sampling for each tree
bayes : bool
If True, performs Bayesian bootstrap sampling
class_weight : str
Type of sampling during bootstrap, None for regular bootstrapping,
'balanced' for balanced bootstrap sampling, and 'stratify' for
stratified bootstrap sampling
n_jobs : int
Number of jobs for permutation testing
random_state : int
Sets seed for random number generator
"""
def __init__(self, min_samples_split=2, alpha=.05, selector='mc', max_depth=-1,
n_estimators=100, max_feats='sqrt', n_permutations=100,
early_stopping=True, muting=True, verbose=0, bootstrap=True,
bayes=True, class_weight='balanced', n_jobs=-1, random_state=None):
# Error checking
if alpha <= 0 or alpha > 1:
raise ValueError("Alpha (%.2f) should be in (0, 1]" % alpha)
if selector not in ['mc', 'mi', 'hybrid']:
            raise ValueError("%s not a valid selector, valid selectors are " \
                "mc, mi, and hybrid" % selector)
        if n_permutations <= 0:
            raise ValueError("n_permutations (%s) should be > 0" % \
                str(n_permutations))
if not isinstance(max_feats, int) and max_feats not in ['sqrt', 'log', 'all', -1]:
raise ValueError("%s not a valid argument for max_feats" % \
str(max_feats))
        if n_estimators <= 0:
raise ValueError("n_estimators (%s) must be > 0" % \
str(n_estimators))
# Only for classifier model
if class_weight not in [None, 'balanced', 'stratify']:
raise ValueError("%s not a valid argument for class_weight" % \
str(class_weight))
# Placeholder variable for regression model (not applicable)
if class_weight is None: self.min_class_p = None
# Define attributes
self.alpha = float(alpha)
self.selector = selector
self.min_samples_split = max(1, min_samples_split)
self.n_permutations = int(n_permutations)
if max_depth == -1:
self.max_depth = max_depth
else:
self.max_depth = int(max(1, max_depth))
self.n_estimators = int(max(1, n_estimators))
self.max_feats = max_feats
self.bootstrap = bootstrap
self.early_stopping = early_stopping
self.muting = muting
self.n_jobs = n_jobs
self.verbose = verbose
self.class_weight = class_weight
self.bayes = bayes
if random_state is None:
self.random_state = np.random.randint(1, 9999)
else:
# TODO: ADD CHECK FOR CRAZY LARGE INTEGER?
self.random_state = int(random_state)
# Package params for calling CITreeClassifier
self.params = {
'alpha' : self.alpha,
'selector' : self.selector,
'min_samples_split' : self.min_samples_split,
'n_permutations' : self.n_permutations,
'max_feats' : self.max_feats,
'early_stopping' : self.early_stopping,
'muting' : self.muting,
'verbose' : 0,
'n_jobs' : 1,
'random_state' : None,
}
def fit(self, X, y):
"""Fit conditional forest classifier
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
Returns
-------
self : CIForestClassifier
Instance of CIForestClassifier
"""
self.labels_ = np.unique(y)
self.n_classes_ = len(self.labels_)
if self.verbose:
logger("tree", "Training ensemble with %d trees on %d samples" % \
(self.n_estimators, X.shape[0]))
# Instantiate base tree models
self.estimators_ = []
for i in range(self.n_estimators):
self.params['random_state'] = self.random_state*(i+1)
self.estimators_.append(CITreeClassifier(**self.params))
# Define class distribution
self.class_dist_p = np.array([
np.mean(y==label) for label in np.unique(y)
])
# Train models
n = X.shape[0]
self.estimators_ = \
Parallel(n_jobs=self.n_jobs, backend='loky')(
delayed(_parallel_fit_classifier)(
self.estimators_[i], X, y, n, i, self.n_estimators,
self.bootstrap, self.bayes, self.verbose, self.random_state,
self.class_weight, np.min(self.class_dist_p)
)
for i in range(self.n_estimators)
)
# Accumulate feature importances (mean decrease impurity)
self.feature_importances_ = np.sum([
tree.feature_importances_ for tree in self.estimators_],
axis=0
)
sum_fi = np.sum(self.feature_importances_)
if sum_fi > 0: self.feature_importances_ /= sum_fi
return self
def predict_proba(self, X):
"""Predicts class probabilities for feature vectors X
Parameters
----------
X : 2d array-like
Array of features
Returns
-------
class_probs : 2d array-like
Array of predicted class probabilities
"""
if self.verbose:
logger("test", "Predicting labels for %d samples" % X.shape[0])
# Parallel prediction
all_proba = np.zeros((X.shape[0], self.n_classes_), dtype=np.float64)
lock = threading.Lock()
Parallel(n_jobs=self.n_jobs, backend="threading")(
delayed(_accumulate_prediction)(e.predict_proba, X, all_proba, lock)
for e in self.estimators_)
# Normalize probabilities
all_proba /= len(self.estimators_)
if len(all_proba) == 1:
return all_proba[0]
else:
return all_proba
def predict(self, X):
"""Predicts class labels for feature vectors X
Parameters
----------
X : 2d array-like
Array of features
Returns
-------
y : 1d array-like
Array of predicted classes
"""
y_proba = self.predict_proba(X)
return np.argmax(y_proba, axis=1)
class CIForestRegressor(BaseEstimator, RegressorMixin):
"""Conditional forest regressor
Parameters
----------
min_samples_split : int
Minimum samples required for a split
alpha : float
Threshold value for selecting feature with permutation tests. Smaller
values correspond to shallower trees
selector : str
Variable selector for finding strongest association between a feature
and the label
max_depth : int
Maximum depth to grow tree
max_feats : str or int
Maximum feats to select at each split. String arguments include 'sqrt',
'log', and 'all'
n_permutations : int
Number of permutations during feature selection
early_stopping : bool
Whether to implement early stopping during feature selection. If True,
then as soon as the first permutation test returns a p-value less than
alpha, this feature will be chosen as the splitting variable
muting : bool
Whether to perform variable muting
verbose : bool or int
Controls verbosity of training and testing
bootstrap : bool
Whether to perform bootstrap sampling for each tree
bayes : bool
If True, performs Bayesian bootstrap sampling
n_jobs : int
Number of jobs for permutation testing
random_state : int
Sets seed for random number generator
"""
def __init__(self, min_samples_split=2, alpha=.01, selector='pearson', max_depth=-1,
n_estimators=100, max_feats='sqrt', n_permutations=100,
early_stopping=True, muting=True, verbose=0, bootstrap=True,
bayes=True, n_jobs=-1, random_state=None):
# Error checking
if alpha <= 0 or alpha > 1:
raise ValueError("Alpha (%.2f) should be in (0, 1]" % alpha)
if selector not in ['pearson', 'distance', 'rdc', 'hybrid']:
            raise ValueError("%s not a valid selector, valid selectors are " \
                "pearson, distance, rdc, and hybrid" % selector)
        if n_permutations <= 0:
raise ValueError("n_permutations (%s) should be > 0" % \
str(n_permutations))
if not isinstance(max_feats, int) and max_feats not in ['sqrt', 'log', 'all', -1]:
raise ValueError("%s not a valid argument for max_feats" % \
str(max_feats))
        if n_estimators <= 0:
raise ValueError("n_estimators (%s) must be > 0" % \
str(n_estimators))
# Define attributes
self.alpha = float(alpha)
self.selector = selector
self.min_samples_split = max(1, min_samples_split)
self.n_permutations = int(n_permutations)
if max_depth == -1:
self.max_depth = max_depth
else:
self.max_depth = int(max(1, max_depth))
self.n_estimators = int(max(1, n_estimators))
self.max_feats = max_feats
self.bootstrap = bootstrap
self.early_stopping = early_stopping
self.muting = muting
self.n_jobs = n_jobs
self.verbose = verbose
self.bayes = bayes
if random_state is None:
self.random_state = np.random.randint(1, 9999)
else:
# TODO: ADD CHECK FOR CRAZY LARGE INTEGER?
self.random_state = int(random_state)
# Package params for calling CITreeRegressor
self.params = {
'alpha' : self.alpha,
'selector' : self.selector,
'min_samples_split' : self.min_samples_split,
'n_permutations' : self.n_permutations,
'max_feats' : self.max_feats,
'early_stopping' : self.early_stopping,
            'muting' : self.muting,
'verbose' : 0,
'n_jobs' : 1,
'random_state' : None,
}
def fit(self, X, y):
"""Fit conditional forest regressor
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
Returns
-------
self : CIForestRegressor
Instance of CIForestRegressor
"""
if self.verbose:
logger("tree", "Training ensemble with %d trees on %d samples" % \
(self.n_estimators, X.shape[0]))
# Instantiate base tree models
self.estimators_ = []
for i in range(self.n_estimators):
self.params['random_state'] = self.random_state*(i+1)
self.estimators_.append(CITreeRegressor(**self.params))
# Train models
n = X.shape[0]
self.estimators_ = \
Parallel(n_jobs=self.n_jobs, backend='loky')(
delayed(_parallel_fit_regressor)(
self.estimators_[i], X, y, n, i, self.n_estimators,
self.bootstrap, self.bayes, self.verbose, self.random_state
)
for i in range(self.n_estimators)
)
# Accumulate feature importances (mean decrease impurity)
self.feature_importances_ = np.sum([
tree.feature_importances_ for tree in self.estimators_],
axis=0
)
sum_fi = np.sum(self.feature_importances_)
if sum_fi > 0: self.feature_importances_ /= sum_fi
return self
def predict(self, X):
"""Predicts labels for feature vectors X
Parameters
----------
X : 2d array-like
Array of features
Returns
-------
labels : 1d array-like
Array of predicted labels
"""
if self.verbose:
logger("test", "Predicting labels for %d samples" % X.shape[0])
# Parallel prediction
results = np.zeros(X.shape[0], dtype=np.float64)
lock = threading.Lock()
Parallel(n_jobs=self.n_jobs, backend="threading")(
delayed(_accumulate_prediction)(e.predict, X, results, lock)
for e in self.estimators_)
# Normalize predictions
results /= len(self.estimators_)
if len(results) == 1:
return results[0]
else:
return results
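# Hedged usage sketch: a toy classification run on random data. The sizes and
# hyperparameters below are illustrative assumptions, not recommended values.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 5))
    y = (X[:, 0] + .1*rng.normal(size=100) > 0).astype(int)
    clf = CIForestClassifier(n_estimators=10, max_feats='sqrt', random_state=1)
    clf.fit(X, y)
    print(clf.predict(X[:5]))        # predicted class labels
    print(clf.feature_importances_)  # normalized mean decrease impurity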
|
11453064
|
def sequentialSearch(alist, item):
pos = 0
found = False
while pos < len(alist) and not found:
if alist[pos] == item:
found = True
else:
pos = pos+1
return found
testlist = [1, 2, 32, 8, 17, 19, 42, 13, 0]
print(sequentialSearch(testlist, 3))
print(sequentialSearch(testlist, 13))
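# Note: sequentialSearch is the classic O(n) linear scan; the idiomatic Python
# equivalent is the `in` operator, shown here only for comparison:
print(3 in testlist)   # False
print(13 in testlist)  # True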
|
11453068
|
import cPickle
# Pickle files must be opened in binary mode
with open('lstm_tanh_relu_[1468202263.38]_2_0.610.p', 'rb') as f:
    model = cPickle.load(f)
with open('model.bin.nlg', 'wb') as f:
    cPickle.dump(model, f)
|
11453071
|
import time
import os.path as osp
import numpy as np
import torch
import torch.nn.functional as F
from model.trainer.base import Trainer
from model.trainer.helpers import (
get_dataloader, prepare_model, prepare_optimizer,
)
from model.utils import (
pprint, ensure_path,
Averager, Timer, count_acc,
compute_confidence_interval
)
from tqdm import tqdm
class FSLTrainer(Trainer):
def __init__(self, args):
super().__init__(args)
self.train_loader, self.val_loader, self.test_loader = get_dataloader(args)
self.model, self.para_model = prepare_model(args)
self.optimizer, self.lr_scheduler = prepare_optimizer(self.model, args)
def prepare_label(self):
args = self.args
# prepare one-hot label
label = torch.arange(args.way, dtype=torch.int16).repeat(args.query)
label_aux = torch.arange(args.way, dtype=torch.int8).repeat(args.shot + args.query)
label = label.type(torch.LongTensor)
label_aux = label_aux.type(torch.LongTensor)
if torch.cuda.is_available():
label = label.cuda()
label_aux = label_aux.cuda()
return label, label_aux
def train(self):
args = self.args
self.model.train()
if self.args.fix_BN:
self.model.encoder.eval()
# start FSL training
label, label_aux = self.prepare_label()
for epoch in range(1, args.max_epoch + 1):
self.train_epoch += 1
self.model.train()
if self.args.fix_BN:
self.model.encoder.eval()
tl1 = Averager()
tl2 = Averager()
ta = Averager()
start_tm = time.time()
for batch in self.train_loader:
self.train_step += 1
if torch.cuda.is_available():
data, gt_label = [_.cuda() for _ in batch]
else:
data, gt_label = batch[0], batch[1]
data_tm = time.time()
self.dt.add(data_tm - start_tm)
# get saved centers
logits, reg_logits = self.para_model(data)
if reg_logits is not None:
loss = F.cross_entropy(logits, label)
total_loss = args.balance_1*loss + args.balance_2 * F.cross_entropy(reg_logits, label_aux)
else:
                    loss = F.cross_entropy(logits, label)
                    total_loss = loss
                tl2.add(loss.item())
forward_tm = time.time()
self.ft.add(forward_tm - data_tm)
acc = count_acc(logits, label)
tl1.add(total_loss.item())
ta.add(acc)
self.optimizer.zero_grad()
total_loss.backward()
backward_tm = time.time()
self.bt.add(backward_tm - forward_tm)
self.optimizer.step()
optimizer_tm = time.time()
self.ot.add(optimizer_tm - backward_tm)
# refresh start_tm
start_tm = time.time()
self.lr_scheduler.step()
self.try_evaluate(epoch)
print('ETA:{}/{}'.format(
self.timer.measure(),
self.timer.measure(self.train_epoch / args.max_epoch))
)
torch.save(self.trlog, osp.join(args.save_path, 'trlog'))
self.save_model('epoch-last')
def evaluate(self, data_loader):
# restore model args
args = self.args
# evaluation mode
self.model.eval()
record = np.zeros((args.num_eval_episodes, 2)) # loss and acc
label = torch.arange(args.eval_way, dtype=torch.int16).repeat(args.eval_query)
label = label.type(torch.LongTensor)
if torch.cuda.is_available():
label = label.cuda()
print('best epoch {}, best val acc={:.4f} + {:.4f}'.format(
self.trlog['max_acc_epoch'],
self.trlog['max_acc'],
self.trlog['max_acc_interval']))
with torch.no_grad():
for i, batch in enumerate(data_loader, 1):
if torch.cuda.is_available():
data, _ = [_.cuda() for _ in batch]
else:
data = batch[0]
logits = self.model(data)
loss = F.cross_entropy(logits, label)
acc = count_acc(logits, label)
record[i-1, 0] = loss.item()
record[i-1, 1] = acc
assert(i == record.shape[0])
vl, _ = compute_confidence_interval(record[:,0])
va, vap = compute_confidence_interval(record[:,1])
# train mode
self.model.train()
if self.args.fix_BN:
self.model.encoder.eval()
return vl, va, vap
def evaluate_test(self):
# restore model args
args = self.args
self.args.testing = True
self.model.load_state_dict(torch.load(osp.join(self.args.save_path, 'max_acc.pth'))['params'])
self.model.eval()
record = np.zeros((600, 2)) # loss and acc
label = torch.arange(args.eval_way, dtype=torch.int16).repeat(args.eval_query)
label = label.type(torch.LongTensor)
if torch.cuda.is_available():
label = label.cuda()
print('best epoch {}, best val acc={:.4f} + {:.4f}'.format(
self.trlog['max_acc_epoch'],
self.trlog['max_acc'],
self.trlog['max_acc_interval']))
with torch.no_grad():
for i, batch in tqdm(enumerate(self.test_loader, 1)):
if torch.cuda.is_available():
data, _ = [_.cuda() for _ in batch]
else:
data = batch[0]
logits = self.model(data)
loss = F.cross_entropy(logits, label)
acc = count_acc(logits, label)
record[i-1, 0] = loss.item()
record[i-1, 1] = acc
assert(i == record.shape[0])
vl, _ = compute_confidence_interval(record[:,0])
va, vap = compute_confidence_interval(record[:,1])
self.trlog['test_acc'] = va
self.trlog['test_acc_interval'] = vap
self.trlog['test_loss'] = vl
print('best epoch {}, best val acc={:.4f} + {:.4f}\n'.format(
self.trlog['max_acc_epoch'],
self.trlog['max_acc'],
self.trlog['max_acc_interval']))
print('Test acc={:.4f} + {:.4f}\n'.format(
self.trlog['test_acc'],
self.trlog['test_acc_interval']))
return vl, va, vap
def final_record(self):
# save the best performance in a txt file
with open(osp.join(self.args.save_path, '{}+{}'.format(self.trlog['test_acc'], self.trlog['test_acc_interval'])), 'w') as f:
f.write('best epoch {}, best val acc={:.4f} + {:.4f}\n'.format(
self.trlog['max_acc_epoch'],
self.trlog['max_acc'],
self.trlog['max_acc_interval']))
f.write('Test acc={:.4f} + {:.4f}\n'.format(
self.trlog['test_acc'],
self.trlog['test_acc_interval']))
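# Hedged illustration of the episodic label layout built in prepare_label():
# with way=3 and query=2 (illustrative values), torch.arange(3).repeat(2)
# yields tensor([0, 1, 2, 0, 1, 2]), i.e. one label per class repeated for
# each query sample, matching the ordering the model's logits are expected
# to follow.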
|
11453108
|
from twisted.internet.task import react
from twisted.internet.defer import inlineCallbacks as coroutine
from autobahn.twisted.wamp import ApplicationSession, Connection
session = ApplicationSession()
@session.on_join
def on_join(session):
print("Session {} has joined".format(session.id))
@session.on_leave
def on_leave(session, details):
print("Session {} has left: {}".format(session.id, details.reason))
@session.register('com.myapp.add2') # registering in on_join
def add2(a, b):
return a + b
@coroutine
def main(transport):
yield session.join(transport, 'myrealm1')
result = yield session.call('com.myapp.add2', 2, 3)
print("Result: {}".format(result))
yield session.leave()
yield transport.close()
if __name__ == '__main__':
connection = Connection(main)
react(connection.start)
|
11453119
|
import torch
import torch.nn as nn
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size, device):
"""
:param input_size: 5 which is OHLC + trend
"""
super(EncoderRNN, self).__init__()
self.device = device
self.hidden_size = hidden_size
self.gru = nn.GRU(input_size, hidden_size)
# self.lstm = nn.LSTM(input_size, hidden_size)
def forward(self, x):
"""
:param x: if the input x is a batch, its size is of the form [window_size, batch_size, input_size]
thus, the output of GRU would be of shape [window_size, batch_size, hidden_size].
e.g. output[:, 0, :] is the output sequence of the first element in the batch.
The hidden is of the shape [1, batch_size, hidden_size]
"""
if len(x.shape) < 3:
x = x.unsqueeze(1)
hidden = self.initHidden(x.shape[1])
output, hidden = self.gru(x, hidden)
# output, hidden = self.gru(x)
# cell = self.initHidden(x.shape[1])
# output, (hidden, cell) = self.lstm(x, (hidden, cell))
return output, hidden
def initHidden(self, batch_size):
return torch.zeros(1, batch_size, self.hidden_size, device=self.device)
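# Quick shape check, as a hedged sketch (window=10, batch=4 are illustrative
# values; input_size=5 follows the OHLC + trend convention in the docstring):
if __name__ == '__main__':
    enc = EncoderRNN(input_size=5, hidden_size=16, device=torch.device('cpu'))
    output, hidden = enc(torch.randn(10, 4, 5))
    print(output.shape)  # torch.Size([10, 4, 16])
    print(hidden.shape)  # torch.Size([1, 4, 16])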
|
11453193
|
import pandas as pd
import numpy as np
from scipy.io.wavfile import read as read_wav
import librosa
import os
from sklearn.preprocessing import MinMaxScaler
def preprocessing_audio(data_info_path, audio_path):
    sampleRate = 16000  # target sampling frequency in Hz
    cutAudio = 0.3  # fraction cut from the start/end of the audio (to remove the pause)
    lengthAudio = 1 - 2 * cutAudio  # total retained length of the vowel file as a fraction
    audio = []  # list of audio clips
    vocalInfo = []  # list of vowel labels
    _, _, filenames = next(os.walk(data_info_path))  # collect the .csv filenames from the folder
    # Store each audio clip and its associated vowel in the lists
for i in range(len(filenames)):
name = filenames[i]
        data_info = pd.read_csv(data_info_path + "/" + name)  # read the .csv for this audio file
        timemarkBeginn = data_info['Beginn']  # split the .csv contents
timemarkEnde = data_info['Ende']
vokal = data_info['Vokal']
        nameAudio = name.replace("csv", "wav")  # build the audio file name
        pathAudio = audio_path + "/" + nameAudio  # path to the audio file
        Fs, _ = read_wav(pathAudio)  # sample rate of the original audio
for i in range(len(timemarkBeginn)):
timemark1 = timemarkBeginn[i]
timemark2 = timemarkEnde[i]
            vocalLength = (timemark2 - timemark1) / Fs  # vowel length including the pause, in seconds
            offset1 = (timemark1 / Fs + cutAudio * vocalLength)  # start of the vowel within the wav file, in seconds
            dauer = vocalLength * lengthAudio  # duration in seconds; a percentage is trimmed front and back to cut off the pause
y, _ = librosa.load(path=pathAudio, sr=sampleRate, mono=True, offset=offset1,
duration=dauer) # , dtype=<class 'numpy.float32'>, res_type='kaiser_best')
y = librosa.util.normalize(y)
audio.append(y)
vocalInfo.append(vokal[i])
return audio, vocalInfo, sampleRate
def preprocessing_audio_fb(data_info_path, audio_path):  # difference: the audio signal is rescaled so that |y| < 1, allowing tanh in the ESN
    sampleRate = 16000  # target sampling frequency in Hz
    cutAudio = 0.3  # fraction cut from the start/end of the audio (to remove the pause)
    lengthAudio = 1 - 2 * cutAudio  # total retained length of the vowel file as a fraction
    audio = []  # list of audio clips
    vocalInfo = []  # list of vowel labels
    _, _, filenames = next(os.walk(data_info_path))  # collect the .csv filenames from the folder
    # Store each audio clip and its associated vowel in the lists
for i in range(len(filenames)):
scaler = MinMaxScaler(feature_range=(0,0.999))
name = filenames[i]
        data_info = pd.read_csv(data_info_path + "/" + name)  # read the .csv for this audio file
        timemarkBeginn = data_info['Beginn']  # split the .csv contents
timemarkEnde = data_info['Ende']
vokal = data_info['Vokal']
        nameAudio = name.replace("csv", "wav")  # build the audio file name
        pathAudio = audio_path + "/" + nameAudio  # path to the audio file
        Fs, _ = read_wav(pathAudio)  # sample rate of the original audio
for i in range(len(timemarkBeginn)):
timemark1 = timemarkBeginn[i]
timemark2 = timemarkEnde[i]
            vocalLength = (timemark2 - timemark1) / Fs  # vowel length including the pause, in seconds
            offset1 = (timemark1 / Fs + cutAudio * vocalLength)  # start of the vowel within the wav file, in seconds
            dauer = vocalLength * lengthAudio  # duration in seconds; a percentage is trimmed front and back to cut off the pause
y, _ = librosa.load(path=pathAudio, sr=sampleRate, mono=True, offset=offset1,
duration=dauer) # , dtype=<class 'numpy.float32'>, res_type='kaiser_best')
y = scaler.fit_transform(y.reshape(-1, 1))
audio.append(y)
vocalInfo.append(vokal[i])
audioVocalOne = []
vocalInfoOne = []
for i in range(len(audio)):
if vocalInfo[i]=='a' or vocalInfo[i]=='u':
audioVocalOne.append(audio[i])
vocalInfoOne.append(vocalInfo[i])
#return audio, vocalInfo, sampleRate
return audioVocalOne, vocalInfoOne, sampleRate
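# Worked example of the trimming arithmetic (illustrative numbers): for a vowel
# annotated from sample 16000 to 48000 at Fs = 16000 Hz, vocalLength = 2.0 s,
# offset1 = 1.0 + 0.3 * 2.0 = 1.6 s and dauer = 2.0 * 0.4 = 0.8 s, so only the
# middle 40% of the annotated segment is loaded.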
|
11453195
|
from unittest import mock
from django.test import TestCase
from django.test.utils import override_settings
from django.core import mail
from django.core.mail.backends.base import BaseEmailBackend
from django.utils import translation
from geotrek.feedback.parsers import SuricateParser
from geotrek.feedback.factories import ReportFactory
class FailingEmailBackend(BaseEmailBackend):
"""
This Email Backend is used to test error management when sending email
"""
def send_messages(self, email_messages):
raise Exception('Fake problem')
class EmailSendingTest(TestCase):
def test_a_mail_is_sent_on_report_creation(self):
ReportFactory.create()
self.assertEqual(len(mail.outbox), 1)
@override_settings(SURICATE_REPORT_ENABLED=False)
def test_a_mail_is_not_sent_on_report_modification_no_suricate_mode(self):
r = ReportFactory.create()
self.assertEqual(len(mail.outbox), 1)
r.comment = 'More info about it'
r.save()
self.assertEqual(len(mail.outbox), 1)
@override_settings(SURICATE_REPORT_ENABLED=True)
@mock.patch("geotrek.feedback.helpers.SuricateMessenger.post_report")
def test_a_mail_is_not_sent_on_report_modification_suricate_mode(self, post_report):
r = ReportFactory.create(uid="027b1b63-fa59-48e1-bfdf-daaefc03dee2")
self.assertEqual(len(mail.outbox), 1)
r.comment = 'More info about it'
r.save()
self.assertEqual(len(mail.outbox), 1)
@override_settings(EMAIL_BACKEND='geotrek.feedback.tests.FailingEmailBackend')
def test_email_failure_does_not_prevent_report_creation(self):
r = ReportFactory.create()
self.assertEqual(len(mail.outbox), 0)
self.assertIsNotNone(r.id)
@override_settings(EMAIL_BACKEND='geotrek.feedback.tests.FailingEmailBackend')
@mock.patch("geotrek.feedback.parsers.logger")
def test_email_failed_logs_and_warns(self, mocked):
        self.assertRaises(Exception, SuricateParser().send_managers_new_reports)
mocked.error.assert_called_with("Email could not be sent to managers.")
def test_email_format_and_content(self):
ReportFactory.create(email='<EMAIL>',
comment="This is a 'comment'")
sent_mail = mail.outbox[0]
self.assertEqual(sent_mail.subject,
'[Geotrek] Feedback from <EMAIL>')
self.assertIn("Comment : This is a 'comment'", sent_mail.body)
self.assertIn("Lat : 46.500000 / Lon : 3.000000", sent_mail.body)
def test_email_format_and_content_fr(self):
translation.activate('fr')
ReportFactory.create(email='<EMAIL>',
comment="Ceci est un commentaire")
sent_mail = mail.outbox[0]
self.assertEqual(sent_mail.subject,
'[Geotrek] Signalement de <EMAIL>')
self.assertIn("Commentaire : Ceci est un commentaire", sent_mail.body)
self.assertIn("Lat : 46.500000 / Lon : 3.000000", sent_mail.body)
self.assertIn("http://www.openstreetmap.org/?mlat=46.500000&mlon=3.000000", sent_mail.body)
translation.deactivate()
|
11453212
|
import numpy as np
import dlib
import glob
import cv2
import os
os.chdir('/media/imi-yujun/579e63e9-5852-43a7-9323-8e51241f5d3a/yujun/Course_porject_face')
def LoadBase():
predictor_path = 'Network/shape_predictor_68_face_landmarks.dat'
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
return predictor,detector
def SaveLandmark(shape):
tmp = np.zeros((68,2),dtype=np.uint)
for i in range(68):
tmp[i,0] = shape.part(i).x
tmp[i, 1] = shape.part(i).y
return tmp
def Run():
count =0
predictor, detector = LoadBase()
image_list = glob.glob('CoarseData/CoarseData/*/*.jpg')
for path_ in image_list:
image_= cv2.imread(path_)
#gray_= cv2.cvtColor(image_, cv2.COLOR_BGR2GRAY)
try:
print(count)
count += 1
# dets = detector(gray_, 1)
# shape = predictor(gray_, dets[0])
# tmp = SaveLandmark(shape)
# np.savetxt(path_[:len(path_)-4]+'_landmark.txt',tmp, fmt='%d')
# res = Normalize(image_,tmp)
# cv2.imwrite(path_[:len(path_)-4]+'_224.png',res)
#
landmark = np.loadtxt(path_[:len(path_)-4]+'_landmark.txt')
shape_, exp_, eular_, translate_, scale_ = Get3Dmm(path_[:len(path_)-4]+'.txt')
crop_image, translation, new_scale = Normalize2(image_,landmark,translate_,scale_)
cv2.imwrite(path_[:len(path_) - 4] + '_224.png', crop_image)
label = np.zeros([185])
label[:100] = shape_
label[100:179] = exp_
label[179:182] =eular_
label[182:184] = translate_
label[184] = scale_
np.savetxt(path_[:len(path_) - 4] + '_224.txt',label)
        except:
            print("error")
            continue
def Normalize(image,landmark_):
xmin = np.min(landmark_[:,0])
xmax = np.max(landmark_[:,0])
ymin = np.min(landmark_[:,1])
ymax = np.max(landmark_[:,1])
sub_image = image[ymin:ymax,xmin:xmax]
res = cv2.resize(sub_image, (224,224), interpolation=cv2.INTER_LINEAR)
return res
def Package():
Save_path = 'Input/'
image_list = glob.glob('CoarseData/CoarseData/*/*_224.png')
data = np.zeros((len(image_list),224,224,3),dtype=np.uint8)
label = np.zeros((len(image_list),185))
for i in range(len(image_list)):
print(i)
path_ = image_list[i]
img = cv2.imread(path_)
# f = open(path_[:len(path_)-8]+'.txt')
# content = f.readlines()
# content= [x.strip() for x in content]
# label[i,:100] = np.array(content[0].split(" "),dtype = np.float)
# label[i,100:179] = np.array(content[1].split(" "),dtype = np.float)
# label[i,179:] = np.array(content[2].split(" "),dtype = np.float)
# f.close()
label[i,:] = np.loadtxt(path_[:len(path_)-8]+'_224.txt')
data[i,:,:] = np.array(img,dtype=np.uint8)
np.save(Save_path+'data.npy',data)
np.save(Save_path+'label.npy',label)
def Split():
test_num = 5000
data = np.load('Input/data.npy')
label= np.load('Input/label.npy')
train_data= data[test_num:,:]
test_data = data[:test_num,:]
test_label = label[:test_num,:]
train_label = label[test_num:,:]
np.save('Input/train_data.npy',train_data)
np.save('Input/mean_data.npy',np.mean(data,axis=0))
np.save('Input/mean_label.npy', np.mean(label, axis=0))
np.save('Input/test_data.npy',test_data)
np.save('Input/train_label.npy',train_label)
np.save('Input/test_label.npy',test_label)
np.save('Input/std_label.npy', np.std(label, axis=0))
net_img_size = 224
def Normalize2(image, landmark_, translation=np.array([0,0]), scale=0):
xmin = np.min(landmark_[:, 0])
xmax = np.max(landmark_[:, 0])
ymin = np.min(landmark_[:, 1])
ymax = np.max(landmark_[:, 1])
old_cx = (xmin + xmax) / 2
old_cy = (ymin + ymax) / 2;
cx = (net_img_size - 1) / 2.0
cy = (net_img_size - 1) * 2.0 / 5.0;
length = ((xmax - xmin) ** 2 + (ymax - ymin) ** 2) ** 0.5
length *= 1.2
ori_crop_scale = net_img_size / length
new_scale = scale * ori_crop_scale
image = cv2.resize(image, (0, 0), fx=ori_crop_scale, fy=ori_crop_scale)
old_cx = old_cx * ori_crop_scale
old_cy = old_cy * ori_crop_scale
start_x = int(old_cx - cx)
start_y = int(old_cy - cy)
crop_image = image[start_y:start_y + 224, start_x:start_x + 224]
shape_ = np.shape(crop_image)
tmp = np.zeros((224,224,3),dtype=np.uint8)
tmp[:shape_[0],:shape_[1],:] = crop_image
translation = translation * ori_crop_scale
translation[0] = translation[0] - start_x
translation[1] = translation[1] - (len(image) - 224-start_y)
# landmark_=landmark_*ori_crop_scale
# tmp = np.zeros((224,224),dtype=np.uint8)
# for i in range(68):
# tmp[ int(landmark_[i,1] - start_y),int(landmark_[i,0] - start_x) ] = 255;
# cv2.imwrite("landmarl.jpg",tmp)
return tmp, translation, new_scale
def Get3Dmm(path):
with open(path) as f:
dmm_para = f.readlines()
dmm_para = [x.strip() for x in dmm_para]
    shape_ = np.array(dmm_para[0].split(), dtype=float)
    exp_ = np.array(dmm_para[1].split(), dtype=float)
    tmp = np.array(dmm_para[2].split(), dtype=float)
eular_ = tmp[:3]
translate_ = tmp[3:5]
scale_ = tmp[5]
return shape_,exp_,eular_,translate_,scale_
def Custom():
cap = cv2.VideoCapture('Input/gx.MOV')
sample_num = 150
M = cv2.getRotationMatrix2D((1920 / 2, 1080 / 2), 270, 1)
predictor, detector = LoadBase()
index_=0
data = np.zeros((sample_num,224,224,3))
for i in range(sample_num):
ret,frame = cap.read()
frame = cv2.warpAffine(frame, M, (1920, 1080))
gray_ = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
dets = detector(gray_, 1)
shape = predictor(gray_, dets[0])
tmp = SaveLandmark(shape)
res,_,_ = Normalize2(frame,tmp)
data[i,:,:,:] = res
cv2.imwrite('tmp/gx/'+str(index_)+'.jpg', frame)
print(index_)
index_+=1
cap.release()
np.save("Input/gx.npy",data)
Custom()
# Run()
# Package()
# Split()
|
11453254
|
import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_jobs, h2o_rf
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(3)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_1ktrees_job_cancel_many_fvec(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
# always match the run below!
# just using one file for now
for x in [1000]:
shCmdString = "perl " + h2o.find_file("syn_scripts/parity.pl") + " 128 4 "+ str(x) + " quad " + SYNDATASETS_DIR
h2o.spawn_cmd_and_wait('parity.pl', shCmdString.split(),4)
csvFilename = "parity_128_4_" + str(x) + "_quad.data"
csvFilename = "parity_128_4_" + str(1000) + "_quad.data"
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
hex_key = csvFilename + ".hex"
parseResult = h2o_cmd.parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=30)
print "kick off jobs, then cancel them"
for trial in range (1,5):
# random 0 or 1 delay
delay = random.uniform(0,1)
time.sleep(delay)
h2o.verboseprint("Trial", trial)
start = time.time()
h2o_cmd.runRF(parseResult=parseResult, trees=trial, max_depth=50, rfView=False, noPoll=True, timeoutSecs=30, retryDelaySecs=0.25)
print "RF #", trial, "started on ", csvFilename, 'took', time.time() - start, 'seconds'
### h2o_jobs.cancelAllJobs(timeoutSecs=10)
h2o.check_sandbox_for_errors()
# do one last good one
rfView = h2o_cmd.runRF(parseResult=parseResult, trees=trial, max_depth=50, timeoutSecs=600, retryDelaySecs=3)
(classification_error, classErrorPctList, totalScores) = h2o_rf.simpleCheckRFView(rfv=rfView, ntree=trial)
if __name__ == '__main__':
h2o.unit_main()
|
11453266
|
import FWCore.ParameterSet.Config as cms
def customizeHLTforMC(process):
"""adapt the HLT to run on MC, instead of data
see Configuration/StandardSequences/Reconstruction_Data_cff.py
which does the opposite, for RECO"""
# PFRecHitProducerHCAL
if 'hltParticleFlowRecHitHCAL' in process.__dict__:
process.hltParticleFlowRecHitHCAL.ApplyPulseDPG = cms.bool(False)
process.hltParticleFlowRecHitHCAL.LongShortFibre_Cut = cms.double(1000000000.0)
# customise hltHbhereco to use the Method 3 time slew parametrization and response correction for Monte Carlo (PR #11091)
if 'hltHbhereco' in process.__dict__:
if process.hltHbhereco._TypedParameterizable__type == 'HcalHitReconstructor':
# 2015-2016 Run 2
process.hltHbhereco.pedestalSubtractionType = cms.int32( 1 )
process.hltHbhereco.pedestalUpperLimit = cms.double( 2.7 )
process.hltHbhereco.timeSlewParsType = cms.int32( 3 )
# new time slew parametrisation
process.hltHbhereco.timeSlewPars = cms.vdouble( 12.2999, -2.19142, 0, 12.2999, -2.19142, 0, 12.2999, -2.19142, 0 )
# old response correction, matching the 2015D 25ns data
process.hltHbhereco.respCorrM3 = cms.double( 1.0 )
elif process.hltHbhereco._TypedParameterizable__type == 'HBHEPhase1Reconstructor':
# 2017 "plan 0"
process.hltHbhereco.algorithm.respCorrM3 = cms.double( 1.0 )
if 'hltHbhePhase1Reco' in process.__dict__:
if process.hltHbhePhase1Reco._TypedParameterizable__type == 'HBHEPhase1Reconstructor':
# 2017 "plan 1"
# assume retuning the pulse shapes will give the same response
# in data and MC for Method 2 and Method 3
pass
return process
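# Usage sketch (added, hedged): customisation functions like this one are
# applied to a fully configured cms.Process; `process` below is assumed to
# already exist in an HLT configuration file.
# process = customizeHLTforMC(process)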
|
11453268
|
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import numpy as np
import cv2
from functools import partial
def get_image(path, torch_img=True):
"""
if torch_img == True:
return image that can be directly used by our CNN model
else:
return numpy resize image
"""
img = cv2.imread(path)
img = cv2.resize(img, (224, 224))
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
if torch_img:
img = transform(img)
img.unsqueeze_(0)
return img
def store_feature(model):
def hook(module, input, output, key):
        if isinstance(module, nn.MaxPool2d):
            # we used return_indices=True when defining our MaxPool layers
# so the first output is the output of MaxPool layer and
# the second output is the indices of the max_locations and
# we store it in a pool_locs variable that we would give to
# the deconvnet model.
model.feature_maps[key] = output[0]
model.pool_locs[key] = output[1]
else:
model.feature_maps[key] = output
    for idx, layer in enumerate(model._modules.get('features')):
layer.register_forward_hook(partial(hook, key=idx))
def visualize_layer(layer, vgg, vgg_deconv, top_k=9):
"""
We are given a layer, say 5, and we want to get the activations for one of
it's channels/features. By default, I use batch_size=1 for getting the
visualizations and I use CPU for the computations.
Arguments:
layer :- the layer number of which we want to get visulizations (should
be conv layer index)
vgg :- the model returned by get_vgg()
vgg_deconv :- the model returned by get_vgg_deconv()
top_k :- the number of top activations that you want
"""
    # Num channels/features in the layer (batch_size, num_channels, height, width)
num_feat = vgg.feature_maps[layer].shape[1]
# Create clone of feature map for easy changes
feat_map = vgg.feature_maps[layer].clone()
    # Now we want the feature map with maximum activation, which is simply done
    # by finding the maximum value in the tensor
activations = []
for i in range(num_feat):
# Sequentially traverse all the feature maps and append the maximum value
# in the feature map to the activations list.
map = feat_map[0, i, :, :]
activation = torch.max(map)
activations.append(activation.item())
# Get the indices for the top_k activations
activations = np.array(activations)
max_activation_idxs = activations.argsort()[-1:-top_k-1:-1]
# Store the visualizations in a list
images = []
for i in max_activation_idxs:
current_map = feat_map[:, i, :, :]
current_max_activation = torch.max(current_map)
        tmp_feat_map = vgg.feature_maps[layer].clone()
        # We zero out all the other activation maps/channels except the one we
        # want to get the visualizations for
if i == 0:
tmp_feat_map[:, 1:, :, :] = 0
else:
tmp_feat_map[:, :i, :, :] = 0
if i != vgg.feature_maps[layer].shape[1] - 1:
tmp_feat_map[:, i+1:, :, :] = 0
# For the current selected activation map, zero out all the non-max values
# and store it the tmp_feat_map array
current_map = torch.where(current_map == current_max_activation,
current_map,
torch.zeros(current_map.shape))
tmp_feat_map[0, i, :, :] = current_map
# Run the deconv model
deconv_output = vgg_deconv(tmp_feat_map, layer, vgg.pool_locs)
# Get the image
img = deconv_output.data.numpy()[0].transpose(1, 2, 0)
img = (img - img.min()) / (img.max() - img.min()) * 255
img = img.astype(np.uint8)
        images.append((img, int(current_max_activation)))
return images
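# Usage sketch (added, hedged): get_vgg() and get_vgg_deconv() are the factory
# functions mentioned in the docstring above but not defined in this file, so
# this is illustrative only.
# vgg = get_vgg()
# vgg_deconv = get_vgg_deconv()
# store_feature(vgg)             # register hooks filling feature_maps/pool_locs
# _ = vgg(get_image('cat.jpg'))  # forward pass populates vgg.feature_maps
# images = visualize_layer(layer=5, vgg=vgg, vgg_deconv=vgg_deconv, top_k=9)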
|
11453284
|
from __future__ import absolute_import, unicode_literals
import logging
import imagehash
import numpy as np
import pytest
from PIL.Image import Image
from psd_tools.api.psd_image import PSDImage
from ..utils import full_name
logger = logging.getLogger(__name__)
QUALITY_TEST_FILES = [
    # ('mask-index.psd',),  # Transparent region in preview image is wrong...
    ('background-red-opacity-80.psd', ),
    ('32bit.psd', ),
]
def _calculate_hash_error(image1, image2):
assert isinstance(image1, Image)
assert isinstance(image2, Image)
hash1 = imagehash.average_hash(image1)
hash2 = imagehash.average_hash(image2)
error_count = np.sum(np.bitwise_xor(hash1.hash, hash2.hash))
return error_count / float(hash1.hash.size)
@pytest.mark.parametrize(("filename", ), QUALITY_TEST_FILES)
def test_compose_quality(filename):
psd = PSDImage.open(full_name(filename))
preview = psd.topil()
rendered = psd.compose(force=True)
assert _calculate_hash_error(preview, rendered) <= 0.1
@pytest.mark.parametrize('filename', [
'smartobject-layer.psd',
'type-layer.psd',
'gradient-fill.psd',
'shape-layer.psd',
'pixel-layer.psd',
'solid-color-fill.psd',
'pattern-fill.psd',
])
def test_compose_minimal(filename):
source = PSDImage.open(full_name('layers-minimal/' + filename)).compose()
reference = PSDImage.open(full_name('layers/' + filename)).compose(True)
assert _calculate_hash_error(source, reference) <= 0.172
@pytest.mark.parametrize('colormode, depth', [
('cmyk', 8),
('duotone', 8),
('grayscale', 8),
('index_color', 8),
('rgb', 8),
('lab', 8),
('cmyk', 16),
('grayscale', 16),
('multichannel', 16),
('lab', 16),
('rgb', 16),
('grayscale', 32),
('rgb', 32),
])
def test_compose_colormodes(colormode, depth):
filename = 'colormodes/4x4_%gbit_%s.psd' % (depth, colormode)
psd = PSDImage.open(full_name(filename))
assert isinstance(psd.compose(), Image)
|
11453291
|
import re
from six import string_types # for 2-3 compatibility
import types
from numbers import Number
core_types = [ ]
class SchemaError(Exception):
pass
class SchemaMismatch(Exception):
pass
class SchemaTypeMismatch(SchemaMismatch):
def __init__(self, name, desired_type):
SchemaMismatch.__init__(self, '{0} must be {1}'.format(name, desired_type))
class SchemaValueMismatch(SchemaMismatch):
def __init__(self, name, value):
SchemaMismatch.__init__(self, '{0} must equal {1}'.format(name, value))
class SchemaRangeMismatch(SchemaMismatch):
pass
def indent(text, level=1, whitespace=' '):
return '\n'.join(whitespace*level+line for line in text.split('\n'))
class Util(object):
@staticmethod
def make_range_check(opt):
if not {'min', 'max', 'min-ex', 'max-ex'}.issuperset(opt):
raise ValueError("illegal argument to make_range_check")
if {'min', 'min-ex'}.issubset(opt):
raise ValueError("Cannot define both exclusive and inclusive min")
if {'max', 'max-ex'}.issubset(opt):
raise ValueError("Cannot define both exclusive and inclusive max")
r = opt.copy()
inf = float('inf')
def check_range(value):
return(
r.get('min', -inf) <= value and \
r.get('max', inf) >= value and \
r.get('min-ex', -inf) < value and \
r.get('max-ex', inf) > value
)
return check_range
@staticmethod
def make_range_validator(opt):
check_range = Util.make_range_check(opt)
r = opt.copy()
nan = float('nan')
def validate_range(value, name='value'):
if not check_range(value):
if r.get('min', nan) == r.get('max', nan):
msg = '{0} must equal {1}'.format(name, r['min'])
raise SchemaRangeMismatch(msg)
range_str = ''
if 'min' in r:
range_str = '[{0}, '.format(r['min'])
elif 'min-ex' in r:
range_str = '({0}, '.format(r['min-ex'])
else:
range_str = '(-inf, '
if 'max' in r:
range_str += '{0}]'.format(r['max'])
elif 'max-ex' in r:
range_str += '{0})'.format(r['max-ex'])
else:
range_str += 'inf)'
raise SchemaRangeMismatch(name+' must be in range '+range_str)
return validate_range
class Factory(object):
def __init__(self, register_core_types=True):
self.prefix_registry = {
'': 'tag:codesimply.com,2008:rx/core/',
'.meta': 'tag:codesimply.com,2008:rx/meta/',
}
self.type_registry = {}
if register_core_types:
for t in core_types: self.register_type(t)
@staticmethod
def _default_prefixes(): pass
def expand_uri(self, type_name):
        if re.match(r'^\w+:', type_name): return type_name
        m = re.match(r'^/([-._a-z0-9]*)/([-._a-z0-9]+)$', type_name)
if not m:
raise ValueError("couldn't understand type name '{0}'".format(type_name))
prefix, suffix = m.groups()
if prefix not in self.prefix_registry:
raise KeyError(
"unknown prefix '{0}' in type name '{1}'".format(prefix, type_name)
)
return self.prefix_registry[ prefix ] + suffix
def add_prefix(self, name, base):
if self.prefix_registry.get(name):
raise SchemaError("the prefix '{0}' is already registered".format(name))
        self.prefix_registry[name] = base
def register_type(self, t):
t_uri = t.uri()
if t_uri in self.type_registry:
raise ValueError("type already registered for {0}".format(t_uri))
self.type_registry[t_uri] = t
def learn_type(self, uri, schema):
if self.type_registry.get(uri):
raise SchemaError("tried to learn type for already-registered uri {0}".format(uri))
# make sure schema is valid
# should this be in a try/except?
self.make_schema(schema)
self.type_registry[uri] = { 'schema': schema }
def make_schema(self, schema):
if isinstance(schema, string_types):
schema = { 'type': schema }
if not isinstance(schema, dict):
raise SchemaError('invalid schema argument to make_schema')
uri = self.expand_uri(schema['type'])
if not self.type_registry.get(uri): raise SchemaError("unknown type {0}".format(uri))
type_class = self.type_registry[uri]
if isinstance(type_class, dict):
if not {'type'}.issuperset(schema):
                raise SchemaError('composed type does not take check arguments')
return self.make_schema(type_class['schema'])
else:
return type_class(schema, self)
class _CoreType(object):
@classmethod
def uri(self):
return 'tag:codesimply.com,2008:rx/core/' + self.subname()
def __init__(self, schema, rx):
if not {'type'}.issuperset(schema):
raise SchemaError('unknown parameter for //{0}'.format(self.subname()))
def check(self, value):
try:
self.validate(value)
except SchemaMismatch:
return False
return True
def validate(self, value, name='value'):
raise SchemaMismatch('Tried to validate abstract base schema class')
class AllType(_CoreType):
@staticmethod
def subname(): return 'all'
def __init__(self, schema, rx):
if not {'type', 'of'}.issuperset(schema):
raise SchemaError('unknown parameter for //all')
if not(schema.get('of') and len(schema.get('of'))):
raise SchemaError('no alternatives given in //all of')
self.alts = [rx.make_schema(s) for s in schema['of']]
def validate(self, value, name='value'):
error_messages = []
for schema in self.alts:
try:
schema.validate(value, name)
except SchemaMismatch as e:
error_messages.append(str(e))
if len(error_messages) > 1:
messages = indent('\n'.join(error_messages))
message = '{0} failed to meet all schema requirements:\n{1}'
message = message.format(name, messages)
raise SchemaMismatch(message)
elif len(error_messages) == 1:
raise SchemaMismatch(error_messages[0])
class AnyType(_CoreType):
@staticmethod
def subname(): return 'any'
def __init__(self, schema, rx):
self.alts = None
if not {'type', 'of'}.issuperset(schema):
raise SchemaError('unknown parameter for //any')
if 'of' in schema:
if not schema['of']: raise SchemaError('no alternatives given in //any of')
self.alts = [ rx.make_schema(alt) for alt in schema['of'] ]
def validate(self, value, name='value'):
if self.alts is None:
return
error_messages = []
for schema in self.alts:
try:
schema.validate(value, name)
break
except SchemaMismatch as e:
error_messages.append(str(e))
if len(error_messages) == len(self.alts):
messages = indent('\n'.join(error_messages))
message = '{0} failed to meet any schema requirements:\n{1}'
message = message.format(name, messages)
raise SchemaMismatch(message)
class ArrType(_CoreType):
@staticmethod
def subname(): return 'arr'
def __init__(self, schema, rx):
self.length = None
if not {'type', 'contents', 'length'}.issuperset(schema):
raise SchemaError('unknown parameter for //arr')
if not schema.get('contents'):
raise SchemaError('no contents provided for //arr')
self.content_schema = rx.make_schema(schema['contents'])
if schema.get('length'):
self.length = Util.make_range_validator(schema['length'])
def validate(self, value, name='value'):
if not isinstance(value, (list, tuple)):
raise SchemaTypeMismatch(name, 'array')
if self.length:
self.length(len(value), name+' length')
error_messages = []
for i, item in enumerate(value):
try:
self.content_schema.validate(item, 'item '+str(i))
except SchemaMismatch as e:
error_messages.append(str(e))
if len(error_messages) > 1:
messages = indent('\n'.join(error_messages))
message = '{0} sequence contains invalid elements:\n{1}'
message = message.format(name, messages)
raise SchemaMismatch(message)
elif len(error_messages) == 1:
raise SchemaMismatch(name+': '+error_messages[0])
class BoolType(_CoreType):
@staticmethod
def subname(): return 'bool'
def validate(self, value, name='value'):
if not isinstance(value, bool):
raise SchemaTypeMismatch(name, 'boolean')
class DefType(_CoreType):
@staticmethod
def subname(): return 'def'
def validate(self, value, name='value'):
if value is None:
raise SchemaMismatch(name+' must be non-null')
class FailType(_CoreType):
@staticmethod
def subname(): return 'fail'
def check(self, value): return False
def validate(self, value, name='value'):
raise SchemaMismatch(name+' is of fail type, automatically invalid.')
class IntType(_CoreType):
@staticmethod
def subname(): return 'int'
def __init__(self, schema, rx):
if not {'type', 'range', 'value'}.issuperset(schema):
raise SchemaError('unknown parameter for //int')
self.value = None
if 'value' in schema:
if not isinstance(schema['value'], Number) or schema['value'] % 1 != 0:
raise SchemaError('invalid value parameter for //int')
self.value = schema['value']
self.range = None
if 'range' in schema:
self.range = Util.make_range_validator(schema['range'])
def validate(self, value, name='value'):
if not isinstance(value, Number) or isinstance(value, bool) or value%1:
raise SchemaTypeMismatch(name,'integer')
if self.range:
self.range(value, name)
if self.value is not None and value != self.value:
raise SchemaValueMismatch(name, self.value)
class MapType(_CoreType):
@staticmethod
def subname(): return 'map'
def __init__(self, schema, rx):
self.allowed = set()
if not {'type', 'values'}.issuperset(schema):
raise SchemaError('unknown parameter for //map')
if not schema.get('values'):
raise SchemaError('no values given for //map')
self.value_schema = rx.make_schema(schema['values'])
def validate(self, value, name='value'):
if not isinstance(value, dict):
raise SchemaTypeMismatch(name, 'map')
error_messages = []
for key, val in value.items():
try:
self.value_schema.validate(val, key)
except SchemaMismatch as e:
error_messages.append(str(e))
if len(error_messages) > 1:
messages = indent('\n'.join(error_messages))
message = '{0} map contains invalid entries:\n{1}'
message = message.format(name, messages)
raise SchemaMismatch(message)
elif len(error_messages) == 1:
raise SchemaMismatch(name+': '+error_messages[0])
class NilType(_CoreType):
@staticmethod
def subname(): return 'nil'
def check(self, value): return value is None
def validate(self, value, name='value'):
if value is not None:
raise SchemaTypeMismatch(name, 'null')
class NumType(_CoreType):
@staticmethod
def subname(): return 'num'
def __init__(self, schema, rx):
if not {'type', 'range', 'value'}.issuperset(schema):
raise SchemaError('unknown parameter for //num')
self.value = None
if 'value' in schema:
if not isinstance(schema['value'], Number):
raise SchemaError('invalid value parameter for //num')
self.value = schema['value']
self.range = None
if schema.get('range'):
self.range = Util.make_range_validator(schema['range'])
def validate(self, value, name='value'):
if not isinstance(value, Number) or isinstance(value, bool):
raise SchemaTypeMismatch(name, 'number')
if self.range:
self.range(value, name)
if self.value is not None and value != self.value:
raise SchemaValueMismatch(name, self.value)
class OneType(_CoreType):
@staticmethod
def subname(): return 'one'
def validate(self, value, name='value'):
if not isinstance(value, (Number, string_types)):
raise SchemaTypeMismatch(name, 'number or string')
class RecType(_CoreType):
@staticmethod
def subname(): return 'rec'
def __init__(self, schema, rx):
if not {'type', 'rest', 'required', 'optional'}.issuperset(schema):
raise SchemaError('unknown parameter for //rec')
self.known = set()
self.rest_schema = None
if schema.get('rest'): self.rest_schema = rx.make_schema(schema['rest'])
for which in ('required', 'optional'):
setattr(self, which, {})
for field in schema.get(which, {}).keys():
if field in self.known:
raise SchemaError('%s appears in both required and optional' % field)
self.known.add(field)
self.__getattribute__(which)[field] = rx.make_schema(
schema[which][field]
)
def validate(self, value, name='value'):
if not isinstance(value, dict):
raise SchemaTypeMismatch(name, 'record')
unknown = [k for k in value.keys() if k not in self.known]
if unknown and not self.rest_schema:
fields = indent('\n'.join(unknown))
raise SchemaMismatch(name+' contains unknown fields:\n'+fields)
error_messages = []
for field in self.required:
try:
if field not in value:
raise SchemaMismatch('missing required field: '+field)
self.required[field].validate(value[field], field)
except SchemaMismatch as e:
error_messages.append(str(e))
for field in self.optional:
if field not in value: continue
try:
self.optional[field].validate(value[field], field)
except SchemaMismatch as e:
error_messages.append(str(e))
if unknown:
rest = {key: value[key] for key in unknown}
try:
self.rest_schema.validate(rest, name)
except SchemaMismatch as e:
error_messages.append(str(e))
if len(error_messages) > 1:
messages = indent('\n'.join(error_messages))
message = '{0} record is invalid:\n{1}'
message = message.format(name, messages)
raise SchemaMismatch(message)
elif len(error_messages) == 1:
raise SchemaMismatch(name+': '+error_messages[0])
class SeqType(_CoreType):
@staticmethod
def subname(): return 'seq'
def __init__(self, schema, rx):
if not {'type', 'contents', 'tail'}.issuperset(schema):
raise SchemaError('unknown parameter for //seq')
if not schema.get('contents'):
raise SchemaError('no contents provided for //seq')
self.content_schema = [ rx.make_schema(s) for s in schema['contents'] ]
self.tail_schema = None
if (schema.get('tail')):
self.tail_schema = rx.make_schema(schema['tail'])
def validate(self, value, name='value'):
if not isinstance(value, (list, tuple)):
raise SchemaTypeMismatch(name, 'sequence')
if len(value) < len(self.content_schema):
raise SchemaMismatch(name+' is less than expected length')
if len(value) > len(self.content_schema) and not self.tail_schema:
raise SchemaMismatch(name+' exceeds expected length')
error_messages = []
for i, (schema, item) in enumerate(zip(self.content_schema, value)):
try:
schema.validate(item, 'item '+str(i))
except SchemaMismatch as e:
error_messages.append(str(e))
if len(error_messages) > 1:
messages = indent('\n'.join(error_messages))
message = '{0} sequence is invalid:\n{1}'
message = message.format(name, messages)
raise SchemaMismatch(message)
elif len(error_messages) == 1:
raise SchemaMismatch(name+': '+error_messages[0])
if len(value) > len(self.content_schema):
self.tail_schema.validate(value[len(self.content_schema):], name)
class StrType(_CoreType):
@staticmethod
def subname(): return 'str'
def __init__(self, schema, rx):
if not {'type', 'value', 'length'}.issuperset(schema):
raise SchemaError('unknown parameter for //str')
self.value = None
if 'value' in schema:
if not isinstance(schema['value'], string_types):
raise SchemaError('invalid value parameter for //str')
self.value = schema['value']
self.length = None
if 'length' in schema:
self.length = Util.make_range_validator(schema['length'])
def validate(self, value, name='value'):
if not isinstance(value, string_types):
raise SchemaTypeMismatch(name, 'string')
if self.value is not None and value != self.value:
raise SchemaValueMismatch(name, '"{0}"'.format(self.value))
if self.length:
self.length(len(value), name+' length')
core_types = [
AllType, AnyType, ArrType, BoolType, DefType,
FailType, IntType, MapType, NilType, NumType,
OneType, RecType, SeqType, StrType
]
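# Minimal usage sketch (added): validating a record with the Factory above.
if __name__ == '__main__':
    rx = Factory(register_core_types=True)
    schema = rx.make_schema({'type': '//rec', 'required': {'age': '//int'}})
    assert schema.check({'age': 3})
    assert not schema.check({'age': 'three'})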
|
11453337
|
from .arrayfuncs import (
reshape_1d_to_2d,
convert_sparse_matrix,
)
from .exceptions import (
DimensionalityError,
UnknownCategoryError,
NotInstalledError,
NotBucketedError,
NotPreBucketedError,
NotBucketObjectError,
BucketingPipelineError,
BucketerTypeError,
)
from .dataframe import detect_types
__all__ = [
"reshape_1d_to_2d",
"convert_sparse_matrix",
"DimensionalityError",
"UnknownCategoryError",
"NotInstalledError",
"NotBucketObjectError",
"detect_types",
"NotBucketedError",
"NotPreBucketedError",
"BucketingPipelineError",
"BucketerTypeError",
]
|
11453344
|
from django.db import models
from django.template.defaultfilters import slugify
from datetime import datetime
# Create your models here.
class Doctorant(models.Model):
nationaliter = models.CharField(max_length=50,null=False)
nom = models.CharField(max_length=50,null=False)
prenom = models.CharField(max_length=50,null=False)
sexe = models.CharField(max_length=10,null=False)
date_naissance = models.DateField()
lieu_naissance = models.CharField(max_length=100,null=False)
addresse = models.CharField(max_length=100,null=False)
email = models.CharField(max_length=50,null=False)
telephone = models.CharField(max_length=15,null=False)
nom_prenom_mere = models.CharField(max_length=50,null=False)
nom_pere = models.CharField(max_length=50,null=False)
password = models.CharField(max_length=50,null=False)
accepted = models.BooleanField(default=False)
slug = models.SlugField()
def __str__(self):
return self.nom
def save(self, *args, **kwargs):
if not self.id:
self.slug = slugify(self.nom)
super(Doctorant, self).save(*args, **kwargs)
class Meta:
ordering = ['nom']
class Module(models.Model):
nom = models.CharField(max_length=50)
niveau = models.CharField(max_length=10)
slug = models.SlugField()
def __str__(self):
return self.nom
def save(self, *args, **kwargs):
if not self.id:
self.slug = slugify(self.nom)
super(Module, self).save(*args, **kwargs)
class Meta:
ordering = ['nom']
class Recours(models.Model):
doctorant = models.ForeignKey(Doctorant, related_name='recours', on_delete=models.CASCADE)
sujet = models.CharField(max_length=50)
message = models.CharField(max_length=2550)
accepted = models.BooleanField(default=False)
class Meta:
unique_together = ('doctorant', 'sujet')
ordering = ['sujet']
def __unicode__(self):
return '%s: %s' % (self.sujet, self.message)
class Sujet(models.Model):
titre = models.CharField(max_length=50)
description = models.TextField()
accepted = models.BooleanField(default=False)
slug = models.SlugField()
def __str__(self):
return self.titre
def save(self, *args, **kwargs):
if not self.id:
self.slug = slugify(self.titre)
super(Sujet, self).save(*args, **kwargs)
class Meta:
ordering = ['titre']
class Reinscription(models.Model):
doctorant = models.ForeignKey(Doctorant, related_name='reinscriptions', on_delete=models.CASCADE)
intitulerPostGrade = models.CharField(max_length=100)
intitulerSujet = models.CharField(max_length=100)
diplomeGraduation = models.CharField(max_length=250)
nomEncadreur = models.CharField(max_length=100)
gradeEncadreur = models.CharField(max_length=100)
nomCoEncadreur = models.CharField(max_length=100)
gradeCoEncadreur = models.CharField(max_length=100)
dateReinscription = models.DateField(auto_now_add=True)
slug = models.SlugField()
def __str__(self):
return self.intitulerSujet
def save(self, *args, **kwargs):
        if not self.id:
            # self.id is still None before the first save, so slugify a meaningful field instead
            self.slug = slugify(self.intitulerSujet)
super(Reinscription, self).save(*args, **kwargs)
class Meta:
ordering = ['intitulerPostGrade']
class Inscription(models.Model):
doctorant = models.ForeignKey(Doctorant, related_name='inscriptions', on_delete=models.CASCADE)
intitulerPostGrade = models.CharField(max_length=100)
intitulerSujet = models.CharField(max_length=100)
diplomeGraduation = models.CharField(max_length=250)
nomEncadreur = models.CharField(max_length=100)
gradeEncadreur = models.CharField(max_length=100)
nomCoEncadreur = models.CharField(max_length=100)
gradeCoEncadreur = models.CharField(max_length=100)
dateInscription = models.DateField(auto_now_add=True)
slug = models.SlugField()
def __str__(self):
return self.intitulerSujet
def save(self, *args, **kwargs):
        if not self.id:
            # self.id is still None before the first save, so slugify a meaningful field instead
            self.slug = slugify(self.intitulerSujet)
super(Inscription, self).save(*args, **kwargs)
class Meta:
ordering = ['intitulerPostGrade']
class Enseignant(models.Model):
nom = models.CharField(max_length=50,null=False)
prenom = models.CharField(max_length=50,null=False)
sexe = models.CharField(max_length=10,null=False)
date_naissance = models.DateField()
lieu_naissance = models.CharField(max_length=100,null=False)
addresse = models.CharField(max_length=100,null=False)
email = models.CharField(max_length=50,null=False)
telephone = models.CharField(max_length=15,null=False)
password = models.CharField(max_length=50,null=False)
grade = models.CharField(max_length=50,null=False)
slug = models.SlugField()
def __str__(self):
return self.nom
def save(self, *args, **kwargs):
if not self.id:
self.slug = slugify(self.nom)
super(Enseignant, self).save(*args, **kwargs)
class Meta:
ordering = ['nom']
class PassageGrade(models.Model):
enseignant = models.ForeignKey(Enseignant, related_name='passagegrades', on_delete=models.CASCADE)
gradeVoulu = models.CharField(max_length=50)
argument = models.TextField()
class Meta:
unique_together = ('enseignant', 'gradeVoulu')
ordering = ['gradeVoulu']
def __unicode__(self):
return '%s: %s' % (self.gradeVoulu, self.argument)
|
11453383
|
import os
import pdb
from buche import BucheDb
global_interactor = None
if os.environ.get("BUCHE"):
class BuDb(BucheDb):
def __init__(self, **kw):
super().__init__(None)
self.interactor = global_interactor
def set_trace(self, frame=None):
self.interactor.show(synchronous=True)
self.repl = self.interactor.repl
super().set_trace(frame)
def interaction(self, frame, tb):
self.interactor.show(synchronous=True)
self.repl = self.interactor.repl
super().interaction(frame, tb)
else:
BuDb = pdb.Pdb
|
11453412
|
import sys
from collections import defaultdict, Counter
files=[("base","results/marvin_results_base.txt"),("large","results/marvin_results_large.txt")]
by_model={}
conditions=set()
for title,fname in files:
lines = open(fname)
results=defaultdict(Counter)
by_model[title]=results
skipped = set()
for line in lines:
if line.startswith("Better speed"): continue
if line.startswith("skipping"):
skipped.add(line.split()[1])
next(lines)
continue
res,c1,c2,w1,w2,s = line.split(None, 5)
c1 = c1.replace("inanim","anim")
conditions.add(c1)
results[c1][res]+=1
print("skipped:",skipped)
print("condition & base & large & count \\\\")
for cond in conditions:
rb = by_model['base'][cond]
rl = by_model['large'][cond]
sb = "%.2f" % (rb['True']/(rb['True']+rb['False']))
sl = "%.2f" % (rl['True']/(rl['True']+rl['False']))
print(" & ".join(map(str,[cond, sb, sl, sum(rb.values())])),"\\\\")
|
11453439
|
import time
import numpy as np
import pinocchio as pin
from numpy.linalg import det, eig, inv, norm, pinv, svd
import vizutils
# COST 3D #####################################################################
class Cost3d:
def __init__(self, rmodel, rdata, frameIndex=None, ptarget=None, viz=None):
self.rmodel = rmodel
self.rdata = rdata
self.ptarget = ptarget if ptarget is not None else np.array([0.5, 0.1, 0.27])
        self.frameIndex = frameIndex if frameIndex is not None else rmodel.nframes - 1
self.viz = viz
def residual(self, q):
pin.framesForwardKinematics(self.rmodel, self.rdata, q)
M = self.rdata.oMf[self.frameIndex]
return M.translation - self.ptarget
def calc(self, q):
return sum(self.residual(q) ** 2)
# --- Callback
def callback(self, q):
if self.viz is None:
return
pin.framesForwardKinematics(self.rmodel, self.rdata, q)
M = self.rdata.oMf[self.frameIndex]
vizutils.applyViewerConfiguration(self.viz, 'world/ball', pin.SE3ToXYZQUATtuple(M))
vizutils.applyViewerConfiguration(self.viz, 'world/box', self.ptarget.tolist() + [1, 0, 0, 0])
self.viz.display(q)
time.sleep(1e-2)
def calcDiff(self, q):
pin.framesForwardKinematics(self.rmodel, self.rdata, q)
pin.computeJointJacobians(self.rmodel, self.rdata, q)
M = self.rdata.oMf[self.frameIndex]
J = pin.getFrameJacobian(self.rmodel, self.rdata, self.frameIndex, pin.LOCAL_WORLD_ALIGNED)[:3, :]
return 2 * J.T @ (M.translation - self.ptarget)
# COST 6D #####################################################################
class Cost6d:
def __init__(self, rmodel, rdata, frameIndex=None, Mtarget=None, viz=None):
self.rmodel = rmodel
self.rdata = rdata
self.Mtarget = Mtarget if Mtarget is not None else pin.SE3(pin.utils.rotate('x', np.pi / 4),
np.array([0.5, 0.1, 0.27])) # x, y, z
        self.frameIndex = frameIndex if frameIndex is not None else rmodel.nframes - 1
self.viz = viz
def residual(self, q):
'''Compute score from a configuration'''
pin.forwardKinematics(self.rmodel, self.rdata, q)
M = pin.updateFramePlacement(self.rmodel, self.rdata, self.frameIndex)
self.deltaM = self.Mtarget.inverse() * M
return pin.log(self.deltaM).vector
def calc(self, q):
return sum(self.residual(q) ** 2)
    def callback(self, q):
        if self.viz is None:
            return
        pin.framesForwardKinematics(self.rmodel, self.rdata, q)
        M = self.rdata.oMf[self.frameIndex]
        vizutils.applyViewerConfiguration(self.viz, 'world/ball', pin.SE3ToXYZQUATtuple(M))
vizutils.applyViewerConfiguration(self.viz, 'world/box', pin.SE3ToXYZQUATtuple(self.Mtarget))
self.viz.display(q)
time.sleep(1e-2)
def calcDiff(self, q):
J = pin.computeFrameJacobian(self.rmodel, self.rdata, q, self.frameIndex)
r = self.residual(q)
Jlog = pin.Jlog6(self.deltaM)
return 2 * J.T @ Jlog.T @ r
# COST Posture #####################################################################
class CostPosture:
    def __init__(self, rmodel, rdata, qref=None, viz=None):
        # viz and rdata are taken to respect the API but are not used.
        self.rmodel = rmodel
        self.qref = qref if qref is not None else pin.randomConfiguration(rmodel)
        self.removeFreeFlyer = rmodel.joints[1].nq == 7  # Don't penalize the free flyer if any.
def residual(self, q):
if self.removeFreeFlyer:
return (q - self.qref)[7:]
else:
return q - self.qref
def calc(self, q):
return sum(self.residual(q) ** 2)
def calcDiff(self, q):
if self.removeFreeFlyer:
            g = np.zeros(self.rmodel.nv)
g[6:] = 2 * self.residual(q)
return g
else:
return 2 * self.residual(q)
# COST Gravity #####################################################################
class CostGravity:
def __init__(self, rmodel, rdata, viz=None):
self.rmodel = rmodel
self.rdata = rdata
self.viz = viz
def residual(self, q):
return pin.computeGeneralizedGravity(self.rmodel, self.rdata, q)
def calc(self, q):
return sum(self.residual(q) ** 2)
def calcDiff(self, q):
g = self.residual(q)
G = pin.computeGeneralizedGravityDerivatives(self.rmodel, self.rdata, q)
return 2 * G.T @ g
# COST Weighted Gravity #############################################################
class CostWeightedGravity:
"""
return g.T*inv(M)*g = g(q).T * aba(q, 0, 0) = rnea(q, 0, 0).T*aba(q, 0, 0)
"""
def __init__(self, rmodel, rdata, viz=None):
self.rmodel = rmodel
self.rdata = rdata
self.viz = viz
self.v0 = np.zeros(self.rmodel.nv) # for convenience in the evaluation
def calc(self, q):
g = pin.computeGeneralizedGravity(self.rmodel, self.rdata, q)
taugrav = -pin.aba(self.rmodel, self.rdata, q, self.v0, self.v0)
return np.dot(taugrav, g)
def calcDiff(self, q):
pin.computeABADerivatives(self.rmodel, self.rdata, q, self.v0, self.v0)
pin.computeRNEADerivatives(self.rmodel, self.rdata, q, self.v0, self.v0)
return -self.rdata.dtau_dq.T @ self.rdata.ddq - self.rdata.ddq_dq.T @ self.rdata.tau
# COST Posture (renewed) ###########################################################
class CostPostureDiff:
def __init__(self, rmodel, rdata, qref=None, viz=None):
self.rmodel = rmodel
self.qref = qref if qref is not None else pin.randomConfiguration(rmodel)
def residual(self, q):
return pin.difference(self.rmodel, q, self.qref)
def calc(self, q):
return sum(self.residual(q) ** 2)
def calcDiff(self, q):
J, _ = pin.dDifference(self.rmodel, q, self.qref)
return 2 * J.T @ self.residual(q)
# TESTS ###
# TESTS ###
# TESTS ###
if __name__ == "__main__":
import example_robot_data as robex
import copy
def Tdiff1(func, exp, nv, q, eps=1e-6):
'''
Num diff for a function whose input is in a manifold
'''
f0 = copy.copy(func(q))
fs = []
v = np.zeros(nv)
for k in range(nv):
v[k] = eps
qk = exp(q, v)
fs.append((func(qk)-f0)/eps)
v[k] -= eps
if isinstance(fs[0], np.ndarray) and len(fs[0]) > 1:
return np.stack(fs, axis=1)
else:
return np.array(fs)
# Num diff checking, for each cost.
robot = robex.loadTalos()
q = pin.randomConfiguration(robot.model)
# Tdiffq is used to compute the tangent application in the configuration space.
def Tdiffq(f, q):
return Tdiff1(f, lambda q, v: pin.integrate(robot.model, q, v), robot.model.nv, q)
# Test Cost3d
CostClass = Cost3d
cost = CostClass(robot.model, robot.data)
Tg = cost.calcDiff(q)
Tgn = Tdiffq(cost.calc, q)
assert norm(Tg - Tgn) < 1e-4
# Test Cost6d
CostClass = Cost6d
cost = CostClass(robot.model, robot.data)
Tg = cost.calcDiff(q)
Tgn = Tdiffq(cost.calc, q)
assert norm(Tg - Tgn) < 1e-4
# Test CostPosture
CostClass = CostPosture
cost = CostClass(robot.model, robot.data)
Tg = cost.calcDiff(q)
Tgn = Tdiffq(cost.calc, q)
assert norm(Tg - Tgn) < 1e-4
### Test CostGravity
CostClass = CostGravity
cost = CostClass(robot.model, robot.data)
Tg = cost.calcDiff(q)
Tgn = Tdiffq(cost.calc, q)
assert norm(Tg - Tgn) / cost.calc(q) < 1e-4
# Test CostWeightedGravity
CostClass = CostWeightedGravity
cost = CostClass(robot.model, robot.data)
Tg = cost.calcDiff(q)
Tgn = Tdiffq(cost.calc, q)
assert norm(Tg - Tgn) / cost.calc(q) < 1e-4
# Test CostPostureDiff
CostClass = CostPostureDiff
cost = CostClass(robot.model, robot.data)
Tg = cost.calcDiff(q)
Tgn = Tdiffq(cost.calc, q)
assert norm(Tg - Tgn) / cost.calc(q) < 1e-4
|
11453449
|
import ast
from pyflakes.checker import Checker as PyFlakesChecker
from wemake_python_styleguide.checker import Checker
code_that_breaks = '''
def current_session(
telegram_id: int,
for_update: bool = True,
) -> TelegramSession:
"""
Was triggering `AttributeError`.
See: https://github.com/wemake-services/wemake-python-styleguide/issues/112
"""
try:
query = TelegramSession.objects.all()
if for_update: # Try to comment this `if` to fix everything
query = query.select_for_update()
return query.get(
uid=telegram_id,
is_verified=True,
)
except TelegramSession.DoesNotExist:
raise AuthenticationException('Session is missing')
'''
def test_regression112(default_options):
"""
There was a conflict between ``pyflakes`` and our plugin.
We were fighting for ``parent`` property.
Now we use a custom prefix.
See: https://github.com/wemake-services/wemake-python-styleguide/issues/112
"""
module = ast.parse(code_that_breaks)
Checker.parse_options(default_options)
# Now we create modifications to the tree:
Checker(tree=module, file_tokens=[], filename='custom.py')
# It was failing on this line:
# AttributeError: 'ExceptHandler' object has no attribute 'depth'
flakes = PyFlakesChecker(module)
assert module.wps_context is None # augmentation happened!
assert flakes.root
|
11453479
|
import base64
import hashlib
import importlib
import json
import logging
from urllib.parse import parse_qs
from urllib.parse import urlparse
from urllib.parse import urlsplit
from urllib.parse import urlunsplit
import uuid
from cryptography.fernet import Fernet
from cryptojwt import as_unicode
from cryptojwt.utils import as_bytes
from oidcop.session.info import SessionInfo
from oidcop.exception import OidcEndpointError
logger = logging.getLogger(__name__)
OAUTH2_NOCACHE_HEADERS = [("Pragma", "no-cache"), ("Cache-Control", "no-store")]
def modsplit(s):
"""Split importable"""
if ":" in s:
c = s.split(":")
if len(c) != 2:
raise ValueError(f"Syntax error: {s}")
return c[0], c[1]
else:
c = s.split(".")
if len(c) < 2:
raise ValueError(f"Syntax error: {s}")
return ".".join(c[:-1]), c[-1]
def importer(name):
"""Import by name"""
c1, c2 = modsplit(name)
module = importlib.import_module(c1)
return getattr(module, c2)
def build_endpoints(conf, server_get, issuer):
"""
conf typically contains::
'provider_config': {
'path': '.well-known/openid-configuration',
'class': ProviderConfiguration,
'kwargs': {}
},
:param conf:
:param server_get: Callback function
:param issuer:
:return:
"""
if issuer.endswith("/"):
_url = issuer[:-1]
else:
_url = issuer
endpoint = {}
for name, spec in conf.items():
kwargs = spec.get("kwargs", {})
if isinstance(spec["class"], str):
_instance = importer(spec["class"])(server_get=server_get, **kwargs)
else:
_instance = spec["class"](server_get=server_get, **kwargs)
try:
_path = spec["path"]
except KeyError:
# Should there be a default ?
raise
_instance.endpoint_path = _path
_instance.full_path = "{}/{}".format(_url, _path)
if _instance.endpoint_name:
try:
_instance.endpoint_info[_instance.endpoint_name] = _instance.full_path
except TypeError:
_instance.endpoint_info = {_instance.endpoint_name: _instance.full_path}
endpoint[_instance.name] = _instance
return endpoint
class JSONDictDB(object):
def __init__(self, filename):
with open(filename, "r") as f:
self._db = json.load(f)
def __getitem__(self, item):
return self._db[item]
def __contains__(self, item):
return item in self._db
def instantiate(cls, **kwargs):
if isinstance(cls, str):
return importer(cls)(**kwargs)
else:
return cls(**kwargs)
def lv_pack(*args):
"""
Serializes using length:value format
:param args: values
:return: string
"""
s = []
for a in args:
s.append("{}:{}".format(len(a), a))
return "".join(s)
def lv_unpack(txt):
"""
Deserializes a string of the length:value format
:param txt: The input string
    :return: a list of values
"""
txt = txt.strip()
res = []
while txt:
l, v = txt.split(":", 1)
res.append(v[: int(l)])
txt = v[int(l):]
return res
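# Round-trip example (added) for the length:value framing above:
#   lv_pack('abc', 'de')    -> '3:abc2:de'
#   lv_unpack('3:abc2:de')  -> ['abc', 'de']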
class Crypt(object):
def __init__(self, password, mode=None):
self.key = base64.urlsafe_b64encode(hashlib.sha256(password.encode("utf-8")).digest())
self.core = Fernet(self.key)
def encrypt(self, text):
# Padding to block size of AES
text = as_bytes(text)
if len(text) % 16:
text += b" " * (16 - len(text) % 16)
return self.core.encrypt(as_bytes(text))
def decrypt(self, ciphertext):
dec_text = self.core.decrypt(ciphertext)
dec_text = dec_text.rstrip(b" ")
return as_unicode(dec_text)
def get_http_params(config):
_verify_ssl = config.get("verify")
if _verify_ssl is None:
_verify_ssl = config.get("verify_ssl")
if _verify_ssl in [True, False]:
params = {"verify": _verify_ssl}
else:
params = {}
_cert = config.get("client_cert")
_key = config.get("client_key")
if _cert:
if _key:
params["cert"] = (_cert, _key)
else:
params["cert"] = _cert
elif _key:
raise ValueError("Key without cert is no good")
return params
def split_uri(uri):
p = urlsplit(uri)
if p.fragment:
p = p._replace(fragment="")
if p.query:
o = p._replace(query="")
base = urlunsplit(o)
return base, parse_qs(p.query)
else:
base = urlunsplit(p)
return base, ""
def allow_refresh_token(endpoint_context):
    # Is there a refresh_token handler
refresh_token_handler = endpoint_context.session_manager.token_handler.handler["refresh_token"]
# Is refresh_token grant type supported
_token_supported = False
_cap = endpoint_context.conf.get("capabilities")
if _cap:
if "refresh_token" in _cap["grant_types_supported"]:
# self.allow_refresh = kwargs.get("allow_refresh", True)
_token_supported = True
if refresh_token_handler and _token_supported:
return True
elif refresh_token_handler:
logger.warning("Refresh Token handler available but grant type not supported")
elif _token_supported:
logger.error(
"refresh_token grant type to be supported but no refresh_token handler available"
)
raise OidcEndpointError('Grant type "refresh_token" lacks support')
return False
def sector_id_from_redirect_uris(uris):
if not uris:
return ""
_parts = urlparse(uris[0])
hostname = _parts.netloc
scheme = _parts.scheme
for uri in uris[1:]:
parsed = urlparse(uri)
if scheme != parsed.scheme or hostname != parsed.netloc:
raise ValueError(
"All redirect_uris must have the same hostname in order to generate sector_id."
)
return urlunsplit((scheme, hostname, "", "", ""))
def get_logout_id(endpoint_context, user_id, client_id):
_item = SessionInfo()
_item.user_id = user_id
_item.client_id = client_id
# Note that this session ID is not the session ID the session manager is using.
# It must be possible to map from one to the other.
logout_session_id = uuid.uuid4().hex
# Store the map
_mngr = endpoint_context.session_manager
_mngr.set([logout_session_id], _item)
return logout_session_id
|
11453495
|
import sys
if sys.platform == 'darwin':
from ..lib.platform.darwin.link_opener import *
elif sys.platform == 'win32':
from ..lib.platform.win32.link_opener import *
elif sys.platform in ('linux', 'linux2'):
from ..lib.platform.linux.link_opener import *
else:
from ..lib.platform.unsupported.link_opener import *
def open_browser(ident):
return _open_browser(ident)
def open_browser_url(url):
return _open_browser_url(url)
def open_copilot(ident):
return _open_copilot(ident)
def open_copilot_root(path):
return _open_copilot_root(path)
|
11453523
|
import os
import scipy.sparse as sp
DIR_NAME = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'data',
'noisy_diff')
NOISY_DIFF_PATH = {
'citeseer': {
'2500': os.path.join(
DIR_NAME, 'citeseer-diff-2500.npz'),
'5000': os.path.join(
DIR_NAME, 'citeseer-diff-5000.npz'),
'10000': os.path.join(
DIR_NAME, 'citeseer-diff-10000.npz')},
'cora': {
'2500': os.path.join(
DIR_NAME, 'cora-diff-2500.npz'),
'5000': os.path.join(
DIR_NAME, 'cora-diff-5000.npz'),
'10000': os.path.join(
DIR_NAME, 'cora-diff-10000.npz')},
'pubmed': {
'10000': os.path.join(
DIR_NAME, 'pubmed-diff-10000.npz'),
'25000': os.path.join(
DIR_NAME, 'pubmed-diff-25000.npz')},
}
def _load_noisy_diff(dataset_name):
if dataset_name == 'citeseer':
fname = NOISY_DIFF_PATH['citeseer']['5000']
elif dataset_name == 'cora':
fname = NOISY_DIFF_PATH['cora']['2500']
elif dataset_name == 'pubmed':
fname = NOISY_DIFF_PATH['pubmed']['25000']
else:
raise ValueError('invalid dataset name: {}'.format(dataset_name))
diff = sp.load_npz(fname)
return diff
def add_noise(adj, dataset_name):
diff = _load_noisy_diff(dataset_name)
return adj + diff
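# Usage sketch (added, hedged): `adj` is assumed to be the scipy sparse
# adjacency matrix of the 'cora' graph, loaded elsewhere.
# noisy_adj = add_noise(adj, 'cora')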
|
11453554
|
import abc
from typing import get_type_hints
from flash.core.serve._compat import cached_property
class BaseType(metaclass=abc.ABCMeta):
"""Base class for Types.
Any Grid Types must be inherited from this class and must implement abstract
methods. The constructor (or the initializer for pythonistas) can take parameters
and customize the behaviour of serialization/deserialization process.
Notes
-----
* The argument to the :meth:`deserialize` method must be type annotated. This
information will be used to construct the API endpoint. For instance, if you are
making a custom ``Text`` type, you might expect the end user to pass text string
and the language, you could define this method like this:
.. code-block:: python
def deserialize(self, text: str, language: str):
pass
* This will be translated to an API endpoint (automatically and transparently -
no explicit coding required from you) that takes a dictionary that would look
like this:
.. code-block:: python
{"text": "some string", "language": "en"}
"""
@cached_property
def type_hints(self):
"""Fetch the output hints from serialize and input hints from deserialize."""
input_types = get_type_hints(self.deserialize)
input_types.pop("return", None)
try:
output_types = get_type_hints(self.serialize)["return"]
except KeyError: # pragma: no cover
raise RuntimeError("Did you forget to type annotate " "the `serialize` method?")
return {"output_args": output_types, "input_args": input_types}
@abc.abstractmethod
def serialize(self, data): # pragma: no cover
"""Serialize the incoming data to send it through the network."""
raise NotImplementedError
@abc.abstractmethod
def deserialize(self, *args, **kwargs): # pragma: no cover
"""Take the inputs from the network and deserilize/convert them them.
Output from this method will go to the exposed method as arguments.
"""
raise NotImplementedError
def packed_deserialize(self, kwargs):
"""Unpacks data (assuming kwargs) and calls deserialize method of child class.
While it does not seem to be doing much, and always does one thing, the benefit comes when building
sophisticated datatypes (such as Repeated) where the developer wants to dictate how the unpacking happens. For
simple cases like Image or Bbox etc, developer would never need to know the existence of this. Task graph would
never call deserialize directly but always call this method.
"""
return self.deserialize(**kwargs)
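# Illustrative subclass (added, hedged): a minimal custom type along the lines
# of the ``Text`` example in the class docstring. The field names are assumptions.
class EchoText(BaseType):
    def serialize(self, data) -> str:
        return str(data)

    def deserialize(self, text: str, language: str):
        return f"[{language}] {text}"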
|
11453558
|
class AbstractEventHandler:
"""
Abstract class for basic functionality required of an event handler.
Inherit from this when creating your custom event handlers!
"""
def __init__(self):
self.instance = None
self.debug = False
def setup(self, instance, debug: bool):
self.instance = instance
self.debug = debug
async def strangerDisconnected(self, data):
"""
Automatically move to the next person
"""
await self.instance.disconnect()
await self.instance.connect()
|
11453577
|
from . import (
db,
definitions,
hyper_params,
logger,
music,
slider,
timing,
utils
)
|
11453579
|
import pika
import multiprocessing
import threading
import os
import signal
import sys
import json
import socket
from init_client import handle_config,rabbitmq_detail
key = '<KEY>'
handle_config(key)
from time import sleep
from connection import manage_connection
from database_management import manage_database, manage_local_ids
from interface_package.interface import init_gui
from interface_package.login_interface import start_interface
from listen_server import start_listening
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
try:
config = handle_config.read_config_json()
config["IP"] = ip
handle_config.write_config_json(config)
except Exception as Error:
print(str(Error))
sys.exit()
try:
# Basic credentials for login to RabbitMQ Server
rabbitmq_username = config["rabbitmq_username"]
rabbitmq_password = config["<PASSWORD>"]
host = config["host"]
except Exception as Error:
print(str(Error))
sys.exit()
rabbitmq_detail.fill_detail(rabbitmq_username,rabbitmq_password,host)
def main():
#################################
# Initialize the database and returns the cursor
print("[ SETUP ] INITIALISING DATABASE ............")
try:
conn, cursor = manage_database.initialize_table()
manage_local_ids.initialize_local_id(cursor)
except Exception as error:
ex_type,ex_obj, ex_tb = sys.exc_info()
f_name = os.path.split(ex_tb.tb_frame.f_code.co_filename)[1]
print(ex_type,f_name,ex_tb.tb_lineno)
##################################
# Create variables/lists that will be shared between processes
data_changed_flags = multiprocessing.Array('i', 10)
queue = multiprocessing.Queue()
scoreboard = multiprocessing.Queue()
for i in range(10):
data_changed_flags[i] = 0
    # index     value       meaning
    # 0         0/1/2/3/4   Contest Not Started/Contest has been started/Running/Contest Stopped/Time Up
    # 1         0/1/2       Initialize/Verdict Not received/Verdict Received
    # 2         0/1/2       Initialize/Query response Not received/Query response received
    # 3         1           Server Not Accepting Submissions
    # 4         0/1/3       Timer Stopped/Timer running/Update Timer
    # 5         0/1/2       Proper Connection/Single Client Disconnected/All Clients Disconnected
    # 6         1           Leaderboard Update
    # 7         1           Problem Edited
    # 8         1           Blocked User
    # 9         1           Run ID Received
##################################
# Makes connection with RabbitMQ
# And returns channel,connection
print("[ SETUP ] ESTABLISHING CONNECTION .........")
try:
channel,connection = manage_connection.initialize_connection(
rabbitmq_username,
rabbitmq_password,
host
)
channel1 = connection.channel()
channel2 = connection.channel()
except:
ex_type,ex_obj, ex_tb = sys.exc_info()
f_name = os.path.split(ex_tb.tb_frame.f_code.co_filename)[1]
print(ex_type,f_name,ex_tb.tb_lineno)
try:
print("----------------BitsOJ v1.0----------------")
# Starting GUI for login portal
start_interface(data_changed_flags, queue)
print("[ LOGIN ] Successful")
except Exception as error:
print("[ CRITICAL ] GUI could not be loaded! " + str(error))
try:
# Manage Threads
print('[ SETUP ] Initialising threads....')
listen_thread = manage_process(
rabbitmq_username,
rabbitmq_password,
			host,
data_changed_flags,
queue,
scoreboard,
channel2
)
listen_thread.start()
except Exception as error:
print('[ CRITICAL ] Could not initialize threads : ' + str(error))
# After successful login
try:
# Starting Main GUI
print('Main GUI Loading')
init_gui(channel1,data_changed_flags, queue,scoreboard)
except Exception as error:
ex_type,ex_obj, ex_tb = sys.exc_info()
f_name = os.path.split(ex_tb.tb_frame.f_code.co_filename)[1]
print(ex_type,f_name,ex_tb.tb_lineno)
# try:
listen_thread.join()
channel.close()
channel1.close()
channel2.close()
manage_connection.terminate_connection()
# except Exception as error:
# ex_type,ex_obj, ex_tb = sys.exc_info()
# f_name = os.path.split(ex_tb.tb_frame.f_code.co_filename)[1]
# print('[ ERROR ] : ', ex_type,f_name,ex_tb.tb_lineno)
sys.exit(0)
print("[EXIT] Signal Passed")
# os.kill(listen_pid, signal.SIGINT)
sleep(1)
print(" ################################################")
print(" #----------ClIENT CLOSED SUCCESSFULLY----------#")
print(" ################################################")
# Manage the server-listener thread
def manage_process(
rabbitmq_username,
rabbitmq_password,
host,
data_changed_flags,
queue,
scoreboard,
channel2,
):
	# Thread that continuously listens for messages from the server
listen_from_server = threading.Thread(
target = start_listening.listen_server,
args = (rabbitmq_username,rabbitmq_password, host, data_changed_flags, queue, scoreboard, channel2, )
)
return listen_from_server
# listen_pid = listen_from_server.pid
# returning process id
# return listen_pid
if __name__ == '__main__':
main()
|
11453585
|
from ariadne import InterfaceType
from graphql import GraphQLObjectType
from .base import RelayConnectionType
class RelayInterfaceType(RelayConnectionType, InterfaceType):
def bind_resolvers_to_graphql_type(
self, graphql_type: GraphQLObjectType, replace_existing: bool = True
) -> None:
super().bind_resolvers_to_graphql_type( # type: ignore
graphql_type,
replace_existing,
)
self.bind_connection_resolvers_to_graphql_type(graphql_type, replace_existing)
|
11453620
|
import stgfunc as stg
import kagefunc as kgf
import vapoursynth as vs
from typing import Tuple
core = vs.core
def get_detail_mask(clip_y: vs.VideoNode) -> vs.VideoNode:
return core.std.Expr([
stg.mask.generate_detail_mask(clip_y, 0.013),
kgf.kirsch(clip_y)
], "x y -")
def get_linemasks(clip_y: vs.VideoNode) -> Tuple[vs.VideoNode, vs.VideoNode]:
lineart_hard = stg.mask.linemask(clip_y)
lineart_hard = lineart_hard.std.Binarize(75 << 8).std.Minimum().std.Deflate()
lineart_light = kgf.kirsch(clip_y).std.Binarize(255 << 8)
return (lineart_hard, lineart_light)
|
11453633
|
from rest_framework import serializers
from downloads.models import OS, Release, ReleaseFile
class OSSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = OS
fields = ('name', 'slug', 'resource_uri')
class ReleaseSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Release
fields = (
'name',
'slug',
'version',
'is_published',
'is_latest',
'release_date',
'pre_release',
'release_page',
'release_notes_url',
'show_on_download_page',
'resource_uri',
)
class ReleaseFileSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ReleaseFile
fields = (
'name',
'slug',
'os',
'release',
'description',
'is_source',
'url',
'gpg_signature_file',
'md5_sum',
'filesize',
'download_button',
'resource_uri',
)
|
11453642
|
from orm_choices import choices
@choices
class ArticleStatus:
class Meta:
UNPUBLISHED = [1, "Not published yet"]
PUBLISHED = [2, "Published"]
REVIEW_REQUIRED = [3, "To be reviewed"]
DELETED = [4, "Deleted"]
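# Usage sketch (attribute names assumed from the orm_choices convention,
# not verified against the library):
#   ArticleStatus.PUBLISHED    # -> 2
#   ArticleStatus.CHOICES      # -> (value, label) pairs for ORM choice fields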
|
11453659
|
import pandas as pd
import yaml
class Dict2Class(object):
def __init__(self, dvar, def_topvar):
if not isinstance(dvar, dict) or not dvar:
setattr(self, def_topvar, None)
return
for key in dvar:
if isinstance(dvar[key], list):
nested_dclass = []
for ele in dvar[key]:
newele = Dict2Class(ele, def_topvar=ele)
nested_dclass.append(newele)
setattr(self, key.replace('-', '_'), nested_dclass)
else:
setattr(self, key.replace('-', '_'), dvar[key])
class Yaml2Class(object):
def __init__(self, yaml_file, def_topvar='transform'):
with open(yaml_file, 'r') as f:
dvar = yaml.safe_load(f.read())
self.transform = Dict2Class(dvar, def_topvar)
def assert_df_equal(expected_df, got_df, ignore_cols) -> None:
    '''Compare two dataframes for equality.
    Comparing dataframes is tough. The main concern here is that the
    contents are identical; we care much less about row order, since the
    order can change as a consequence of coalescing or some other change.
    We work from the simplest and fastest equality checks to progressively
    slower ones. Ignoring the sort order is the hardest part. First we sort
    both frames and reset the index to avoid index mismatches. If one of the
    columns contains a list, sorting fails, so we fall back to deriving
    tuples from the expected and obtained dataframes, stripping the index
    column (which should be a RangeIndex only), and verifying that each row
    is present in the other dataframe. We even use sets to attempt a quicker
    tuple comparison, which can again fail due to the presence of a list.
    Real failures are hopefully caught quickly, while less clear-cut cases
    are run through a wringer to verify that there's a real problem.
    '''
if (expected_df.empty and got_df.empty):
return
elif (not expected_df.empty and got_df.empty):
assert False, 'Got unexpected empty dataframe'
elif (expected_df.empty and not got_df.empty):
assert False, 'Got unexpected non-empty dataframe'
# Drop any columns to be ignored
if ignore_cols:
if not got_df.empty:
got_df = got_df.drop(columns=ignore_cols, errors='ignore')
if not expected_df.empty:
expected_df = expected_df.drop(
columns=ignore_cols, errors='ignore')
try:
if isinstance(got_df.index, pd.RangeIndex):
expected_df = expected_df \
.sort_values(by=expected_df.columns.tolist()) \
.reset_index(drop=True)
else:
expected_df = expected_df \
.sort_values(by=expected_df.columns.tolist())
if isinstance(expected_df.index, pd.RangeIndex):
got_df = got_df.sort_values(by=got_df.columns.tolist()) \
.reset_index(drop=True)
else:
got_df = got_df.sort_values(by=got_df.columns.tolist())
except Exception:
sortcols = [x
for x in ['namespace', 'hostname', 'ifname', 'vrf',
'peer', 'prefix', 'ipAddress', 'vlan', 'macaddr']
if x in expected_df.columns]
if sortcols:
expected_df = expected_df.sort_values(by=sortcols) \
.reset_index(drop=True)
got_df = got_df.sort_values(by=sortcols).reset_index(drop=True)
if got_df.shape != expected_df.shape:
if 'count' in expected_df.columns and (
got_df.shape[0] == expected_df.shape[0]):
# This is the old unique issue
assert got_df.shape == expected_df.shape, 'old unique'
else:
if 'namespace' in expected_df.columns:
assert got_df.shape == expected_df.shape, \
f'expected/{expected_df.shape} != got/{got_df.shape}\n' \
f'{expected_df.namespace.value_counts()} \nVS\n{got_df.namespace.value_counts()}'
elif 'hostname' in expected_df.columns:
assert got_df.shape == expected_df.shape, \
f'expected/{expected_df.shape} != got/{got_df.shape}\n' \
f'{expected_df.hostname.value_counts()} \nVS\n{got_df.hostname.value_counts()}'
else:
assert got_df.shape == expected_df.shape, \
f'expected/{expected_df.shape} != got/{got_df.shape}'
assert (got_df.columns == expected_df.columns).all(
), 'shapes match, column names/order do not'
    # We assume the assert failure prevents the code from continuing
try:
rslt_df = expected_df.compare(got_df, keep_equal=True)
if not rslt_df.empty:
# Check if its just the timestamps that are different, as would be the
# case if we had a new capture
maincols = [x[0] for x in rslt_df.columns.tolist()]
if all(x in ['timestamp', 'lastChangeTime', 'bootupTimestamp']
for x in maincols):
assert False, 'Only differ in timestamps'
matches = True
# If there are lists in the values, their order maybe causing
# the failure. Pass if the problem is the order but they're
# equal
for row in rslt_df.itertuples():
if isinstance(row._1, list) and isinstance(row._2, list):
if set(row._1) != set(row._2):
matches = False
break
else:
matches = False
break
if not matches:
# This could be because of mismatch in sorted columns
# We have assured that the shape is identical already, so try a
# manual compare. Skip the index number in this case,
# assuming range index, since the sort messes up this order
if isinstance(got_df.index, pd.RangeIndex):
got_tuples = [x[1:] for x in got_df.itertuples()]
else:
got_tuples = [x for x in got_df.itertuples()]
if isinstance(expected_df.index, pd.RangeIndex):
expected_tuples = [x[1:] for x in expected_df.itertuples()]
else:
expected_tuples = [x for x in expected_df.itertuples()]
try:
assert (set(got_tuples) == set(
expected_tuples)), f'{rslt_df}'
except TypeError:
matches = True
for item in expected_tuples:
if item not in got_tuples:
matches = False
assert rslt_df.empty, f'{rslt_df}'
if not matches:
print(rslt_df)
assert rslt_df.empty, f'{rslt_df}'
except ValueError:
# This happens when the two dataframes don't have the same shape
# such as what happens if the return is an error. So, compare fails
# and we have to try a different technique
try:
rslt_df = pd.merge(got_df,
expected_df,
how='outer',
indicator=True)
if not got_df.empty:
assert (not rslt_df.empty and rslt_df.query(
'_merge != "both"').empty), 'Merge compare failed'
except (Exception, AssertionError, TypeError):
            assert got_df.shape == expected_df.shape
            assert False, 'Unable to compare dataframes'
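# Minimal usage sketch (illustrative, not part of the original module):
# two frames with the same contents in a different row order compare equal.
if __name__ == '__main__':
    df_a = pd.DataFrame({'hostname': ['h1', 'h2'], 'vlan': [10, 20]})
    df_b = pd.DataFrame({'hostname': ['h2', 'h1'], 'vlan': [20, 10]})
    assert_df_equal(df_a, df_b, ignore_cols=None)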
|
11453680
|
def test_shocksine():
""" tests against expected sharpclaw results """
import shocksine
from clawpack.pyclaw.util import test_app, check_diff
def verify_shocksine(controller):
""" given an expected value, returns a verification function """
import numpy as np
import os
test_solution = controller.solution.state.get_q_global()
if test_solution is not None:
thisdir = os.path.dirname(__file__)
expected_density = np.loadtxt(os.path.join(thisdir,'shocksine_regression_density.txt'))
test_density = test_solution[0,:]
test_err = np.linalg.norm(expected_density-test_density)
return check_diff(0, test_err, abstol=1.e-4)
return test_app(shocksine.setup, verify_shocksine, {})
if __name__=="__main__":
import nose
nose.main()
|
11453686
|
import unittest
from deployer.query import Q, Query, QueryResult
from deployer.node import Node, Env
from deployer.exceptions import QueryException, ActionException
from .our_hosts import LocalHost
def get_query_result(query, instance):
return query._execute_query(instance).result
class ExpressionTest(unittest.TestCase):
def test_literals(self):
# Literals
q = Q('string')
self.assertEqual(get_query_result(q, None), 'string')
q = Q(55)
self.assertEqual(get_query_result(q, None), 55)
q = Q(True)
self.assertEqual(get_query_result(q, None), True)
q = Q(False)
self.assertEqual(get_query_result(q, None), False)
def test_operator_overloads(self):
# Simple operator overloads (Both Q objects)
q = Q('a') + Q('b')
self.assertEqual(get_query_result(q, None), 'ab')
q = Q(1) + Q(2)
self.assertEqual(get_query_result(q, None), 3)
q = Q(2) - Q(1)
self.assertEqual(get_query_result(q, None), 1)
q = Q(3) * Q(4)
self.assertEqual(get_query_result(q, None), 12)
q = Q(12) / Q(4)
self.assertEqual(get_query_result(q, None), 3)
# Simple operator overloads (Q object on the left.)
q = Q('a') + 'b'
self.assertEqual(get_query_result(q, None), 'ab')
q = Q(1) + 2
self.assertEqual(get_query_result(q, None), 3)
q = Q(2) - 1
self.assertEqual(get_query_result(q, None), 1)
q = Q(3) * 4
self.assertEqual(get_query_result(q, None), 12)
q = Q(12) / 4
self.assertEqual(get_query_result(q, None), 3)
# Simple operator overloads (Q object on the right.)
q = 'a' + Q('b')
self.assertEqual(get_query_result(q, None), 'ab')
q = 1 + Q(2)
self.assertEqual(get_query_result(q, None), 3)
q = 2 - Q(1)
self.assertEqual(get_query_result(q, None), 1)
q = 3 * Q(4)
self.assertEqual(get_query_result(q, None), 12)
q = 12 / Q(4)
self.assertEqual(get_query_result(q, None), 3)
def test_string_interpolation(self):
# String interpolation
q = Q('before %s after') % 'value'
self.assertEqual(get_query_result(q, None), 'before value after')
def test_booleans(self):
# And/or/not
q = Q(True) & Q(True)
self.assertEqual(get_query_result(q, None), True)
q = Q(True) & Q(False)
self.assertEqual(get_query_result(q, None), False)
q = Q(True) | Q(False)
self.assertEqual(get_query_result(q, None), True)
q = Q(False) | Q(False)
self.assertEqual(get_query_result(q, None), False)
q = ~ Q(False)
self.assertEqual(get_query_result(q, None), True)
q = ~ Q(True)
self.assertEqual(get_query_result(q, None), False)
# Combinations
q = Q(False) | ~ Q(False)
self.assertEqual(get_query_result(q, None), True)
def test_resolve_types(self):
q = Q(Q('a'))
self.assertEqual(get_query_result(q, None), 'a')
q = Q([Q('%s') % 'a'])
self.assertEqual(get_query_result(q, None), ['a'])
q = Q('%s: %s') % ('a', 'b')
self.assertEqual(get_query_result(q, None), 'a: b')
q = Q('%s: %s') % (Q('a'), Q('b'))
self.assertEqual(get_query_result(q, None), 'a: b')
q = Q(['a', 'b']) + ['c', 'd']
self.assertEqual(get_query_result(q, None), ['a', 'b', 'c', 'd'])
q = Q(['a', 'b']) + [Q('c'), Q('d')]
self.assertEqual(get_query_result(q, None), ['a', 'b', 'c', 'd'])
q = Q('%(A)s: %(B)s') % {'A': 'a', 'B': 'b'}
self.assertEqual(get_query_result(q, None), 'a: b')
q = Q('%(A)s: %(B)s') % {Q('A'): Q('a'), Q('B'): Q('b')}
self.assertEqual(get_query_result(q, None), 'a: b')
class ReprTest(unittest.TestCase):
def test_reprs(self):
# Operators
self.assertEqual(repr(Q(4) + Q(5)), '4 + 5')
self.assertEqual(repr(Q(4) - Q(5)), '4 - 5')
self.assertEqual(repr(Q(4) * Q(5)), '4 * 5')
self.assertEqual(repr(Q(4) / Q(5)), '4 / 5')
# Booleans
self.assertEqual(repr(Q(4) | ~ Q(5)), '4 | ~ 5')
self.assertEqual(repr(Q(4) & ~ Q(5)), '4 & ~ 5')
# Attributes and calls
self.assertEqual(repr(Q.a), 'Q.a')
self.assertEqual(repr(Q.b['lookup']), "Q.b['lookup']")
self.assertEqual(repr(Q.a.call('param') + Q.b['lookup']), "Q.a.call('param') + Q.b['lookup']")
self.assertEqual(repr(Q.a.call('param', 'p2', key='value')), "Q.a.call('param', 'p2', key='value')")
self.assertEqual(repr(Q.a.call(Q.a)), "Q.a.call(Q.a)")
class InContextTest(unittest.TestCase):
"""
Evaluation of expressions, on a context object.
    (The context is usually a Node in practice.)
"""
def setUp(self):
class Obj(object):
value = 'value'
def action(self):
return 'action-result'
def __getitem__(self, item):
return 'item %s' % item
def true(self): return True
def false(self): return False
obj = Obj()
obj.nested_obj = obj
self.obj = obj
def test_q_attribute_selection(self):
# Combinations of attribute lookups, __getitem__ and calling.
q = Q.value
self.assertEqual(get_query_result(q, self.obj), 'value')
q = Q['attr']
self.assertEqual(get_query_result(q, self.obj), 'item attr')
q = Q.action()
self.assertEqual(get_query_result(q, self.obj), 'action-result')
q = Q.nested_obj.action()
self.assertEqual(get_query_result(q, self.obj), 'action-result')
q = Q.nested_obj.action()
self.assertEqual(get_query_result(q, self.obj), 'action-result')
q = Q.nested_obj.nested_obj['attr']
self.assertEqual(get_query_result(q, self.obj), 'item attr')
# Add some operators
q = Q.nested_obj.nested_obj.value + '-' + Q.value
self.assertEqual(get_query_result(q, self.obj), 'value-value')
q = ~ Q.true()
self.assertEqual(get_query_result(q, self.obj), False)
q = Q.true() & Q.nested_obj.true()
self.assertEqual(get_query_result(q, self.obj), True)
q = Q.true() | Q.nested_obj.false()
self.assertEqual(get_query_result(q, self.obj), True)
def test_query_result(self):
"""
Analysis of the following hierarchical query.
# Q | <q_object_test.Obj object at 0x976d64c>
# Q.true | <bound method Obj.true of <q_object_test.Obj object at 0x976d64c>>
# Q.true() | True
# Q | <q_object_test.Obj object at 0x976d64c>
# Q.nested_obj | <q_object_test.Obj object at 0x976d64c>
# Q.nested_obj.false | <bound method Obj.false of <q_object_test.Obj object at 0x976d64c>>
# Q.nested_obj.false() | False
# Q.true() | Q.nested_obj.false() | True
"""
def count(query):
result = query._execute_query(self.obj)
return len(list(result.walk_through_subqueries()))
# Check subquery count
q = Q
self.assertEqual(count(q), 1)
q = Q.true
self.assertEqual(count(q), 2)
q = Q.true()
self.assertEqual(count(q), 3)
q = Q.nested_obj
self.assertEqual(count(q), 2)
q = Q.nested_obj.false
self.assertEqual(count(q), 3)
q = Q.nested_obj.false()
self.assertEqual(count(q), 4)
q = Q.true() | Q.nested_obj.false()
self.assertEqual(count(q), 8)
# Check subquery order.
q = Q.true() | Q.nested_obj.false()
result = q._execute_query(self.obj)
self.assertIsInstance(result, QueryResult)
# The first parameter contains all the subqueries that are executed.
queries = [ r[0] for r in result.walk_through_subqueries() ]
        self.assertEqual(list(map(repr, queries)), [
'Q',
'Q.true',
'Q.true()',
'Q',
'Q.nested_obj',
'Q.nested_obj.false',
'Q.nested_obj.false()',
'Q.true() | Q.nested_obj.false()' ])
for q in queries:
self.assertIsInstance(q, Query)
# The second parameter contains the results for the respective subqueries.
results = [ r[1] for r in result.walk_through_subqueries() ]
self.assertEqual(results[2], True)
self.assertEqual(results[6], False)
self.assertEqual(results[7], True)
class InActionTest(unittest.TestCase):
def test_q_navigation(self):
this = self
class DummyException(Exception):
pass
class MyNode(Node):
class Hosts:
host = LocalHost
# 1. Normal query from node.
attr = 'value'
query = Q.attr
query2 = Q.attr + Q.attr
def my_action(self):
return self.query
# 2. Exception in query from node.
@property
def this_raises(self):
raise DummyException
query3 = Q.this_raises + Q.attr
def my_action2(self):
# Calling query3 raises a QueryException
# The inner exception that one is the DummyException
try:
return self.query3
except Exception as e:
this.assertIsInstance(e, ActionException)
this.assertIsInstance(e.inner_exception, QueryException)
this.assertIsInstance(e.inner_exception.node, MyNode)
this.assertIsInstance(e.inner_exception.query, Query)
this.assertIsInstance(e.inner_exception.inner_exception, DummyException)
# Raising the exception again at this point, will turn it
# into an action exception.
raise
s = MyNode()
env = Env(s)
# 1.
self.assertEqual(env.my_action(), 'value')
self.assertEqual(env.query2, 'valuevalue')
# 2.
self.assertRaises(ActionException, env.my_action2)
try:
env.my_action2()
except Exception as e:
self.assertIsInstance(e, ActionException)
|
11453696
|
import os
import time
import unittest
import docker
# The DOCKER_IMAGE envvar is needed to specify what
# image to test
# The `docker` library gives all output from containers as raw bytes, so
# we need to use byte string literals for comparison/membership tests
SOCKET_PATH = b"/tmp/statsd.socket"
COMMON_ENVIRONMENT = [
"DD_DD_URL=http://dummy",
"DD_API_KEY=dummy",
]
ENVIRONMENTS = {
"udp": [],
"uds": ["DD_DOGSTATSD_SOCKET=" + SOCKET_PATH.decode('utf-8'), "DD_DOGSTATSD_PORT=0"],
"both": ["DD_DOGSTATSD_SOCKET=" + SOCKET_PATH.decode('utf-8'), "DD_DOGSTATSD_PORT=8125"],
}
containers = {}
client = {}
def setUpModule():
global containers
global client
client = docker.from_env()
for name, env in ENVIRONMENTS.items():
containers[name] = client.containers.run(
os.environ.get('DOCKER_IMAGE'), detach=True, environment=COMMON_ENVIRONMENT + env, auto_remove=True
)
def tearDownModule():
global containers
global client
for _, container in containers.items():
container.stop()
def waitUntilListening(container, retries=20):
    for _ in range(0, retries):
        out = container.exec_run(cmd="netstat -a").output
        if b":8125" in out or SOCKET_PATH in out:
            return True
        time.sleep(0.5)  # give the agent a moment to bind before retrying
    return False
def isUDPListening(container):
out = container.exec_run(cmd="netstat -a").output
return b":8125" in out
def isUDSListening(container):
out = container.exec_run(cmd="netstat -a").output
return SOCKET_PATH in out
class DSDStaticTest(unittest.TestCase):
def setUp(self):
self.assertIsNotNone(os.environ.get('DOCKER_IMAGE'), "DOCKER_IMAGE envvar needed")
def test_static_binary(self):
'''Fails if /dogstatsd is not a static binary, build options are likely broken'''
global client
fileOutput = client.containers.run(
os.environ.get('DOCKER_IMAGE'),
environment=COMMON_ENVIRONMENT,
auto_remove=True,
stdout=True,
command='sh -c "apk add --no-cache file && file /dogstatsd"',
)
self.assertIn(b"statically linked", fileOutput)
class DSDListeningTest(unittest.TestCase):
def setUp(self):
self.assertIsNotNone(os.environ.get('DOCKER_IMAGE'), "DOCKER_IMAGE envvar needed")
def test_udp(self):
self.assertTrue(waitUntilListening(containers["udp"]))
self.assertTrue(isUDPListening(containers["udp"]))
self.assertFalse(isUDSListening(containers["udp"]))
def test_uds(self):
self.assertTrue(waitUntilListening(containers["uds"]))
self.assertFalse(isUDPListening(containers["uds"]))
self.assertTrue(isUDSListening(containers["uds"]))
def test_both(self):
self.assertTrue(waitUntilListening(containers["both"]))
self.assertTrue(isUDPListening(containers["both"]))
self.assertTrue(isUDSListening(containers["both"]))
if __name__ == '__main__':
unittest.main()
|
11453703
|
s = input()
upper = []
lower = []
odd_digit = []
even_digit = []
for i in s:
if i.isupper():
upper.append(i)
elif i.islower():
lower.append(i)
elif i.isdigit():
if int(i) % 2 == 0:
even_digit.append(i)
else:
odd_digit.append(i)
# Sort each group in place.
for group in (upper, lower, odd_digit, even_digit):
    group.sort()
final = lower + upper + odd_digit + even_digit
print("".join(final))
# Problem: arrange in the following order
# lowercase > uppercase > odd_digit > even_digit
# Input:
# Sorting1234
# Output:
# ginortS1324
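# Equivalent one-liner using a composite sort key (same ordering rules):
# print("".join(sorted(s, key=lambda c: (c.isdigit(), c.isupper(), c.isdigit() and int(c) % 2 == 0, c))))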
|
11453722
|
import os
import re
from copy import deepcopy
import numpy as np
from snapgene_reader import snapgene_file_to_seqrecord
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
try:
# Biopython <1.78
from Bio.Alphabet import DNAAlphabet
has_dna_alphabet = True
except ImportError:
# Biopython >=1.78
has_dna_alphabet = False
from Bio.SeqFeature import SeqFeature, FeatureLocation
from Bio import SeqIO
complements_dict = {"A": "T", "T": "A", "C": "G", "G": "C"}
def random_dna_sequence(length, probas=None, seed=None):
"""Return a random DNA sequence ("ATGGCGT...") with the specified length.
Parameters
----------
length
Length of the DNA sequence.
    probas
Frequencies for the different nucleotides, for instance
``probas={"A":0.2, "T":0.3, "G":0.3, "C":0.2}``.
If not specified, all nucleotides are equiprobable (p=0.25).
seed
The seed to feed to the random number generator. When a seed is provided
the random results depend deterministically on the seed, thus enabling
reproducibility.
"""
if seed is not None:
np.random.seed(seed)
if probas is None:
sequence = np.random.choice(list("ATCG"), length)
else:
bases, probas = zip(*probas.items())
sequence = np.random.choice(bases, length, p=probas)
return "".join(sequence)
formats_dict = {".fa": "fasta", ".gb": "genbank", ".gbk": "genbank", ".dna": "snapgene"}
def load_record(filename, linear=True, name="unnamed", capitalize=True):
no_extension, extension = os.path.splitext(filename)
fmt = formats_dict[extension]
if fmt == "snapgene":
record = snapgene_file_to_seqrecord(filename)
else:
record = SeqIO.read(filename, fmt)
if capitalize:
record.seq = record.seq.upper()
record.linear = linear
record.id = name
record.name = name.replace(" ", "_")[:20]
return record
def load_records(path, capitalize=True):
if isinstance(path, (list, tuple)):
return [record for p in path for record in load_records(p)]
no_extension, extension = os.path.splitext(path)
fmt = formats_dict[extension]
if fmt == "snapgene":
records = [snapgene_file_to_seqrecord(path)]
else:
records = list(SeqIO.parse(path, fmt))
for i, record in enumerate(records):
if capitalize:
record.seq = record.seq.upper()
if str(record.id) in ["None", "", "<unknown id>", ".", " "]:
record.id = path.replace("/", "_").replace("\\", "_")
if len(records) > 1:
record.id += "_%04d" % i
return records
def complement(sequence):
return "".join(complements_dict[c] for c in sequence)
def reverse_complement(sequence):
return complement(sequence)[::-1]
def sequence_to_record(sequence, features=()):
if has_dna_alphabet:
seq = Seq(sequence, alphabet=DNAAlphabet())
else:
seq = Seq(sequence)
seqrecord = SeqRecord(seq, features=list(features))
seqrecord.annotations["molecule_type"] = "DNA"
return seqrecord
def annotate_record(
seqrecord, location="full", feature_type="feature", margin=0, **qualifiers
):
"""Add a feature to a Biopython SeqRecord.
Parameters
----------
seqrecord
The biopython seqrecord to be annotated.
location
Either (start, end) or (start, end, strand). (strand defaults to +1).
feature_type
The type associated with the feature.
margin
Number of extra bases added on each side of the given location.
qualifiers
Dictionary that will be the Biopython feature's `qualifiers` attribute.
"""
if location == "full":
location = (margin, len(seqrecord) - margin)
strand = location[2] if len(location) == 3 else 1
seqrecord.features.append(
SeqFeature(
FeatureLocation(location[0], location[1], strand),
qualifiers=qualifiers,
type=feature_type,
)
)
def sanitize_string(
string, max_length=15, replacements=(("'", "p"), ("*", "s"), ("-", "_"))
):
for old, new in replacements:
string = string.replace(old, new)
string = re.sub(r"[^a-zA-Z\d\S]", "_", string)
return string[:max_length]
def sanitize_and_uniquify(
strings, max_length=15, replacements=(("'", "p"), ("*", "s"), ("-", "_"))
):
dejavu = set()
table = {}
for string in strings:
newstring = sanitize_string(
string, max_length=max_length, replacements=replacements
)
i = 1
while newstring in dejavu:
i += 1
newstring = newstring[:-1] + str(i)
dejavu.add(newstring)
table[string] = newstring
return table
def write_record(record, target, fmt="genbank"):
"""Write a record as genbank, fasta, etc. via Biopython, with fixes."""
record = deepcopy(record)
record.name = record.name[:20]
if has_dna_alphabet:
if str(record.seq.alphabet.__class__.__name__) != "DNAAlphabet":
record.seq.alphabet = DNAAlphabet()
record.annotations["molecule_type"] = "DNA"
if hasattr(target, "open"):
target = target.open("w")
SeqIO.write(record, target, fmt)
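# Minimal usage sketch (illustrative, not part of the original module):
# build a reproducible random sequence, wrap it in a record, and annotate it.
if __name__ == "__main__":
    seq = random_dna_sequence(
        50, probas={"A": 0.3, "T": 0.3, "G": 0.2, "C": 0.2}, seed=123
    )
    record = sequence_to_record(seq)
    annotate_record(record, location=(10, 30), feature_type="misc_feature",
                    label="demo")
    print(record.features[0])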
|
11453741
|
import pytest
from tests.communication.test_CommBase import TestComm as base_class
class TestForkComm(base_class):
r"""Tests for ForkComm communication class."""
test_error_send = None
test_error_recv = None
test_work_comm = None
test_send_recv_raw = None
@pytest.fixture(scope="class", autouse=True)
def python_class(self):
r"""Python class that is being tested."""
from yggdrasil.communication.ForkComm import ForkComm
return ForkComm
@pytest.fixture(scope="class", autouse=True)
def commtype(self):
r"""Communicator type being tested."""
return "fork"
@pytest.fixture(scope="class", autouse=True)
def ncomm(self):
r"""Number of communicators to include in the fork."""
return 2
@pytest.fixture(scope="class", autouse=True,
params=['broadcast', 'cycle', 'scatter'])
def send_pattern(self, request):
r"""Pattern in which to send messages to fork communicators."""
return request.param
@pytest.fixture(scope="class", autouse=True)
def recv_pattern(self, send_pattern):
r"""Pattern in which to recv messages to fork communicators."""
pattern_map = {'broadcast': 'cycle',
'cycle': 'cycle',
'scatter': 'gather'}
return pattern_map[send_pattern]
@pytest.fixture(scope="class", autouse=True)
def duplicate_msg(self, ncomm, send_pattern, recv_pattern):
def wrapped_duplicate_msg(out, direction='send'):
r"""Copy a message for 'scatter' communication pattern."""
if ((((direction == 'send') and (send_pattern == 'scatter'))
or ((direction == 'recv') and (recv_pattern == 'gather')))):
out = [out for _ in range(ncomm)]
return out
return wrapped_duplicate_msg
@pytest.fixture(scope="class")
def testing_options(self, python_class, options, ncomm, send_pattern,
recv_pattern, duplicate_msg):
r"""Testing options."""
out = python_class.get_testing_options(**options)
out['kwargs'].update(ncomm=ncomm, pattern=send_pattern,
commtype='ForkComm')
out.setdefault('recv_kwargs', {})
out['recv_kwargs'].update(pattern=recv_pattern,
commtype='ForkComm')
out['msg'] = duplicate_msg(out['msg'])
return out
@pytest.fixture(scope="class")
def process_send_message(self, duplicate_msg):
r"""Factory for method to finalize messages for sending."""
def wrapped_process_send_message(obj):
return duplicate_msg(obj)
return wrapped_process_send_message
@pytest.fixture(scope="class")
def map_sent2recv(self, ncomm, send_pattern, recv_pattern):
r"""Factory for method to convert sent messages to received."""
def wrapped_map_sent2recv(obj):
r"""Convert a sent object into a received one."""
if (((send_pattern == 'scatter')
and isinstance(obj, list) and obj)):
single_obj = obj[0]
else:
single_obj = obj
if recv_pattern == 'gather':
if obj in [b'', []]:
return []
return [single_obj for _ in range(ncomm)]
return single_obj
return wrapped_map_sent2recv
@pytest.fixture(scope="class")
def n_msg_expected(self, ncomm, send_pattern, recv_pattern):
r"""Number of expected messages."""
if (((send_pattern in ['broadcast', 'scatter'])
and (recv_pattern == 'cycle'))):
return ncomm
return 1
def test_send_recv_eof_no_close(self, send_comm, recv_comm, do_send_recv):
r"""Test send/recv of EOF message with no close."""
recv_comm.close_on_eof_recv = False
for x in recv_comm.comm_list:
x.close_on_eof_recv = False
do_send_recv(send_comm, recv_comm,
send_params={'method': 'send_eof'})
def test_send_recv_filter_eof(self, run_once, filtered_comms, send_comm,
recv_comm, do_send_recv, timeout):
r"""Test send/recv of EOF with filter."""
do_send_recv(send_comm, recv_comm,
send_params={'method': 'send_eof'},
recv_params={'flag': False,
'kwargs': {'timeout': 2 * timeout}})
assert(recv_comm.is_closed)
def test_send_recv_filter_recv_filter(self, filtered_comms,
msg_filter_recv, send_comm,
recv_comm, polling_interval,
do_send_recv):
r"""Test send/recv with filter that blocks recv."""
do_send_recv(send_comm, recv_comm, msg_filter_recv,
recv_params={'message': recv_comm.empty_obj_recv,
# Don't need to skip since
# filter is evaluated after
# receipt for fork comm
'skip_wait': False,
'count': 1,
'kwargs': {'timeout': 10 * polling_interval}})
def test_send_recv_after_close(self, commtype, send_comm, recv_comm,
testing_options, ncomm):
r"""Test that opening twice dosn't cause errors and that send/recv
after close returns false."""
send_comm.open()
recv_comm.open()
if 'rmq' in commtype:
send_comm.bind()
recv_comm.bind()
send_comm.close()
recv_comm.close()
assert(send_comm.is_closed)
assert(recv_comm.is_closed)
flag = send_comm.send([testing_options['msg'] for _ in range(ncomm)])
assert(not flag)
flag, msg_recv = recv_comm.recv()
assert(not flag)
def test_async_gather(self, testing_options, send_pattern, recv_pattern,
send_comm, recv_comm, map_sent2recv, timeout):
r"""Test scatter-gather w/ intermittent send."""
if (send_pattern, recv_pattern) != ('scatter', 'gather'):
pytest.skip("Only valid for scatter/gather pattern")
test_msg = testing_options['msg']
send_comm.comm_list[0].send(test_msg[0])
flag, msg_recv = recv_comm.recv()
assert(flag)
assert(recv_comm.is_empty_recv(msg_recv))
for msg_send, comm in zip(test_msg[1:], send_comm.comm_list[1:]):
assert(comm.send(msg_send))
flag, msg_recv = recv_comm.recv(timeout=timeout)
assert(flag)
assert(msg_recv == map_sent2recv(test_msg))
class TestForkCommList(TestForkComm):
r"""Tests for ForkComm communication class with construction from address."""
@pytest.fixture(scope="class", autouse=True)
def send_pattern(self):
r"""Pattern in which to send messages to fork communicators."""
return 'broadcast'
@pytest.fixture(scope="class")
def testing_options(self, python_class, options, ncomm, send_pattern,
recv_pattern, duplicate_msg):
r"""Testing options."""
out = python_class.get_testing_options(**options)
out['kwargs'].update(ncomm=ncomm, pattern=send_pattern,
commtype='ForkComm')
out.setdefault('recv_kwargs', {})
out['recv_kwargs'].update(
pattern=recv_pattern, commtype='ForkComm',
# To force test of construction from addresses
comm_list=None)
out['msg'] = duplicate_msg(out['msg'])
return out
|
11453758
|
from flask import Flask
# Used in particular to read the body content sent with POST requests
from flask import request
import jsonschema
from jsonschema import validate  # used for JSON schema validation
from flask import jsonify  # for JSON serialization support
app = Flask(__name__)
# Sample category list to start with
categories = [
{'id': 1, 'name': 'Kitap', 'count': 15},
{'id': 2, 'name': 'Robotik Oyuncak', 'count': 5},
{'id': 3, 'name': 'DVD Film', 'count': 20},
{'id': 4, 'name': 'Bilgisayar', 'count': 10},
]
# Schema definition of a category object in JSON format
categorySchema = {
"type": "object",
"properties": {
"id": {"type": "number"},
"name": {"type": "string"},
"count": {"type": "number"},
},
}
# Route definition and handler for the categories URL path
@app.route("/categories/")
def get_all_categories():
"""Tum kategorileri verir"""
return jsonify(categories)
# Returns a category matching the id given in the route
@app.route("/categories/<int:id>")
def get_category(id):
"""Id bazli kategoriyi verir"""
return jsonify([c for c in categories if c['id'] == id]) # basit bir sorgu ile id bazli kategoriyi bulup json'lastirip donduruyoruz.
@app.route("/categories", methods=['POST']) # HTTP Post talebini tanimladik
def add_category():
"""Yeni bir kategori ekler"""
content = request.get_json() # talebin govdesinden gelen JSON icerigini al
# print(content)
categories.append(content) # icerigi listeye ekle
return jsonify(content) # eklenen icerigi geri dondur
# Route HTTP PUT requests of the form categories/<id> to this function
@app.route("/categories/<int:id>", methods=['PUT'])
def edit_category(id):
"""ID ile verilen kategoriyi gunceller"""
body = request.get_json() # talep govdesindeki json icerigini al
# category json schema'sina uyuyor mu?
# id'den category'yi bul
category = [c for c in categories if c['id'] == id]
if category: # category bulunduysa islemleri yap
# print(category)
category[0]['name'] = body['name']
category[0]['count'] = body['count']
return jsonify(body)
else:
return jsonify("{'result':'category not found'}")
# When the application starts, it serves on localhost port 4446.
# Debug mode is also enabled.
if __name__ == '__main__':
app.run(host='localhost', port=4446, debug=True)
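# Example requests against a running instance (illustrative):
#   curl http://localhost:4446/categories/
#   curl http://localhost:4446/categories/2
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"id": 5, "name": "Konsol", "count": 7}' http://localhost:4446/categories
#   curl -X PUT -H "Content-Type: application/json" \
#        -d '{"name": "Kitap", "count": 18}' http://localhost:4446/categories/1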
|
11453763
|
from PIL import Image, ImageDraw
import numpy as np
from glob import glob
from os import path
import csv
def tile_intersect_patch(x0, y0, sx0, sy0, x1, y1, sx1, sy1):
if x0 > x1 + sx1 or x1 > x0 + sx0:
return False
if y0 > y1 + sy1 or y1 > y0 + sy0:
return False
return True
def info2patch_xy(segmentation_polygon_folder, x, y, pw):
fns = []
mpp = 0.25
for fn in glob(path.join(segmentation_polygon_folder, '*.csv')):
# 100001_100001_4000_3162_0.2277_1-features.csv
px, py, pw_x, pw_y, mpp, _ = path.basename(fn).split('_')
px = int(px)
py = int(py)
pw_x = int(pw_x)
pw_y = int(pw_y)
mpp = float(mpp)
if tile_intersect_patch(px, py, pw_x, pw_y, x, y, pw, pw):
fns.append(fn)
return fns, x, y, pw, mpp
def patch_xy2mask(patch_xy, scale_to_40X=True):
if not patch_xy:
return None
poly_paths, x, y, pw, mpp = patch_xy
scale_f = (mpp * 4) if scale_to_40X else 1.0
pw = int(pw * scale_f)
    mask = Image.fromarray(np.zeros((pw, pw), dtype=np.int16))
    draw = ImageDraw.Draw(mask)
nuc_idx = 1
for poly_path in poly_paths:
with open(poly_path, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
coors = [float(n) for n in row["Polygon"][1:-1].split(':')]
for i in range(0, len(coors), 2):
coors[i] = scale_f * (coors[i] - x)
for i in range(1, len(coors), 2):
coors[i] = scale_f * (coors[i] - y)
coors += [coors[0], coors[1]]
if min(coors[0::2]) > pw or max(coors[0::2]) < 0 or \
min(coors[1::2]) > pw or max(coors[1::2]) < 0:
continue
draw.polygon(coors, fill=(nuc_idx))
nuc_idx += 1
return np.array(mask)
def extract_segmentation_mask(
segmentation_polygon_folder, x, y, patch_width, scale_to_40X=True):
'''
Extract segmentation mask
Args:
segmentation_polygon_folder: path to a segmentation output folder.
x: x coordinate of the patch you want to extract.
y: y coordinate of the patch.
patch_width: size of the patch.
Returns:
Instance-level mask as numpy array.
'''
patch_xy = info2patch_xy(
segmentation_polygon_folder, x, y, patch_width)
return patch_xy2mask(patch_xy, scale_to_40X)
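# Usage sketch (folder path and coordinates are illustrative):
#   mask = extract_segmentation_mask('polygon_csvs/', x=4000, y=3162,
#                                    patch_width=2000)
#   # 'mask' is an instance-level numpy array: 0 is background and each
#   # nucleus carries a unique positive integer label.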
|
11453825
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import i2c, sensor
from esphome.const import (
CONF_BUS_VOLTAGE,
CONF_CURRENT,
CONF_ID,
CONF_POWER,
CONF_SHUNT_RESISTANCE,
CONF_SHUNT_VOLTAGE,
DEVICE_CLASS_VOLTAGE,
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_POWER,
STATE_CLASS_MEASUREMENT,
UNIT_VOLT,
UNIT_AMPERE,
UNIT_WATT,
)
DEPENDENCIES = ["i2c"]
CONF_CHANNEL_1 = "channel_1"
CONF_CHANNEL_2 = "channel_2"
CONF_CHANNEL_3 = "channel_3"
ina3221_ns = cg.esphome_ns.namespace("ina3221")
INA3221Component = ina3221_ns.class_(
"INA3221Component", cg.PollingComponent, i2c.I2CDevice
)
INA3221_CHANNEL_SCHEMA = cv.Schema(
{
cv.Optional(CONF_BUS_VOLTAGE): sensor.sensor_schema(
unit_of_measurement=UNIT_VOLT,
accuracy_decimals=2,
device_class=DEVICE_CLASS_VOLTAGE,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_SHUNT_VOLTAGE): sensor.sensor_schema(
unit_of_measurement=UNIT_VOLT,
accuracy_decimals=2,
device_class=DEVICE_CLASS_VOLTAGE,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_CURRENT): sensor.sensor_schema(
unit_of_measurement=UNIT_AMPERE,
accuracy_decimals=2,
device_class=DEVICE_CLASS_CURRENT,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_POWER): sensor.sensor_schema(
unit_of_measurement=UNIT_WATT,
accuracy_decimals=2,
device_class=DEVICE_CLASS_POWER,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_SHUNT_RESISTANCE, default=0.1): cv.All(
cv.resistance, cv.Range(min=0.0, max=32.0)
),
}
)
CONFIG_SCHEMA = (
cv.Schema(
{
cv.GenerateID(): cv.declare_id(INA3221Component),
cv.Optional(CONF_CHANNEL_1): INA3221_CHANNEL_SCHEMA,
cv.Optional(CONF_CHANNEL_2): INA3221_CHANNEL_SCHEMA,
cv.Optional(CONF_CHANNEL_3): INA3221_CHANNEL_SCHEMA,
}
)
.extend(cv.polling_component_schema("60s"))
.extend(i2c.i2c_device_schema(0x40))
)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
await i2c.register_i2c_device(var, config)
for i, channel in enumerate([CONF_CHANNEL_1, CONF_CHANNEL_2, CONF_CHANNEL_3]):
if channel not in config:
continue
conf = config[channel]
if CONF_SHUNT_RESISTANCE in conf:
cg.add(var.set_shunt_resistance(i, conf[CONF_SHUNT_RESISTANCE]))
if CONF_BUS_VOLTAGE in conf:
sens = await sensor.new_sensor(conf[CONF_BUS_VOLTAGE])
cg.add(var.set_bus_voltage_sensor(i, sens))
if CONF_SHUNT_VOLTAGE in conf:
sens = await sensor.new_sensor(conf[CONF_SHUNT_VOLTAGE])
cg.add(var.set_shunt_voltage_sensor(i, sens))
if CONF_CURRENT in conf:
sens = await sensor.new_sensor(conf[CONF_CURRENT])
cg.add(var.set_current_sensor(i, sens))
if CONF_POWER in conf:
sens = await sensor.new_sensor(conf[CONF_POWER])
cg.add(var.set_power_sensor(i, sens))
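# Example ESPHome YAML accepted by this schema (illustrative values):
# sensor:
#   - platform: ina3221
#     address: 0x40
#     update_interval: 60s
#     channel_1:
#       shunt_resistance: 0.1 ohm
#       current:
#         name: "INA3221 Channel 1 Current"
#       bus_voltage:
#         name: "INA3221 Channel 1 Bus Voltage"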
|
11453835
|
import sys
sys.path.append('..')
import os
import scipy.io as scio
import numpy as np
import theano
import theano.tensor as T
import lasagne
import h5py
import shutil
import json
from time import time
from PIL import Image
from lib.data_utils import processing_img, convert_img_back, convert_img, Batch, shuffle, iter_data, ImgRescale, OneHot
from lib.theano_utils import floatX, sharedX
from lib.rng import py_rng, np_rng, t_rng
from models import models_uncond
# ############################## Main program ################################
# Everything else will be handled in our main program now. We could pull out
# more functions to better separate the code, but it wouldn't make it any
# easier to read.
def create_G(loss_type=None, discriminator=None, lr=0.0002, b1=0.5, ngf=64):
noise = T.matrix('noise')
generator = models_uncond.build_generator_64(noise,ngf=ngf)
Tgimgs = lasagne.layers.get_output(generator)
Tfake_out = lasagne.layers.get_output(discriminator, Tgimgs)
if loss_type == 'trickLogD':
generator_loss = lasagne.objectives.binary_crossentropy(Tfake_out, 1).mean()
elif loss_type == 'minimax':
generator_loss = -lasagne.objectives.binary_crossentropy(Tfake_out, 0).mean()
elif loss_type == 'ls':
generator_loss = T.mean(T.sqr((Tfake_out - 1)))
generator_params = lasagne.layers.get_all_params(generator, trainable=True)
updates_g = lasagne.updates.adam(generator_loss, generator_params, learning_rate=lr, beta1=b1)
train_g = theano.function([noise],
generator_loss,
updates=updates_g)
gen_fn = theano.function([noise],
lasagne.layers.get_output(generator,
deterministic=True))
return train_g, gen_fn, generator
def main():
# Parameters
data_path = '../datasets/'
task = 'bedroom'
name = '64'
start = 0
stop = 3032640
input_nc = 3
loss_type = ['trickLogD','minimax','ls']
nloss = 3
shuffle_ = True
batchSize = 32
fineSize = 64
flip = True
    ncandi = 1           # # of surviving children
    kD = 3               # # of discrim updates for each gen update
    kG = 1               # # of gen updates for each discrim update
ntf = batchSize*kD
b1 = 0.5 # momentum term of adam
nz = 100 # # of dim for Z
ngf = 128 # # of gen filters in first conv layer
ndf = 128 # # of discrim filters in first conv layer
niter = 6 # # of iter at starting learning rate
lr = 0.0002 # initial learning rate for adam G
lrd = 0.0002 # initial learning rate for adam D
    beta = 0.001         # hyperparameter that balances the fitness score
    GP_norm = False      # whether to use gradient penalty on the discriminator
LAMBDA = 2. # hyperparameter of GP
save_freq = 5000
show_freq = 500
begin_save = 0
# Load the dataset
print("Loading data...")
f = h5py.File(data_path+'bedroom_train_64.hdf5','r')
trX = f['data']
ids = range(start, stop)
################## MODEL D #######################
print("Building model and compiling functions...")
# Prepare Theano variables for inputs and targets
real_imgs = T.tensor4('real_imgs')
fake_imgs = T.tensor4('fake_imgs')
discriminator = models_uncond.build_discriminator_64(ndf=ndf)
# Create expression for passing real data through the discriminator
real_out = lasagne.layers.get_output(discriminator, real_imgs)
# Create expression for passing fake data through the discriminator
fake_out = lasagne.layers.get_output(discriminator, fake_imgs)
# Create loss expressions
discriminator_loss = (lasagne.objectives.binary_crossentropy(real_out, 1)
+ lasagne.objectives.binary_crossentropy(fake_out, 0)).mean()
    # Gradient penalty term
if GP_norm is True:
alpha = t_rng.uniform((batchSize,1,1,1), low=0.,high=1.)
differences = fake_imgs - real_imgs
interpolates = real_imgs + (alpha*differences)
gradients = theano.grad(lasagne.layers.get_output(discriminator, interpolates).sum(), wrt=interpolates)
slopes = T.sqrt(T.sum(T.sqr(gradients), axis=(1,2,3)))
gradient_penalty = T.mean((slopes-1.)**2)
D_loss = discriminator_loss + LAMBDA*gradient_penalty
b1_d = 0.
else:
D_loss = discriminator_loss
b1_d = b1
# Create update expressions for training
discriminator_params = lasagne.layers.get_all_params(discriminator, trainable=True)
lrtd = theano.shared(lasagne.utils.floatX(lrd))
updates_d = lasagne.updates.adam(
D_loss, discriminator_params, learning_rate=lrtd, beta1=b1_d)
lrt = theano.shared(lasagne.utils.floatX(lr))
    # Diversity fitness
Fd = theano.gradient.grad(discriminator_loss, discriminator_params)
Fd_score = beta*T.log(sum(T.sum(T.sqr(x)) for x in Fd))
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_d = theano.function([real_imgs, fake_imgs],
discriminator_loss,
updates=updates_d)
# Compile another function generating some data
disft_fn = theano.function([real_imgs,fake_imgs],
[(real_out).mean(),
(fake_out).mean(),
Fd_score])
# Finally, launch the training loop.
print("Starting training...")
desc = task + '_' + name
print desc
if not os.path.isdir('logs'):
os.mkdir(os.path.join('logs'))
f_log = open('logs/%s.ndjson'%desc, 'wb')
if not os.path.isdir('samples'):
os.mkdir(os.path.join('samples/'))
if not os.path.isdir('samples/'+desc):
os.mkdir(os.path.join('samples/',desc))
if not os.path.isdir('models'):
os.mkdir(os.path.join('models/'))
if not os.path.isdir('models/'+desc):
os.mkdir(os.path.join('models/',desc))
gen_new_params = []
n_updates = 0
# We iterate over epochs:
for epoch in range(niter):
if shuffle_ is True:
ids = shuffle(ids)
for index_ in iter_data(ids, size=batchSize*kD):
index = sorted(index_)
xmb = trX[index,:,:,:]
xmb = Batch(xmb,fineSize,input_nc,flip=flip)
xmb = processing_img(xmb, center=True, scale=True, convert=False)
            # Sample real images for measuring discriminator accuracy
sample_id = np_rng.randint(0,stop-ncandi*ntf,1)[0]
sample_xmb = floatX(trX[sample_id:sample_id+ncandi*ntf,:,:,:])
sample_xmb = processing_img(sample_xmb, center=True, scale=True, convert=False)
            # Initialize the population of generator candidates
if epoch + n_updates == 0:
for can_i in range(0,ncandi):
train_g, gen_fn, generator = create_G(
loss_type=loss_type[can_i%nloss],
discriminator=discriminator, lr=lr, b1=b1, ngf=ngf)
for _ in range(0,kG):
zmb = floatX(np_rng.uniform(-1., 1., size=(batchSize, nz)))
cost = train_g(zmb)
sample_zmb = floatX(np_rng.uniform(-1., 1., size=(ntf, nz)))
gen_imgs = gen_fn(sample_zmb)
gen_new_params.append(lasagne.layers.get_all_param_values(generator))
if can_i == 0:
g_imgs_old=gen_imgs
fmb = gen_imgs[0:batchSize/ncandi*kD,:,:,:]
else:
g_imgs_old = np.append(g_imgs_old,gen_imgs,axis=0)
fmb = np.append(fmb,gen_imgs[0:batchSize/ncandi*kD,:,:,:],axis=0)
#print gen_new_params
# MODEL G
noise = T.matrix('noise')
generator = models_uncond.build_generator_64(noise,ngf=ngf)
Tgimgs = lasagne.layers.get_output(generator)
Tfake_out = lasagne.layers.get_output(discriminator, Tgimgs)
g_loss_logD = lasagne.objectives.binary_crossentropy(Tfake_out, 1).mean()
g_loss_minimax = -lasagne.objectives.binary_crossentropy(Tfake_out, 0).mean()
g_loss_ls = T.mean(T.sqr((Tfake_out - 1)))
g_params = lasagne.layers.get_all_params(generator, trainable=True)
up_g_logD = lasagne.updates.adam(g_loss_logD, g_params, learning_rate=lrt, beta1=b1)
up_g_minimax = lasagne.updates.adam(g_loss_minimax, g_params, learning_rate=lrt, beta1=b1)
up_g_ls = lasagne.updates.adam(g_loss_ls, g_params, learning_rate=lrt, beta1=b1)
train_g = theano.function([noise],g_loss_logD,updates=up_g_logD)
train_g_minimax = theano.function([noise],g_loss_minimax,updates=up_g_minimax)
train_g_ls = theano.function([noise],g_loss_ls,updates=up_g_ls)
gen_fn = theano.function([noise], lasagne.layers.get_output(
generator,deterministic=True))
else:
gen_old_params = gen_new_params
for can_i in range(0,ncandi):
for type_i in range(0,nloss):
lasagne.layers.set_all_param_values(generator, gen_old_params[can_i])
if loss_type[type_i] == 'trickLogD':
for _ in range(0,kG):
zmb = floatX(np_rng.uniform(-1., 1., size=(batchSize, nz)))
cost = train_g(zmb)
elif loss_type[type_i] == 'minimax':
for _ in range(0,kG):
zmb = floatX(np_rng.uniform(-1., 1., size=(batchSize, nz)))
cost = train_g_minimax(zmb)
elif loss_type[type_i] == 'ls':
for _ in range(0,kG):
zmb = floatX(np_rng.uniform(-1., 1., size=(batchSize, nz)))
cost = train_g_ls(zmb)
sample_zmb = floatX(np_rng.uniform(-1., 1., size=(ntf, nz)))
gen_imgs = gen_fn(sample_zmb)
_, fr_score, fd_score = disft_fn(sample_xmb[0:ntf],gen_imgs)
fit = fr_score - fd_score
if can_i*nloss + type_i < ncandi:
idx = can_i*nloss + type_i
gen_new_params[idx]=lasagne.layers.get_all_param_values(generator)
fitness[idx]=fit
fake_rate[idx]=fr_score
g_imgs_old[idx*ntf:(idx+1)*ntf,:,:,:]=gen_imgs
fmb[idx*batchSize/ncandi*kD:(idx+1)*batchSize/ncandi*kD,:,:,:] = \
gen_imgs[0:batchSize/ncandi*kD,:,:,:]
else:
fit_com = fitness - fit
if min(fit_com) < 0:
ids_replace = np.where(fit_com==min(fit_com))
idr = ids_replace[0][0]
fitness[idr]=fit
fake_rate[idr]=fr_score
gen_new_params[idr] = lasagne.layers.get_all_param_values(generator)
g_imgs_old[idr*ntf:(idr+1)*ntf,:,:,:]=gen_imgs
fmb[idr*batchSize/ncandi*kD:(idr+1)*batchSize/ncandi*kD,:,:,:] = \
gen_imgs[0:batchSize/ncandi*kD,:,:,:]
print fake_rate, fitness
f_log.write(str(fake_rate) + ' '+str(fd_score) +' ' + str(fitness)+ '\n')
# train D
for xreal,xfake in iter_data(xmb, shuffle(fmb), size=batchSize):
cost = train_d(xreal, xfake)
for i in range(0, ncandi):
xfake = g_imgs_old[i*ntf:(i+1)*ntf,:,:,:]
xreal = sample_xmb[i*ntf:(i+1)*ntf,:,:,:]
tr, fr, fd = disft_fn(xreal,xfake)
if i == 0:
fake_rate = np.array([fr])
fitness = np.array([0.])
real_rate = np.array([tr])
FDL = np.array([fd])
else:
fake_rate = np.append(fake_rate,fr)
fitness = np.append(fitness,[0.])
real_rate = np.append(real_rate,tr)
FDL = np.append(FDL,fd)
print fake_rate, FDL
print (n_updates, epoch,real_rate.mean())
n_updates += 1
f_log.write(str(fake_rate)+' '+str(FDL)+ '\n'+ str(epoch)+' '+str(n_updates)+' '+str(real_rate.mean())+'\n')
f_log.flush()
if n_updates%show_freq == 0:
blank_image = Image.new("RGB",(fineSize*8+9,fineSize*8+9))
for i in range(8):
for ii in range(8):
img = g_imgs_old[i*8+ii,:,:,:]
img = ImgRescale(img, center=True, scale=True, convert_back=True)
blank_image.paste(Image.fromarray(img),(ii*fineSize+ii+1,i*fineSize+i+1))
blank_image.save('samples/%s/%s_%d.png'%(desc,desc,n_updates/save_freq))
if n_updates%save_freq == 0 and epoch > begin_save - 1:
# Optionally, you could now dump the network weights to a file like this:
np.savez('models/%s/gen_%d.npz'%(desc,n_updates/save_freq), *lasagne.layers.get_all_param_values(generator))
np.savez('models/%s/dis_%d.npz'%(desc,n_updates/save_freq), *lasagne.layers.get_all_param_values(discriminator))
if __name__ == '__main__':
#if ('--help' in sys.argv) or ('-h' in sys.argv):
# print("Trains a DCGAN on MNIST using Lasagne.")
# print("Usage: %s [EPOCHS]" % sys.argv[0])
# print()
# print("EPOCHS: number of training epochs to perform (default: 100)")
#else:
# kwargs = {}
# if len(sys.argv) > 1:
# kwargs['num_epochs'] = int(sys.argv[1])
main()
|
11453890
|
from nltk import word_tokenize
from marmot.features.feature_extractor import FeatureExtractor
from marmot.exceptions.no_data_error import NoDataError
class PseudoReferenceFeatureExtractor(FeatureExtractor):
    '''
    A feature extractor that produces the pseudo-reference feature
    for pseudo-references provided in a file
    (an alternative to GoogleTranslateFeatureExtractor).
    '''
def __init__(self, ref_file):
self.pseudo_references = []
for line in open(ref_file):
self.pseudo_references.append(word_tokenize(line[:-1].decode('utf-8')))
def get_features(self, context_obj):
if 'sentence_id' not in context_obj:
raise NoDataError('sentence_id', context_obj, 'PseudoReferenceFeatureExtractor')
out = 1 if context_obj['token'] in self.pseudo_references[context_obj['sentence_id']] else 0
return [out]
def get_feature_names(self):
return ["pseudo-reference"]
|
11453921
|
from docs_snippets_crag.concepts.assets.materialization_jobs import my_user_model_job
def test_pipelines_compile_and_execute():
jobs = [my_user_model_job]
for job in jobs:
result = job.execute_in_process()
assert result.success
|
11453937
|
from pathlib import Path
from hyperstyle.src.python.review.inspectors.inspector_type import InspectorType
from hyperstyle.src.python.review.inspectors.issue import CodeIssue, IssueDifficulty, IssueType
from hyperstyle.src.python.review.reviewers.utils.issues_filter import filter_duplicate_issues
def test_filter_duplicate_issues_when_single_inspector() -> None:
issues = [
CodeIssue(
file_path=Path('code.py'),
line_no=10,
description='',
inspector_type=InspectorType.FLAKE8,
type=IssueType.CODE_STYLE,
column_no=1,
origin_class='',
difficulty=IssueDifficulty.EASY,
),
CodeIssue(
file_path=Path('code.py'),
line_no=11,
description='',
inspector_type=InspectorType.FLAKE8,
type=IssueType.CODE_STYLE,
column_no=1,
origin_class='',
difficulty=IssueDifficulty.EASY,
),
CodeIssue(
file_path=Path('code.py'),
line_no=11,
description='',
inspector_type=InspectorType.FLAKE8,
type=IssueType.CODE_STYLE,
column_no=1,
origin_class='',
difficulty=IssueDifficulty.EASY,
),
CodeIssue(
file_path=Path('code.py'),
line_no=11,
description='',
type=IssueType.CODE_STYLE,
inspector_type=InspectorType.FLAKE8,
column_no=1,
origin_class='',
difficulty=IssueDifficulty.EASY,
),
]
filtered_issues = filter_duplicate_issues(issues)
assert set(filtered_issues) == set(issues)
def test_filter_duplicate_issues_when_several_inspectors() -> None:
issues = [
CodeIssue(
file_path=Path('code.py'),
line_no=10,
description='',
inspector_type=InspectorType.PYLINT,
column_no=1,
origin_class='',
type=IssueType.COMPLEXITY,
difficulty=IssueDifficulty.HARD,
),
CodeIssue(
file_path=Path('code.py'),
line_no=10,
description='',
inspector_type=InspectorType.FLAKE8,
column_no=1,
origin_class='',
type=IssueType.COMPLEXITY,
difficulty=IssueDifficulty.HARD,
),
CodeIssue(
file_path=Path('code.py'),
line_no=11,
description='',
type=IssueType.CODE_STYLE,
inspector_type=InspectorType.PYLINT,
column_no=1,
origin_class='',
difficulty=IssueDifficulty.EASY,
),
CodeIssue(
file_path=Path('code.py'),
line_no=11,
description='',
type=IssueType.BEST_PRACTICES,
inspector_type=InspectorType.FLAKE8,
column_no=1,
origin_class='',
difficulty=IssueDifficulty.MEDIUM,
),
]
filtered_issues = filter_duplicate_issues(issues)
assert set(filtered_issues) == {issues[0], issues[2], issues[3]}
def test_filter_duplicate_issues_when_several_issues_in_line_no() -> None:
issues = [
CodeIssue(
file_path=Path('code.py'),
line_no=10,
description='',
type=IssueType.CODE_STYLE,
inspector_type=InspectorType.PYLINT,
column_no=1,
origin_class='',
difficulty=IssueDifficulty.EASY,
),
CodeIssue(
file_path=Path('code.py'),
line_no=10,
description='',
type=IssueType.CODE_STYLE,
inspector_type=InspectorType.FLAKE8,
column_no=1,
origin_class='',
difficulty=IssueDifficulty.EASY,
),
CodeIssue(
file_path=Path('code.py'),
line_no=10,
description='',
type=IssueType.CODE_STYLE,
inspector_type=InspectorType.FLAKE8,
column_no=1,
origin_class='',
difficulty=IssueDifficulty.EASY,
),
CodeIssue(
file_path=Path('code.py'),
line_no=10,
description='',
inspector_type=InspectorType.FLAKE8,
column_no=1,
origin_class='',
type=IssueType.COMPLEXITY,
difficulty=IssueDifficulty.HARD,
),
]
filtered_issues = filter_duplicate_issues(issues)
assert set(filtered_issues) == {issues[1], issues[2], issues[3]}
|
11454033
|
import time
import logging
from botocore.exceptions import ClientError
import boto3
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
def codepipeline_success(job_id):
"""
Puts CodePipeline Success Result
"""
try:
codepipeline = boto3.client('codepipeline')
codepipeline.put_job_success_result(jobId=job_id)
LOGGER.info('===SUCCESS===')
return True
except ClientError as err:
LOGGER.error("Failed to PutJobSuccessResult for CodePipeline!\n%s", err)
return False
def codepipeline_failure(job_id, message):
    """
    Puts CodePipeline Failure Result
    """
try:
codepipeline = boto3.client('codepipeline')
codepipeline.put_job_failure_result(
jobId=job_id,
failureDetails={'type': 'JobFailed', 'message': message}
)
LOGGER.info('===FAILURE===')
return True
except ClientError as err:
LOGGER.error("Failed to PutJobFailureResult for CodePipeline!\n%s", err)
return False
def lambda_handler(event, context):
    """
    Invalidates the CloudFront distribution given via UserParameters
    and reports the result back to CodePipeline.
    """
    LOGGER.info(event)
    try:
        job_id = event['CodePipeline.job']['id']
    except KeyError as err:
        LOGGER.error("Could not retrieve CodePipeline Job ID!\n%s", err)
        return False
    try:
        dist_id = event['CodePipeline.job']['data']['actionConfiguration']['configuration']['UserParameters']
        client = boto3.client('cloudfront')
        client.create_invalidation(
            DistributionId=dist_id,
            InvalidationBatch={
                'Paths': {
                    'Quantity': 1,
                    'Items': ['/*']
                },
                'CallerReference': str(time.time())
            }
        )
        codepipeline_success(job_id)
    except (KeyError, ClientError) as err:
        LOGGER.error("CloudFront invalidation failed!\n%s", err)
        codepipeline_failure(job_id, str(err))
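# Shape of the CodePipeline event this handler expects (illustrative values):
# {
#     "CodePipeline.job": {
#         "id": "11111111-2222-3333-4444-555555555555",
#         "data": {
#             "actionConfiguration": {
#                 "configuration": {"UserParameters": "<CloudFront distribution ID>"}
#             }
#         }
#     }
# }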
|
11454048
|
import numpy as np
from pytheas import Scatt3D
# import numpy.testing as npt
from testutils import *
def model(verbose=False):
fem = Scatt3D()
fem.rm_tmp_dir()
fem.eps_des = 2 - 0 * 1j
fem.hx_des = 3
fem.hy_des = 3
fem.hz_des = 0.5
fem.hx_box = fem.hx_des * 1.1
fem.hy_box = fem.hy_des * 1.1
fem.hz_box = fem.hz_des * 3
fem.R_sph = 0.05
fem.z_sph = -fem.hz_des / 2 * 1.2 - fem.R_sph
fem.parmesh = 2
fem.parmesh_des = 2
fem.parmesh_pml = fem.parmesh * 2 / 3
fem.parmesh_host = 3
fem.recomb_des = False
fem.matprop_pattern = [1.4, 2 - 0.02 * 1j, 3 - 0.01j] # refractive index values
if verbose:
fem.getdp_verbose = 4
fem.gmsh_verbose = 4
fem.python_verbose = 1
fem.initialize()
fem.make_mesh()
return fem
def test_scatt3D(verbose=False):
fem = model(verbose=verbose)
mat = pattern(xsym=True, ysym=True)
fem.register_pattern(mat.pattern, mat._threshold_val)
fem.compute_solution()
return fem
|
11454052
|
import pytest
from unittest.mock import Mock
from collections import OrderedDict
from nbformat.v4 import new_code_cell
from .. import translators
from ..exceptions import PapermillException
from ..models import Parameter
@pytest.mark.parametrize(
"test_input,expected",
[
("foo", '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
({"foo": "bar"}, '{"foo": "bar"}'),
({"foo": '"bar"'}, '{"foo": "\\"bar\\""}'),
({"foo": ["bar"]}, '{"foo": ["bar"]}'),
({"foo": {"bar": "baz"}}, '{"foo": {"bar": "baz"}}'),
({"foo": {"bar": '"baz"'}}, '{"foo": {"bar": "\\"baz\\""}}'),
(["foo"], '["foo"]'),
(["foo", '"bar"'], '["foo", "\\"bar\\""]'),
([{"foo": "bar"}], '[{"foo": "bar"}]'),
([{"foo": '"bar"'}], '[{"foo": "\\"bar\\""}]'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
(float('nan'), "float('nan')"),
(float('-inf'), "float('-inf')"),
(float('inf'), "float('inf')"),
(True, 'True'),
(False, 'False'),
(None, 'None'),
],
)
def test_translate_type_python(test_input, expected):
assert translators.PythonTranslator.translate(test_input) == expected
@pytest.mark.parametrize(
"parameters,expected",
[
({"foo": "bar"}, '# Parameters\nfoo = "bar"\n'),
({"foo": True}, '# Parameters\nfoo = True\n'),
({"foo": 5}, '# Parameters\nfoo = 5\n'),
({"foo": 1.1}, '# Parameters\nfoo = 1.1\n'),
({"foo": ['bar', 'baz']}, '# Parameters\nfoo = ["bar", "baz"]\n'),
({"foo": {'bar': 'baz'}}, '# Parameters\nfoo = {"bar": "baz"}\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'# Parameters\nfoo = "bar"\nbaz = ["buz"]\n',
),
],
)
def test_translate_codify_python(parameters, expected):
assert translators.PythonTranslator.codify(parameters) == expected
@pytest.mark.parametrize(
"test_input,expected", [("", '#'), ("foo", '# foo'), ("['best effort']", "# ['best effort']")]
)
def test_translate_comment_python(test_input, expected):
assert translators.PythonTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
("a = 2", [Parameter("a", "None", "2", "")]),
("a: int = 2", [Parameter("a", "int", "2", "")]),
("a = 2 # type:int", [Parameter("a", "int", "2", "")]),
("a = False # Nice variable a", [Parameter("a", "None", "False", "Nice variable a")]),
("a: float = 2.258 # type: int Nice variable a", [Parameter("a", "float", "2.258", "Nice variable a")]), # noqa
(
"a = 'this is a string' # type: int Nice variable a",
[Parameter("a", "int", "'this is a string'", "Nice variable a")]
),
(
"a: List[str] = ['this', 'is', 'a', 'string', 'list'] # Nice variable a",
[Parameter("a", "List[str]", "['this', 'is', 'a', 'string', 'list']", "Nice variable a")]
),
(
"a: List[str] = [\n 'this', # First\n 'is',\n 'a',\n 'string',\n 'list' # Last\n] # Nice variable a", # noqa
[Parameter("a", "List[str]", "['this','is','a','string','list']", "Nice variable a")]
),
(
"a: List[str] = [\n 'this',\n 'is',\n 'a',\n 'string',\n 'list'\n] # Nice variable a", # noqa
[Parameter("a", "List[str]", "['this','is','a','string','list']", "Nice variable a")]
),
(
"""a: List[str] = [
'this', # First
'is',
'a',
'string',
'list' # Last
] # Nice variable a
b: float = -2.3432 # My b variable
""",
[
Parameter("a", "List[str]", "['this','is','a','string','list']", "Nice variable a"),
Parameter("b", "float", "-2.3432", "My b variable"),
]
),
]
)
def test_inspect_python(test_input, expected):
cell = new_code_cell(source=test_input)
assert translators.PythonTranslator.inspect(cell) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
("foo", '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
({"foo": "bar"}, 'list("foo" = "bar")'),
({"foo": '"bar"'}, 'list("foo" = "\\"bar\\"")'),
({"foo": ["bar"]}, 'list("foo" = list("bar"))'),
({"foo": {"bar": "baz"}}, 'list("foo" = list("bar" = "baz"))'),
({"foo": {"bar": '"baz"'}}, 'list("foo" = list("bar" = "\\"baz\\""))'),
(["foo"], 'list("foo")'),
(["foo", '"bar"'], 'list("foo", "\\"bar\\"")'),
([{"foo": "bar"}], 'list(list("foo" = "bar"))'),
([{"foo": '"bar"'}], 'list(list("foo" = "\\"bar\\""))'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
(True, 'TRUE'),
(False, 'FALSE'),
(None, 'NULL'),
],
)
def test_translate_type_r(test_input, expected):
assert translators.RTranslator.translate(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected", [("", '#'), ("foo", '# foo'), ("['best effort']", "# ['best effort']")]
)
def test_translate_comment_r(test_input, expected):
assert translators.RTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
"parameters,expected",
[
({"foo": "bar"}, '# Parameters\nfoo = "bar"\n'),
({"foo": True}, '# Parameters\nfoo = TRUE\n'),
({"foo": 5}, '# Parameters\nfoo = 5\n'),
({"foo": 1.1}, '# Parameters\nfoo = 1.1\n'),
({"foo": ['bar', 'baz']}, '# Parameters\nfoo = list("bar", "baz")\n'),
({"foo": {'bar': 'baz'}}, '# Parameters\nfoo = list("bar" = "baz")\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'# Parameters\nfoo = "bar"\nbaz = list("buz")\n',
),
        # Leading underscores are removed from parameter names
({"___foo": 5}, '# Parameters\nfoo = 5\n'),
],
)
def test_translate_codify_r(parameters, expected):
assert translators.RTranslator.codify(parameters) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
("foo", '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
({"foo": "bar"}, 'Map("foo" -> "bar")'),
({"foo": '"bar"'}, 'Map("foo" -> "\\"bar\\"")'),
({"foo": ["bar"]}, 'Map("foo" -> Seq("bar"))'),
({"foo": {"bar": "baz"}}, 'Map("foo" -> Map("bar" -> "baz"))'),
({"foo": {"bar": '"baz"'}}, 'Map("foo" -> Map("bar" -> "\\"baz\\""))'),
(["foo"], 'Seq("foo")'),
(["foo", '"bar"'], 'Seq("foo", "\\"bar\\"")'),
([{"foo": "bar"}], 'Seq(Map("foo" -> "bar"))'),
([{"foo": '"bar"'}], 'Seq(Map("foo" -> "\\"bar\\""))'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
(2147483648, '2147483648L'),
(-2147483649, '-2147483649L'),
(True, 'true'),
(False, 'false'),
(None, 'None'),
],
)
def test_translate_type_scala(test_input, expected):
assert translators.ScalaTranslator.translate(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected",
[("", '//'), ("foo", '// foo'), ("['best effort']", "// ['best effort']")],
)
def test_translate_comment_scala(test_input, expected):
assert translators.ScalaTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
"input_name,input_value,expected",
[
("foo", '""', 'val foo = ""'),
("foo", '"bar"', 'val foo = "bar"'),
("foo", 'Map("foo" -> "bar")', 'val foo = Map("foo" -> "bar")'),
],
)
def test_translate_assign_scala(input_name, input_value, expected):
assert translators.ScalaTranslator.assign(input_name, input_value) == expected
@pytest.mark.parametrize(
"parameters,expected",
[
({"foo": "bar"}, '// Parameters\nval foo = "bar"\n'),
({"foo": True}, '// Parameters\nval foo = true\n'),
({"foo": 5}, '// Parameters\nval foo = 5\n'),
({"foo": 1.1}, '// Parameters\nval foo = 1.1\n'),
({"foo": ['bar', 'baz']}, '// Parameters\nval foo = Seq("bar", "baz")\n'),
({"foo": {'bar': 'baz'}}, '// Parameters\nval foo = Map("bar" -> "baz")\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'// Parameters\nval foo = "bar"\nval baz = Seq("buz")\n',
),
],
)
def test_translate_codify_scala(parameters, expected):
assert translators.ScalaTranslator.codify(parameters) == expected
# C# section
@pytest.mark.parametrize(
"test_input,expected",
[
("foo", '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
({"foo": "bar"}, 'new Dictionary<string,Object>{ { "foo" , "bar" } }'),
({"foo": '"bar"'}, 'new Dictionary<string,Object>{ { "foo" , "\\"bar\\"" } }'),
(["foo"], 'new [] { "foo" }'),
(["foo", '"bar"'], 'new [] { "foo", "\\"bar\\"" }'),
([{"foo": "bar"}], 'new [] { new Dictionary<string,Object>{ { "foo" , "bar" } } }'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
(2147483648, '2147483648L'),
(-2147483649, '-2147483649L'),
(True, 'true'),
(False, 'false'),
],
)
def test_translate_type_csharp(test_input, expected):
assert translators.CSharpTranslator.translate(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected",
[("", '//'), ("foo", '// foo'), ("['best effort']", "// ['best effort']")],
)
def test_translate_comment_csharp(test_input, expected):
assert translators.CSharpTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
"input_name,input_value,expected",
[("foo", '""', 'var foo = "";'), ("foo", '"bar"', 'var foo = "bar";')],
)
def test_translate_assign_csharp(input_name, input_value, expected):
assert translators.CSharpTranslator.assign(input_name, input_value) == expected
@pytest.mark.parametrize(
"parameters,expected",
[
({"foo": "bar"}, '// Parameters\nvar foo = "bar";\n'),
({"foo": True}, '// Parameters\nvar foo = true;\n'),
({"foo": 5}, '// Parameters\nvar foo = 5;\n'),
({"foo": 1.1}, '// Parameters\nvar foo = 1.1;\n'),
({"foo": ['bar', 'baz']}, '// Parameters\nvar foo = new [] { "bar", "baz" };\n'),
(
{"foo": {'bar': 'baz'}},
'// Parameters\nvar foo = new Dictionary<string,Object>{ { "bar" , "baz" } };\n',
),
],
)
def test_translate_codify_csharp(parameters, expected):
assert translators.CSharpTranslator.codify(parameters) == expected
# Powershell section
@pytest.mark.parametrize(
"test_input,expected",
[
("foo", '"foo"'),
('{"foo": "bar"}', '"{`"foo`": `"bar`"}"'),
({"foo": "bar"}, '@{"foo" = "bar"}'),
({"foo": '"bar"'}, '@{"foo" = "`"bar`""}'),
({"foo": ["bar"]}, '@{"foo" = @("bar")}'),
({"foo": {"bar": "baz"}}, '@{"foo" = @{"bar" = "baz"}}'),
({"foo": {"bar": '"baz"'}}, '@{"foo" = @{"bar" = "`"baz`""}}'),
(["foo"], '@("foo")'),
(["foo", '"bar"'], '@("foo", "`"bar`"")'),
([{"foo": "bar"}], '@(@{"foo" = "bar"})'),
([{"foo": '"bar"'}], '@(@{"foo" = "`"bar`""})'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
(float('nan'), "[double]::NaN"),
(float('-inf'), "[double]::NegativeInfinity"),
(float('inf'), "[double]::PositiveInfinity"),
(True, '$True'),
(False, '$False'),
(None, '$Null'),
],
)
def test_translate_type_powershell(test_input, expected):
assert translators.PowershellTranslator.translate(test_input) == expected
@pytest.mark.parametrize(
"parameters,expected",
[
({"foo": "bar"}, '# Parameters\n$foo = "bar"\n'),
({"foo": True}, '# Parameters\n$foo = $True\n'),
({"foo": 5}, '# Parameters\n$foo = 5\n'),
({"foo": 1.1}, '# Parameters\n$foo = 1.1\n'),
({"foo": ['bar', 'baz']}, '# Parameters\n$foo = @("bar", "baz")\n'),
({"foo": {'bar': 'baz'}}, '# Parameters\n$foo = @{"bar" = "baz"}\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'# Parameters\n$foo = "bar"\n$baz = @("buz")\n',
),
],
)
def test_translate_codify_powershell(parameters, expected):
assert translators.PowershellTranslator.codify(parameters) == expected
@pytest.mark.parametrize(
"input_name,input_value,expected",
[("foo", '""', '$foo = ""'), ("foo", '"bar"', '$foo = "bar"')],
)
def test_translate_assign_powershell(input_name, input_value, expected):
assert translators.PowershellTranslator.assign(input_name, input_value) == expected
@pytest.mark.parametrize(
"test_input,expected", [("", '#'), ("foo", '# foo'), ("['best effort']", "# ['best effort']")]
)
def test_translate_comment_powershell(test_input, expected):
assert translators.PowershellTranslator.comment(test_input) == expected
# F# section
@pytest.mark.parametrize(
"test_input,expected",
[
("foo", '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
({"foo": "bar"}, '[ ("foo", "bar" :> IComparable) ] |> Map.ofList'),
({"foo": '"bar"'}, '[ ("foo", "\\"bar\\"" :> IComparable) ] |> Map.ofList'),
(["foo"], '[ "foo" ]'),
(["foo", '"bar"'], '[ "foo"; "\\"bar\\"" ]'),
([{"foo": "bar"}], '[ [ ("foo", "bar" :> IComparable) ] |> Map.ofList ]'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
(2147483648, '2147483648L'),
(-2147483649, '-2147483649L'),
(True, 'true'),
(False, 'false'),
],
)
def test_translate_type_fsharp(test_input, expected):
assert translators.FSharpTranslator.translate(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected",
[("", '(* *)'), ("foo", '(* foo *)'), ("['best effort']", "(* ['best effort'] *)")],
)
def test_translate_comment_fsharp(test_input, expected):
assert translators.FSharpTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
"input_name,input_value,expected",
[("foo", '""', 'let foo = ""'), ("foo", '"bar"', 'let foo = "bar"')],
)
def test_translate_assign_fsharp(input_name, input_value, expected):
assert translators.FSharpTranslator.assign(input_name, input_value) == expected
@pytest.mark.parametrize(
"parameters,expected",
[
({"foo": "bar"}, '(* Parameters *)\nlet foo = "bar"\n'),
({"foo": True}, '(* Parameters *)\nlet foo = true\n'),
({"foo": 5}, '(* Parameters *)\nlet foo = 5\n'),
({"foo": 1.1}, '(* Parameters *)\nlet foo = 1.1\n'),
({"foo": ['bar', 'baz']}, '(* Parameters *)\nlet foo = [ "bar"; "baz" ]\n'),
(
{"foo": {'bar': 'baz'}},
'(* Parameters *)\nlet foo = [ ("bar", "baz" :> IComparable) ] |> Map.ofList\n',
),
],
)
def test_translate_codify_fsharp(parameters, expected):
assert translators.FSharpTranslator.codify(parameters) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
("foo", '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
({"foo": "bar"}, 'Dict("foo" => "bar")'),
({"foo": '"bar"'}, 'Dict("foo" => "\\"bar\\"")'),
({"foo": ["bar"]}, 'Dict("foo" => ["bar"])'),
({"foo": {"bar": "baz"}}, 'Dict("foo" => Dict("bar" => "baz"))'),
({"foo": {"bar": '"baz"'}}, 'Dict("foo" => Dict("bar" => "\\"baz\\""))'),
(["foo"], '["foo"]'),
(["foo", '"bar"'], '["foo", "\\"bar\\""]'),
([{"foo": "bar"}], '[Dict("foo" => "bar")]'),
([{"foo": '"bar"'}], '[Dict("foo" => "\\"bar\\"")]'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
(True, 'true'),
(False, 'false'),
(None, 'nothing'),
],
)
def test_translate_type_julia(test_input, expected):
assert translators.JuliaTranslator.translate(test_input) == expected
@pytest.mark.parametrize(
"parameters,expected",
[
({"foo": "bar"}, '# Parameters\nfoo = "bar"\n'),
({"foo": True}, '# Parameters\nfoo = true\n'),
({"foo": 5}, '# Parameters\nfoo = 5\n'),
({"foo": 1.1}, '# Parameters\nfoo = 1.1\n'),
({"foo": ['bar', 'baz']}, '# Parameters\nfoo = ["bar", "baz"]\n'),
({"foo": {'bar': 'baz'}}, '# Parameters\nfoo = Dict("bar" => "baz")\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'# Parameters\nfoo = "bar"\nbaz = ["buz"]\n',
),
],
)
def test_translate_codify_julia(parameters, expected):
assert translators.JuliaTranslator.codify(parameters) == expected
@pytest.mark.parametrize(
"test_input,expected", [("", '#'), ("foo", '# foo'), ('["best effort"]', '# ["best effort"]')]
)
def test_translate_comment_julia(test_input, expected):
assert translators.JuliaTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
("foo", '"foo"'),
('{"foo": "bar"}', '"{""foo"": ""bar""}"'),
({1: "foo"}, 'containers.Map({\'1\'}, {"foo"})'),
({1.0: "foo"}, 'containers.Map({\'1.0\'}, {"foo"})'),
({None: "foo"}, 'containers.Map({\'None\'}, {"foo"})'),
({True: "foo"}, 'containers.Map({\'True\'}, {"foo"})'),
({"foo": "bar"}, 'containers.Map({\'foo\'}, {"bar"})'),
({"foo": '"bar"'}, 'containers.Map({\'foo\'}, {"""bar"""})'),
({"foo": ["bar"]}, 'containers.Map({\'foo\'}, {{"bar"}})'),
(
{"foo": {"bar": "baz"}},
'containers.Map({\'foo\'}, {containers.Map({\'bar\'}, {"baz"})})',
),
(
{"foo": {"bar": '"baz"'}},
'containers.Map({\'foo\'}, {containers.Map({\'bar\'}, {"""baz"""})})',
),
(["foo"], '{"foo"}'),
(["foo", '"bar"'], '{"foo", """bar"""}'),
([{"foo": "bar"}], '{containers.Map({\'foo\'}, {"bar"})}'),
([{"foo": '"bar"'}], '{containers.Map({\'foo\'}, {"""bar"""})}'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
(True, 'true'),
(False, 'false'),
(None, 'NaN'),
],
)
def test_translate_type_matlab(test_input, expected):
assert translators.MatlabTranslator.translate(test_input) == expected
@pytest.mark.parametrize(
"parameters,expected",
[
({"foo": "bar"}, '% Parameters\nfoo = "bar";\n'),
({"foo": True}, '% Parameters\nfoo = true;\n'),
({"foo": 5}, '% Parameters\nfoo = 5;\n'),
({"foo": 1.1}, '% Parameters\nfoo = 1.1;\n'),
({"foo": ['bar', 'baz']}, '% Parameters\nfoo = {"bar", "baz"};\n'),
({"foo": {'bar': 'baz'}}, '% Parameters\nfoo = containers.Map({\'bar\'}, {"baz"});\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'% Parameters\nfoo = "bar";\nbaz = {"buz"};\n',
),
],
)
def test_translate_codify_matlab(parameters, expected):
assert translators.MatlabTranslator.codify(parameters) == expected
@pytest.mark.parametrize(
"test_input,expected", [("", '%'), ("foo", '% foo'), ("['best effort']", "% ['best effort']")]
)
def test_translate_comment_matlab(test_input, expected):
assert translators.MatlabTranslator.comment(test_input) == expected
def test_find_translator_with_exact_kernel_name():
my_new_kernel_translator = Mock()
my_new_language_translator = Mock()
translators.papermill_translators.register("my_new_kernel", my_new_kernel_translator)
translators.papermill_translators.register("my_new_language", my_new_language_translator)
assert (
translators.papermill_translators.find_translator("my_new_kernel", "my_new_language")
is my_new_kernel_translator
)
def test_find_translator_with_exact_language():
my_new_language_translator = Mock()
translators.papermill_translators.register("my_new_language", my_new_language_translator)
assert (
translators.papermill_translators.find_translator("unregistered_kernel", "my_new_language")
is my_new_language_translator
)
def test_find_translator_with_no_such_kernel_or_language():
with pytest.raises(PapermillException):
translators.papermill_translators.find_translator(
"unregistered_kernel", "unregistered_language"
)
def test_translate_uses_str_representation_of_unknown_types():
class FooClass:
def __str__(self):
return "foo"
obj = FooClass()
assert translators.Translator.translate(obj) == '"foo"'
def test_translator_must_implement_translate_dict():
class MyNewTranslator(translators.Translator):
pass
with pytest.raises(NotImplementedError):
MyNewTranslator.translate_dict({"foo": "bar"})
def test_translator_must_implement_translate_list():
class MyNewTranslator(translators.Translator):
pass
with pytest.raises(NotImplementedError):
MyNewTranslator.translate_list(["foo", "bar"])
def test_translator_must_implement_comment():
class MyNewTranslator(translators.Translator):
pass
with pytest.raises(NotImplementedError):
MyNewTranslator.comment("foo")
|
11454076
|
import torch
from mmseg.models.utils import DropPath
def test_drop_path():
# zero drop
layer = DropPath()
# input NLC format feature
x = torch.randn((1, 16, 32))
layer(x)
    # input NCHW format feature
x = torch.randn((1, 32, 4, 4))
layer(x)
# non-zero drop
layer = DropPath(0.1)
# input NLC format feature
x = torch.randn((1, 16, 32))
layer(x)
    # input NCHW format feature
x = torch.randn((1, 32, 4, 4))
layer(x)
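# For reference, a minimal sketch of what a stochastic-depth layer computes
# during training; this is an illustrative re-implementation, not mmseg's own
# DropPath code.
def drop_path_sketch(x, drop_prob=0.1):
    if drop_prob == 0.:
        return x
    keep_prob = 1 - drop_prob
    # one Bernoulli draw per sample, broadcast over the remaining dims
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = (torch.rand(shape, dtype=x.dtype, device=x.device) < keep_prob).to(x.dtype)
    # scale kept paths so the expected value matches evaluation mode
    return x / keep_prob * mask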
|
11454125
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Canny
def Canny(img):
# Gray scale
def BGR2GRAY(img):
b = img[:, :, 0].copy()
g = img[:, :, 1].copy()
r = img[:, :, 2].copy()
# Gray scale
out = 0.2126 * r + 0.7152 * g + 0.0722 * b
out = out.astype(np.uint8)
return out
# Gaussian filter for grayscale
def gaussian_filter(img, K_size=3, sigma=1.3):
if len(img.shape) == 3:
H, W, C = img.shape
gray = False
else:
img = np.expand_dims(img, axis=-1)
H, W, C = img.shape
gray = True
## Zero padding
pad = K_size // 2
        out = np.zeros([H + pad * 2, W + pad * 2, C], dtype=float)
        out[pad : pad + H, pad : pad + W] = img.copy().astype(float)
## prepare Kernel
        K = np.zeros((K_size, K_size), dtype=float)
for x in range(-pad, -pad + K_size):
for y in range(-pad, -pad + K_size):
K[y + pad, x + pad] = np.exp( - (x ** 2 + y ** 2) / (2 * sigma * sigma))
#K /= (sigma * np.sqrt(2 * np.pi))
K /= (2 * np.pi * sigma * sigma)
K /= K.sum()
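        # K is now the sampled 2-D Gaussian exp(-(x**2 + y**2) / (2 * sigma**2)),
        # normalized so that K.sum() == 1.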
tmp = out.copy()
# filtering
for y in range(H):
for x in range(W):
for c in range(C):
out[pad + y, pad + x, c] = np.sum(K * tmp[y : y + K_size, x : x + K_size, c])
out = np.clip(out, 0, 255)
out = out[pad : pad + H, pad : pad + W]
out = out.astype(np.uint8)
if gray:
out = out[..., 0]
return out
# sobel filter
def sobel_filter(img, K_size=3):
if len(img.shape) == 3:
H, W, C = img.shape
else:
H, W = img.shape
# Zero padding
pad = K_size // 2
        out = np.zeros((H + pad * 2, W + pad * 2), dtype=float)
        out[pad : pad + H, pad : pad + W] = img.copy().astype(float)
tmp = out.copy()
out_v = out.copy()
out_h = out.copy()
## Sobel vertical
Kv = [[1., 2., 1.],[0., 0., 0.], [-1., -2., -1.]]
## Sobel horizontal
Kh = [[1., 0., -1.],[2., 0., -2.],[1., 0., -1.]]
# filtering
for y in range(H):
for x in range(W):
out_v[pad + y, pad + x] = np.sum(Kv * (tmp[y : y + K_size, x : x + K_size]))
out_h[pad + y, pad + x] = np.sum(Kh * (tmp[y : y + K_size, x : x + K_size]))
out_v = np.clip(out_v, 0, 255)
out_h = np.clip(out_h, 0, 255)
out_v = out_v[pad : pad + H, pad : pad + W]
out_v = out_v.astype(np.uint8)
out_h = out_h[pad : pad + H, pad : pad + W]
out_h = out_h.astype(np.uint8)
return out_v, out_h
def get_edge_angle(fx, fy):
# get edge strength
edge = np.sqrt(np.power(fx.astype(np.float32), 2) + np.power(fy.astype(np.float32), 2))
edge = np.clip(edge, 0, 255)
fx = np.maximum(fx, 1e-10)
#fx[np.abs(fx) <= 1e-5] = 1e-5
# get edge angle
angle = np.arctan(fy / fx)
return edge, angle
def angle_quantization(angle):
angle = angle / np.pi * 180
angle[angle < -22.5] = 180 + angle[angle < -22.5]
_angle = np.zeros_like(angle, dtype=np.uint8)
_angle[np.where(angle <= 22.5)] = 0
_angle[np.where((angle > 22.5) & (angle <= 67.5))] = 45
_angle[np.where((angle > 67.5) & (angle <= 112.5))] = 90
_angle[np.where((angle > 112.5) & (angle <= 157.5))] = 135
return _angle
def non_maximum_suppression(angle, edge):
H, W = angle.shape
_edge = edge.copy()
for y in range(H):
for x in range(W):
if angle[y, x] == 0:
dx1, dy1, dx2, dy2 = -1, 0, 1, 0
elif angle[y, x] == 45:
dx1, dy1, dx2, dy2 = -1, 1, 1, -1
elif angle[y, x] == 90:
dx1, dy1, dx2, dy2 = 0, -1, 0, 1
elif angle[y, x] == 135:
dx1, dy1, dx2, dy2 = -1, -1, 1, 1
if x == 0:
dx1 = max(dx1, 0)
dx2 = max(dx2, 0)
if x == W-1:
dx1 = min(dx1, 0)
dx2 = min(dx2, 0)
if y == 0:
dy1 = max(dy1, 0)
dy2 = max(dy2, 0)
if y == H-1:
dy1 = min(dy1, 0)
dy2 = min(dy2, 0)
if max(max(edge[y, x], edge[y + dy1, x + dx1]), edge[y + dy2, x + dx2]) != edge[y, x]:
_edge[y, x] = 0
return _edge
    def hysteresis(edge, HT=100, LT=30):
        H, W = edge.shape
        # Hysteresis threshold
edge[edge >= HT] = 255
edge[edge <= LT] = 0
_edge = np.zeros((H + 2, W + 2), dtype=np.float32)
_edge[1 : H + 1, 1 : W + 1] = edge
## 8 - Nearest neighbor
nn = np.array(((1., 1., 1.), (1., 0., 1.), (1., 1., 1.)), dtype=np.float32)
for y in range(1, H+2):
for x in range(1, W+2):
if _edge[y, x] < LT or _edge[y, x] > HT:
continue
if np.max(_edge[y-1:y+2, x-1:x+2] * nn) >= HT:
_edge[y, x] = 255
else:
_edge[y, x] = 0
edge = _edge[1:H+1, 1:W+1]
return edge
# grayscale
gray = BGR2GRAY(img)
# gaussian filtering
gaussian = gaussian_filter(gray, K_size=5, sigma=1.4)
# sobel filtering
fy, fx = sobel_filter(gaussian, K_size=3)
# get edge strength, angle
edge, angle = get_edge_angle(fx, fy)
# angle quantization
angle = angle_quantization(angle)
# non maximum suppression
edge = non_maximum_suppression(angle, edge)
    # hysteresis threshold
    out = hysteresis(edge, 50, 20)
return out
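# For intuition: get_edge_angle computes edge = sqrt(fx**2 + fy**2) and
# angle = arctan(fy / fx); e.g. fx = 3, fy = 4 gives edge = 5 and an angle of
# about 53.1 degrees, which angle_quantization then maps to the 45-degree bin.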
# Morphology Erode
def Morphology_Erode(img, Erode_time=1):
    H, W = img.shape
    out = img.copy()
    # kernel
    MF = np.array(((0, 1, 0),
                   (1, 0, 1),
                   (0, 1, 0)), dtype=int)
    # each erode: zero a pixel unless all four cross-neighbours are 255
    for i in range(Erode_time):
        tmp = np.pad(out, (1, 1), 'edge')
        # erode
        for y in range(1, H + 1):
            for x in range(1, W + 1):
                if np.sum(MF * tmp[y - 1 : y + 2, x - 1 : x + 2]) < 255 * 4:
                    out[y - 1, x - 1] = 0
    return out
# Morphology Dilate
def Morphology_Dilate(img, Dil_time=1):
    H, W = img.shape
    # kernel
    MF = np.array(((0, 1, 0),
                   (1, 0, 1),
                   (0, 1, 0)), dtype=int)
    # each dilate: set a pixel to 255 if any cross-neighbour is 255
    out = img.copy()
    for i in range(Dil_time):
        tmp = np.pad(out, (1, 1), 'edge')
        for y in range(1, H + 1):
            for x in range(1, W + 1):
                if np.sum(MF * tmp[y - 1 : y + 2, x - 1 : x + 2]) >= 255:
                    out[y - 1, x - 1] = 255
    return out
# Morphology Closing (dilation followed by erosion)
def Morphology_Closing(img, time=1):
    out = Morphology_Dilate(img, Dil_time=time)
    out = Morphology_Erode(out, Erode_time=time)
    return out
# Read image
img = cv2.imread("imori.jpg").astype(np.float32)
# Canny
canny = Canny(img)
# Morphology - closing
out = Morphology_Closing(canny, time=1)
# Save result
cv2.imwrite("out.jpg", out)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
11454137
|
class Dog:
def __init__(self, name):
self.name = name
def speak(self):
print("Woof! My name is", self.name)
dog_one = Dog('Rover')
dog_two = Dog('Rex')
dog_one.speak()
dog_two.speak()
|
11454145
|
import abc
import tempfile
from .. import Subject, ScalarImage
from ..utils import get_torchio_cache_dir
from ..download import download_and_extract_archive
class VisibleHuman(abc.ABC, Subject):
URL = 'https://mri.radiology.uiowa.edu/website_documents/visible_human_tar_files/{}{}.tar.gz' # noqa: E501, FS003
def __init__(self, part: str):
self.part = self._parse_part(part)
if not self.cache_part_dir.is_dir():
tempdir = tempfile.gettempdir()
filename = f'{self.__class__.__name__}-{self.part}.tar.gz'
download_and_extract_archive(
self.url,
tempdir,
filename=filename,
extract_root=self.cache_class_dir,
remove_finished=True,
)
super().__init__({self.part.lower(): ScalarImage(self.cache_part_dir)})
@property
def cache_class_dir(self):
return get_torchio_cache_dir() / self.__class__.__name__
@property
def cache_part_dir(self):
return self.cache_class_dir / self.part
@property
def url(self):
return self.URL.format(self.PREFIX, self.part)
    def _parse_part(self, part: str) -> str:
part_capital = part.capitalize()
if part_capital not in self.PARTS:
message = f'Part "{part}" not in available parts: {self.PARTS}'
raise ValueError(message)
return part_capital
class VisibleMale(VisibleHuman):
"""Visible Male CT Datasets.
Args:
part: Can be ``'Head'``, ``'Hip'``, ``'Pelvis'`` or ``'Shoulder'``.
"""
PREFIX = 'VHMCT1mm_'
PARTS = (
'Head',
'Hip',
'Pelvis',
'Shoulder',
)
class VisibleFemale(VisibleHuman):
"""Visible Female CT Datasets.
Args:
part: Can be ``'Ankle'``, ``'Head'``, ``'Hip'``, ``'Knee'``,
``'Pelvis'`` or ``'Shoulder'``.
"""
PREFIX = 'VHF-'
PARTS = VisibleMale.PARTS + (
'Ankle',
'Knee',
)
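# A minimal usage sketch; constructing a subject downloads and caches the CT
# data on first use, assuming network access and the default torchio cache
# directory.
# subject = VisibleMale('Head')
# subject = VisibleFemale('Ankle')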
|
11454205
|
from typing import Optional, Set, Dict, Type, Union
from .base import Base
from .list import List
from .map import Map, MapWrapper
from .reference import Reference, ReferenceXOR
from .string import String
from .version import Version
class Entity(MapWrapper):
# This must be overridden in derived classes
ATTRS: Dict[str, Union[Type[Base], Base, Map, List, Reference, ReferenceXOR]] = {}
# This can be overridden in derived classes
REQUIRED: Set[str] = set()
@classmethod
def validate(cls, yaml_node):
if cls.ATTRS == {}:
raise AssertionError()
if not isinstance(cls.REQUIRED, set):
raise AssertionError()
if not isinstance(yaml_node.value, dict):
cls.abort("Expected map.", yaml_node.loc)
data_keys = set()
for k in yaml_node.value:
if not isinstance(k.value, str):
cls.abort("Expected string", k.loc)
data_keys.add(k.value)
missing_keys = cls.REQUIRED - data_keys
if missing_keys:
cls.abort(f"Missing required fields: {', '.join(missing_keys)}", yaml_node.loc)
extra_keys = data_keys - cls.attrs().keys()
if extra_keys:
cls.abort(f"Invalid keys: {', '.join(extra_keys)}", yaml_node.loc)
@classmethod
def build(cls, yaml_node):
classes = cls.attrs()
data = {
k.value: classes[k.value].parse(v)
for k, v in yaml_node.value.items()
}
return cls(data, yaml_node.loc)
@classmethod
def attrs(cls):
return cls.ATTRS
def __getattr__(self, key):
try:
return self.data[key]
except KeyError as e:
raise AttributeError(key) from e
class TypeEntity(Entity):
REFERENCE: Optional[Reference] = None # Override in subclasses
@classmethod
def validate(cls, yaml_node):
super().validate(yaml_node)
for key in yaml_node.value:
if key.value == "derived_from":
return
cls.abort("Type is missing derived_from key.", yaml_node.loc)
@classmethod
def attrs(cls):
if not isinstance(cls.REFERENCE, Reference):
raise AssertionError(f"Override REFERENCE in {cls.__name__} with Reference.")
attributes = cls.ATTRS.copy()
attributes.update(
derived_from=cls.REFERENCE,
description=String,
metadata=Map(String),
version=Version,
)
return attributes
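# A minimal sketch of a concrete entity, only to illustrate overriding ATTRS
# and REQUIRED; the field names here are hypothetical.
class ExampleEntity(Entity):
    ATTRS = {
        'name': String,
        'description': String,
    }
    REQUIRED = {'name'}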
|
11454228
|
from integration.helpers.base_test import BaseTest
class TestFunctionWithHttpApi(BaseTest):
def test_function_with_http_api(self):
self.create_and_verify_stack("combination/function_with_http_api")
stack_outputs = self.get_stack_outputs()
base_url = stack_outputs["ApiUrl"]
self.verify_get_request_response(base_url + "some/path", 200)
self.verify_get_request_response(base_url + "something", 404)
self.verify_get_request_response(base_url + "another/endpoint", 404)
|
11454240
|
import numpy as np
from flare.kernels.kernels import (
force_helper,
force_energy_helper,
grad_helper,
three_body_fe_perm,
three_body_ee_perm,
three_body_se_perm,
three_body_ff_perm,
three_body_sf_perm,
three_body_ss_perm,
three_body_grad_perm,
grad_constants,
)
from numba import njit
from flare.env import AtomicEnvironment
from typing import Callable
import flare.kernels.cutoffs as cf
from math import exp
class ThreeBodyKernel:
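    """3-body multi-element kernel giving energy, force, and stress
    covariances between local atomic environments.
    Args:
        hyperparameters (np.ndarray): Signal variance and length scale.
        cutoff (float): 3-body cutoff radius.
        cutoff_func (Callable): Cutoff function.
    """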
def __init__(
self,
hyperparameters: "ndarray",
cutoff: float,
cutoff_func: Callable = cf.quadratic_cutoff,
):
self.hyperparameters = hyperparameters
self.signal_variance = hyperparameters[0]
self.length_scale = hyperparameters[1]
self.cutoff = cutoff
self.cutoff_func = cutoff_func
def energy_energy(self, env1: AtomicEnvironment, env2: AtomicEnvironment):
args = self.get_args(env1, env2)
return energy_energy(*args)
def force_energy(self, env1: AtomicEnvironment, env2: AtomicEnvironment):
args = self.get_args(env1, env2)
return force_energy(*args)
def stress_energy(self, env1: AtomicEnvironment, env2: AtomicEnvironment):
args = self.get_args(env1, env2)
return stress_energy(*args)
def force_force(self, env1: AtomicEnvironment, env2: AtomicEnvironment):
args = self.get_args(env1, env2)
return force_force(*args)
def stress_force(self, env1: AtomicEnvironment, env2: AtomicEnvironment):
args = self.get_args(env1, env2)
return stress_force(*args)
def stress_stress(self, env1: AtomicEnvironment, env2: AtomicEnvironment):
args = self.get_args(env1, env2)
return stress_stress(*args)
def force_force_gradient(self, env1: AtomicEnvironment, env2: AtomicEnvironment):
args = self.get_args(env1, env2)
return force_force_gradient(*args)
def efs_energy(self, env1: AtomicEnvironment, env2: AtomicEnvironment):
args = self.get_args(env1, env2)
return efs_energy(*args)
def efs_force(self, env1: AtomicEnvironment, env2: AtomicEnvironment):
args = self.get_args(env1, env2)
return efs_force(*args)
def efs_self(self, env1: AtomicEnvironment):
return efs_self(
env1.bond_array_3,
env1.ctype,
env1.etypes,
env1.cross_bond_inds,
env1.cross_bond_dists,
env1.triplet_counts,
self.signal_variance,
self.length_scale,
self.cutoff,
self.cutoff_func,
)
def get_args(self, env1, env2):
return (
env1.bond_array_3,
env1.ctype,
env1.etypes,
env2.bond_array_3,
env2.ctype,
env2.etypes,
env1.cross_bond_inds,
env2.cross_bond_inds,
env1.cross_bond_dists,
env2.cross_bond_dists,
env1.triplet_counts,
env2.triplet_counts,
self.signal_variance,
self.length_scale,
self.cutoff,
self.cutoff_func,
)
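# A minimal usage sketch, assuming two AtomicEnvironment objects env1 and
# env2 have already been constructed; the hyperparameter values are
# hypothetical.
# kernel = ThreeBodyKernel(np.array([1.0, 0.5]), cutoff=4.0)
# k_ee = kernel.energy_energy(env1, env2)   # scalar energy/energy kernel
# k_ff = kernel.force_force(env1, env2)     # 3x3 force/force kernel matrix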
@njit
def energy_energy(
bond_array_1,
c1,
etypes1,
bond_array_2,
c2,
etypes2,
cross_bond_inds_1,
cross_bond_inds_2,
cross_bond_dists_1,
cross_bond_dists_2,
triplets_1,
triplets_2,
sig,
ls,
r_cut,
cutoff_func,
):
"""3-body multi-element kernel between two local energies accelerated
with Numba.
Args:
bond_array_1 (np.ndarray): 3-body bond array of the first local
environment.
c1 (int): Species of the central atom of the first local environment.
etypes1 (np.ndarray): Species of atoms in the first local
environment.
bond_array_2 (np.ndarray): 3-body bond array of the second local
environment.
c2 (int): Species of the central atom of the second local environment.
etypes2 (np.ndarray): Species of atoms in the second local
environment.
cross_bond_inds_1 (np.ndarray): Two dimensional array whose row m
contains the indices of atoms n > m in the first local
environment that are within a distance r_cut of both atom n and
the central atom.
cross_bond_inds_2 (np.ndarray): Two dimensional array whose row m
contains the indices of atoms n > m in the second local
environment that are within a distance r_cut of both atom n and
the central atom.
cross_bond_dists_1 (np.ndarray): Two dimensional array whose row m
contains the distances from atom m of atoms n > m in the first
local environment that are within a distance r_cut of both atom
n and the central atom.
cross_bond_dists_2 (np.ndarray): Two dimensional array whose row m
contains the distances from atom m of atoms n > m in the second
local environment that are within a distance r_cut of both atom
n and the central atom.
triplets_1 (np.ndarray): One dimensional array of integers whose entry
m is the number of atoms in the first local environment that are
within a distance r_cut of atom m.
triplets_2 (np.ndarray): One dimensional array of integers whose entry
m is the number of atoms in the second local environment that are
within a distance r_cut of atom m.
sig (float): 3-body signal variance hyperparameter.
ls (float): 3-body length scale hyperparameter.
r_cut (float): 3-body cutoff radius.
cutoff_func (Callable): Cutoff function.
Returns:
float:
Value of the 3-body local energy kernel.
"""
kern = 0
sig2 = sig * sig
ls2 = 1 / (2 * ls * ls)
for m in range(bond_array_1.shape[0]):
ri1 = bond_array_1[m, 0]
fi1, _ = cutoff_func(r_cut, ri1, 0)
ei1 = etypes1[m]
for n in range(triplets_1[m]):
ind1 = cross_bond_inds_1[m, m + n + 1]
ri2 = bond_array_1[ind1, 0]
fi2, _ = cutoff_func(r_cut, ri2, 0)
ei2 = etypes1[ind1]
ri3 = cross_bond_dists_1[m, m + n + 1]
fi3, _ = cutoff_func(r_cut, ri3, 0)
fi = fi1 * fi2 * fi3
for p in range(bond_array_2.shape[0]):
rj1 = bond_array_2[p, 0]
fj1, _ = cutoff_func(r_cut, rj1, 0)
ej1 = etypes2[p]
for q in range(triplets_2[p]):
ind2 = cross_bond_inds_2[p, p + q + 1]
rj2 = bond_array_2[ind2, 0]
fj2, _ = cutoff_func(r_cut, rj2, 0)
ej2 = etypes2[ind2]
rj3 = cross_bond_dists_2[p, p + q + 1]
fj3, _ = cutoff_func(r_cut, rj3, 0)
fj = fj1 * fj2 * fj3
r11 = ri1 - rj1
r12 = ri1 - rj2
r13 = ri1 - rj3
r21 = ri2 - rj1
r22 = ri2 - rj2
r23 = ri2 - rj3
r31 = ri3 - rj1
r32 = ri3 - rj2
r33 = ri3 - rj3
kern += three_body_ee_perm(
r11,
r12,
r13,
r21,
r22,
r23,
r31,
r32,
r33,
c1,
c2,
ei1,
ei2,
ej1,
ej2,
fi,
fj,
ls2,
sig2,
)
return kern / 9
@njit
def force_energy(
bond_array_1,
c1,
etypes1,
bond_array_2,
c2,
etypes2,
cross_bond_inds_1,
cross_bond_inds_2,
cross_bond_dists_1,
cross_bond_dists_2,
triplets_1,
triplets_2,
sig,
ls,
r_cut,
cutoff_func,
):
"""3-body multi-element kernel between a force component and a local
energy accelerated with Numba.
Args:
bond_array_1 (np.ndarray): 3-body bond array of the first local
environment.
c1 (int): Species of the central atom of the first local environment.
etypes1 (np.ndarray): Species of atoms in the first local
environment.
bond_array_2 (np.ndarray): 3-body bond array of the second local
environment.
c2 (int): Species of the central atom of the second local environment.
etypes2 (np.ndarray): Species of atoms in the second local
environment.
cross_bond_inds_1 (np.ndarray): Two dimensional array whose row m
contains the indices of atoms n > m in the first local
environment that are within a distance r_cut of both atom n and
the central atom.
cross_bond_inds_2 (np.ndarray): Two dimensional array whose row m
contains the indices of atoms n > m in the second local
environment that are within a distance r_cut of both atom n and
the central atom.
cross_bond_dists_1 (np.ndarray): Two dimensional array whose row m
contains the distances from atom m of atoms n > m in the first
local environment that are within a distance r_cut of both atom
n and the central atom.
cross_bond_dists_2 (np.ndarray): Two dimensional array whose row m
contains the distances from atom m of atoms n > m in the second
local environment that are within a distance r_cut of both atom
n and the central atom.
triplets_1 (np.ndarray): One dimensional array of integers whose entry
m is the number of atoms in the first local environment that are
within a distance r_cut of atom m.
triplets_2 (np.ndarray): One dimensional array of integers whose entry
m is the number of atoms in the second local environment that are
within a distance r_cut of atom m.
sig (float): 3-body signal variance hyperparameter.
ls (float): 3-body length scale hyperparameter.
r_cut (float): 3-body cutoff radius.
cutoff_func (Callable): Cutoff function.
Returns:
        np.ndarray:
            Values of the 3-body force/energy kernel.
"""
kern = np.zeros(3)
sig2 = sig * sig
ls1 = 1 / (2 * ls * ls)
ls2 = 1 / (ls * ls)
for m in range(bond_array_1.shape[0]):
ri1 = bond_array_1[m, 0]
ei1 = etypes1[m]
for n in range(triplets_1[m]):
ind1 = cross_bond_inds_1[m, m + n + 1]
ri2 = bond_array_1[ind1, 0]
ei2 = etypes1[ind1]
ri3 = cross_bond_dists_1[m, m + n + 1]
fi3, _ = cutoff_func(r_cut, ri3, 0)
for p in range(bond_array_2.shape[0]):
rj1 = bond_array_2[p, 0]
fj1, _ = cutoff_func(r_cut, rj1, 0)
ej1 = etypes2[p]
for q in range(triplets_2[p]):
ind2 = cross_bond_inds_2[p, p + q + 1]
rj2 = bond_array_2[ind2, 0]
fj2, _ = cutoff_func(r_cut, rj2, 0)
ej2 = etypes2[ind2]
rj3 = cross_bond_dists_2[p, p + q + 1]
fj3, _ = cutoff_func(r_cut, rj3, 0)
fj = fj1 * fj2 * fj3
r11 = ri1 - rj1
r12 = ri1 - rj2
r13 = ri1 - rj3
r21 = ri2 - rj1
r22 = ri2 - rj2
r23 = ri2 - rj3
r31 = ri3 - rj1
r32 = ri3 - rj2
r33 = ri3 - rj3
for d1 in range(3):
ci1 = bond_array_1[m, d1 + 1]
fi1, fdi1 = cutoff_func(r_cut, ri1, ci1)
ci2 = bond_array_1[ind1, d1 + 1]
fi2, fdi2 = cutoff_func(r_cut, ri2, ci2)
fi = fi1 * fi2 * fi3
fdi = fdi1 * fi2 * fi3 + fi1 * fdi2 * fi3
kern[d1] += three_body_fe_perm(
r11,
r12,
r13,
r21,
r22,
r23,
r31,
r32,
r33,
c1,
c2,
ci1,
ci2,
ei1,
ei2,
ej1,
ej2,
fi,
fj,
fdi,
ls1,
ls2,
sig2,
)
return kern / 3
@njit
def stress_energy(
bond_array_1,
c1,
etypes1,
bond_array_2,
c2,
etypes2,
cross_bond_inds_1,
cross_bond_inds_2,
cross_bond_dists_1,
cross_bond_dists_2,
triplets_1,
triplets_2,
sig,
ls,
r_cut,
cutoff_func,
):
"""3-body multi-element kernel between a force component and a local
energy accelerated with Numba.
Args:
bond_array_1 (np.ndarray): 3-body bond array of the first local
environment.
c1 (int): Species of the central atom of the first local environment.
etypes1 (np.ndarray): Species of atoms in the first local
environment.
bond_array_2 (np.ndarray): 3-body bond array of the second local
environment.
c2 (int): Species of the central atom of the second local environment.
etypes2 (np.ndarray): Species of atoms in the second local
environment.
cross_bond_inds_1 (np.ndarray): Two dimensional array whose row m
contains the indices of atoms n > m in the first local
environment that are within a distance r_cut of both atom n and
the central atom.
cross_bond_inds_2 (np.ndarray): Two dimensional array whose row m
contains the indices of atoms n > m in the second local
environment that are within a distance r_cut of both atom n and
the central atom.
cross_bond_dists_1 (np.ndarray): Two dimensional array whose row m
contains the distances from atom m of atoms n > m in the first
local environment that are within a distance r_cut of both atom
n and the central atom.
cross_bond_dists_2 (np.ndarray): Two dimensional array whose row m
contains the distances from atom m of atoms n > m in the second
local environment that are within a distance r_cut of both atom
n and the central atom.
triplets_1 (np.ndarray): One dimensional array of integers whose entry
m is the number of atoms in the first local environment that are
within a distance r_cut of atom m.
triplets_2 (np.ndarray): One dimensional array of integers whose entry
m is the number of atoms in the second local environment that are
within a distance r_cut of atom m.
sig (float): 3-body signal variance hyperparameter.
ls (float): 3-body length scale hyperparameter.
r_cut (float): 3-body cutoff radius.
cutoff_func (Callable): Cutoff function.
Returns:
        np.ndarray:
            Values of the 3-body stress/energy kernel.
"""
kern = np.zeros(6)
sig2 = sig * sig
ls1 = 1 / (2 * ls * ls)
ls2 = 1 / (ls * ls)
for m in range(bond_array_1.shape[0]):
ri1 = bond_array_1[m, 0]
fi1, _ = cutoff_func(r_cut, ri1, 0)
ei1 = etypes1[m]
for n in range(triplets_1[m]):
ind1 = cross_bond_inds_1[m, m + n + 1]
ri2 = bond_array_1[ind1, 0]
fi2, _ = cutoff_func(r_cut, ri2, 0)
ei2 = etypes1[ind1]
ri3 = cross_bond_dists_1[m, m + n + 1]
fi3, _ = cutoff_func(r_cut, ri3, 0)
fi = fi1 * fi2 * fi3
for p in range(bond_array_2.shape[0]):
rj1 = bond_array_2[p, 0]
fj1, _ = cutoff_func(r_cut, rj1, 0)
ej1 = etypes2[p]
for q in range(triplets_2[p]):
ind2 = cross_bond_inds_2[p, p + q + 1]
rj2 = bond_array_2[ind2, 0]
fj2, _ = cutoff_func(r_cut, rj2, 0)
ej2 = etypes2[ind2]
rj3 = cross_bond_dists_2[p, p + q + 1]
fj3, _ = cutoff_func(r_cut, rj3, 0)
fj = fj1 * fj2 * fj3
r11 = ri1 - rj1
r12 = ri1 - rj2
r13 = ri1 - rj3
r21 = ri2 - rj1
r22 = ri2 - rj2
r23 = ri2 - rj3
r31 = ri3 - rj1
r32 = ri3 - rj2
r33 = ri3 - rj3
stress_count = 0
for d1 in range(3):
ci1 = bond_array_1[m, d1 + 1]
fi1, fdi1 = cutoff_func(r_cut, ri1, ci1)
ci2 = bond_array_1[ind1, d1 + 1]
fi2, fdi2 = cutoff_func(r_cut, ri2, ci2)
fdi_p1 = fdi1 * fi2 * fi3
fdi_p2 = fi1 * fdi2 * fi3
fdi = fdi_p1 + fdi_p2
for d2 in range(d1, 3):
coord1 = bond_array_1[m, d2 + 1] * ri1
coord2 = bond_array_1[ind1, d2 + 1] * ri2
kern[stress_count] += three_body_se_perm(
r11,
r12,
r13,
r21,
r22,
r23,
r31,
r32,
r33,
c1,
c2,
ci1,
ci2,
ei1,
ei2,
ej1,
ej2,
fi,
fj,
fdi,
ls1,
ls2,
sig2,
coord1,
coord2,
fdi_p1,
fdi_p2,
)
stress_count += 1
return kern / 6
@njit
def force_force(
bond_array_1,
c1,
etypes1,
bond_array_2,
c2,
etypes2,
cross_bond_inds_1,
cross_bond_inds_2,
cross_bond_dists_1,
cross_bond_dists_2,
triplets_1,
triplets_2,
sig,
ls,
r_cut,
cutoff_func,
):
"""3-body multi-element kernel between two force components accelerated
with Numba.
Args:
bond_array_1 (np.ndarray): 3-body bond array of the first local
environment.
c1 (int): Species of the central atom of the first local environment.
etypes1 (np.ndarray): Species of atoms in the first local
environment.
bond_array_2 (np.ndarray): 3-body bond array of the second local
environment.
c2 (int): Species of the central atom of the second local environment.
etypes2 (np.ndarray): Species of atoms in the second local
environment.
cross_bond_inds_1 (np.ndarray): Two dimensional array whose row m
contains the indices of atoms n > m in the first local
environment that are within a distance r_cut of both atom n and
the central atom.
cross_bond_inds_2 (np.ndarray): Two dimensional array whose row m
contains the indices of atoms n > m in the second local
environment that are within a distance r_cut of both atom n and
the central atom.
cross_bond_dists_1 (np.ndarray): Two dimensional array whose row m
contains the distances from atom m of atoms n > m in the first
local environment that are within a distance r_cut of both atom
n and the central atom.
cross_bond_dists_2 (np.ndarray): Two dimensional array whose row m
contains the distances from atom m of atoms n > m in the second
local environment that are within a distance r_cut of both atom
n and the central atom.
triplets_1 (np.ndarray): One dimensional array of integers whose entry
m is the number of atoms in the first local environment that are
within a distance r_cut of atom m.
triplets_2 (np.ndarray): One dimensional array of integers whose entry
m is the number of atoms in the second local environment that are
within a distance r_cut of atom m.
sig (float): 3-body signal variance hyperparameter.
ls (float): 3-body length scale hyperparameter.
r_cut (float): 3-body cutoff radius.
cutoff_func (Callable): Cutoff function.
Return:
        np.ndarray: Values of the 3-body force/force kernel.
"""
kern = np.zeros((3, 3))
# pre-compute constants that appear in the inner loop
sig2 = sig * sig
ls1 = 1 / (2 * ls * ls)
ls2 = 1 / (ls * ls)
ls3 = ls2 * ls2
# first loop over the first 3-body environment
for m in range(bond_array_1.shape[0]):
ri1 = bond_array_1[m, 0]
ei1 = etypes1[m]
# second loop over the first 3-body environment
for n in range(triplets_1[m]):
ind1 = cross_bond_inds_1[m, m + n + 1]
ri2 = bond_array_1[ind1, 0]
ei2 = etypes1[ind1]
ri3 = cross_bond_dists_1[m, m + n + 1]
fi3, _ = cutoff_func(r_cut, ri3, 0)
# first loop over the second 3-body environment
for p in range(bond_array_2.shape[0]):
rj1 = bond_array_2[p, 0]
ej1 = etypes2[p]
# second loop over the second 3-body environment
for q in range(triplets_2[p]):
ind2 = cross_bond_inds_2[p, p + 1 + q]
rj2 = bond_array_2[ind2, 0]
rj3 = cross_bond_dists_2[p, p + 1 + q]
fj3, _ = cutoff_func(r_cut, rj3, 0)
ej2 = etypes2[ind2]
r11 = ri1 - rj1
r12 = ri1 - rj2
r13 = ri1 - rj3
r21 = ri2 - rj1
r22 = ri2 - rj2
r23 = ri2 - rj3
r31 = ri3 - rj1
r32 = ri3 - rj2
r33 = ri3 - rj3
for d1 in range(3):
ci1 = bond_array_1[m, d1 + 1]
fi1, fdi1 = cutoff_func(r_cut, ri1, ci1)
ci2 = bond_array_1[ind1, d1 + 1]
fi2, fdi2 = cutoff_func(r_cut, ri2, ci2)
fi = fi1 * fi2 * fi3
fdi = fdi1 * fi2 * fi3 + fi1 * fdi2 * fi3
for d2 in range(3):
cj1 = bond_array_2[p, d2 + 1]
fj1, fdj1 = cutoff_func(r_cut, rj1, cj1)
cj2 = bond_array_2[ind2, d2 + 1]
fj2, fdj2 = cutoff_func(r_cut, rj2, cj2)
fj = fj1 * fj2 * fj3
fdj = fdj1 * fj2 * fj3 + fj1 * fdj2 * fj3
kern[d1, d2] += three_body_ff_perm(
r11,
r12,
r13,
r21,
r22,
r23,
r31,
r32,
r33,
c1,
c2,
ci1,
ci2,
cj1,
cj2,
ei1,
ei2,
ej1,
ej2,
fi,
fj,
fdi,
fdj,
ls1,
ls2,
ls3,
sig2,
)
return kern
@njit
def stress_force(
bond_array_1,
c1,
etypes1,
bond_array_2,
c2,
etypes2,
cross_bond_inds_1,
cross_bond_inds_2,
cross_bond_dists_1,
cross_bond_dists_2,
triplets_1,
triplets_2,
sig,
ls,
r_cut,
cutoff_func,
):
"""3-body multi-element kernel between two force components accelerated
with Numba.
Args:
bond_array_1 (np.ndarray): 3-body bond array of the first local
environment.
c1 (int): Species of the central atom of the first local environment.
etypes1 (np.ndarray): Species of atoms in the first local
environment.
bond_array_2 (np.ndarray): 3-body bond array of the second local
environment.
c2 (int): Species of the central atom of the second local environment.
etypes2 (np.ndarray): Species of atoms in the second local
environment.
cross_bond_inds_1 (np.ndarray): Two dimensional array whose row m
contains the indices of atoms n > m in the first local
environment that are within a distance r_cut of both atom n and
the central atom.
cross_bond_inds_2 (np.ndarray): Two dimensional array whose row m
contains the indices of atoms n > m in the second local
environment that are within a distance r_cut of both atom n and
the central atom.
cross_bond_dists_1 (np.ndarray): Two dimensional array whose row m
contains the distances from atom m of atoms n > m in the first
local environment that are within a distance r_cut of both atom
n and the central atom.
cross_bond_dists_2 (np.ndarray): Two dimensional array whose row m
contains the distances from atom m of atoms n > m in the second
local environment that are within a distance r_cut of both atom
n and the central atom.
triplets_1 (np.ndarray): One dimensional array of integers whose entry
m is the number of atoms in the first local environment that are
within a distance r_cut of atom m.
triplets_2 (np.ndarray): One dimensional array of integers whose entry
m is the number of atoms in the second local environment that are
within a distance r_cut of atom m.
sig (float): 3-body signal variance hyperparameter.
ls (float): 3-body length scale hyperparameter.
r_cut (float): 3-body cutoff radius.
cutoff_func (Callable): Cutoff function.
Return:
        np.ndarray: Values of the 3-body stress/force kernel.
"""
kern = np.zeros((6, 3))
# pre-compute constants that appear in the inner loop
sig2 = sig * sig
ls1 = 1 / (2 * ls * ls)
ls2 = 1 / (ls * ls)
ls3 = ls2 * ls2
# first loop over the first 3-body environment
for m in range(bond_array_1.shape[0]):
ri1 = bond_array_1[m, 0]
ei1 = etypes1[m]
# second loop over the first 3-body environment
for n in range(triplets_1[m]):
ind1 = cross_bond_inds_1[m, m + n + 1]
ri2 = bond_array_1[ind1, 0]
ei2 = etypes1[ind1]
ri3 = cross_bond_dists_1[m, m + n + 1]
fi3, _ = cutoff_func(r_cut, ri3, 0)
# first loop over the second 3-body environment
for p in range(bond_array_2.shape[0]):
rj1 = bond_array_2[p, 0]
ej1 = etypes2[p]
# second loop over the second 3-body environment
for q in range(triplets_2[p]):
ind2 = cross_bond_inds_2[p, p + 1 + q]
rj2 = bond_array_2[ind2, 0]
rj3 = cross_bond_dists_2[p, p + 1 + q]
fj3, _ = cutoff_func(r_cut, rj3, 0)
ej2 = etypes2[ind2]
r11 = ri1 - rj1
r12 = ri1 - rj2
r13 = ri1 - rj3
r21 = ri2 - rj1
r22 = ri2 - rj2
r23 = ri2 - rj3
r31 = ri3 - rj1
r32 = ri3 - rj2
r33 = ri3 - rj3
stress_count = 0
for d1 in range(3):
ci1 = bond_array_1[m, d1 + 1]
fi1, fdi1 = cutoff_func(r_cut, ri1, ci1)
ci2 = bond_array_1[ind1, d1 + 1]
fi2, fdi2 = cutoff_func(r_cut, ri2, ci2)
fi = fi1 * fi2 * fi3
fdi_p1 = fdi1 * fi2 * fi3
fdi_p2 = fi1 * fdi2 * fi3
fdi = fdi_p1 + fdi_p2
for d2 in range(d1, 3):
coord1 = bond_array_1[m, d2 + 1] * ri1
coord2 = bond_array_1[ind1, d2 + 1] * ri2
for d3 in range(3):
cj1 = bond_array_2[p, d3 + 1]
fj1, fdj1 = cutoff_func(r_cut, rj1, cj1)
cj2 = bond_array_2[ind2, d3 + 1]
fj2, fdj2 = cutoff_func(r_cut, rj2, cj2)
fj = fj1 * fj2 * fj3
fdj = fdj1 * fj2 * fj3 + fj1 * fdj2 * fj3
kern[stress_count, d3] += three_body_sf_perm(
r11,
r12,
r13,
r21,
r22,
r23,
r31,
r32,
r33,
c1,
c2,
ci1,
ci2,
cj1,
cj2,
ei1,
ei2,
ej1,
ej2,
fi,
fj,
fdi,
fdj,
ls1,
ls2,
ls3,
sig2,
coord1,
coord2,
fdi_p1,
fdi_p2,
)
stress_count += 1
return kern / 2
@njit
def stress_stress(
bond_array_1,
c1,
etypes1,
bond_array_2,
c2,
etypes2,
cross_bond_inds_1,
cross_bond_inds_2,
cross_bond_dists_1,
cross_bond_dists_2,
triplets_1,
triplets_2,
sig,
ls,
r_cut,
cutoff_func,
):
"""3-body multi-element kernel between two force components accelerated
with Numba.
Args:
bond_array_1 (np.ndarray): 3-body bond array of the first local
environment.
c1 (int): Species of the central atom of the first local environment.
etypes1 (np.ndarray): Species of atoms in the first local
environment.
bond_array_2 (np.ndarray): 3-body bond array of the second local
environment.
c2 (int): Species of the central atom of the second local environment.
etypes2 (np.ndarray): Species of atoms in the second local
environment.
cross_bond_inds_1 (np.ndarray): Two dimensional array whose row m
contains the indices of atoms n > m in the first local
environment that are within a distance r_cut of both atom n and
the central atom.
cross_bond_inds_2 (np.ndarray): Two dimensional array whose row m
contains the indices of atoms n > m in the second local
environment that are within a distance r_cut of both atom n and
the central atom.
cross_bond_dists_1 (np.ndarray): Two dimensional array whose row m
contains the distances from atom m of atoms n > m in the first
local environment that are within a distance r_cut of both atom
n and the central atom.
cross_bond_dists_2 (np.ndarray): Two dimensional array whose row m
contains the distances from atom m of atoms n > m in the second
local environment that are within a distance r_cut of both atom
n and the central atom.
triplets_1 (np.ndarray): One dimensional array of integers whose entry
m is the number of atoms in the first local environment that are
within a distance r_cut of atom m.
triplets_2 (np.ndarray): One dimensional array of integers whose entry
m is the number of atoms in the second local environment that are
within a distance r_cut of atom m.
sig (float): 3-body signal variance hyperparameter.
ls (float): 3-body length scale hyperparameter.
r_cut (float): 3-body cutoff radius.
cutoff_func (Callable): Cutoff function.
Return:
        np.ndarray: Values of the 3-body stress/stress kernel.
"""
kern = np.zeros((6, 6))
# pre-compute constants that appear in the inner loop
sig2 = sig * sig
ls1 = 1 / (2 * ls * ls)
ls2 = 1 / (ls * ls)
ls3 = ls2 * ls2
# first loop over the first 3-body environment
for m in range(bond_array_1.shape[0]):
ri1 = bond_array_1[m, 0]
ei1 = etypes1[m]
# second loop over the first 3-body environment
for n in range(triplets_1[m]):
ind1 = cross_bond_inds_1[m, m + n + 1]
ri2 = bond_array_1[ind1, 0]
ei2 = etypes1[ind1]
ri3 = cross_bond_dists_1[m, m + n + 1]
fi3, _ = cutoff_func(r_cut, ri3, 0)
# first loop over the second 3-body environment
for p in range(bond_array_2.shape[0]):
rj1 = bond_array_2[p, 0]
ej1 = etypes2[p]
# second loop over the second 3-body environment
for q in range(triplets_2[p]):
ind2 = cross_bond_inds_2[p, p + 1 + q]
rj2 = bond_array_2[ind2, 0]
rj3 = cross_bond_dists_2[p, p + 1 + q]
fj3, _ = cutoff_func(r_cut, rj3, 0)
ej2 = etypes2[ind2]
r11 = ri1 - rj1
r12 = ri1 - rj2
r13 = ri1 - rj3
r21 = ri2 - rj1
r22 = ri2 - rj2
r23 = ri2 - rj3
r31 = ri3 - rj1
r32 = ri3 - rj2
r33 = ri3 - rj3
stress_count_1 = 0
for d1 in range(3):
ci1 = bond_array_1[m, d1 + 1]
fi1, fdi1 = cutoff_func(r_cut, ri1, ci1)
ci2 = bond_array_1[ind1, d1 + 1]
fi2, fdi2 = cutoff_func(r_cut, ri2, ci2)
fi = fi1 * fi2 * fi3
fdi_p1 = fdi1 * fi2 * fi3
fdi_p2 = fi1 * fdi2 * fi3
fdi = fdi_p1 + fdi_p2
for d2 in range(d1, 3):
coord1 = bond_array_1[m, d2 + 1] * ri1
coord2 = bond_array_1[ind1, d2 + 1] * ri2
stress_count_2 = 0
for d3 in range(3):
cj1 = bond_array_2[p, d3 + 1]
fj1, fdj1 = cutoff_func(r_cut, rj1, cj1)
cj2 = bond_array_2[ind2, d3 + 1]
fj2, fdj2 = cutoff_func(r_cut, rj2, cj2)
fj = fj1 * fj2 * fj3
fdj_p1 = fdj1 * fj2 * fj3
fdj_p2 = fj1 * fdj2 * fj3
fdj = fdj_p1 + fdj_p2
for d4 in range(d3, 3):
coord3 = bond_array_2[p, d4 + 1] * rj1
coord4 = bond_array_2[ind2, d4 + 1] * rj2
kern[
stress_count_1, stress_count_2
] += three_body_ss_perm(
r11,
r12,
r13,
r21,
r22,
r23,
r31,
r32,
r33,
c1,
c2,
ci1,
ci2,
cj1,
cj2,
ei1,
ei2,
ej1,
ej2,
fi,
fj,
fdi,
fdj,
ls1,
ls2,
ls3,
sig2,
coord1,
coord2,
coord3,
coord4,
fdi_p1,
fdi_p2,
fdj_p1,
fdj_p2,
)
stress_count_2 += 1
stress_count_1 += 1
return kern / 4
@njit
def force_force_gradient(
bond_array_1,
c1,
etypes1,
bond_array_2,
c2,
etypes2,
cross_bond_inds_1,
cross_bond_inds_2,
cross_bond_dists_1,
cross_bond_dists_2,
triplets_1,
triplets_2,
sig,
ls,
r_cut,
cutoff_func,
):
"""3-body multi-element kernel between two force components and its
gradient with respect to the hyperparameters.
Args:
bond_array_1 (np.ndarray): 3-body bond array of the first local
environment.
c1 (int): Species of the central atom of the first local environment.
etypes1 (np.ndarray): Species of atoms in the first local
environment.
bond_array_2 (np.ndarray): 3-body bond array of the second local
environment.
c2 (int): Species of the central atom of the second local environment.
etypes2 (np.ndarray): Species of atoms in the second local
environment.
cross_bond_inds_1 (np.ndarray): Two dimensional array whose row m
contains the indices of atoms n > m in the first local
environment that are within a distance r_cut of both atom n and
the central atom.
cross_bond_inds_2 (np.ndarray): Two dimensional array whose row m
contains the indices of atoms n > m in the second local
environment that are within a distance r_cut of both atom n and
the central atom.
cross_bond_dists_1 (np.ndarray): Two dimensional array whose row m
contains the distances from atom m of atoms n > m in the first
local environment that are within a distance r_cut of both atom
n and the central atom.
cross_bond_dists_2 (np.ndarray): Two dimensional array whose row m
contains the distances from atom m of atoms n > m in the second
local environment that are within a distance r_cut of both atom
n and the central atom.
triplets_1 (np.ndarray): One dimensional array of integers whose entry
m is the number of atoms in the first local environment that are
within a distance r_cut of atom m.
triplets_2 (np.ndarray): One dimensional array of integers whose entry
m is the number of atoms in the second local environment that are
within a distance r_cut of atom m.
sig (float): 3-body signal variance hyperparameter.
ls (float): 3-body length scale hyperparameter.
r_cut (float): 3-body cutoff radius.
cutoff_func (Callable): Cutoff function.
Returns:
        (np.ndarray, np.ndarray):
            Value of the 3-body kernel matrix and its gradient with respect
            to the hyperparameters.
"""
kernel_matrix = np.zeros((3, 3))
kernel_grad = np.zeros((2, 3, 3))
# pre-compute constants that appear in the inner loop
sig2, sig3, ls1, ls2, ls3, ls4, ls5, ls6 = grad_constants(sig, ls)
for m in range(bond_array_1.shape[0]):
ri1 = bond_array_1[m, 0]
ei1 = etypes1[m]
for n in range(triplets_1[m]):
ind1 = cross_bond_inds_1[m, m + n + 1]
ri3 = cross_bond_dists_1[m, m + n + 1]
ri2 = bond_array_1[ind1, 0]
ei2 = etypes1[ind1]
fi3, _ = cutoff_func(r_cut, ri3, 0)
for p in range(bond_array_2.shape[0]):
rj1 = bond_array_2[p, 0]
ej1 = etypes2[p]
for q in range(triplets_2[p]):
ind2 = cross_bond_inds_2[p, p + q + 1]
rj3 = cross_bond_dists_2[p, p + q + 1]
rj2 = bond_array_2[ind2, 0]
ej2 = etypes2[ind2]
fj3, _ = cutoff_func(r_cut, rj3, 0)
r11 = ri1 - rj1
r12 = ri1 - rj2
r13 = ri1 - rj3
r21 = ri2 - rj1
r22 = ri2 - rj2
r23 = ri2 - rj3
r31 = ri3 - rj1
r32 = ri3 - rj2
r33 = ri3 - rj3
for d1 in range(3):
ci1 = bond_array_1[m, d1 + 1]
fi1, fdi1 = cutoff_func(r_cut, ri1, ci1)
ci2 = bond_array_1[ind1, d1 + 1]
fi2, fdi2 = cutoff_func(r_cut, ri2, ci2)
fdi = fdi1 * fi2 * fi3 + fi1 * fdi2 * fi3
fi = fi1 * fi2 * fi3
for d2 in range(3):
cj1 = bond_array_2[p, d2 + 1]
fj1, fdj1 = cutoff_func(r_cut, rj1, cj1)
cj2 = bond_array_2[ind2, d2 + 1]
fj2, fdj2 = cutoff_func(r_cut, rj2, cj2)
fdj = fdj1 * fj2 * fj3 + fj1 * fdj2 * fj3
fj = fj1 * fj2 * fj3
kern_term, sig_term, ls_term = three_body_grad_perm(
r11,
r12,
r13,
r21,
r22,
r23,
r31,
r32,
r33,
c1,
c2,
ci1,
ci2,
cj1,
cj2,
ei1,
ei2,
ej1,
ej2,
fi,
fj,
fdi,
fdj,
ls1,
ls2,
ls3,
ls4,
ls5,
ls6,
sig2,
sig3,
)
kernel_matrix[d1, d2] += kern_term
kernel_grad[0, d1, d2] += sig_term
kernel_grad[1, d1, d2] += ls_term
return kernel_matrix, kernel_grad
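# The kernels above all call cutoff_func with the signature
# cutoff_func(r_cut, r, c) -> (f, df), where f is the smooth cutoff value at
# separation r and df is its derivative projected onto the coordinate
# direction cosine c. A minimal sketch of a compatible function follows; the
# name quadratic_cutoff and the exact functional form (including the sign
# convention for df) are illustrative assumptions, not necessarily the form
# used with these kernels in practice.
@njit
def quadratic_cutoff(r_cut, r, c):
    # vanish beyond the cutoff radius
    if r > r_cut:
        return 0.0, 0.0
    f = (r_cut - r) * (r_cut - r)
    # derivative of f with respect to r, projected onto the direction cosine c
    df = 2.0 * c * (r - r_cut)
    return f, df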
@njit
def efs_energy(
bond_array_1,
c1,
etypes1,
bond_array_2,
c2,
etypes2,
cross_bond_inds_1,
cross_bond_inds_2,
cross_bond_dists_1,
cross_bond_dists_2,
triplets_1,
triplets_2,
sig,
ls,
r_cut,
cutoff_func,
):
energy_kernel = 0
force_kernels = np.zeros(3)
stress_kernels = np.zeros(6)
sig2 = sig * sig
ls1 = 1 / (2 * ls * ls)
ls2 = 1 / (ls * ls)
for m in range(bond_array_1.shape[0]):
ri1 = bond_array_1[m, 0]
fi1, _ = cutoff_func(r_cut, ri1, 0)
ei1 = etypes1[m]
for n in range(triplets_1[m]):
ind1 = cross_bond_inds_1[m, m + n + 1]
ri2 = bond_array_1[ind1, 0]
fi2, _ = cutoff_func(r_cut, ri2, 0)
ei2 = etypes1[ind1]
ri3 = cross_bond_dists_1[m, m + n + 1]
fi3, _ = cutoff_func(r_cut, ri3, 0)
fi = fi1 * fi2 * fi3
for p in range(bond_array_2.shape[0]):
rj1 = bond_array_2[p, 0]
fj1, _ = cutoff_func(r_cut, rj1, 0)
ej1 = etypes2[p]
for q in range(triplets_2[p]):
ind2 = cross_bond_inds_2[p, p + q + 1]
rj2 = bond_array_2[ind2, 0]
fj2, _ = cutoff_func(r_cut, rj2, 0)
ej2 = etypes2[ind2]
rj3 = cross_bond_dists_2[p, p + q + 1]
fj3, _ = cutoff_func(r_cut, rj3, 0)
fj = fj1 * fj2 * fj3
r11 = ri1 - rj1
r12 = ri1 - rj2
r13 = ri1 - rj3
r21 = ri2 - rj1
r22 = ri2 - rj2
r23 = ri2 - rj3
r31 = ri3 - rj1
r32 = ri3 - rj2
r33 = ri3 - rj3
energy_kernel += (
three_body_ee_perm(
r11,
r12,
r13,
r21,
r22,
r23,
r31,
r32,
r33,
c1,
c2,
ei1,
ei2,
ej1,
ej2,
fi,
fj,
ls1,
sig2,
)
/ 9
)
stress_count = 0
for d1 in range(3):
ci1 = bond_array_1[m, d1 + 1]
fi1, fdi1 = cutoff_func(r_cut, ri1, ci1)
ci2 = bond_array_1[ind1, d1 + 1]
fi2, fdi2 = cutoff_func(r_cut, ri2, ci2)
fi = fi1 * fi2 * fi3
fdi_p1 = fdi1 * fi2 * fi3
fdi_p2 = fi1 * fdi2 * fi3
fdi = fdi_p1 + fdi_p2
force_kernels[d1] += (
three_body_fe_perm(
r11,
r12,
r13,
r21,
r22,
r23,
r31,
r32,
r33,
c1,
c2,
ci1,
ci2,
ei1,
ei2,
ej1,
ej2,
fi,
fj,
fdi,
ls1,
ls2,
sig2,
)
/ 3
)
for d2 in range(d1, 3):
coord1 = bond_array_1[m, d2 + 1] * ri1
coord2 = bond_array_1[ind1, d2 + 1] * ri2
stress_kernels[stress_count] += (
three_body_se_perm(
r11,
r12,
r13,
r21,
r22,
r23,
r31,
r32,
r33,
c1,
c2,
ci1,
ci2,
ei1,
ei2,
ej1,
ej2,
fi,
fj,
fdi,
ls1,
ls2,
sig2,
coord1,
coord2,
fdi_p1,
fdi_p2,
)
/ 6
)
stress_count += 1
return energy_kernel, force_kernels, stress_kernels
@njit
def efs_force(
bond_array_1,
c1,
etypes1,
bond_array_2,
c2,
etypes2,
cross_bond_inds_1,
cross_bond_inds_2,
cross_bond_dists_1,
cross_bond_dists_2,
triplets_1,
triplets_2,
sig,
ls,
r_cut,
cutoff_func,
):
energy_kernels = np.zeros(3)
force_kernels = np.zeros((3, 3))
stress_kernels = np.zeros((6, 3))
# pre-compute constants that appear in the inner loop
sig2 = sig * sig
ls1 = 1 / (2 * ls * ls)
ls2 = 1 / (ls * ls)
ls3 = ls2 * ls2
# first loop over the first 3-body environment
for m in range(bond_array_1.shape[0]):
ri1 = bond_array_1[m, 0]
fi1, _ = cutoff_func(r_cut, ri1, 0)
ei1 = etypes1[m]
# second loop over the first 3-body environment
for n in range(triplets_1[m]):
ind1 = cross_bond_inds_1[m, m + n + 1]
ri2 = bond_array_1[ind1, 0]
fi2, _ = cutoff_func(r_cut, ri2, 0)
ei2 = etypes1[ind1]
ri3 = cross_bond_dists_1[m, m + n + 1]
fi3, _ = cutoff_func(r_cut, ri3, 0)
fi = fi1 * fi2 * fi3
# first loop over the second 3-body environment
for p in range(bond_array_2.shape[0]):
rj1 = bond_array_2[p, 0]
fj1, _ = cutoff_func(r_cut, rj1, 0)
ej1 = etypes2[p]
# second loop over the second 3-body environment
for q in range(triplets_2[p]):
ind2 = cross_bond_inds_2[p, p + 1 + q]
rj2 = bond_array_2[ind2, 0]
fj2, _ = cutoff_func(r_cut, rj2, 0)
rj3 = cross_bond_dists_2[p, p + 1 + q]
fj3, _ = cutoff_func(r_cut, rj3, 0)
ej2 = etypes2[ind2]
r11 = ri1 - rj1
r12 = ri1 - rj2
r13 = ri1 - rj3
r21 = ri2 - rj1
r22 = ri2 - rj2
r23 = ri2 - rj3
r31 = ri3 - rj1
r32 = ri3 - rj2
r33 = ri3 - rj3
for d3 in range(3):
cj1 = bond_array_2[p, d3 + 1]
fj1, fdj1 = cutoff_func(r_cut, rj1, cj1)
cj2 = bond_array_2[ind2, d3 + 1]
fj2, fdj2 = cutoff_func(r_cut, rj2, cj2)
fj = fj1 * fj2 * fj3
fdj = fdj1 * fj2 * fj3 + fj1 * fdj2 * fj3
energy_kernels[d3] += (
three_body_fe_perm(
r11,
r21,
r31,
r12,
r22,
r32,
r13,
r23,
r33,
c2,
c1,
-cj1,
-cj2,
ej1,
ej2,
ei1,
ei2,
fj,
fi,
fdj,
ls1,
ls2,
sig2,
)
/ 3
)
stress_count = 0
for d1 in range(3):
ci1 = bond_array_1[m, d1 + 1]
fi1, fdi1 = cutoff_func(r_cut, ri1, ci1)
ci2 = bond_array_1[ind1, d1 + 1]
fi2, fdi2 = cutoff_func(r_cut, ri2, ci2)
fi = fi1 * fi2 * fi3
fdi_p1 = fdi1 * fi2 * fi3
fdi_p2 = fi1 * fdi2 * fi3
fdi = fdi_p1 + fdi_p2
force_kernels[d1, d3] += three_body_ff_perm(
r11,
r12,
r13,
r21,
r22,
r23,
r31,
r32,
r33,
c1,
c2,
ci1,
ci2,
cj1,
cj2,
ei1,
ei2,
ej1,
ej2,
fi,
fj,
fdi,
fdj,
ls1,
ls2,
ls3,
sig2,
)
for d2 in range(d1, 3):
coord1 = bond_array_1[m, d2 + 1] * ri1
coord2 = bond_array_1[ind1, d2 + 1] * ri2
stress_kernels[stress_count, d3] += (
three_body_sf_perm(
r11,
r12,
r13,
r21,
r22,
r23,
r31,
r32,
r33,
c1,
c2,
ci1,
ci2,
cj1,
cj2,
ei1,
ei2,
ej1,
ej2,
fi,
fj,
fdi,
fdj,
ls1,
ls2,
ls3,
sig2,
coord1,
coord2,
fdi_p1,
fdi_p2,
)
/ 2
)
stress_count += 1
return energy_kernels, force_kernels, stress_kernels
@njit
def efs_self(
bond_array_1,
c1,
etypes1,
cross_bond_inds_1,
cross_bond_dists_1,
triplets_1,
sig,
ls,
r_cut,
cutoff_func,
):
energy_kernel = 0
force_kernels = np.zeros(3)
stress_kernels = np.zeros(6)
# pre-compute constants that appear in the inner loop
sig2 = sig * sig
ls1 = 1 / (2 * ls * ls)
ls2 = 1 / (ls * ls)
ls3 = ls2 * ls2
for m in range(bond_array_1.shape[0]):
ri1 = bond_array_1[m, 0]
fi1, _ = cutoff_func(r_cut, ri1, 0)
ei1 = etypes1[m]
for n in range(triplets_1[m]):
ind1 = cross_bond_inds_1[m, m + n + 1]
ri2 = bond_array_1[ind1, 0]
fi2, _ = cutoff_func(r_cut, ri2, 0)
ei2 = etypes1[ind1]
ri3 = cross_bond_dists_1[m, m + n + 1]
fi3, _ = cutoff_func(r_cut, ri3, 0)
fi = fi1 * fi2 * fi3
for p in range(bond_array_1.shape[0]):
rj1 = bond_array_1[p, 0]
fj1, _ = cutoff_func(r_cut, rj1, 0)
ej1 = etypes1[p]
for q in range(triplets_1[p]):
ind2 = cross_bond_inds_1[p, p + 1 + q]
rj2 = bond_array_1[ind2, 0]
fj2, _ = cutoff_func(r_cut, rj2, 0)
rj3 = cross_bond_dists_1[p, p + 1 + q]
fj3, _ = cutoff_func(r_cut, rj3, 0)
fj = fj1 * fj2 * fj3
ej2 = etypes1[ind2]
r11 = ri1 - rj1
r12 = ri1 - rj2
r13 = ri1 - rj3
r21 = ri2 - rj1
r22 = ri2 - rj2
r23 = ri2 - rj3
r31 = ri3 - rj1
r32 = ri3 - rj2
r33 = ri3 - rj3
energy_kernel += (
three_body_ee_perm(
r11,
r12,
r13,
r21,
r22,
r23,
r31,
r32,
r33,
c1,
c1,
ei1,
ei2,
ej1,
ej2,
fi,
fj,
ls1,
sig2,
)
/ 9
)
stress_count = 0
for d3 in range(3):
cj1 = bond_array_1[p, d3 + 1]
fj1, fdj1 = cutoff_func(r_cut, rj1, cj1)
cj2 = bond_array_1[ind2, d3 + 1]
fj2, fdj2 = cutoff_func(r_cut, rj2, cj2)
fdj_p1 = fdj1 * fj2 * fj3
fdj_p2 = fj1 * fdj2 * fj3
fdj = fdj_p1 + fdj_p2
ci1 = bond_array_1[m, d3 + 1]
fi1, fdi1 = cutoff_func(r_cut, ri1, ci1)
ci2 = bond_array_1[ind1, d3 + 1]
fi2, fdi2 = cutoff_func(r_cut, ri2, ci2)
fi = fi1 * fi2 * fi3
fdi_p1 = fdi1 * fi2 * fi3
fdi_p2 = fi1 * fdi2 * fi3
fdi = fdi_p1 + fdi_p2
force_kernels[d3] += three_body_ff_perm(
r11,
r12,
r13,
r21,
r22,
r23,
r31,
r32,
r33,
c1,
c1,
ci1,
ci2,
cj1,
cj2,
ei1,
ei2,
ej1,
ej2,
fi,
fj,
fdi,
fdj,
ls1,
ls2,
ls3,
sig2,
)
for d2 in range(d3, 3):
coord1 = bond_array_1[m, d2 + 1] * ri1
coord2 = bond_array_1[ind1, d2 + 1] * ri2
coord3 = bond_array_1[p, d2 + 1] * rj1
coord4 = bond_array_1[ind2, d2 + 1] * rj2
stress_kernels[stress_count] += (
three_body_ss_perm(
r11,
r12,
r13,
r21,
r22,
r23,
r31,
r32,
r33,
c1,
c1,
ci1,
ci2,
cj1,
cj2,
ei1,
ei2,
ej1,
ej2,
fi,
fj,
fdi,
fdj,
ls1,
ls2,
ls3,
sig2,
coord1,
coord2,
coord3,
coord4,
fdi_p1,
fdi_p2,
fdj_p1,
fdj_p2,
)
/ 4
)
stress_count += 1
return energy_kernel, force_kernels, stress_kernels
|
11454242
|
import sys
import re
import pysam
import shutil
import os
import gzip
from multiprocessing import Pool
from optparse import OptionParser
from collections import Counter
from contextlib import contextmanager
usage = "usage: %prog [options] [inputs] Script to process aligned .bam files to 1) split by chromosome and 2) report read ID with bead barcode"
opts = OptionParser(usage=usage)
opts.add_option("--input", "-i", help="Name of the .bam file to parse")
opts.add_option("--name", "-n", help="Name of the set of .bam files to collate")
opts.add_option("--output", "-o", help="Path to the output directory for these")
opts.add_option("--mapq", default = 30, help="Minimum mapq for a read to be kept")
opts.add_option("--barcode-tag", default = 'XB', help="Name of the first .bam file")
opts.add_option("--mito-chr", default = "chrM", help="Designation of mtDNA chromosome")
opts.add_option("--ncores", default = 4, help="Number of cores for parallel processing")
opts.add_option("--bedtools-reference-genome", default = "", help="Reference genome sizes from bedtools")
options, arguments = opts.parse_args()
bamname = options.input
name = options.name
out = options.output
minmapq = float(options.mapq)
barcodeTag = options.barcode_tag
mitochr = options.mito_chr
cpu = int(options.ncores)
bedtoolsGenomeFile = options.bedtools_reference_genome
# Handle the chromosomes
chrlens = {}
with open(bedtoolsGenomeFile) as f:
for line in f:
tok = line.split("\t")
chrlens[tok[0]] = tok[1].strip()
chrlenpass = {x: chrlens[x] for x in chrlens}
chrs = list(chrlenpass.keys())
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
def listDictToCounter(lst):
dct = Counter()
for d in lst:
for k, v in d.items():
dct[k] += v
return(dct)
def getBarcode(intags):
'''
Parse out the barcode per-read
'''
for tg in intags:
if(barcodeTag == tg[0]):
return(tg[1])
return("NA")
#---------------------------------------------------------
# Function for writing the read name and the bead barcode ID for later use
#---------------------------------------------------------
def writeBeadReadName(two):
chrom = two[0]
filename = two[1]
idx = chrs.index(chrom)
# Iterate through bam file
bam = pysam.AlignmentFile(bamname,'rb')
Itr = bam.fetch(str(chrom),multiple_iterators=True)
with gzip.open(filename, 'wt') as out_write:
for read in Itr:
# only consider reads with sufficient mapping quality
if(read.mapping_quality > minmapq):
read_barcode = getBarcode(read.tags)
read_name = read.query_name
value = read_name + "\t" + read_barcode + "\n"
out_write.write(value)
bam.close()
# Split into per-chromosome bam files
new_bam = filename.replace(".read_bead.tsv.gz", ".raw.bam")
    pysam.view(bamname, chrom, '-b', '-o', new_bam, catch_stdout=False)
pysam.index(new_bam)
return(chrom)
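# Illustrative call (hypothetical paths): for chromosome "chr1" this writes
# <out>/<name>.chr1.read_bead.tsv.gz with one "read_name<TAB>barcode" line per
# passing read, plus an indexed per-chromosome BAM <out>/<name>.chr1.raw.bam:
# writeBeadReadName(("chr1", out + "/" + name + ".chr1.read_bead.tsv.gz"))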
if __name__ == '__main__':
# Final loop to write out passing reads
    read_barcode_file = [out + "/" + name + "." + chrom + ".read_bead.tsv.gz" for chrom in chrs]
pool = Pool(processes=cpu)
toy_out = pool.map(writeBeadReadName, zip(chrs, read_barcode_file))
pool.close()
# Make some routing files
    bamchrfiles = [out + "/" + name + "." + chrom + ".raw.bam" for chrom in chrs if chrom != mitochr]
bamchrrouter = open(out.replace("temp/filt_split", ".internal/samples") + "/" + name + ".chrbam.txt", "w")
for v in bamchrfiles:
bamchrrouter.write(v+"\n")
bamchrrouter.close()
bamchrrouter2 = open(out.replace("temp/filt_split", ".internal/samples") + "/" + name + ".mitochrbam.txt", "w")
    bamchrrouter2.write(out + "/" + name + "." + mitochr + ".raw.bam" + "\n")
bamchrrouter2.close()
|
11454294
|
from hathor.graphviz import GraphvizVisualizer
from tests import unittest
from tests.simulation.base import SimulatorTestCase
from tests.utils import add_custom_tx, gen_new_tx
class BaseConsensusSimulatorTestCase(SimulatorTestCase):
def checkConflict(self, tx1, tx2):
meta1 = tx1.get_metadata()
meta2 = tx2.get_metadata()
self.assertIn(tx1.hash, meta2.conflict_with)
self.assertIn(tx2.hash, meta1.conflict_with)
cnt = 0
if not meta1.voided_by:
cnt += 1
if not meta2.voided_by:
cnt += 1
self.assertLessEqual(cnt, 1)
def do_step(self, i, manager1, tx_base):
txA = add_custom_tx(manager1, [(tx_base, 0)], n_outputs=2)
self.graphviz.labels[txA.hash] = f'txA-{i}'
txB = add_custom_tx(manager1, [(txA, 0)])
self.graphviz.labels[txB.hash] = f'txB-{i}'
txC = add_custom_tx(manager1, [(txA, 1)])
self.graphviz.labels[txC.hash] = f'txC-{i}'
txD1 = add_custom_tx(manager1, [(txC, 0)], base_parent=tx_base)
self.graphviz.labels[txD1.hash] = f'txD1-{i}'
txF2 = add_custom_tx(manager1, [(txB, 0), (txD1, 0)])
self.graphviz.labels[txF2.hash] = f'txF2-{i}'
txD2 = add_custom_tx(manager1, [(txC, 0)], base_parent=tx_base)
self.graphviz.labels[txD2.hash] = f'txD2-{i}'
txE = add_custom_tx(manager1, [(txD2, 0)], base_parent=tx_base)
self.graphviz.labels[txE.hash] = f'txE-{i}'
txF1 = add_custom_tx(manager1, [(txB, 0)], base_parent=tx_base)
self.graphviz.labels[txF1.hash] = f'txF1-{i}'
txG = add_custom_tx(manager1, [(txF2, 0)], base_parent=tx_base)
self.graphviz.labels[txG.hash] = f'txG-{i}'
txH = add_custom_tx(manager1, [(txF1, 0), (txG, 0)])
self.graphviz.labels[txH.hash] = f'txH-{i}'
self.checkConflict(txD1, txD2)
self.checkConflict(txF1, txF2)
return txH
def test_two_conflicts_intertwined_once(self):
manager1 = self.create_peer()
manager1.allow_mining_without_peers()
miner1 = self.simulator.create_miner(manager1, hashpower=10e6)
miner1.start()
self.simulator.run(60)
gen_tx1 = self.simulator.create_tx_generator(manager1, rate=3 / 60., hashpower=1e6, ignore_no_funds=True)
gen_tx1.start()
self.simulator.run(300)
gen_tx1.stop()
# Our full node wallet has a callLater that checks for new utxos every 10 seconds.
# If we don't run 10 seconds, the utxos generated on the create_tx_generator won't be available,
# then we might get an insufficient fund error to create the next tx
self.simulator.run(10)
self.graphviz = GraphvizVisualizer(manager1.tx_storage, include_verifications=True, include_funds=True)
address = manager1.wallet.get_unused_address(mark_as_used=False)
value = 10
initial = gen_new_tx(manager1, address, value)
initial.weight = 25
initial.update_hash()
manager1.propagate_tx(initial, fails_silently=False)
self.graphviz.labels[initial.hash] = 'initial'
x = initial
x = self.do_step(0, manager1, x)
# Uncomment lines below to visualize the DAG and the blockchain.
# dot = self.graphviz.dot()
# dot.render('dot0')
def test_two_conflicts_intertwined_multiple_times(self):
manager1 = self.create_peer()
manager1.allow_mining_without_peers()
miner1 = self.simulator.create_miner(manager1, hashpower=10e6)
miner1.start()
self.simulator.run(60)
gen_tx1 = self.simulator.create_tx_generator(manager1, rate=3 / 60., hashpower=1e6, ignore_no_funds=True)
gen_tx1.start()
self.simulator.run(300)
gen_tx1.stop()
# Our full node wallet has a callLater that checks for new utxos every 10 seconds.
# If we don't run 10 seconds, the utxos generated on the create_tx_generator won't be available,
# then we might get an insufficient fund error to create the next tx
self.simulator.run(10)
self.graphviz = GraphvizVisualizer(manager1.tx_storage, include_verifications=True, include_funds=True)
address = manager1.wallet.get_unused_address(mark_as_used=False)
value = 10
initial = gen_new_tx(manager1, address, value)
initial.weight = 25
initial.update_hash()
manager1.propagate_tx(initial, fails_silently=False)
self.graphviz.labels[initial.hash] = 'initial'
x = initial
x = self.do_step(0, manager1, x)
x = self.do_step(1, manager1, x)
x = self.do_step(2, manager1, x)
x = self.do_step(3, manager1, x)
x = self.do_step(4, manager1, x)
# Uncomment lines below to visualize the DAG and the blockchain.
# dot = self.graphviz.dot()
# dot.render('dot0')
class SyncV1ConsensusSimulatorTestCase(unittest.SyncV1Params, BaseConsensusSimulatorTestCase):
__test__ = True
class SyncV2ConsensusSimulatorTestCase(unittest.SyncV2Params, BaseConsensusSimulatorTestCase):
__test__ = True
# sync-bridge should behave like sync-v2
class SyncBridgeConsensusSimulatorTestCase(unittest.SyncBridgeParams, SyncV2ConsensusSimulatorTestCase):
__test__ = True
|
11454313
|
# Prefer Qt5 (PySide2) when available; fall back to Qt4 (PySide).
try:
    from PySide2 import QtGui, QtCore, QtWidgets
    IS_QT_5 = True
except ImportError:
    from PySide import QtGui, QtCore
    import PySide.QtGui as QtWidgets
    IS_QT_5 = False
import time
# common used classes
QComboBox = QtWidgets.QComboBox
QTableWidgetItem = QtWidgets.QTableWidgetItem
QDoubleSpinBox = QtWidgets.QDoubleSpinBox
QDialogButtonBox = QtWidgets.QDialogButtonBox
# File patterns
IMAGE_FILES = "Image Files (*.png *.jpg *.bmp *.tif)"
JSON_FILES = "JSON Files (*.json)"
# methods
def activeWindow():
return QtWidgets.QApplication.activeWindow()
def showInfo(title, message):
QtWidgets.QMessageBox.information(activeWindow(), title, message)
def userSelectedFile(title, filePattern, mustExist=True):
if mustExist:
fileName = QtWidgets.QFileDialog.getOpenFileName(activeWindow(), title, '', filePattern)[0]
else:
fileName = QtWidgets.QFileDialog.getSaveFileName(activeWindow(), caption=title, filter=filePattern)[0]
if fileName == '':
return None
return fileName
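# Illustrative usage of the helpers above (requires a running QApplication);
# both calls return None if the user cancels the dialog:
# path = userSelectedFile("Open image", IMAGE_FILES)               # must exist
# path = userSelectedFile("Save as", JSON_FILES, mustExist=False)  # may be new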
|
11454349
|
import discord
from discord.ext import commands
import os
from .utils.dataIO import dataIO
from cogs.utils import checks
from __main__ import send_cmd_help
import asyncio
import time
import re
from .utils.chat_formatting import pagify, box
try:
from tabulate import tabulate
except Exception as e:
raise RuntimeError("You must run `pip3 install tabulate`.") from e
PATH = 'data/fmod/'
creditIcon = "https://i.imgur.com/TP8GXZb.png"
credits = "Bot by GR8 | Titan"
# stuff for mute time
UNIT_TABLE = {'s': 1, 'm': 60, 'h': 60 * 60, 'd': 60 * 60 * 24}
UNIT_SUF_TABLE = {'sec': (1, ''),
'min': (60, ''),
'hr': (60 * 60, 's'),
'day': (60 * 60 * 24, 's')
}
class BadTimeExpr(Exception):
pass
def _parse_time(time):
if any(u in time for u in UNIT_TABLE.keys()):
delim = '([0-9.]*[{}])'.format(''.join(UNIT_TABLE.keys()))
time = re.split(delim, time)
time = sum([_timespec_sec(t) for t in time if t != ''])
elif not time.isdigit():
raise BadTimeExpr("invalid expression '%s'" % time)
return int(time)
def _timespec_sec(t):
timespec = t[-1]
if timespec.lower() not in UNIT_TABLE:
raise BadTimeExpr("unknown unit '%c'" % timespec)
timeint = float(t[:-1])
return timeint * UNIT_TABLE[timespec]
def _generate_timespec(sec):
timespec = []
def sort_key(kt):
k, t = kt
return t[0]
for unit, kt in sorted(UNIT_SUF_TABLE.items(), key=sort_key, reverse=True):
secs, suf = kt
q = sec // secs
if q:
if q <= 1:
suf = ''
timespec.append('%02.d%s%s' % (q, unit, suf))
sec = sec % secs
return ', '.join(timespec)
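# Illustrative conversions performed by the helpers above:
# _parse_time('1h30m') -> 5400 (seconds); _parse_time('90') -> 90
# _generate_timespec(5400) -> '01hr, 30min'
# _parse_time('1x') raises BadTimeExpr ("invalid expression '1x'")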
class fmod:
"""A feature packed cog for moderation"""
def __init__(self, bot):
self.bot = bot
self.settings = "data/fmod/settings.json"
self.settingsload = dataIO.load_json(self.settings)
self.warnings = "data/fmod/warnings.json"
self.warningsload = dataIO.load_json(self.warnings)
self.handles = {}
@commands.command(no_pm=True, pass_context=True)
@checks.admin()
async def setup(self, ctx):
"""Setup the bot"""
user = ctx.message.author
channel = await self.bot.start_private_message(user)
server = ctx.message.server
if server.id in self.settingsload:
await self.bot.send_message(channel, "You currently have data saved. "
"If you would like to change settings "
"please use the `[p]settings` command.")
return
else:
questions = {
'Warn Message': None,
'Ban Message': None,
'Warn Limit': None,
'Log Channel': None,
'Mute Time': None,
'Mute Role': None,
'Denied Role': None,
'Denied Channel': None,
'DM Warn': None,
'Punishment Roles': None,
'Revoke Message': None
}
embed = discord.Embed(description="Welcome to the setup for the fmod cog! "
"You can stop the setup at any time by typing `stop`. "
"By doing this you will CANCEL any information given.\n\n"
"*When you are ready to begin type `start`.*")
embedmsg = await self.bot.send_message(channel, embed=embed)
await self.bot.wait_for_message(channel=channel, author=ctx.message.author, content='start')
for a in questions:
embed = discord.Embed(description="Welcome to the setup for the fmod cog! "
"You can stop the setup at any time by typing `stop`. "
"*By doing this you will CANCEL any information given.*")
if a == 'Warn Message':
embed.add_field(name="~~~~",
value="**Warn Message** - "
"The message that is sent to the user when they are warned. "
"You can use the following arguments in the message:\n\n"
"```user.mention - mentions the user\n"
"user.name - names the user\n"
"user.id - gets id of user\n"
"warn.count - gets the # of this warn\n"
"warn.limit - # of warns allowed```\n\n"
"*Please type your message*", inline=False)
if a == 'Ban Message':
embed.add_field(name="~~~~",
value="**Ban Message** - "
"The message that is sent to the user when they are banned.\n\n"
"*Please type your message*", inline=False)
if a == 'Warn Limit':
embed.add_field(name="~~~~",
value="**Warn Limit** - "
"The number of warnings before a user is banned.\n\n"
"*Please type the warning number*", inline=False)
if a == 'Log Channel':
embed.add_field(name="~~~~",
value="**Log Channel** - "
"The channel where warnings are logged into.\n\n"
"*Please type the channel name*", inline=False)
if a == 'Mute Time':
embed.add_field(name="~~~~",
value="**Mute Time** - "
"How long a user is muted for on reaching their first warning.\n\n"
"*Please type the mute time with the relevant time format. "
"(*For minutes 'm', for hours 'h'*)*", inline=False)
if a == 'Mute Role':
embed.add_field(name="~~~~",
value="**Mute Role** - "
"The name of the muted role.\n\n"
"*Please type the name of the mute role*", inline=False)
if a == 'Denied Channel':
embed.add_field(name="~~~~",
value="**Denied Channel** - "
"The channel the user will be denied access to on using the [p]deny command.\n\n"
"*Please type the channel name.*", inline=False)
if a == 'Denied Role':
embed.add_field(name="~~~~",
value="**Denied Role** - "
"The name of the denied role used for disabling access to the denied channel.\n\n"
"*Please type the role name*", inline=False)
if a == 'Revoke Message':
embed.add_field(name="~~~~",
value="**Revoke Message** - "
"The message sent when a warning is revoked from a user.\n\n"
"*Please type the message*", inline=False)
if a == 'DM Warn':
embed.add_field(name="~~~~",
value="**DM Warn** - "
"Choose if you want warnings to be sent via PM or posted "
"in the channel the warning was executed in.\n\n"
"Reply with `true` or `false`", inline=False)
if a == 'Punishment Roles':
embed.add_field(name="~~~~",
value="**Punishment Roles** - "
"Choose if you want punishment roles to be added for each warning. "
"(Eg. For 1 warning would have a role with 1 hammer inside. \n\n"
"Reply with `true` or `false`", inline=False)
await self.bot.edit_message(embedmsg, embed=embed)
answer = await self.bot.wait_for_message(channel=channel, author=ctx.message.author)
if 'stop' in answer.content.lower():
await self.bot.send_message(channel, "Stopping....")
break
else:
while 'Time' in a:
if "m" in answer.content or "s" in answer.content or "h" in answer.content:
questions.update({a: answer.content})
break
else:
await self.bot.send_message(channel, "You've done something wrong! "
"Please make sure that the format is correct!")
answer = await self.bot.wait_for_message(channel=channel, author=ctx.message.author)
while 'Limit' in a:
if answer.content.isdigit():
questions.update({a: answer.content})
break
else:
await self.bot.send_message(channel, "Please enter a number!")
answer = await self.bot.wait_for_message(channel=channel, author=ctx.message.author)
while 'Channel' in a:
channelcheck = answer.content
chancheck = discord.utils.get(server.channels, name=channelcheck)
if chancheck is not None:
questions.update({a: answer.content})
break
else:
await self.bot.send_message(channel, "Please enter a valid channel name!")
answer = await self.bot.wait_for_message(channel=channel, author=ctx.message.author)
while 'DM' in a or 'Punishment' in a:
answering = answer.content
if 'true' in answering.lower():
questions.update({a: True})
break
if 'false' in answering.lower():
questions.update({a: False})
break
else:
await self.bot.send_message(channel, "Please enter True or False.")
answer = await self.bot.wait_for_message(channel=channel, author=ctx.message.author)
while 'Denied Role' in a or 'Mute Role' in a:
rolecheck = answer.content
rolcheck = discord.utils.get(server.roles, name=rolecheck)
if rolcheck is not None:
questions.update({a: answer.content})
break
else:
await self.bot.send_message(channel, "Please enter a valid role name!")
answer = await self.bot.wait_for_message(channel=channel, author=ctx.message.author)
while 'Message' in a:
questions.update({a: answer.content})
break
else:
questions.update({a: answer.content})
if not any(x is None for x in questions.values()):
self.settingsload[server.id] = questions
dataIO.save_json(self.settings, self.settingsload)
                # 'DM Warn' and 'Punishment Roles' were already stored as
                # booleans in the question loop above, so no string-to-bool
                # conversion is needed before saving.
await self.bot.send_message(channel, "Settings saved!")
await self.currentsettings(ctx, channel, server)
else:
pass
async def currentsettings(self, ctx, channel, server):
jsonload = self.settingsload[server.id]
message = "```\n"
message += "Warn Message: {Warn Message},\n"
message += "Ban Message: {Ban Message},\n"
message += "Warn Limit: {Warn Limit}, \n"
message += "Log Channel: {Log Channel}, \n"
message += "Mute Time: {Mute Time}, \n"
message += "Mute Role: {Mute Role},\n"
message += "Denied Role: {Denied Role},\n"
message += "Denied Channel: {Denied Channel},\n"
message += "Revoke Message: {Revoke Message},\n"
message += "DM Warn: {DM Warn},\n"
message += "Punishment Roles: {Punishment Roles}"
message += "```"
await self.bot.send_message(channel, message.format(**jsonload))
@commands.group(no_pm=True, pass_context=True, name='settings')
@checks.admin()
async def _settings(self, ctx):
"""Sets individual settings for the cog"""
channel = ctx.message.channel
server = ctx.message.server
if server.id not in self.settingsload:
return await self.bot.say("Please run the `[p]setup` command before running this command.")
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
await self.currentsettings(ctx, channel, server)
@_settings.command(no_pm=True, pass_context=True, manage_server=True)
async def muterole(self, ctx, rolename: str):
"""Change the mute role name."""
server = ctx.message.server
self.settingsload[server.id]["Mute Role"] = rolename
dataIO.save_json(self.settings,
self.settingsload)
await self.bot.say("Muted role name is now: **{}**".format(rolename))
@_settings.command(no_pm=True, pass_context=True, manage_server=True)
async def reset(self, ctx):
"""Resets all the settings"""
server = ctx.message.server
await self.bot.say("Are you sure you want to reset the settings? Type `yes` to confirm.")
await self.bot.wait_for_message(channel=ctx.message.channel, author=ctx.message.author, content='yes')
del self.settingsload[server.id]
dataIO.save_json(self.settings,
self.settingsload)
await self.bot.say("Settings have been cleared!")
@_settings.command(no_pm=True, pass_context=True, manage_server=True)
async def mutetime(self, ctx):
"""Change the mute time for the first warning"""
server = ctx.message.server
await self.bot.say("Please make sure to set the time with the correct time prefix at the end. "
"(*For minutes 'm', for hours 'h'*)\n\nPlease type your timeframe now.")
muteroletime = await self.bot.wait_for_message(channel=ctx.message.channel, author=ctx.message.author)
if "m" in muteroletime.content or "s" in muteroletime.content or "h" in muteroletime.content:
self.settingsload[server.id]["Mute Time"] = muteroletime.content
dataIO.save_json(self.settings,
self.settingsload)
await self.bot.say("Default mute time is now: **{}**".format(muteroletime.content))
else:
return await self.bot.say("You've done something wrong! Please make sure that the format is correct!")
@_settings.command(no_pm=True, pass_context=True, manage_server=True)
async def logchannel(self, ctx, channel: str):
"""Change the logging channel."""
server = ctx.message.server
self.settingsload[server.id]["Log Channel"] = channel
dataIO.save_json(self.settings,
self.settingsload)
await self.bot.say("Log channel is now: **{}**".format(channel))
@_settings.command(no_pm=True, pass_context=True, manage_server=True)
async def deniedchannel(self, ctx, channel: str):
"""Change the channel for those that have been denied."""
server = ctx.message.server
self.settingsload[server.id]["Denied Channel"] = channel
dataIO.save_json(self.settings,
self.settingsload)
await self.bot.say("Mute channel is now: **{}**".format(channel))
@_settings.command(no_pm=True, pass_context=True, manage_server=True)
async def pm(self, ctx):
"""Enable/disable PM warn"""
server = ctx.message.server
if 'DM Warn' not in self.settingsload[server.id]:
self.settingsload[server.id]['DM Warn'] = False
p = self.settingsload[server.id]['DM Warn']
if p:
self.settingsload[server.id]['DM Warn'] = False
await self.bot.say("Warnings are now in the channel.")
elif not p:
self.settingsload[server.id]['DM Warn'] = True
await self.bot.say("Warnings are now in DM.")
dataIO.save_json(self.settings,
self.settingsload)
@_settings.command(no_pm=True, pass_context=True, manage_server=True)
async def punishrole(self, ctx):
"""Enable/disable hammer emojis per warning."""
server = ctx.message.server
true_msg = "Punish emojis per warning enabled."
false_msg = "Punish emojis per warning disabled."
if 'Punishment Roles' not in self.settingsload[server.id]:
self.settingsload[server.id]['Punishment Roles'] = True
msg = true_msg
elif self.settingsload[server.id]['Punishment Roles']:
self.settingsload[server.id]['Punishment Roles'] = False
msg = false_msg
elif not self.settingsload[server.id]['Punishment Roles']:
self.settingsload[server.id]['Punishment Roles'] = True
msg = true_msg
else:
msg = "Error."
dataIO.save_json(self.settings,
self.settingsload)
await self.bot.say(msg)
@_settings.command(no_pm=True, pass_context=True)
@checks.admin_or_permissions(ban_members=True, manage_server=True)
async def max(self, ctx, limit: int):
"""Sets the max amount of warnings before banning."""
server = ctx.message.server
self.settingsload[server.id]["Warn Limit"] = limit
dataIO.save_json(self.settings,
self.settingsload)
await self.bot.say("Warn limit is now: \n{}".format(limit))
@_settings.command(no_pm=True, pass_context=True)
@checks.admin_or_permissions(ban_members=True, manage_server=True)
async def revokemsg(self, ctx, *, msg=None):
"""Set the message on warning being revoked."""
if not msg:
return await self.bot.say("```Set the message on warning being removed.\n\n"
"To get a full list of information, use "
"**settings message** without any parameters.```")
server = ctx.message.server
self.settingsload[server.id]["Revoke Message"] = msg
dataIO.save_json(self.settings,
self.settingsload)
await self.bot.say("Revoke message is now: \n{}".format(msg))
@_settings.command(no_pm=True, pass_context=True)
@checks.admin_or_permissions(ban_members=True, manage_server=True)
async def ban(self, ctx, *, msg=None):
"""Set the ban message."""
if not msg:
return await self.bot.say("```Set the ban message.\n\n"
"To get a full list of information, use "
"**settings message** without any parameters.```")
server = ctx.message.server
self.settingsload[server.id]["Ban Message"] = msg
dataIO.save_json(self.settings,
self.settingsload)
await self.bot.say("Ban message is now: \n{}".format(msg))
@_settings.command(no_pm=True, pass_context=True)
@checks.admin_or_permissions(ban_members=True, manage_server=True)
async def message(self, ctx, *, msg=None):
"""Set the warning message
user.mention - mentions the user
user.name - names the user
user.id - gets id of user
warn.count - gets the # of this warn
warn.limit - # of warns allowed
Example:
**You, user.mention, have received Warning warn.count. After warn.limit,
you will be banned.**
You can set it either for every server.
To set the ban message, use *warnset ban*
"""
if not msg:
await self.bot.say("```Set the warning message\n\n"
"user.mention - mentions the user\n"
"user.name - names the user\n"
"user.id - gets id of user\n"
"warn.count - gets the # of this warn\n"
"warn.limit - # of warns allowed\n\n"
"Example:\n\n"
"**You, user.mention, have received Warning "
"warn.count. After warn.limit, you will be "
"banned.**\n\n"
"You can set it either for every server.\n"
"To set the ban message, use *warnset ban*\n```")
return
server = ctx.message.server
self.settingsload[server.id]["Warn Message"] = msg
dataIO.save_json(self.settings,
self.settingsload)
await self.bot.say("Warn message is now: \n{}".format(msg))
async def filter_message(self, msg, user, count, _max):
msg = msg.replace("user.mention",
user.mention)
msg = msg.replace("user.name",
user.name)
msg = msg.replace("user.id",
user.id)
msg = msg.replace("warn.count",
str(count))
msg = msg.replace("warn.limit",
str(_max))
return msg
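    # Illustrative substitution performed by filter_message (the user name
    # "Alice" is hypothetical):
    # await self.filter_message("user.name has warning warn.count/warn.limit",
    #                           user, 1, 3) -> "Alice has warning 1/3"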
async def embedlog(self, mod, user, reason, countnum, channel, ID, warntype):
avatar = user.avatar_url if user.avatar else user.default_avatar_url
if warntype == 'denied':
embed = discord.Embed(title="User Denied", color=0xfd9e11)
elif warntype == 'Ban':
embed = discord.Embed(title="User Banned", color=0xf01e1e)
else:
embed = discord.Embed(title="User Warned", color=0xfd9e11)
embed.set_thumbnail(url=avatar)
embed.add_field(name="Case ID:", value=ID, inline=False)
embed.add_field(name="User:", value=user, inline=False)
embed.add_field(name="Reason:", value=reason, inline=False)
embed.add_field(name="Warning Number:", value=countnum, inline=False)
embed.add_field(name="Attachments:", value='None', inline=False)
embed.set_footer(text=credits, icon_url=creditIcon)
react = await self.bot.send_message(channel, embed=embed)
await self.bot.add_reaction(react, "\U0001f44d")
await self.bot.add_reaction(react, "\U0001f44e")
await self.bot.add_reaction(react, "\U0001f937")
global msgid
msgid = react.id
@commands.command(no_pm=True, pass_context=True)
@checks.mod()
async def warn(self, ctx, user: discord.Member, *, reason: str=None):
server = ctx.message.server
channel = ctx.message.channel
can_ban = channel.permissions_for(server.me).ban_members
can_role = channel.permissions_for(server.me).manage_roles
if reason is None:
msg = await self.bot.say("Please enter a reason for the warning!")
await asyncio.sleep(5)
await self.bot.delete_message(msg)
await self.bot.delete_message(ctx.message)
return
        if can_ban and can_role:
            await self.bot.delete_message(ctx.message)
else:
await self.bot.say("Sorry, I can't warn this user.\n"
"I am missing the `ban_members` or `manage_roles` permission")
return
if server.id not in self.settingsload:
await self.bot.say("Please run the `[p]setup` command before running this command.")
return
p = self.settingsload[server.id]['DM Warn']
_max = self.settingsload[server.id]['Warn Limit']
mutetime = self.settingsload[server.id]['Mute Time']
msg = self.settingsload[server.id]["Warn Message"]
ban = self.settingsload[server.id]["Ban Message"]
if server.id not in self.warningsload:
self.warningsload[server.id] = {}
dataIO.save_json(self.warnings, self.warningsload)
if user.id not in self.warningsload[server.id]:
self.warningsload[server.id][user.id] = {}
dataIO.save_json(self.warnings, self.warningsload)
else:
pass
else:
if user.id not in self.warningsload[server.id]:
self.warningsload[server.id][user.id] = {}
dataIO.save_json(self.warnings, self.warningsload)
else:
pass
if "Count" in self.warningsload[server.id][user.id]:
count = self.warningsload[server.id][user.id]["Count"]
else:
count = 0
if "ID" in self.warningsload[server.id]:
ID = self.warningsload[server.id]["ID"]
else:
ID = 10000
logchannel = self.settingsload[server.id]["Log Channel"]
channel = discord.utils.get(server.channels, name=logchannel)
        if channel is None:
            await self.bot.say("I was unable to write to your log channel. "
                               "Please make sure there is a channel called {} on the server!".format(logchannel))
            return
max = int(_max)
# checks for warn number
if count == 0:
count += 1
self.warningsload[server.id][user.id].update({"Count": count})
dataIO.save_json(self.warnings, self.warningsload)
msg = await self.filter_message(msg=msg,
user=user,
count=count,
_max=_max)
data = discord.Embed(title=server.name, color=0xfd9e11)
data.add_field(name="Warning", value=msg)
data.add_field(name="Reason:", value=reason, inline=False)
data.add_field(name="Additional Actions:",
value="*In addition to this you have been muted for {} as a result of your actions.*".format(mutetime),
inline=False)
data.set_footer(text=credits, icon_url=creditIcon)
if p:
# if dm is on
await self.bot.send_message(user, embed=data)
await self.bot.say("Done...")
elif not p:
# if dm is not on
await self.bot.say(embed=data)
# run and log
_max = int(_max)
countnum = "{}/{}".format(count, _max)
mod = ctx.message.author
if 'ID' not in self.warningsload[server.id]:
self.warningsload[server.id].update({'ID': ID})
dataIO.save_json(self.warnings, self.warningsload)
else:
ID = int(ID)+12
ID = str(ID)
self.warningsload[server.id].update({'ID': ID})
dataIO.save_json(self.warnings, self.warningsload)
await self._punish_cmd_common(ctx, user, reason=reason)
await self.embedlog(mod, user, reason, countnum, channel, ID, warntype=True)
if 'Warnings' in self.warningsload[server.id][user.id]:
pass
else:
self.warningsload[server.id][user.id]['Warnings'] = {}
dataIO.save_json(self.warnings, self.warningsload)
self.warningsload[server.id][user.id]["Warnings"][ID] = {
'User': user.id,
'Mod': mod.id,
'Reason': reason,
'Warning Number': countnum,
'Message ID': msgid
}
dataIO.save_json(self.warnings, self.warningsload)
elif count > 0 and count < max - 1:
count += 1
self.warningsload[server.id][user.id].update({"Count": count})
dataIO.save_json(self.warnings, self.warningsload)
msg = await self.filter_message(msg=msg,
user=user,
count=count,
_max=_max)
data = discord.Embed(title=server.name, color=0xfd9e11)
data.add_field(name="Warning",
value=msg)
data.add_field(name="Reason:", value=reason, inline=False)
data.set_footer(text=credits, icon_url=creditIcon)
if p:
# if dm is on
await self.bot.send_message(user, embed=data)
await self.bot.say("Done...")
elif not p:
# if dm is not on
await self.bot.say(embed=data)
# run and log
max = int(_max)
countnum = "{}/{}".format(count, _max)
mod = ctx.message.author
if 'ID' not in self.warningsload[server.id]:
self.warningsload[server.id].update({'ID': ID})
dataIO.save_json(self.warnings, self.warningsload)
else:
ID = int(ID)+12
ID = str(ID)
self.warningsload[server.id].update({'ID': ID})
dataIO.save_json(self.warnings, self.warningsload)
await self.embedlog(mod, user, reason, countnum, channel, ID, warntype=True)
if 'Warnings' in self.warningsload[server.id][user.id]:
pass
else:
self.warningsload[server.id][user.id]['Warnings'] = {}
dataIO.save_json(self.warnings, self.warningsload)
self.warningsload[server.id][user.id]["Warnings"][ID] = {
'User': user.id,
'Mod': mod.id,
'Reason': reason,
'Warning Number': countnum,
'Message ID': msgid
}
dataIO.save_json(self.warnings, self.warningsload)
else:
msg = ban
msg = await self.filter_message(msg=msg,
user=user,
count=count,
_max=_max)
data = discord.Embed(title=server.name, color=0xf01e1e)
data.add_field(name="Banned", value=msg)
data.add_field(name="Reason:", value=reason, inline=False)
data.set_footer(text=credits, icon_url=creditIcon)
if p:
# if dm is on
await self.bot.send_message(user, embed=data)
await self.bot.say("Max warning reached, user banned.")
elif not p:
# if dm is not on
await self.bot.say(embed=data)
# run and log
countnum = "Banned"
mod = ctx.message.author
if 'ID' not in self.warningsload[server.id]:
self.warningsload[server.id].update({'ID': ID})
dataIO.save_json(self.warnings, self.warningsload)
else:
ID = int(ID)+12
ID = str(ID)
self.warningsload[server.id].update({'ID': ID})
dataIO.save_json(self.warnings, self.warningsload)
await self.embedlog(mod, user, reason, countnum, channel, ID, warntype='Ban')
if 'Warnings' in self.warningsload[server.id][user.id]:
pass
else:
self.warningsload[server.id][user.id]['Warnings'] = {}
dataIO.save_json(self.warnings, self.warningsload)
self.warningsload[server.id][user.id]['Warnings'][ID] = {
'User': user.id,
'Mod': mod.id,
'Reason': reason,
'Warning Number': countnum,
'Message ID': msgid
}
dataIO.save_json(self.warnings, self.warningsload)
try:
await self.bot.ban(user, delete_message_days=0)
except discord.errors.Forbidden:
await self.bot.say("I don't have permissions to ban that user.")
if 'Punishment Roles' in self.settingsload[server.id] and can_role:
if self.settingsload[server.id]['Punishment Roles']:
poops = count * "\U0001f528"
role_name = "Warning {}".format(poops)
is_there = False
for role in server.roles:
if role.name == role_name:
poop_role = role
is_there = True
if not is_there:
poop_role = await self.bot.create_role(server)
await self.bot.edit_role(role=poop_role,
name=role_name,
server=server)
try:
await self.bot.add_roles(user,
poop_role)
except discord.errors.Forbidden:
await self.bot.say("No permission to add roles")
async def setup_channel(self, channel, role):
perms = discord.PermissionOverwrite()
if channel.type == discord.ChannelType.text:
perms.send_messages = False
elif channel.type == discord.ChannelType.voice:
perms.speak = False
await self.bot.edit_channel_permissions(channel, role, overwrite=perms)
async def _punish_cmd_common(self, ctx, member, reason):
server = ctx.message.server
mutetime = self.settingsload[server.id]["Mute Time"]
duration = _parse_time(mutetime)
if duration < 1:
await self.bot.say("Duration must be 1 second or longer.")
return False
rolename = self.settingsload[server.id]['Mute Role']
role = discord.utils.get(server.roles, name=rolename)
        if role is None:
            await self.bot.say("Please make sure the role {} exists!".format(rolename))
            return
if role >= server.me.top_role:
await self.bot.say('The %s role is too high for me to manage.' % role)
return
        self.warningsload[server.id][member.id]['User Muted'] = {
            'Action': 'Muted',
            'until': (time.time() + duration),
            'by': ctx.message.author.id,
            'reason': reason
        }
dataIO.save_json(self.warnings, self.warningsload)
        # The mute role's existence and position in the hierarchy were
        # validated above, so it does not need to be created or moved here.
for channel in server.channels:
await self.setup_channel(channel, role)
await self.bot.add_roles(member, role)
# schedule callback for role removal
if duration:
self.schedule_unpunish(duration, member, reason)
return True
async def on_channel_create(self, channel):
"""Run when new channels are created and set up role permissions"""
if channel.is_private:
return
server = channel.server
if server.id != "374596069989810176":
return
rolename = self.settingsload[server.id]['Mute Role']
role = discord.utils.get(server.roles, name=rolename)
if not role:
return
await self.setup_channel(channel, role)
def schedule_unpunish(self, delay, member, reason=None):
"""Schedules role removal, canceling and removing existing tasks if present"""
sid = member.server.id
if sid not in self.handles:
self.handles[sid] = {}
if member.id in self.handles[sid]:
self.handles[sid][member.id].cancel()
coro = self._unpunish(member, reason)
handle = self.bot.loop.call_later(delay, self.bot.loop.create_task, coro)
self.handles[sid][member.id] = handle
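    # Illustrative call (hypothetical values): schedule removal of the mute
    # role after one hour: self.schedule_unpunish(3600, member, "spam")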
async def _unpunish(self, member, reason=None):
"""Remove punish role, delete record and task handle"""
server = member.server
rolename = self.settingsload[server.id]['Mute Role']
role = discord.utils.get(server.roles, name=rolename)
if role:
# Has to be done first to prevent triggering on_member_update listener
self._unpunish_data(member)
await self.bot.remove_roles(member, role)
            msg = 'Your punishment in %s has ended.' % member.server.name
            if reason:
                msg += "\nReason was: %s" % reason
            await self.bot.send_message(member, msg)
def _unpunish_data(self, member):
"""Removes punish data entry and cancels any present callback"""
sid = member.server.id
if sid in self.warningsload and member.id in self.warningsload[sid]:
del(self.warningsload[member.server.id][member.id]['User Muted'])
dataIO.save_json(self.warnings, self.warningsload)
async def on_member_join(self, member):
"""Restore punishment if punished user leaves/rejoins"""
server = member.server
sid = member.server.id
if server.id != "374596069989810176":
return
deniedrole = self.settingsload[server.id]['Denied Role']
# re-adds warning roles
if 'Punishment Roles' in self.settingsload[sid]:
if self.settingsload[sid]['Punishment Roles']:
if member.id in self.warningsload[sid]:
count = self.warningsload[sid][member.id]["Count"]
if count >= 1:
poops = "\U0001f528" * count
role_name = "Warning {}".format(poops)
is_there = False
for role in member.server.roles:
if role.name == role_name:
poop_role = role
is_there = True
if not is_there:
server = member.server
poop_role = await self.bot.create_role(server)
await self.bot.edit_role(role=poop_role,
name=role_name,
server=server)
try:
await self.bot.add_roles(member,
poop_role)
except discord.errors.Forbidden:
await self.bot.say("No permission to add roles")
else:
pass
# checks if denied from a channel and re-adds role
for mid in self.warningsload[sid]:
try:
for warning_key, data in self.warningsload[server.id][mid]["Warnings"].items():
if data['Warning Number'] == 'Channel Denied':
role = discord.utils.get(server.roles, name=deniedrole)
await self.bot.add_roles(member, role)
break
except:
continue
if member.id in self.warningsload[sid]:
if 'User Muted' in self.warningsload[sid][member.id]:
duration = self.warningsload[sid][member.id]['User Muted']['until'] - time.time()
if duration > 0:
rolename = self.settingsload[server.id]['Mute Role']
role = discord.utils.get(member.server.roles, name=rolename)
await self.bot.add_roles(member, role)
                    reason = self.warningsload[sid][member.id]['User Muted'].get('reason')
                    if member.id not in self.handles.get(sid, {}):
                        self.schedule_unpunish(duration, member, reason)
# other commands
@commands.command(no_pm=True, pass_context=True)
@checks.admin()
async def warns(self, ctx):
"""Lists all the warnings on the server"""
server = ctx.message.server
server_id = server.id
newcount = 0
deniedcheck = True
if not (server_id in self.warningsload and self.warningsload[server_id]):
await self.bot.say("No users are currently punished.")
return
for mid in self.warningsload[server.id]:
            try:
                # add each member's warning count once, not once per key in
                # their record
                newcount += int(self.warningsload[server.id][mid]["Count"])
for warning_key, data in self.warningsload[server.id][mid]["Warnings"].items():
if data['Warning Number'] == "Channel Denied":
deniedcheck = True
else:
deniedcheck = False
except:
continue
def getmname(mid):
member = discord.utils.get(server.members, id=mid)
if member:
return str(member)
else:
mid = str(mid)
msg = '{}'.format(mid)
return msg
if newcount == 0 and not deniedcheck:
await self.bot.say("No users are currently punished.")
return
headers = ['Case ID', 'Member', 'Warning Number', 'Moderator', 'Reason']
table = []
disp_table = []
for mid in self.warningsload[server.id]:
try:
for warning_key, data in self.warningsload[server.id][mid]["Warnings"].items():
warnid = warning_key
member_name = getmname(data['User'])
numwarns = data['Warning Number']
punisher_name = getmname(data['Mod'])
reason = data['Reason']
table.append((warnid, member_name, numwarns, punisher_name, reason))
except:
continue
for warnid, member_name, numwarns, punisher_name, reason in sorted(table, key=lambda x: x[0]):
disp_table.append((warnid, member_name, numwarns, punisher_name, reason))
for page in pagify(tabulate(disp_table, headers)):
await self.bot.say(box(page))
async def delwarning(self, ctx, server, warnid, reason):
server = ctx.message.server
channel = ctx.message.channel
revokemessage = self.settingsload[server.id]['Revoke Message']
rolename = self.settingsload[server.id]['Mute Role']
role = discord.utils.get(server.roles, name=rolename)
for mid in self.warningsload[server.id]:
try:
for warning_key, data in self.warningsload[server.id][mid]["Warnings"].items():
if warning_key == warnid:
await self.bot.say("Are you sure you want to delete warn number **{}**?\n\nType `yes` to continue.".format(warnid))
continuemsg = await self.bot.wait_for_message(channel=channel, author=ctx.message.author)
if 'yes' in continuemsg.content.lower():
user = discord.utils.get(server.members, id=mid)
if data['Warning Number'] == 'Channel Denied':
role = self.settingsload[server.id]['Denied Role']
role = discord.utils.get(server.roles, name=role)
await self.bot.remove_roles(user, role)
await self.bot.say("The denied role has been removed from this user!")
else:
count = self.warningsload[server.id][mid]["Count"]
count = int(count)-1
self.warningsload[server.id][mid].update({"Count": count})
if 'Punishment Roles' in self.settingsload[server.id]:
if self.settingsload[server.id]['Punishment Roles']:
try:
                                            role = list(filter(lambda r: r.name.startswith('Warning \U0001f528'), server.roles))
await self.bot.remove_roles(user, *role)
except discord.errors.Forbidden:
await self.bot.send_message(channel, "No permission to add roles")
if count >= 1:
poops = count * "\U0001f528"
role_name = "Warning {}".format(poops)
is_there = False
for role in server.roles:
if role.name == role_name:
poop_role = role
is_there = True
if not is_there:
poop_role = await self.bot.create_role(server)
await self.bot.edit_role(role=poop_role,
name=role_name,
server=server)
try:
await self.bot.add_roles(user,
poop_role)
except discord.errors.Forbidden:
await self.bot.say("No permission to add roles")
embed = discord.Embed(title='Warning Revoked by {}'.format(ctx.message.author),
description=revokemessage,
color=0x00ff40)
embed.add_field(name='Reason:', value=reason)
embed.set_footer(text=credits, icon_url=creditIcon)
channel = await self.bot.start_private_message(user)
await self.bot.send_message(channel, embed=embed)
warnid = warning_key
del(self.warningsload[server.id][mid]['Warnings'][warnid])
dataIO.save_json(self.warnings, self.warningsload)
logchannel = self.settingsload[server.id]["Log Channel"]
logchannel = discord.utils.get(server.channels, name=logchannel)
messageid = data['Message ID']
await self.bot.say("Warning deleted!")
try:
embed2 = await self.bot.get_message(logchannel, messageid)
newembed = discord.Embed(title='Warning Revoked',
color=0x00ff40,
description=('The warning for **{}** '
'has been revoked by **{}**'
' for the reason **{}**.'.format(user,
ctx.message.author,
reason)))
newembed.set_footer(text=credits, icon_url=creditIcon)
await self.bot.edit_message(embed2, embed=newembed)
await self.bot.clear_reactions(embed2)
except discord.NotFound:
await self.bot.say("Log Message is not found. "
"If you changed the log channel you will need to react to the message there")
return
except:
continue
await self.bot.say("This warning was not found. Please make sure you typed it correctly!")
@commands.command(no_pm=True, pass_context=True)
@checks.admin()
async def delwarn(self, ctx, id, *, reason):
server = ctx.message.server
if server.id not in self.settingsload:
await self.bot.say("Please run the `[p]setup` command before running this command.")
return
await self.delwarning(ctx, server=server, warnid=id, reason=reason)
@commands.command(no_pm=True, pass_context=True)
@checks.admin()
async def setcount(self, ctx, user: discord.Member, count):
        server = ctx.message.server
        if server.id not in self.settingsload:
            await self.bot.say("Please run the `[p]setup` command before running this command.")
            return
        if server.id not in self.warningsload or user.id not in self.warningsload[server.id]:
            await self.bot.say("This user has no warnings!")
            return
        counter = int(self.warningsload[server.id][user.id]["Count"])
        # the warn limit is stored in the per-server settings, not in the
        # per-user warning record
        warn_limit = int(self.settingsload[server.id]["Warn Limit"])
        count = int(count)
        if counter == 0:
            await self.bot.say("This user has no warnings!")
            return
        if count >= warn_limit:
            await self.bot.say("Please set the count to be under the maximum amount of warnings!")
            return
self.warningsload[server.id][user.id].update({'Count': count})
dataIO.save_json(self.warnings, self.warningsload)
await self.bot.say("Count updated!")
@commands.command(no_pm=True, pass_context=True)
@checks.mod()
async def deny(self, ctx, user: discord.Member, *, reason: str=None):
"""Denies a user from the channel"""
server = ctx.message.server
user = user
ID = 10000
if server.id not in self.settingsload:
await self.bot.say("Please run the `[p]setup` command before running this command.")
return
revokechannel = self.settingsload[server.id]['Denied Channel']
deniedrole = self.settingsload[server.id]['Denied Role']
channel = discord.utils.get(server.channels, name=revokechannel)
        if channel is None:
            await self.bot.say("I was unable to write to your log channel. "
                               "Please make sure there is a channel called {} on the server!".format(revokechannel))
            return
if "ID" in self.warningsload[server.id]:
ID = self.warningsload[server.id]["ID"]
else:
ID = 10000
if reason is None:
msg = await self.bot.say("Please enter a reason!")
await asyncio.sleep(5)
await self.bot.delete_message(msg)
return
if server.id not in self.warningsload:
self.warningsload[server.id] = {}
dataIO.save_json(self.warnings, self.warningsload)
if user.id in self.warningsload[server.id]:
for warning_key, data in self.warningsload[server.id][user.id]["Warnings"].items():
try:
if data['Warning Number'] == 'Channel Denied':
msg = await self.bot.say("This user has already been denied access to the channel.")
await asyncio.sleep(8)
await self.bot.delete_message(msg)
await self.bot.delete_message(ctx.message)
return
except:
continue
else:
role = discord.utils.get(server.roles, name=deniedrole)
mod = ctx.message.author
await self.bot.delete_message(ctx.message)
await self.bot.add_roles(user, role)
dmuser = await self.bot.start_private_message(user)
embed = discord.Embed(title='You have been denied from {}'.format(channel),
description=('This is to let you know that you have been '
'denied read permissions for the {} channel.'.format(channel)))
await self.bot.send_message(dmuser, embed=embed)
if 'ID' not in self.warningsload[server.id]:
self.warningsload[server.id].update({'ID': ID})
dataIO.save_json(self.warnings, self.warningsload)
else:
ID = int(ID)+12
ID = str(ID)
self.warningsload[server.id].update({'ID': ID})
dataIO.save_json(self.warnings, self.warningsload)
if 'Warnings' in self.warningsload[server.id][user.id]:
pass
else:
self.warningsload[server.id][user.id]['Warnings'] = {}
dataIO.save_json(self.warnings, self.warningsload)
countnum = 'User Denied'
logchannel = self.settingsload[server.id]['Log Channel']
logchannel = discord.utils.get(server.channels, name=logchannel)
mod = ctx.message.author
await self.embedlog(mod, user, reason, countnum, logchannel, ID, warntype='denied')
self.warningsload[server.id][user.id]["Warnings"][ID] = {
'Message ID': msgid,
'Reason': reason,
'Mod': ctx.message.author.id,
'User': user.id,
'Warning Number': 'Channel Denied'
}
dataIO.save_json(self.warnings, self.warningsload)
for channel in server.channels:
perms = discord.PermissionOverwrite()
if channel.type == discord.ChannelType.text:
perms.send_messages = False
perms.read_messages = False
await self.bot.edit_channel_permissions(channel, role, overwrite=perms)
@commands.command(no_pm=True, pass_context=True)
@checks.mod()
async def attach(self, ctx, warnid):
"""Attaches evidence to a warning"""
server = ctx.message.server
if server.id not in self.settingsload:
await self.bot.say("Please run the `[p]setup` command before running this command.")
return
def getmname(mid):
member = discord.utils.get(server.members, id=mid)
if member:
if member.nick:
return '%s (%s)' % (member.nick, member)
else:
return str(member)
else:
mid = str(mid)
msg = '{} (Member not present)'.format(mid)
return msg
for mid in self.warningsload[server.id]:
try:
for warning_key, data in self.warningsload[server.id][mid]["Warnings"].items():
if warning_key == warnid:
logchannel = self.settingsload[server.id]["Log Channel"]
logchannel = discord.utils.get(server.channels, name=logchannel)
messageid = data['Message ID']
await self.bot.say("Warning attachment manager sent through DM!")
try:
embed2 = await self.bot.get_message(logchannel, messageid)
except discord.NotFound:
await self.bot.say("Message is not found. If you changed the log "
"channel you will need to react to the message there")
return
dmchannel = await self.bot.start_private_message(ctx.message.author)
await self.bot.send_message(dmchannel, ("Please send your attachments for the warning {}. "
"When you have finished please type `stop`.".format(warnid)))
                        # Fall back to an empty list when no usable attachments are stored;
                        # the original left `attachlist` undefined in one of these paths.
                        existing = self.warningsload[server.id][mid]["Warnings"][warnid].get('Attachments')
                        if existing and 'None' not in existing:
                            attachlist = existing
                        else:
                            attachlist = []
stuff = await self.bot.wait_for_message(channel=dmchannel, author=ctx.message.author)
while stuff.content is not None:
if stuff.content.lower() == 'stop':
await self.bot.send_message(dmchannel, "Done!")
break
else:
if stuff.attachments or 'discord' in stuff.content or 'gyazo' in stuff.content or 'prntscr' in stuff.content:
if stuff.attachments:
attachmentlist = stuff.attachments[0]
attachment = attachmentlist['url']
attachlist.append(attachment)
await self.bot.add_reaction(stuff, emoji='\U0001f4ce')
stuff = await self.bot.wait_for_message(channel=dmchannel, author=ctx.message.author)
else:
attachlist.append(stuff.content)
await self.bot.add_reaction(stuff, emoji='\U0001f4ce')
stuff = await self.bot.wait_for_message(channel=dmchannel, author=ctx.message.author)
else:
await self.bot.send_message(dmchannel, "Please send a picture or a link")
stuff = await self.bot.wait_for_message(channel=dmchannel, author=ctx.message.author)
attachlist2 = ('\n'.join(attachlist))
embed = embed2.embeds[0]
title = embed['title']
user = discord.utils.get(server.members, id=data['User'])
avatar = user.avatar_url if user.avatar else user.default_avatar_url
newembed = discord.Embed(title=title, color=embed['color'])
newembed.set_thumbnail(url=avatar)
newembed.add_field(name='Case ID:', value=warnid, inline=False)
newembed.add_field(name='User:', value=getmname(data['User']), inline=False)
newembed.add_field(name='Reason:', value=data['Reason'], inline=False)
newembed.add_field(name='Warning Number:', value=data['Warning Number'], inline=False)
newembed.add_field(name='Attachments:', value=attachlist2, inline=False)
newembed.set_footer(text=credits, icon_url=creditIcon)
await self.bot.edit_message(embed2, embed=newembed)
self.warningsload[server.id][mid]["Warnings"][warnid].update({"Attachments": attachlist})
dataIO.save_json(self.warnings, self.warningsload)
return
except:
continue
@commands.command(no_pm=True, pass_context=True)
async def report(self, ctx, user: discord.Member):
"""Reports a user to the staff"""
server = ctx.message.server
if server.id not in self.settingsload:
await self.bot.say("Please run the `[p]setup` command before running this command.")
return
channel = await self.bot.start_private_message(ctx.message.author)
await self.bot.delete_message(ctx.message)
await self.bot.send_message(channel, ("You are reporting {}. "
"This will inform the staff members of this users actions. "
"Are you sure you want to continue? \n\n Type `yes` to continue...".format(user)))
await self.bot.wait_for_message(channel=channel, author=ctx.message.author, content='yes')
await self.bot.send_message(channel, "Please enter a reason")
reason = await self.bot.wait_for_message(channel=channel, author=ctx.message.author)
await self.bot.send_message(channel, "Your reason is {}. Are you sure? \n\nType `yes` to continue or `no` to return.".format(reason.content))
confirm = await self.bot.wait_for_message(channel=channel, author=ctx.message.author)
attachlist = []
while confirm.content is not None:
if confirm.content.lower() == 'yes':
await self.bot.send_message(channel, "Please enter your image attachments as proof. "
"Formats accepted are: Discord, Gyazo and Lightshot. "
"File uploads via discord are also allowed. Once you are done type `send`.")
proof = await self.bot.wait_for_message(channel=channel, author=ctx.message.author)
while proof.content is not None:
if proof.content == 'send':
await self.bot.send_message(channel, "Sent!")
break
else:
if proof.attachments or 'discord' in proof.content or 'gyazo' in proof.content or 'prntscr' in proof.content:
if proof.attachments:
attachmentlist = proof.attachments[0]
attachment = attachmentlist['url']
attachlist.append(attachment)
await self.bot.add_reaction(proof, emoji='\U0001f4ce')
proof = await self.bot.wait_for_message(channel=channel, author=ctx.message.author)
else:
attachlist.append(proof.content)
await self.bot.add_reaction(proof, emoji='\U0001f4ce')
proof = await self.bot.wait_for_message(channel=channel, author=ctx.message.author)
else:
await self.bot.send_message(channel, "Please send a picture or a link")
proof = await self.bot.wait_for_message(channel=channel, author=ctx.message.author)
                break
            # The original broke out of the loop unconditionally here, which made
            # the 'no' branch dead code; any answer other than 'yes' now waits
            # for a new confirmation message.
            confirm = await self.bot.wait_for_message(channel=channel, author=ctx.message.author)
logchannel = self.settingsload[server.id]['Log Channel']
logchannel = discord.utils.get(server.channels, name=logchannel)
attachlist2 = ('\n'.join(attachlist))
embed = discord.Embed(title='Report', color=0x0080ff)
embed.add_field(name='User:', value=user, inline=False)
embed.add_field(name='Reason:', value=reason.content, inline=False)
embed.add_field(name='Attachments:', value=attachlist2, inline=False)
embed.set_footer(text=credits, icon_url=creditIcon)
react = await self.bot.send_message(logchannel, embed=embed)
await self.bot.add_reaction(react, "\U0001f44d")
await self.bot.add_reaction(react, "\U0001f44e")
await self.bot.add_reaction(react, "\U0001f937")
def check_folder():
if not os.path.exists("data/fmod"):
print("Creating data/fmod/server.id folder")
os.makedirs("data/fmod")
if not os.path.exists(PATH):
        print("Creating folder: {}".format(PATH))
os.makedirs(PATH)
def check_file():
data = {}
a = "data/fmod/settings.json"
b = "data/fmod/warnings.json"
if not dataIO.is_valid_json(a):
print("Creating data/fmod/settings.json")
        dataIO.save_json(a, data)
if not dataIO.is_valid_json(b):
print("Creating data/fmod/warnings.json")
        dataIO.save_json(b, data)
def setup(bot):
check_folder()
check_file()
bot.add_cog(fmod(bot))
|
11454422
|
import re
import warnings
from typing import Any, Dict, Optional, Tuple, Type, TypeVar, Union
from neo4j import Driver, GraphDatabase
from pandas import Series
from pandas.core.frame import DataFrame
from .call_builder import CallBuilder
from .direct_endpoints import DirectEndpoints
from .error.unable_to_connect import UnableToConnectError
from .error.uncallable_namespace import UncallableNamespace
from .query_runner.arrow_query_runner import ArrowQueryRunner
from .query_runner.neo4j_query_runner import Neo4jQueryRunner
from .query_runner.query_runner import QueryRunner
from .server_version.server_version import ServerVersion
from .version import __version__
GDS = TypeVar("GDS", bound="GraphDataScience")
class InvalidServerVersionError(Exception):
pass
class GraphDataScience(DirectEndpoints, UncallableNamespace):
_AURA_DS_PROTOCOL = "neo4j+s"
def __init__(
self,
endpoint: Union[str, Driver, QueryRunner],
auth: Optional[Tuple[str, str]] = None,
aura_ds: bool = False,
arrow: bool = True,
):
if isinstance(endpoint, str):
self._config: Dict[str, Any] = {"user_agent": f"neo4j-graphdatascience-v{__version__}"}
if aura_ds:
protocol = endpoint.split(":")[0]
                if protocol != self._AURA_DS_PROTOCOL:
raise ValueError(
f"AuraDS requires using the '{self._AURA_DS_PROTOCOL}' protocol ('{protocol}' was provided)"
)
self._config["max_connection_lifetime"] = 60 * 8 # 8 minutes
self._config["keep_alive"] = True
self._config["max_connection_pool_size"] = 50
driver = GraphDatabase.driver(endpoint, auth=auth, **self._config)
self._query_runner = Neo4jQueryRunner(driver, auto_close=True)
elif isinstance(endpoint, QueryRunner):
if arrow:
raise ValueError("Arrow cannot be used if the QueryRunner is provided directly")
self._query_runner = endpoint
else:
driver = endpoint
self._query_runner = Neo4jQueryRunner(driver, auto_close=False)
try:
server_version_string = self._query_runner.run_query("RETURN gds.version()").squeeze()
except Exception as e:
raise UnableToConnectError(e)
server_version_match = re.search(r"^(\d+)\.(\d+)\.(\d+)", server_version_string)
if not server_version_match:
raise InvalidServerVersionError(f"{server_version_string} is not a valid GDS library version")
self._server_version = ServerVersion(*map(int, server_version_match.groups()))
self._query_runner.set_server_version(self._server_version)
if arrow and self._server_version >= ServerVersion(2, 1, 0):
try:
arrow_info: Series = self._query_runner.run_query("CALL gds.debug.arrow()").squeeze()
if arrow_info["running"]:
self._query_runner = ArrowQueryRunner(
arrow_info["listenAddress"], self._query_runner, auth, driver.encrypted, True
)
except Exception as e:
warnings.warn(f"Could not initialize GDS Flight Server client: {e}")
super().__init__(self._query_runner, "gds", self._server_version)
def __getattr__(self, attr: str) -> CallBuilder:
return CallBuilder(self._query_runner, f"gds.{attr}", self._server_version)
def set_database(self, db: str) -> None:
self._query_runner.set_database(db)
def database(self) -> Optional[str]:
return self._query_runner.database()
def run_cypher(self, query: str, params: Optional[Dict[str, Any]] = None) -> DataFrame:
return self._query_runner.run_query(query, params)
def driver_config(self) -> Dict[str, Any]:
return self._config
@classmethod
def from_neo4j_driver(
cls: Type[GDS], driver: Driver, auth: Optional[Tuple[str, str]] = None, arrow: bool = True
) -> "GraphDataScience":
return cls(driver, auth=auth, arrow=arrow)
def close(self) -> None:
self._query_runner.close()
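# Usage sketch (assumes a reachable Neo4j instance with the GDS plugin installed;
# the URI and credentials below are placeholders, not part of this module):
#
#     gds = GraphDataScience("bolt://localhost:7687", auth=("neo4j", "password"))
#     df = gds.run_cypher("MATCH (n) RETURN count(n) AS n")
#     gds.close()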
|
11454424
|
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from .utils import lang_names
def get_page_desc(_id):
"""Builds the first big text card that introduces users to the page"""
return [
dbc.Row(
children=[
dbc.Col([
html.Div(
** {"data-aos": "fade-up"},
className="aos-refresh-onload",
children=dbc.Jumbotron(
style={"padding": "4%"},
className="elevation-3",
id=_id,
)
)
]),
]
)
]
def get_labs_indicator(_id, instructions_id):
"""This is the card where user selects if he has lab values or not"""
return [
dbc.Row(
justify="center",
align="stretch",
children=[
dbc.Col(
xs=12, sm=12, md=12, lg=6,
children=html.Div(
**{"data-aos": "fade-up", "data-aos-delay": "0"},
style={"transformStyle": "flat", "zIndex": "50", "position": "relative"},
className="aos-refresh-onload",
children=dbc.Card(
style={"borderWidth": "0px",
"height": "125px",
"marginBottom": 30},
className="elevation-3",
children=[
dbc.CardHeader(id=_id + "_text",
style={"fontWeight": "bold"}),
dbc.CardBody([
dbc.Row([
dbc.Col(
html.Div([
dcc.Dropdown(
clearable=False,
id=_id,
value=0,
className="dcc_dropdown",
),
]),
),
]),
]),
]
),
)
),
dbc.Col(
xs=12, sm=12, md=12, lg=6,
children=html.Div(
**{"data-aos": "fade-up", "data-aos-delay": "100"},
className="aos-refresh-onload",
children=dbc.Card(
className="elevation-3",
style={"borderWidth": "0px", "height": "125px", "marginBottom": 30},
children=[
dbc.CardBody([
# The headline that says 'insert the features below'
dbc.Row(
justify="center",
style={"height": "100%", "padding": 2, "opacity": "0.6"},
no_gutters=True,
children=[
dbc.Col(
style={"flexGrow": "0"},
align="center",
children=html.Div(
className="material-icons",
children="info",
style={"fontSize": "40px", "paddingRight": 20}
),
),
dbc.Col(
align="center",
children=html.H5(
id=instructions_id,
style={"fontSize": "18px"}
),
)
]
),
]),
]
),
)
)
]
),
]
def get_model_desc(_id):
"""This is the big text card that has the technical details of the model"""
return [
dbc.Row(
justify="center",
children=dbc.Col(html.Div(
**{"data-aos": "fade-up"},
children=dbc.Jumbotron(
style={"padding": "4%"},
className="elevation-3",
id=_id,
)
))
),
]
def get_feature_importance(_id):
"""This is the card that houses the feature importance graph at the bottom of the page"""
return [
dbc.Row(
justify="center",
children=dbc.Col([
dbc.Card(
id=_id,
style={"padding": "4%"},
className="elevation-3",
),
]),
),
]
def get_feature_cards(_id):
"""Create feature card container"""
return [
# The container of feature cards
html.Div(
style={"min-height": "550px"},
children=[
dbc.Row(
id=_id,
justify="center"
)
])
]
def get_submit_button(_id, res_id, err_id, imputed_id):
"""Build submit button"""
return [
dbc.Row(
justify="center",
# To fix the white line bug Jumbotron causes
style={"position": "relative", "zIndex": "1"},
children=[
dbc.Col(
xs=12,
sm=4,
md=4,
lg=4,
style={"paddingBottom": 20},
children=html.Div(
**{"data-aos": "fade-up", "data-aos-delay": "400"},
id="submit-features-calc-wrapper",
className="aos-refresh-onload",
children=dbc.Button(
id=_id,
n_clicks=0,
className="mr-1 calc-submit-button elevation-3"
),
)
),
# The card that shows the user's score
dbc.Col(
xs=12,
sm=8,
md=8,
lg=8,
style={"paddingBottom": 20},
children=html.Div(
**{"data-aos": "fade-up", "data-aos-delay": "500"},
className="aos-refresh-onload",
id=res_id
)
),
dbc.Col(
xs=12, sm=12, md=12, lg=12,
children=html.P(
id=err_id,
style={"color": "red", "textAlign": "center"}
),
),
dbc.Col(
xs=12, sm=12, md=12, lg=12,
children=dcc.Markdown(id=imputed_id)
),
],
),
]
def get_results_card(_id, err_id):
"""Build score card and error message text"""
return [
]
def get_inputed_vals(_id):
"""Build the text that says what values where imputed"""
return [
]
def get_personal_visual(_id):
"""Builds the user's personal visual that shows feature contribution to risk score"""
return [
dbc.Row(
justify="center",
children=dbc.Col([
dbc.Card(
style={
"borderColor": "white",
},
children=[
dcc.Markdown(
id=_id + "-explanation"
),
html.Img(
id=_id,
style={"height": 200}
),
]
)
]),
),
]
def get_lang(_id):
"""Builds the language selection dropdown at the top of the page"""
return [
dbc.Row(
justify="end",
children=dbc.Col(
xs=5, sm=5, md=4, lg=3,
children=html.Div(
dcc.Dropdown(
id=_id,
clearable=False,
options=list(sorted([{'label': lang_names[x], 'value': x} for x in range(len(lang_names))],
key=lambda i: i['label'])),
value=0,
style={'marginBottom': 10, "width": "100%"},
className="dcc_dropdown",
),
),
),
),
]
|
11454437
|
from ..remote import RemoteModel
class ChangeSummaryNetworkAnalysisGridRemote(RemoteModel):
"""
This table provides a summary of the changes identified by NetMRI.
| ``id:`` The internal NetMRI identifier of the grid entry.
| ``attribute type:`` number
| ``ChangeTime:`` The EARLIEST date/time that this change MAY have occurred. That is, the beginning time of the Change Window.
| ``attribute type:`` string
| ``ChangeDetectedTime:`` The date/time the change was detected. That is, the end time of the Change Window.
| ``attribute type:`` datetime
| ``ChangeID:`` The internal NetMRI identifier for this change.
| ``attribute type:`` number
| ``HardwareInd:`` A flag indicating a hardware change.
| ``attribute type:`` bool
| ``SoftwareInd:`` A flag indicating a software change.
| ``attribute type:`` bool
| ``AdminInd:`` A flag indicating an administrative change.
| ``attribute type:`` bool
| ``ExternalInd:`` A flag indicating an external change.
| ``attribute type:`` bool
| ``SNMPPollInd:`` A flag indicating that this change was detected, at least in part, via differences between SNMP polls.
| ``attribute type:`` bool
| ``SNMPTrapInd:`` A flag indicating that this change was detected, at least in part, via an SNMP trap (not currently supported).
| ``attribute type:`` bool
| ``SyslogInd:`` A flag indicating that this change was detected, at least in part, via a Syslog message.
| ``attribute type:`` bool
| ``ConfigPollInd:`` A flag indicating that this change was detected, at least in part, by differences found between configuration file retrievals.
| ``attribute type:`` bool
| ``DeviceID:`` The internal NetMRI identifier for the device that changed.
| ``attribute type:`` number
| ``DeviceIPDotted:`` The management IP address of the device that changed, in dotted (or colon-delimited for IPv6) format.
| ``attribute type:`` string
| ``DeviceName:`` The NetMRI name of the device that changed; this will be either the same as DeviceSysName or DeviceDNSName, depending on your NetMRI configuration.
| ``attribute type:`` string
| ``DeviceVendor:`` The vendor name of the device that changed.
| ``attribute type:`` string
    | ``DeviceModel:`` The model name of the device that changed.
| ``attribute type:`` string
| ``DeviceIPNumeric:`` The numerical value of the IP address of the device that changed.
| ``attribute type:`` number
| ``DeviceType:`` The NetMRI-determined device type of the device that changed.
| ``attribute type:`` string
| ``ChangeUser:`` The user that made the change, if available.
| ``attribute type:`` string
    | ``ChangeAuthorizedInd:`` A flag indicating whether the change is authorized.
| ``attribute type:`` bool
| ``ChangeMethod:`` The method used to make the change.
| ``attribute type:`` string
| ``ChangeType:`` The type of change made.
| ``attribute type:`` string
| ``RunningChangedRev:`` The running change revision of the change.
| ``attribute type:`` string
| ``SavedChangedRev:`` The saved change revision.
| ``attribute type:`` string
| ``HideRunning:`` Flag indicating there is a running revision.
| ``attribute type:`` number
| ``HideSaved:`` Flag indicating there is a saved change revision.
| ``attribute type:`` number
| ``DeviceAssurance:`` The assurance level of the device type value.
| ``attribute type:`` number
| ``HideAccess:`` A flag indicating whether or not access diff viewer is available for this entry.
| ``attribute type:`` number
| ``configstate:`` The state of configuration change.
| ``attribute type:`` string
| ``CustomFieldsEditInd:`` A flag indicating whether the user can edit custom fields for this object.
| ``attribute type:`` bool
| ``Network:`` The name of the Network View associated.
| ``attribute type:`` string
| ``VirtualNetworkID:`` The internal NetMRI identifier of the Virtual Network to which the management address of this device belongs.
| ``attribute type:`` number
"""
properties = ("id",
"ChangeTime",
"ChangeDetectedTime",
"ChangeID",
"HardwareInd",
"SoftwareInd",
"AdminInd",
"ExternalInd",
"SNMPPollInd",
"SNMPTrapInd",
"SyslogInd",
"ConfigPollInd",
"DeviceID",
"DeviceIPDotted",
"DeviceName",
"DeviceVendor",
"DeviceModel",
"DeviceIPNumeric",
"DeviceType",
"ChangeUser",
"ChangeAuthorizedInd",
"ChangeMethod",
"ChangeType",
"RunningChangedRev",
"SavedChangedRev",
"HideRunning",
"HideSaved",
"DeviceAssurance",
"HideAccess",
"configstate",
"CustomFieldsEditInd",
"Network",
"VirtualNetworkID",
)
|
11454510
|
from amaranth.asserts import *
import warnings
warnings.warn("instead of nmigen.asserts, use amaranth.asserts",
DeprecationWarning, stacklevel=2)
|
11454534
|
import os
import sys
from datetime import datetime
import csv
import json
# Layer code, like parsing_lib, is added to the path by AWS.
# To test locally (e.g. via pytest), we have to modify sys.path.
# pylint: disable=import-error
try:
import parsing_lib
except ImportError:
sys.path.append(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
os.pardir, os.pardir, 'common'))
import parsing_lib
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "geocoding_dictionaries.json")) as json_file:
geocoding_dictionaries = json.load(json_file)
mun_code_coord = geocoding_dictionaries['mun_code_coord']
mun_code_place_name = geocoding_dictionaries['mun_code_place_name']
spanish_country_code_dict = geocoding_dictionaries['spanish_country_code_dict']
def convert_date(raw_date: str, dataserver=True):
"""
    Convert a raw date field into a value interpretable by the dataserver.
    The timestamp is dropped since it is always midnight.
    Set dataserver to False to return a version appropriate for notes.
"""
date = datetime.strptime(raw_date.split(' ')[0], "%d/%m/%Y")
if not dataserver:
return date.strftime("%m/%d/%Y")
return date.strftime("%m/%d/%YZ")
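# Example (sketch): raw dates arrive as "%d/%m/%Y" with a trailing timestamp, so
# convert_date("01/05/2020 0:00:00") == "05/01/2020Z" (May 1st), and
# convert_date("01/05/2020 0:00:00", dataserver=False) == "05/01/2020".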
def convert_gender(raw_gender):
if raw_gender == "M":
return "Male"
if raw_gender == "F":
return "Female"
return None
def get_location(raw_entry):
'''
    Uses a dict mapping keyed on the municipality code shared by the lookup table ('MPIO_CCNCT') and the source dataset ('Código DIVIPOLA municipio').
'''
try:
long, lat = mun_code_coord[raw_entry['Código DIVIPOLA municipio']][1:]
geometry = {'latitude': lat,
'longitude': long}
mun, dep = mun_code_place_name[raw_entry['Código DIVIPOLA municipio']]
place_name = f"{mun}, {dep}, Colombia"
location = {}
location["country"] = "Colombia"
location["geoResolution"] = "Admin2"
location["name"] = place_name
location["geometry"] = geometry
location["administrativeAreaLevel1"] = dep
location["administrativeAreaLevel2"] = mun
return location
except BaseException:
print(raw_entry)
return None
def get_travel_history_location(raw_entry):
country_spanish = raw_entry['Nombre del país']
try:
iso2 = spanish_country_code_dict[country_spanish.lower()]
return parsing_lib.geocode_country(iso2)
except BaseException:
print(country_spanish)
def convert_demographics(entry):
'''
This takes a whole row, and returns Age, Gender and Ethnicity in a dictionary.
Age is given as an int, but the adjacent field, 'Age Measurement Unit', determines what this int represents.
1 = Years; 2 = Months; 3 = Days
'''
ethnicity_map = {'1': 'Indigenous',
'2': 'ROM',
'3': 'Raizal',
'4': 'Palenquero',
'5': 'Black',
'6': 'Other'}
demo = {}
if entry['Edad']:
if str(entry['Unidad de medida de edad']) == '1':
demo["ageRange"] = {
"start": float(entry['Edad']),
"end": float(entry['Edad'])
}
elif str(entry['Unidad de medida de edad']) == '2':
demo["ageRange"] = {
"start": float(entry['Edad']) / 12,
"end": float(entry['Edad']) / 12
}
elif str(entry['Unidad de medida de edad']) == '3':
demo["ageRange"] = {
"start": float(entry['Edad']) / 365,
"end": float(entry['Edad']) / 365
}
if entry['Sexo']:
demo["gender"] = convert_gender(entry['Sexo'])
if entry['Pertenencia étnica']:
ethnicity = ethnicity_map.get(str(entry['Pertenencia étnica']), "")
if entry['Nombre del grupo étnico']:
ethnicity += f", {entry['Nombre del grupo étnico']}"
else:
ethnicity = 'Unknown'
demo["ethnicity"] = ethnicity
return demo or None
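# Example (sketch, hypothetical row): an entry with Edad='6' and
# 'Unidad de medida de edad'='2' (months) yields
# {'ageRange': {'start': 0.5, 'end': 0.5}, ...} since 6 months == 0.5 years.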
def parse_cases(raw_data_file, source_id, source_url):
"""
Parses G.h-format case data from raw API data.
Caveats:
    - Assuming the date confirmed is the date of diagnosis (Fecha diagnostico) rather than
    Fecha de notificación (generally several days earlier). When no date of diagnosis is available,
    the date reported online is used as a proxy.
    - A case can have a date of death while the 'Recuperado' column says recovered. This corresponds to
    patients who died, but not from Covid-19.
    - Notes added include the date reported online and the date that SIVIGILA (national health alert system)
    was notified, as well as whether the case was imported and how patient recovery was confirmed.
    - Tipo recuperación refers to how they decided the patient had recovered: either by 21 days elapsing since
    symptoms, or a negative PCR/antigen test.
    - No dates are given for travel history; the only distinction is between cases of type 'Importado' vs. 'Relacionado'.
    We assume cases listed as importado (imported) have travelled in the last 30 days, and geocode their country of origin.
"""
symptom_map = {'leve': 'Mild',
'moderado': 'Moderate',
'grave': 'Serious'}
with open(raw_data_file, "r") as f:
reader = csv.DictReader(f)
for entry in reader:
location = get_location(entry)
if entry["Fecha de diagnóstico"]:
confirmation_date = convert_date(entry["Fecha de diagnóstico"])
else:
confirmation_date = convert_date(entry["fecha reporte web"])
if location and confirmation_date:
notes = []
case = {
"caseReference": {
"sourceId": source_id,
"sourceEntryId": entry["ID de caso"],
"sourceUrl": source_url
},
"location": location,
"demographics": convert_demographics(entry)
}
if entry["Fecha de diagnóstico"]:
case["events"] = [
{
"name": "confirmed",
"dateRange":
{
"start": confirmation_date,
"end": confirmation_date
}
},
]
else:
case["events"] = [
{
"name": "confirmed",
"dateRange":
{
"start": confirmation_date,
"end": confirmation_date
}
},
]
notes.append(
"No Date of Diagnosis provided, so using Date Reported Online (fecha reporte web) as a proxy. This is normally approx. 1 week later.")
# If patient was symptomatic, mark date of onsetSymptoms,
# otherwise asymptomatic
if entry["Fecha de inicio de síntomas"]:
case["symptoms"] = {
"status": "Symptomatic",
}
case["events"].append({
"name": "onsetSymptoms",
"dateRange": {
"start": convert_date(entry['Fecha de inicio de síntomas']),
"end": convert_date(entry['Fecha de inicio de síntomas']),
}
})
else:
case["symptoms"] = {
"status": "Asymptomatic",
}
# Patient Outcome - If patient died, mark date
if entry["Fecha de muerte"]:
case["events"].append({
"name": "outcome",
"value": "Death",
"dateRange": {
"start": convert_date(entry['Fecha de muerte']),
"end": convert_date(entry['Fecha de muerte']),
}
})
if entry["Recuperado"].lower() != "fallecido":
notes.append(
"Died from something other than Covid-19.")
elif entry["Recuperado"].lower() == "recuperado":
case["events"].append({
"name": "outcome",
"value": "Recovered",
"dateRange": {
"start": convert_date(entry['Fecha de recuperación']),
"end": convert_date(entry['Fecha de recuperación']),
}
})
elif entry['Recuperado'].lower() == 'activo':
notes.append('Case still active')
if entry["Ubicación del caso"].lower() == "hospital":
case["events"].append({
"name": "hospitalAdmission",
"value": "Yes"
})
if entry["Ubicación del caso"].lower() == 'hospital uci':
case["events"].append({
"name": "icuAdmission",
"value": "Yes"
})
if entry["Ubicación del caso"].lower() == 'casa':
notes.append(
"Patient self-isolated and recovered at home.")
# Add travelHistory and notes for imported cases - we currently do not have any travel dates,
# so unknown whether in last 30 days, assuming YES
if entry["Tipo de contagio"].lower() == "importado":
                    if entry['Nombre del país']:
                        country_of_origin = entry['Nombre del país']
                        case["travelHistory"] = {
                            "traveledPrior30Days": True,
                            "travel": [
                                {
                                    "location": get_travel_history_location(entry)
                                }]
                        }
                        notes.append(
                            f"Case is reported as importing the disease into Colombia, and country of origin is {country_of_origin}.")
                    else:
                        notes.append(
                            "Case is reported as importing the disease into Colombia, but country of origin is not specified.")
elif entry["Tipo de contagio"].lower() == 'relacionado':
notes.append("Case was transmitted within Colombia.")
elif entry["Tipo de contagio"].lower() == 'en estudio':
notes.append(
"Case transmission under investigation (currently unknown).")
# Include severity of symptoms
if entry["Estado"].lower() in symptom_map.keys():
notes.append(
f"Symptom severity was {symptom_map.get(entry['Estado'].lower())}")
if entry['fecha reporte web']:
notes.append(
f"Date reported online was {convert_date(entry['fecha reporte web'],dataserver=False)}.")
if entry['Fecha de notificación']:
notes.append(
f"Date reported to SIVIGILA was {convert_date(entry['Fecha de notificación'],dataserver=False)}.")
                if entry['Tipo de recuperación'] == 'PCR':
                    notes.append(
                        "Patient recovery was confirmed by a negative PCR test.")
                elif entry['Tipo de recuperación'] == 'Tiempo':
                    notes.append(
                        "Patient recovery was confirmed by 21 days elapsing with no symptoms.")
if notes:
case["notes"] = ", ".join(notes)
yield case
def event_handler(event):
return parsing_lib.run(event, parse_cases)
if __name__ == "__main__":
with open('input_event.json') as f:
event = json.load(f)
event_handler(event)
|
11454582
|
from config import OptimizerConfig
from lib import tf_util, util
from player.aiplayer import AIPlayer
from time import time, sleep
try:
import _pickle as pickle
except:
import pickle
import glob
def start():
config = OptimizerConfig()
tf_util.update_memory(config.gpu_mem_fraction)
util.set_high_process_priority()
AIPlayer.create_if_nonexistant(config)
models = sorted(glob.glob(config.data.model_location+"*.h5"))
ai = AIPlayer(config.buffer_size, 1, model=models[-1], compile=True)
train(ai, config)
def train(ai, config):
loaded_files = []
x = config.iterations
i = len(glob.glob(config.data.model_location+"*.h5"))
loaded_files, _ = load_games(ai, loaded_files, config)
    while x != 0:
if i > config.iter3:
ai.update_lr(config.learning_rate3)
elif i > config.iter2:
ai.update_lr(config.learning_rate2)
else:
ai.update_lr(config.learning_rate1)
loaded_files, diff = load_games(ai, loaded_files, config)
total_diff = diff
start = time()
print("Iteration %04d"%i)
        end = config.min_new_game_files if i > 0 else config.min_game_file
util.print_progress_bar(0, end, start=start)
        while total_diff < end:
if diff > 0:
total_diff += diff
util.print_progress_bar(total_diff, end, start=start)
sleep(5)
loaded_files, diff = load_games(ai, loaded_files, config)
util.print_progress_bar(end, end, start=start)
print("Training for %d batches on %d samples" % (config.batches_per_iter, len(ai.buffer.buffer)))
start = time()
history = ai.train_batches(config.batch_size, config.batches_per_iter, config.verbose)
for val in history.history.keys():
print("%s: %0.4f" % (val, history.history[val][-1]))
if i % config.save_model_cycles == 0:
ai.save("%smodel_%04d.h5" % (config.data.model_location, i))
file = open("%shist_%04d.pickle" % (config.data.history_location, i), 'wb')
pickle.dump(pickle.dumps(history.history), file)
file.close()
print("Iteration Time: %0.2f" % (time()-start))
x -= 1
i += 1
def load_games(ai, loaded_files, config):
games = sorted(glob.glob(config.data.game_location+"*.pickle"))
new = [game for game in games if game not in loaded_files]
for game in sorted(new):
if not ai.buffer.load(game):
games.remove(game)
return games, len(new)
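# Data layout sketch (paths come from OptimizerConfig): models are saved as
# config.data.model_location + "model_%04d.h5" (see train above), and self-play
# games accumulate as pickle files under config.data.game_location.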
|
11454644
|
import os
import random
import torch
import torch.utils.data as data
import torchvision.transforms as T
from PIL import Image
class LowLightFDataset(data.Dataset):
def __init__(self, root, image_split='images_aug', targets_split='targets', training=True):
self.root = root
self.num_instances = 8
self.img_root = os.path.join(root, image_split)
self.target_root = os.path.join(root, targets_split)
self.training = training
print('----', image_split, targets_split, '----')
self.imgs = list(sorted(os.listdir(self.img_root)))
self.gts = list(sorted(os.listdir(self.target_root)))
names = [img_name.split('_')[0] + '.' + img_name.split('.')[-1] for img_name in self.imgs]
self.imgs = list(
filter(lambda img_name: img_name.split('_')[0] + '.' + img_name.split('.')[-1] in self.gts, self.imgs))
self.gts = list(filter(lambda gt: gt in names, self.gts))
print(len(self.imgs), len(self.gts))
self.preproc = T.Compose(
[T.ToTensor()]
)
self.preproc_gt = T.Compose(
[T.ToTensor()]
)
def __getitem__(self, idx):
fn, ext = self.gts[idx].split('.')
imgs = []
for i in range(self.num_instances):
img_path = os.path.join(self.img_root, f"{fn}_{i}.{ext}")
imgs += [self.preproc(Image.open(img_path).convert("RGB"))]
if self.training:
random.shuffle(imgs)
gt_path = os.path.join(self.target_root, self.gts[idx])
gt = Image.open(gt_path).convert("RGB")
gt = self.preproc_gt(gt)
# print(img_path, gt_path)
return torch.stack(imgs, dim=0), gt, fn
def __len__(self):
return len(self.gts)
class LowLightFDatasetEval(data.Dataset):
def __init__(self, root, targets_split='targets', training=True):
self.root = root
self.num_instances = 1
self.img_root = os.path.join(root, 'images')
self.target_root = os.path.join(root, targets_split)
self.training = training
self.imgs = list(sorted(os.listdir(self.img_root)))
self.gts = list(sorted(os.listdir(self.target_root)))
self.imgs = list(filter(lambda img_name: img_name in self.gts, self.imgs))
self.gts = list(filter(lambda gt: gt in self.imgs, self.gts))
print(len(self.imgs), len(self.gts))
self.preproc = T.Compose(
[T.ToTensor()]
)
self.preproc_gt = T.Compose(
[T.ToTensor()]
)
def __getitem__(self, idx):
fn, ext = self.gts[idx].split('.')
imgs = []
for i in range(self.num_instances):
img_path = os.path.join(self.img_root, f"{fn}.{ext}")
imgs += [self.preproc(Image.open(img_path).convert("RGB"))]
gt_path = os.path.join(self.target_root, self.gts[idx])
gt = Image.open(gt_path).convert("RGB")
gt = self.preproc_gt(gt)
# print(img_path, gt_path)
return torch.stack(imgs, dim=0), gt, fn
def __len__(self):
return len(self.gts)
class LowLightDataset(data.Dataset):
def __init__(self, root, targets_split='targets', color_tuning=False):
self.root = root
self.img_root = os.path.join(root, 'images')
self.target_root = os.path.join(root, targets_split)
self.color_tuning = color_tuning
self.imgs = list(sorted(os.listdir(self.img_root)))
self.gts = list(sorted(os.listdir(self.target_root)))
self.imgs = list(filter(lambda img_name: img_name in self.gts, self.imgs))
self.gts = list(filter(lambda gt: gt in self.imgs, self.gts))
print(len(self.imgs), len(self.gts))
self.preproc = T.Compose(
[T.ToTensor()]
)
self.preproc_gt = T.Compose(
[T.ToTensor()]
)
def __getitem__(self, idx):
fn, ext = self.gts[idx].split('.')
img_path = os.path.join(self.img_root, self.imgs[idx])
img = Image.open(img_path).convert("RGB")
img = self.preproc(img)
gt_path = os.path.join(self.target_root, self.gts[idx])
gt = Image.open(gt_path).convert("RGB")
gt = self.preproc_gt(gt)
if self.color_tuning:
return img, gt, 'a' + self.imgs[idx], 'a' + self.imgs[idx]
else:
return img, gt, fn
def __len__(self):
return len(self.imgs)
class LowLightDatasetReverse(data.Dataset):
def __init__(self, root, targets_split='targets', color_tuning=False):
self.root = root
self.img_root = os.path.join(root, 'images')
self.target_root = os.path.join(root, targets_split)
self.color_tuning = color_tuning
self.imgs = list(sorted(os.listdir(self.img_root)))
self.gts = list(sorted(os.listdir(self.target_root)))
self.imgs = list(filter(lambda img_name: img_name in self.gts, self.imgs))
self.gts = list(filter(lambda gt: gt in self.imgs, self.gts))
print(len(self.imgs), len(self.gts))
self.preproc = T.Compose(
[T.ToTensor()]
)
self.preproc_gt = T.Compose(
[T.ToTensor()]
)
def __getitem__(self, idx):
img_path = os.path.join(self.img_root, self.imgs[idx])
img = Image.open(img_path).convert("RGB")
img = self.preproc(img)
gt_path = os.path.join(self.target_root, self.gts[idx])
gt = Image.open(gt_path).convert("RGB")
gt = self.preproc_gt(gt)
if self.color_tuning:
return gt, img, 'a' + self.imgs[idx], 'a' + self.imgs[idx]
else:
fn, ext = os.path.splitext(self.imgs[idx])
return gt, img, '%03d' % int(fn) + ext
def __len__(self):
return len(self.imgs)
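# Usage sketch (assumes a layout like root/images and root/targets with
# matching file names; the path below is a placeholder):
#
#     from torch.utils.data import DataLoader
#     ds = LowLightDataset('/path/to/data')
#     loader = DataLoader(ds, batch_size=4, shuffle=True)
#     img, gt, fn = ds[0]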
|
11454750
|
import six
from django.db.models import Case, When, Q, F, Sum, CharField, Value
from django.db.models.functions import Coalesce
from django.shortcuts import _get_queryset
from django_pivot.utils import get_column_values, get_field_choices, default_fill
def pivot(queryset, rows, column, data, aggregation=Sum, choices='auto', display_transform=lambda s: s, default=None, row_range=()):
"""
Takes a queryset and pivots it. The result is a table with one record
per unique value in the `row` column, a column for each unique value in the `column` column
and values in the table aggregated by the data column.
:param queryset: a QuerySet, Model, or Manager
:param rows: list of strings, name of columns that will key the rows
:param column: string, name of column that will define columns
:param data: column name or Combinable
:param aggregation: aggregation function to apply to data column
:param display_transform: function that takes a string and returns a string
:param default: default value to pass to the aggregate function when no record is found
:param row_range: iterable with the expected range of rows in the result
:return: ValuesQueryset
"""
values = [rows] if isinstance(rows, six.string_types) else list(rows)
queryset = _get_queryset(queryset)
column_values = get_column_values(queryset, column, choices)
annotations = _get_annotations(column, column_values, data, aggregation, display_transform, default=default)
    for row in list(values):  # iterate over a copy; display aliases are appended to `values` below
row_choices = get_field_choices(queryset, row)
if row_choices:
whens = (When(Q(**{row: value}), then=Value(display_value, output_field=CharField())) for value, display_value in row_choices)
row_display = Case(*whens)
queryset = queryset.annotate(**{'get_' + row + '_display': row_display})
values.append('get_' + row + '_display')
values_list = queryset.values(*values).annotate(**annotations)
if row_range:
attributes = [value[0] for value in column_values]
values_list = default_fill(values_list, values[0], row_range, fill_value=default, fill_attributes=attributes)
return values_list
def _get_annotations(column, column_values, data, aggregation, display_transform=lambda s: s, default=None):
value = data if hasattr(data, 'resolve_expression') else F(data)
return {
display_transform(display_value): Coalesce(aggregation(Case(When(Q(**{column: column_value}), then=value))), default)
for column_value, display_value in column_values
}
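# Usage sketch with a hypothetical ShirtSales model (fields 'region', 'gender',
# 'units'; the model name is illustrative, not part of this module):
#
#     table = pivot(ShirtSales, 'region', 'gender', 'units')
#     # -> one record per region, e.g. {'region': 'North', 'Boy': 132, 'Girl': 118}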
|
11454764
|
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
Vis_REGISTER = {}
class SimpleConvNetwork(nn.Module):
def __init__(self, visual_dim):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(visual_dim[-1], 16, kernel_size=8, stride=4),
nn.ELU(inplace=True),
nn.Conv2d(16, 32, kernel_size=4, stride=2),
nn.ELU(inplace=True),
nn.Flatten()
)
with th.no_grad():
self.output_dim = np.prod(
self.net(th.zeros(1, visual_dim[-1], visual_dim[0], visual_dim[1])).shape[1:])
def forward(self, x):
return self.net(x)
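# Shape sketch: for visual_dim == (84, 84, 4), the 8x8/stride-4 conv maps
# 84 -> 20 and the 4x4/stride-2 conv maps 20 -> 9, so
# output_dim == 32 * 9 * 9 == 2592.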
class NatureConvNetwork(nn.Module):
def __init__(self, visual_dim):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(visual_dim[-1], 32, kernel_size=8, stride=4),
nn.ReLU(inplace=True),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU(inplace=True),
nn.Flatten()
)
with th.no_grad():
self.output_dim = np.prod(
self.net(th.zeros(1, visual_dim[-1], visual_dim[0], visual_dim[1])).shape[1:])
def forward(self, x):
return self.net(x)
class Match3ConvNetwork(nn.Module):
def __init__(self, visual_dim):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(visual_dim[-1], 35, kernel_size=3, stride=3),
nn.ELU(inplace=True),
nn.Conv2d(35, 144, kernel_size=1, stride=1),
nn.ELU(inplace=True),
nn.Flatten()
)
with th.no_grad():
self.output_dim = np.prod(
self.net(th.zeros(1, visual_dim[-1], visual_dim[0], visual_dim[1])).shape[1:])
def forward(self, x):
return self.net(x)
class DeepConvNetwork(nn.Sequential):
def __init__(self,
visual_dim,
out_channels=[16, 32],
kernel_sizes=[[8, 8], [4, 4]],
stride=[[4, 4], [2, 2]],
use_bn=False,
max_pooling=False,
avg_pooling=False,
pool_sizes=[[2, 2], [2, 2]],
pool_strides=[[1, 1], [1, 1]],
):
super().__init__()
conv_layers = len(out_channels)
in_channels = [visual_dim[-1]] + out_channels[:-1]
for i in range(conv_layers):
self.add_module(f'conv2d_{i}', nn.Conv2d(in_channels=in_channels[i],
out_channels=out_channels[i],
kernel_size=kernel_sizes[i],
stride=stride[i]))
self.add_module(f'relu_{i}', nn.ReLU())
if use_bn:
                self.add_module(f'batchnorm2d_{i}', nn.BatchNorm2d(out_channels[i]))
if max_pooling:
self.add_module(f'maxpool2d_{i}', nn.MaxPool2d(kernel_size=pool_sizes[i],
stride=pool_strides[i]))
elif avg_pooling:
self.add_module(f'avgpool2d_{i}', nn.AvgPool2d(kernel_size=pool_sizes[i],
stride=pool_strides[i]))
self.add_module('flatten', nn.Flatten())
with th.no_grad():
self.output_dim = np.prod(
self(th.zeros(1, visual_dim[-1], visual_dim[0], visual_dim[1])).shape[1:])
class ResnetNetwork(nn.Module):
def __init__(self, visual_dim):
super().__init__()
self.out_channels = [16, 32, 32]
in_channels = [visual_dim[-1]] + self.out_channels[:-1]
self.res_blocks = 2
for i in range(len(self.out_channels)):
setattr(self, 'conv' + str(i), nn.Conv2d(
in_channels=in_channels[i], out_channels=self.out_channels[i], kernel_size=[3, 3], stride=(1, 1)))
            # MaxPool2d does not accept padding='same'; padding=1 approximates it for a 3x3 kernel.
            setattr(self, 'pool' + str(i),
                    nn.MaxPool2d(kernel_size=[3, 3], stride=[2, 2], padding=1))
for j in range(self.res_blocks):
setattr(self, 'resblock' + str(i) + 'conv' + str(j), nn.Conv2d(
in_channels=self.out_channels[i], out_channels=self.out_channels[i], kernel_size=[3, 3],
stride=(1, 1), padding='same'))
self.flatten = nn.Flatten()
        with th.no_grad():
            # This class has no self.net attribute; run the forward pass directly.
            self.output_dim = np.prod(
                self(th.zeros(1, visual_dim[-1], visual_dim[0], visual_dim[1])).shape[1:])
def forward(self, x):
"""
-----------------------------------multi conv layer---------------------------------
↓ ----multi residual block------- ↑
↓ ↓ ↑ ↑
x - > conv -> x -> max_pooling -> x(block_x) -> relu -> x -> resnet_conv -> x => x ↘ ↑
↓ + x -> relu -> x -> flatten -> x
--------------residual add----------------↑ ↗
"""
for i in range(len(self.out_channels)):
x = getattr(self, 'conv' + str(i))(x)
block_x = x = getattr(self, 'pool' + str(i))(x)
for j in range(self.res_blocks):
x = F.relu(x)
x = getattr(self, 'resblock' + str(i) + 'conv' + str(j))(x)
x += block_x
x = F.relu(x)
x = self.flatten(x)
return x
Vis_REGISTER['simple'] = SimpleConvNetwork
Vis_REGISTER['nature'] = NatureConvNetwork
Vis_REGISTER['match3'] = Match3ConvNetwork
Vis_REGISTER['deepconv'] = DeepConvNetwork
Vis_REGISTER['resnet'] = ResnetNetwork
|
11454774
|
class Constants:
DATABASE_PATH = "deepluna_db.json"
ALLSCR_MRG = "allscr.mrg"
SCRIPT_TEXT_MRG = "script_text.mrg"
EXPORT_DIRECTORY = "export/"
IMPORT_DIRECTORY = "import/"
LEGACY_IMPORT_DIRECTORY = "update/"
CHARS_PER_LINE = 55
|
11454817
|
from django.http import Http404
from django.views.generic import ListView, DetailView, FormView
from django.utils.translation import ugettext_lazy as _
from rest_framework import generics
from .serializers import SongSerializer
from .models import Song
from .forms import SongFilterForm
class SongList(ListView, FormView):
form_class = SongFilterForm
model = Song
def get(self, request, *args, **kwargs):
form_class = self.get_form_class()
self.form = self.get_form(form_class)
self.object_list = self.get_queryset()
allow_empty = self.get_allow_empty()
if not allow_empty and len(self.object_list) == 0:
raise Http404(_(u"Empty list and '%(class_name)s.allow_empty' is False.")
% {'class_name': self.__class__.__name__})
context = self.get_context_data(object_list=self.object_list, form=self.form)
return self.render_to_response(context)
def get_form_kwargs(self):
kwargs = {
'initial': self.get_initial(),
'prefix': self.get_prefix(),
}
if self.request.method == 'GET':
kwargs.update({
'data': self.request.GET,
})
return kwargs
def get_queryset(self):
queryset = super().get_queryset()
if self.form.is_valid():
artist = self.form.cleaned_data.get("artist")
if artist:
queryset = queryset.filter(artist=artist)
return queryset
class SongDetail(DetailView):
model = Song
class RESTSongList(generics.ListCreateAPIView):
queryset = Song.objects.all()
serializer_class = SongSerializer
def get_view_name(self):
return "Song List"
class RESTSongDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Song.objects.all()
serializer_class = SongSerializer
def get_view_name(self):
return "Song Detail"
|
11454833
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import h5py
import json
import sys
def build_elmo():
token_ph = tf.placeholder(tf.string, [None, None])
len_ph = tf.placeholder(tf.int32, [None])
elmo_module = hub.Module("https://tfhub.dev/google/elmo/2")
lm_embeddings = elmo_module(
inputs={"tokens": token_ph, "sequence_len": len_ph},
signature="tokens", as_dict=True)
word_emb = lm_embeddings["word_emb"]
lm_emb = tf.stack([tf.concat([word_emb, word_emb], -1),
lm_embeddings["lstm_outputs1"],
lm_embeddings["lstm_outputs2"]], -1)
return token_ph, len_ph, lm_emb
def cache_dataset(data_path, session, token_ph, len_ph, lm_emb, out_file):
with open(data_path) as in_file:
for doc_num, line in enumerate(in_file.readlines()):
example = json.loads(line)
sentences = example["sentences"]
max_sentence_length = max(len(s) for s in sentences)
tokens = [[""] * max_sentence_length for _ in sentences]
text_len = np.array([len(s) for s in sentences])
for i, sentence in enumerate(sentences):
for j, word in enumerate(sentence):
tokens[i][j] = word
tokens = np.array(tokens)
tf_lm_emb = session.run(lm_emb, feed_dict={
token_ph: tokens,
len_ph: text_len
})
file_key = example["doc_key"].replace("/", ":")
group = out_file.create_group(file_key)
for i, (e, l) in enumerate(zip(tf_lm_emb, text_len)):
e = e[:l, :, :]
group[str(i)] = e
if doc_num % 10 == 0:
print("Cached {} documents in {}".format(doc_num + 1, data_path))
if __name__ == "__main__":
token_ph, len_ph, lm_emb = build_elmo()
with tf.Session() as session:
session.run(tf.global_variables_initializer())
with h5py.File("elmo_cache.hdf5", "w") as out_file:
for json_filename in sys.argv[1:]:
cache_dataset(json_filename, session, token_ph, len_ph, lm_emb, out_file)
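# Invocation sketch (file names are placeholders): each argument is a JSON-lines
# file whose rows contain "sentences" and "doc_key" fields, e.g.
#     python <this script> train.jsonl dev.jsonl
# Embeddings are written to elmo_cache.hdf5, grouped by doc_key.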
|
11454867
|
import requests
import pandas as pd
import glob
import json
import time
import os
def pull_ridership_by_stop(line_number):
"""
Pull the ridership data from the local files - only get the fields we care
about, sorted, and return the Dataframe.
"""
# allFiles = glob.glob('.' + "/gps_*.csv")
frames = []
for file_ in ['Ridership/WEEKDAY.XLSX','Ridership/LRTWEEKDAY.XLSX']:
df = pd.read_excel(file_, header=0)
df['DAY']='RS_WKDY'
frames.append(df)
#When adding Saturday and Sunday numbers, use this to help
# for file_ in ['Ridership/SATURDAY.XLSX','Ridership/LRTSATURDAY.XLSX']:
# df = pd.read_excel(file_, header=0)
# df['DAY']='RS_SAT'
# frames.append(df)
# for file_ in ['Ridership/SUNDAY.XLSX','Ridership/LRTSUNDAY.XLSX']:
# df = pd.read_excel(file_, header=0)
# df['DAY']='RS_SUN'
# frames.append(df)
df = pd.concat(frames)
df = df[~df['ROUTE_NUMBER'].isin([911,912,913,914])]
df = df.query("DAY=='RS_WKDY'&ROUTE_NUMBER=='%d'" % line_number)
rid_line = df[
[
'STOP_ID','DIRECTION_NAME','TIME_PERIOD','SORT_ORDER','BOARD_ALL',
'ALIGHT_ALL','LOAD_ALL','AVG_SERVICED','TIME_PERIOD_SORT','TRIPS_ALL','TRIPS_GROSS'
]
].sort_values(by=['DIRECTION_NAME','TIME_PERIOD_SORT','SORT_ORDER'])
rid_line['ALIGHT_ALL']= rid_line['ALIGHT_ALL'].round(2)
rid_line['AVG_SERVICED'] = rid_line['AVG_SERVICED'].round(2)
rid_line['BOARD_ALL'] = rid_line['BOARD_ALL'].round(2)
rid_line['LOAD_ALL'] = rid_line['LOAD_ALL'].round(2)
return rid_line
def pull_early_late_by_stop(line_number,SWIFTLY_API_KEY, dateRange, timeRange):
"""
Pulls from the Swiftly APIS to get OTP.
Follow the docs: http://dashboard.goswift.ly/vta/api-guide/docs/otp
"""
line_table = pd.read_csv('line_table.csv')
line_table.rename(columns={"DirNum":"direction_id","DirectionName":"DIRECTION_NAME"},inplace=True)
line_table['direction_id'] = line_table['direction_id'].astype(str)
headers = {'Authorization': SWIFTLY_API_KEY}
payload = {'agency': 'vta', 'route': line_number, 'dateRange': dateRange,'timeRange': timeRange, 'onlyScheduleAdherenceStops':'True'}
url = 'https://api.goswift.ly/otp/by-stop'
r = requests.get(url, headers=headers, params=payload)
try:
swiftly_df = pd.DataFrame(r.json()['data'])
swiftly_df.rename(columns={"stop_id":"STOP_ID"},inplace=True)
swiftly_df = pd.merge(swiftly_df,line_table.query('lineabbr==%s'%line_number)[['direction_id','DIRECTION_NAME']])
swiftly_df['STOP_ID'] = swiftly_df['STOP_ID'].astype(int)
return swiftly_df
except KeyError:
print(r.json())
def stop_frequency_percent(connection, line_number, days_to_consider, date_range):
"""
From parameters, query the MSSQL database to get how often vehicles on a route
and direction get apc data, thus how often they stop and open the doors, then
generate a percentage.
"""
sql = '''
SELECT
DATEPART(month, apc_date_time) as month_of_year,
DATEPART ( day , apc_date_time ) as day_of_month,
datepart(dy, [apc_date_time]) as 'day_of_year',
apc_date_time,
current_route_id,
K.direction_code_id,
dc.[direction_description],
bs_id,
ext_trip_id
FROM
[ACS_13].[dbo].[apc_correlated] K
LEFT JOIN
[ACS_13].[dbo].[direction_codes] dc
on k.direction_code_id = dc.[direction_code_id]
WHERE
(apc_date_time between %s) and
current_route_id = %d and
bs_id != 0
ORDER BY
direction_code_id, bs_id, apc_date_time
''' % (date_range, line_number)
trips_sampled = pd.read_sql(sql,connection).rename(columns={'bs_id':'STOP_ID','direction_description':'DIRECTION_NAME'})
#Only consider certain days of the month
trips_sampled = trips_sampled.loc[trips_sampled['day_of_month'].isin(days_to_consider),]
#Add a time grouping
trips_sampled['TIME_PERIOD'] = trips_sampled['apc_date_time'].apply(TIME_PERIOD)
stops_visited_counts = trips_sampled.groupby([
'current_route_id','DIRECTION_NAME','TIME_PERIOD','STOP_ID'
])['ext_trip_id'].count().reset_index()
stops_visited_counts.rename(columns={'ext_trip_id':'number_of_times_stopped'},inplace=True)
trips_sampled_unique = trips_sampled.groupby([
'current_route_id','DIRECTION_NAME','day_of_year','TIME_PERIOD'
])['ext_trip_id'].nunique().reset_index()
trips_sampled_count = trips_sampled_unique.groupby([
'current_route_id','DIRECTION_NAME','TIME_PERIOD'
])['ext_trip_id'].sum().reset_index()
# stops_visited_counts
trips_sampled_count.rename(columns={'ext_trip_id':'total_trips_sampled'},inplace=True)
return stops_visited_counts, trips_sampled_count
def minutes_of_day(hour, minute):
return hour*60 + minute
def TIME_PERIOD(x):
"""
    x is a timestamp. Break up the minutes and return a time period. This gets used by pandas's apply.
"""
x_min = x.hour * 60 + x.minute
if (x_min >= minutes_of_day(5,30) and x_min < minutes_of_day(9,0)):
return 'AM Peak'
elif (x_min >= minutes_of_day(14,30) and x_min < minutes_of_day(18,30)):
return 'PM Peak'
elif (x_min >= minutes_of_day(22,0) or x_min < minutes_of_day(3,0)):
return 'PM Nite'
elif (x_min >= minutes_of_day(3,0) and x_min < minutes_of_day(5,30)):
return 'AM Early'
elif (x_min >= minutes_of_day(9,0) and x_min < minutes_of_day(14,30)):
return 'Midday'
elif (x_min >= minutes_of_day(18,30) and x_min < minutes_of_day(22,0)):
return 'PM Late'
else:
return 'Neither time zone'
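# Example (sketch): a timestamp at 08:15 maps to 'AM Peak', since
# minutes_of_day(8, 15) == 495 and 330 <= 495 < 540.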
def read_in_dwell_runtime(month = 10, year = 2018):
"""
    This function reads in the scraped Swiftly runtime data from disk and returns a DataFrame.
"""
# Match the 01, 02, data format for months.
if month < 10:
month = '0' + str(month)
else:
month = str(month)
frames = []
for dir_name in ['00-06','06-12','12_18','18_24']:
allFiles = glob.glob('.' + "/swiftly_data/" + dir_name + "/*_" + month + "-*" + str(year) + ".csv")
for file_ in allFiles:
df = pd.read_csv(file_)
frames.append(df)
df = pd.concat(frames, ignore_index=True)
df['time'] = pd.to_datetime(df['actual_date'] + ' ' + df['actual_time'], format='%m-%d-%y %H:%M:%S')
#Clean out spare vehicle.
try:
df = df[df.vehicle_id != 'spare']
df['vehicle_id'] = df['vehicle_id'].astype(int)
except TypeError:
pass
return df
def dwell_runtime(swiftly_source_data_df, line_number, days_to_consider, debug=True):
"""
    Generate three DataFrames: travel time/dwell summaries, stop path lengths, and minimum travel times.
    Takes the Swiftly data, filters down to the route under consideration, renames columns,
    drops the bottom and top 5% of dwell and travel time observations, and returns summary statistics.
"""
df = swiftly_source_data_df
df['day_of_month'] = df['time'].dt.day
df = df.loc[df['day_of_month'].isin(days_to_consider),]
df = df.query("route_id=='%d'" % line_number)
df_stop_path_length = df.groupby(['route_id','direction_id','stop_id'])['stop_path_length_meters'
].max().reset_index().rename(columns={'stop_id':'STOP_ID'})
df.loc[df.index, 'TIME_PERIOD'] = df['time'].apply(TIME_PERIOD)
# Generate percentiles of results for a given route, direction and stop.
df['dwell_rank'] = df.groupby(['route_id','direction_id','stop_id'])['dwell_time_secs'].rank(pct=True)
df['travel_time_rank'] = df.groupby(['route_id','direction_id','stop_id'])['travel_time_secs'].rank(pct=True)
if(debug):
df.to_csv('debug/full_rankings.csv', index=False)
# Only keep .05 to .95 percentile of the data.
df = df.query("travel_time_rank>.05&travel_time_rank<.95|dwell_rank>.05&dwell_rank<.95")
if(debug):
df.to_csv('debug/cut_data_rankings.csv', index=False)
f = {'dwell_time_secs':['mean','std','size']}
df_dwell = df.query("route_id=='%d'&is_departure==True" % line_number).groupby(['route_id','direction_id','TIME_PERIOD','stop_id']).agg(f)
f = {'travel_time_secs':['mean','std','size']}
df_runtime = df.query("route_id=='%d'&is_departure==False" % line_number).groupby(['route_id','direction_id','TIME_PERIOD','stop_id']).agg(f)
df_min_travel_time = df.groupby(['route_id','direction_id','stop_id'
])['travel_time_secs'].min().reset_index().rename(columns={'travel_time_secs':'travel_time_min_secs', 'stop_id':'STOP_ID'})
df_results = pd.merge(df_dwell.reset_index(), df_runtime.reset_index())
# Use the line_table to generate east/west/south/north directions to corresponding 0/1
line_table = pd.read_csv('line_table.csv')
line_table.rename(columns={"DirNum":"direction_id","DirectionName":"DIRECTION_NAME", "lineabbr":"route_id"},inplace=True)
line_table['direction_id'] = line_table['direction_id'].astype(int)
df_results = pd.merge(df_dwell.reset_index(), df_runtime.reset_index(), how='outer')
df_results.rename(columns={'dwell_time_secs':'dwell_time_secs_','travel_time_secs':'travel_time_secs_'},inplace=True)
df_results.columns = [''.join(t) for t in df_results.columns]
df_results.rename(columns={'stop_id':'STOP_ID'},inplace=True)
df_results = df_results.round({'dwell_time_secs_mean': 1, 'dwell_time_secs_std': 1, 'travel_time_secs_mean': 1, 'travel_time_secs_std': 1})
df_results = pd.merge(df_results,line_table, how='left', left_on = ['route_id','direction_id'], right_on = ['route_id','direction_id'])
return df_results, df_stop_path_length, df_min_travel_time
def timepoint_finder(transitfeeds_url = 'http://transitfeeds.com/p/vta/45/20170929/download'):
"""
    Given a GTFS feed on transitfeeds, return the timepoints as a DataFrame (a stop is marked as a timepoint if VTA posted a time for it in the feed).
"""
def gtfs_downloader(url):
import urllib.request
import zipfile
file_name = 'gtfs.zip'
urllib.request.urlretrieve(url, file_name)
with zipfile.ZipFile(file_name,"r") as zip_ref:
zip_ref.extractall("gtfs/")
gtfs_downloader(transitfeeds_url)
routes = pd.read_csv('gtfs/routes.txt')
trips = pd.read_csv('gtfs/trips.txt')
st = pd.read_csv('gtfs/stop_times.txt')
count_df = trips[trips.service_id=='Weekdays'].groupby(['route_id','direction_id','shape_id']).count().reset_index()
top_shapes = count_df.sort_values('service_id',ascending=False).drop_duplicates(['route_id','direction_id']).sort_values(by=['route_id','direction_id'],ascending=True)
trip_set = []
for i,r in top_shapes.iterrows():
shape_id = r['shape_id']
trip_set.extend(trips.query("shape_id=='%s'" %(shape_id))['trip_id'].head(1).values)
trip_subset = trips.loc[trips['trip_id'].isin(trip_set)]
timepoints = pd.merge(trip_subset,st.loc[st['arrival_time'].dropna(axis='index').index,], how='left')
timepoints = timepoints[['route_id','direction_id','stop_id']]
timepoints['timepoint'] = 1
timepoints.rename(columns={'stop_id':'STOP_ID'},inplace=True)
return timepoints
|
11454875
|
import collections
import sys
from pathlib import Path
from tqdm import tqdm
import torch
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from modelzoo import get_model
from experiments.utils.training import evaluate, update
from experiments.utils.logging import Logger
from experiments.addition.data import Addition
def train(cfg: dict):
""" Train a model for multiple epochs with a given configuration. """
global_seed = cfg.get('global_seed')
if global_seed is not None:
torch.manual_seed(global_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
train_data = Addition(sample_count=cfg['num_samples'],
seq_len=cfg.get('seq_length', 100),
max_components=cfg.get('max_components', 2),
min_components=cfg.get('min_components', 2),
max_mass=cfg.get('max_mass', 1.),
seed=cfg.get('data_seed'))
valid_data = Addition(sample_count=cfg['num_samples'],
seq_len=cfg.get('seq_length', 100),
max_components=cfg.get('max_components', 2),
min_components=cfg.get('min_components', 2),
max_mass=cfg.get('max_mass', 1.),
seed=train_data.seed + cfg['num_samples'])
train_loader = DataLoader(train_data, shuffle=True, batch_size=cfg['batch_size'], num_workers=cfg['num_workers'])
valid_loader = DataLoader(valid_data, shuffle=False, batch_size=cfg['batch_size'], num_workers=cfg['num_workers'])
model = get_model(cfg).to(cfg['device'])
loss_func = mse = nn.MSELoss()
optimiser = optim.Adam(model.parameters(), lr=cfg['lr'])
logger = Logger(cfg)
if cfg["log_tensorboard"]:
logger.start_tb()
evaluate(model, mse, train_loader, logger.train())
print(f"Train: {logger.summarise(model):.4f}", end='')
evaluate(model, mse, valid_loader, logger.valid())
print(f" -- Valid: {logger.summarise(model):.4f}")
for epoch in range(1, cfg['num_epochs'] + 1):
with tqdm(train_loader, file=sys.stdout) as pbar:
pbar.set_description(f"Epoch {epoch: 3d}")
update(model=model, loss_func=loss_func, loader=pbar, opt=optimiser, logger=logger.train(), progress=True)
avg_train_err = logger.summarise(model)
train_msg = f"Train: {avg_train_err: .4f}"
evaluate(model=model, loss_func=mse, loader=valid_loader, logger=logger.valid())
avg_valid_err = logger.summarise(model)
valid_msg = f"Valid: {avg_valid_err: .4f}"
print(" -- ".join([train_msg, valid_msg]))
return None, None
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="train on LSTM addition task")
default_config = Path(__file__).absolute().parent / "config.yml"
parser.add_argument("--config", type=Path, default=default_config, help="path to configuration file")
args = parser.parse_args()
from experiments.utils import read_config
cfg = read_config(cfg_path=args.config)
t_errs, v_errs = train(cfg)
|
11454902
|
from .em_rvm import EMRVR, EMRVC
from ._version import __version__
__all__ = ['EMRVR', 'EMRVC', '__version__']
|
11454963
|
import csv
import os
from rdr_service.config import LOCALHOST_DEFAULT_BUCKET_NAME
from rdr_service.api_util import open_cloud_file, list_blobs
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.offline.table_exporter import TableExporter
from rdr_service.participant_enums import make_primary_provider_link_for_name
from tests.helpers.unittest_base import BaseTestCase
class TableExporterTest(BaseTestCase):
def setUp(self):
super(TableExporterTest, self).setUp()
def testDeidentifiedExport_empty(self):
mock_export_sub_folder = 'dir'
mock_bucket = [LOCALHOST_DEFAULT_BUCKET_NAME, LOCALHOST_DEFAULT_BUCKET_NAME + os.sep + mock_export_sub_folder]
self.clear_default_storage()
self.create_mock_buckets(mock_bucket)
TableExporter.export_tables("rdr", ["ppi_participant_view"], mock_export_sub_folder, deidentify=True)
blobs = list(list_blobs(LOCALHOST_DEFAULT_BUCKET_NAME))
self.assertEqual(len(blobs), 1)
blob = blobs[0]
self.assertEqual(blob.name, 'dir/ppi_participant_view.csv')
def testDeidentifiedExport_participantIds(self):
mock_export_sub_folder = 'dir'
mock_bucket = [LOCALHOST_DEFAULT_BUCKET_NAME, LOCALHOST_DEFAULT_BUCKET_NAME + os.sep + mock_export_sub_folder]
self.clear_default_storage()
self.create_mock_buckets(mock_bucket)
p1 = self.data_generator._participant_with_defaults(
participantId=1, version=2, biobankId=2, providerLink=make_primary_provider_link_for_name("PITT")
)
ParticipantDao().insert(p1)
p2 = self.data_generator._participant_with_defaults(
participantId=2, version=3, biobankId=3, providerLink=make_primary_provider_link_for_name("PITT")
)
ParticipantDao().insert(p2)
TableExporter.export_tables("rdr", ["ppi_participant_view"], mock_export_sub_folder, deidentify=True)
csv_path = 'dir/ppi_participant_view.csv'
with open_cloud_file("/%s/%s" % (LOCALHOST_DEFAULT_BUCKET_NAME, csv_path)) as f:
reader = csv.reader(f)
rows = list(reader)[1:]
self.assertEqual(2, len(rows))
pmi_ids = set([p1.participantId, p2.participantId])
obf_ids = set([row[0] for row in rows])
self.assertFalse(pmi_ids.intersection(obf_ids), "should be no overlap between pmi_ids and obfuscated IDs")
self.assertEqual(2, len(obf_ids))
|
11454964
|
import ctypes
import pandas
lib = ctypes.CDLL('./numpypandas.dll')
increase = lib.increase
increase.argtypes = [
ctypes.POINTER(ctypes.c_longlong),
ctypes.c_longlong,
ctypes.c_longlong,
]
people = pandas.DataFrame({
'name': ['Alice', 'Bob', 'Charlie'],
'age': [20, 30, 40],
})
# First we check the type.
ages = people.age
if str(ages.dtypes) != 'int64':
raise TypeError(f'Expected type int64, got {ages.dtypes}')
values = ages.values  # numpy.ndarray view over the column's underlying buffer
ptr = values.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
print('Before')
print(people)
print('After')
increase(ptr, len(people), 5)
print(people)
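# For reference, a C signature the DLL is assumed to export might look like:
#     void increase(long long *values, long long length, long long delta);
# (hypothetical sketch; the numpypandas.dll source is not shown here)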
|
11454969
|
import datetime
import unittest
from localstack.utils.aws import aws_stack
DEFAULT_TASK_LIST = {"name": "default"}
class TestSwf(unittest.TestCase):
def setUp(self):
self.swf_client = aws_stack.create_external_boto_client("swf")
self.swf_unique_id = datetime.datetime.now().isoformat()
self.swf_version = "1.0"
self.workflow_domain_name = "unit-test-swf-domain-{}".format(self.swf_unique_id)
self.workflow_type_name = "unit-test-swf-workflow-{}".format(self.swf_unique_id)
self.workflow_activity_name = "unit-test-swf-activity-{}".format(self.swf_unique_id)
self.swf_client.register_domain(
name=self.workflow_domain_name, workflowExecutionRetentionPeriodInDays="1"
)
def test_run_workflow(self):
self.given_workflow()
self.when_workflow_is_started()
self.then_workflow_components_execute()
self.then_workflow_history_has_expected_events()
def given_workflow(self):
self.swf_client.register_workflow_type(
domain=self.workflow_domain_name,
name=self.workflow_type_name,
version=self.swf_version,
defaultExecutionStartToCloseTimeout="500",
defaultTaskStartToCloseTimeout="300",
defaultTaskList=DEFAULT_TASK_LIST,
defaultChildPolicy="TERMINATE",
)
workflow_types = self.swf_client.list_workflow_types(
domain=self.workflow_domain_name, registrationStatus="REGISTERED"
)
self.assertIn(
self.workflow_type_name,
map(
lambda workflow_type: workflow_type["workflowType"]["name"],
workflow_types["typeInfos"],
),
)
self.swf_client.register_activity_type(
domain=self.workflow_domain_name,
name=self.workflow_activity_name,
version=self.swf_version,
defaultTaskList=DEFAULT_TASK_LIST,
defaultTaskStartToCloseTimeout="NONE",
defaultTaskScheduleToStartTimeout="NONE",
defaultTaskScheduleToCloseTimeout="NONE",
defaultTaskHeartbeatTimeout="100",
)
def when_workflow_is_started(self):
self.workflow_execution = self.swf_client.start_workflow_execution(
domain=self.workflow_domain_name,
workflowId=self.swf_unique_id,
workflowType={"name": self.workflow_type_name, "version": self.swf_version},
)
def then_workflow_components_execute(self):
decision_task = self.swf_client.poll_for_decision_task(
domain=self.workflow_domain_name, taskList=DEFAULT_TASK_LIST
)
self.swf_client.respond_decision_task_completed(
taskToken=decision_task["taskToken"],
decisions=[
{
"decisionType": "ScheduleActivityTask",
"scheduleActivityTaskDecisionAttributes": {
"activityType": {
"name": self.workflow_activity_name,
"version": self.swf_version,
},
"activityId": "10",
},
}
],
)
activity_task = self.swf_client.poll_for_activity_task(
domain=self.workflow_domain_name, taskList=DEFAULT_TASK_LIST
)
self.swf_client.respond_activity_task_completed(
taskToken=activity_task["taskToken"], result="activity success"
)
decision_task = self.swf_client.poll_for_decision_task(
domain=self.workflow_domain_name, taskList=DEFAULT_TASK_LIST
)
self.swf_client.respond_decision_task_completed(
taskToken=decision_task["taskToken"],
decisions=[
{
"decisionType": "CompleteWorkflowExecution",
"completeWorkflowExecutionDecisionAttributes": {"result": "workflow success"},
}
],
)
def then_workflow_history_has_expected_events(self):
history = self.swf_client.get_workflow_execution_history(
domain=self.workflow_domain_name,
execution={
"workflowId": self.swf_unique_id,
"runId": self.workflow_execution["runId"],
},
)
        events = list(map(lambda event: event["eventType"], history["events"]))
for event_type in [
"WorkflowExecutionStarted",
"DecisionTaskCompleted",
"ActivityTaskCompleted",
"WorkflowExecutionCompleted",
]:
self.assertIn(event_type, events)
|
11454987
|
import click
import yaml
from . import driver
from ckan_cloud_operator import logs
@click.group()
def jenkins():
"""Interact with a Jenkins server"""
pass
@jenkins.command()
@click.argument('JENKINS_USER')
@click.argument('JENKINS_TOKEN')
@click.argument('JENKINS_URL')
@click.argument('POST_JSON_DATA', required=False)
def curl(jenkins_user, jenkins_token, jenkins_url, post_json_data):
logs.print_yaml_dump(driver.curl(jenkins_user, jenkins_token, jenkins_url, post_json_data))
logs.exit_great_success(quiet=True)
|
11454998
|
import torch
from .utils import *
# === Import model-related objects ===
from comvex.perceiver import Perceiver
# === Instantiate your Model ===
# - For single model
model = Perceiver(
data_shape=[3, 224, 224],
cross_heads=1,
num_latent_tokens=32,
dim=128,
heads=4,
layers_indice=[0] + [1]*7,
num_latent_transformers_in_layers=[6]*2,
num_bands=64,
resolution=224,
frequency_base=2,
pre_norm=True,
ff_dim=None,
ff_expand_scale=4,
ff_dropout=0.0,
attention_dropout=0.0,
cross_kv_dim=None,
head_dim=None
)
# === Settings ===
# - Required:
input_shape = (1, 3, 224, 224)
expected_shape = (1, 128)
# - Optional:
# === Test Cases ===
# Default test for the single model case
def test_forward():
model.eval()
x = torch.randn(input_shape)
out = model(x)
assert_output_shape_wrong(out, expected_shape)
assert_output_has_nan(out)
|
11455020
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("CSC HLT DQM")
#-------------------------------------------------
# DQM Module Configuration
#-------------------------------------------------
process.load("DQM.CSCMonitorModule.csc_hlt_dqm_sourceclient_cfi")
#----------------------------
# Event Source
#-----------------------------
#process.load("DQM.Integration.test.inputsource_playback_cfi")
maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("EventStreamHttpReader",
sourceURL = cms.string('http://localhost:50082/urn:xdaq-application:lid=29'),
consumerPriority = cms.untracked.string('normal'),
max_event_size = cms.int32(7000000),
consumerName = cms.untracked.string('Playback Source'),
max_queue_depth = cms.int32(5),
maxEventRequestRate = cms.untracked.double(12.0),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('*')
),
headerRetryInterval = cms.untracked.int32(3)
)
process.EventStreamHttpReader.consumerName = 'CSC HLT DQM Consumer'
#process.EventStreamHttpReader.sourceURL = "http://localhost:50082/urn:xdaq-application:lid=29"
#----------------------------
# DQM Environment
#-----------------------------
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")
#----------------------------
# DQM Playback Environment
#-----------------------------
process.load("DQM.Integration.test.environment_playback_cfi")
process.dqmEnv.subSystemFolder = "CSC"
process.DQM.collectorHost = 'pccmsdqm02.cern.ch'
#process.DQM.collectorHost = 'localhost'
process.dqmSaver.dirName = '.'
#-------------------------------------------------
# Global Tag
#-------------------------------------------------
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
#process.GlobalTag.connect = "sqlite_file:/nfshome0/malgeri/public/globtag/CRZT210_V1H.db"
#process.GlobalTag.connect = "frontier://FrontierDev/CMS_COND_CSC"
process.GlobalTag.globaltag = "CRZT210_V1H::All"
process.es_prefer_GlobalTag = cms.ESPrefer('PoolDBESSource','GlobalTag')
#--------------------------
# Message Logger
#--------------------------
process.MessageLogger = cms.Service("MessageLogger",
suppressInfo = cms.untracked.vstring('source'),
suppressDebug = cms.untracked.vstring('source'),
suppressWarning = cms.untracked.vstring('source'),
cout = cms.untracked.PSet(
threshold = cms.untracked.string('INFO'),
WARNING = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
noLineBreaks = cms.untracked.bool(False)
),
detailedInfo = cms.untracked.PSet(
threshold = cms.untracked.string('INFO')
),
critical = cms.untracked.PSet(
threshold = cms.untracked.string('ERROR')
),
debug = cms.untracked.PSet(
threshold = cms.untracked.string('DEBUG')
),
debugModules = cms.untracked.vstring('CSCHLTMonitormodule'),
destinations = cms.untracked.vstring(
# 'debug',
# 'detailedInfo',
# 'critical',
# 'cout'
)
)
#--------------------------
# Sequences
#--------------------------
process.p = cms.Path(process.cscDQMEvF+process.dqmEnv+process.dqmSaver)
|
11455037
|
from poop.hfdp.decorator.pizza.pizza import Pizza
class ThincrustPizza(Pizza):
def __init__(self) -> None:
self.description = "Thin crust pizza, with tomato sauce"
def cost(self) -> float:
return 7.99
|
11455067
|
from setuptools import setup, find_packages
import sys
if sys.version_info < (3,):
sys.exit("Sorry, Python3 is required.")
with open("README.md", encoding="utf8") as f:
readme = f.read()
with open('requirements.txt') as f:
install_reqs = f.read().splitlines()
setup(
name="nlpaug",
version="1.1.10",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/makcedward/nlpaug",
license="MIT",
description="Natural language processing augmentation library for deep neural networks",
long_description=readme,
long_description_content_type="text/markdown",
packages=find_packages(exclude="test"),
include_package_data=True,
install_requires=install_reqs,
keywords=[
"deep learning", "neural network", "machine learning",
"nlp", "natural language processing", "text", "audio", "spectrogram",
"augmentation", "adversarial attack", "ai", "ml"]
)
|
11455071
|
import argparse
import os
import numpy as np
import kitti_util
import imageio
def project_disp_to_depth(calib, disp, max_high, baseline=0.54):
disp[disp < 0] = 0
mask = disp > 0
depth = calib.f_u * baseline / (disp + 1. - mask)
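    # standard stereo relation: depth = f_u * baseline / disparity; the
    # "+ 1. - mask" term only prevents division by zero where disparity == 0
    # (those pixels are filtered out below via the mask)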
rows, cols = depth.shape
c, r = np.meshgrid(np.arange(cols), np.arange(rows))
points = np.stack([c, r, depth])
points = points.reshape((3, -1))
points = points.T
points = points[mask.reshape(-1)]
cloud = calib.project_image_to_velo(points)
valid = (cloud[:, 0] >= 0) & (cloud[:, 2] < max_high)
return cloud[valid]
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate Lidar')
parser.add_argument('--calib_dir', type=str,
default='~/Kitti/object/training/calib')
parser.add_argument('--disparity_dir', type=str,
default='~/Kitti/object/training/predicted_disparity')
parser.add_argument('--save_dir', type=str,
default='~/Kitti/object/training/predicted_velodyne')
parser.add_argument('--max_high', type=int, default=1)
args = parser.parse_args()
assert os.path.isdir(args.disparity_dir)
assert os.path.isdir(args.calib_dir)
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
disps = [x for x in os.listdir(args.disparity_dir) if x[-3:] == 'png']
disps = sorted(disps)
for fn in disps:
predix = fn[:-4]
calib_file = '{}/{}.txt'.format(args.calib_dir, predix)
calib = kitti_util.Calibration(calib_file)
disp_map = imageio.imread(args.disparity_dir + '/' + fn) / 256.
lidar = project_disp_to_depth(calib, disp_map, args.max_high)
        # pad 1 in the intensity dimension
lidar = np.concatenate([lidar, np.ones((lidar.shape[0], 1))], 1)
lidar = lidar.astype(np.float32)
lidar.tofile('{}/{}.bin'.format(args.save_dir, predix))
print('Finish Depth {}'.format(predix))
|
11455085
|
import os
import tempfile
from unittest import mock, TestCase
import gdal2tiles
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class OptionParserInputOutputTest(TestCase):
def test_vanilla_input_output(self):
_, input_file = tempfile.mkstemp()
output_folder = tempfile.mkdtemp()
parsed_input, parsed_output, options = gdal2tiles.process_args([input_file, output_folder])
self.assertEqual(parsed_input, input_file)
self.assertEqual(parsed_output, output_folder)
self.assertNotEqual(options, {})
def test_output_folder_is_the_input_file_folder_when_none_passed(self):
_, input_file = tempfile.mkstemp()
_, parsed_output, _ = gdal2tiles.process_args([input_file])
self.assertEqual(parsed_output, os.path.basename(input_file))
def _asserts_exits_with_code_2(self, params):
with self.assertRaises(SystemExit) as cm:
gdal2tiles.process_args(params)
e = cm.exception
self.assertEqual(str(e), '2')
def test_exits_when_0_args_passed(self):
self._asserts_exits_with_code_2([])
def test_exits_when_more_than_2_free_parameters(self):
self._asserts_exits_with_code_2(['input1.tiff', 'input2.tiff', 'output_folder'])
def test_exits_when_input_file_does_not_exist(self):
self._asserts_exits_with_code_2(['foobar.tiff'])
def test_exits_when_first_param_is_not_a_file(self):
folder = tempfile.gettempdir()
self._asserts_exits_with_code_2([folder])
# pylint:disable=E1101
class OptionParserPostProcessingTest(TestCase):
def setUp(self):
self.DEFAULT_OPTIONS = {
'verbose': True,
'resampling': 'near',
'title': '',
'url': '',
}
self.DEFAULT_ATTRDICT_OPTIONS = AttrDict(self.DEFAULT_OPTIONS)
def _setup_gdal_patch(self, mock_gdal):
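        # give the mocked gdal module attributes a recent GDAL build exposes;
        # individual tests delete them to emulate older GDAL versions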
mock_gdal.TermProgress_nocb = True
mock_gdal.RegenerateOverview = True
mock_gdal.GetCacheMax = lambda: 1024 * 1024
return mock_gdal
def test_title_is_untouched_if_set(self):
title = "fizzbuzz"
self.DEFAULT_ATTRDICT_OPTIONS['title'] = title
options = gdal2tiles.options_post_processing(
self.DEFAULT_ATTRDICT_OPTIONS, "bar.tiff", "baz")
self.assertEqual(options.title, title)
def test_title_default_to_input_filename_if_not_set(self):
input_file = "foo/bar/fizz/buzz.tiff"
options = gdal2tiles.options_post_processing(
self.DEFAULT_ATTRDICT_OPTIONS, input_file, "baz")
self.assertEqual(options.title, os.path.basename(input_file))
def test_url_stays_empty_if_not_passed(self):
options = gdal2tiles.options_post_processing(
self.DEFAULT_ATTRDICT_OPTIONS, "foo.tiff", "baz")
self.assertEqual(options.url, "")
def test_url_ends_with_the_output_folder_last_component(self):
output_folder = "foo/bar/fizz"
url = "www.mysite.com/storage"
self.DEFAULT_ATTRDICT_OPTIONS['url'] = url
options = gdal2tiles.options_post_processing(
self.DEFAULT_ATTRDICT_OPTIONS, "foo.tiff", output_folder)
self.assertEqual(options.url, url + "/fizz/")
# With already present trailing slashes
output_folder = "foo/bar/fizz/"
url = "www.mysite.com/storage/"
self.DEFAULT_ATTRDICT_OPTIONS['url'] = url
options = gdal2tiles.options_post_processing(
self.DEFAULT_ATTRDICT_OPTIONS, "foo.tiff", output_folder)
self.assertEqual(options.url, url + "fizz/")
@mock.patch('gdal2tiles.gdal', spec=AttrDict())
def test_average_resampling_supported_with_latest_gdal(self, mock_gdal):
self._setup_gdal_patch(mock_gdal)
self.DEFAULT_ATTRDICT_OPTIONS['resampling'] = "average"
gdal2tiles.options_post_processing(self.DEFAULT_ATTRDICT_OPTIONS, "foo.tiff", "/bar/")
# No error means it worked as expected
@mock.patch('gdal2tiles.gdal', spec=AttrDict())
def test_average_resampling_not_supported_in_old_gdal(self, mock_gdal):
mock_gdal = self._setup_gdal_patch(mock_gdal)
del mock_gdal.RegenerateOverview
self.DEFAULT_ATTRDICT_OPTIONS['resampling'] = "average"
with self.assertRaises(SystemExit):
gdal2tiles.options_post_processing(self.DEFAULT_ATTRDICT_OPTIONS, "foo.tiff", "/bar/")
def test_antialias_resampling_supported_with_numpy(self):
gdal2tiles.numpy = True
self.DEFAULT_ATTRDICT_OPTIONS['resampling'] = "antialias"
gdal2tiles.options_post_processing(self.DEFAULT_ATTRDICT_OPTIONS, "foo.tiff", "/bar/")
# No error means it worked as expected
def test_antialias_resampling_not_supported_wout_numpy(self):
if hasattr(gdal2tiles, "numpy"):
del gdal2tiles.numpy
self.DEFAULT_ATTRDICT_OPTIONS['resampling'] = "antialias"
with self.assertRaises(SystemExit):
gdal2tiles.options_post_processing(self.DEFAULT_ATTRDICT_OPTIONS, "foo.tiff", "/bar/")
def test_zoom_option_not_specified(self):
self.DEFAULT_ATTRDICT_OPTIONS["zoom"] = None
options = gdal2tiles.options_post_processing(self.DEFAULT_ATTRDICT_OPTIONS, "foo.tiff", "baz")
self.assertEqual(options.zoom, [None, None])
def test_zoom_option_single_level(self):
self.DEFAULT_ATTRDICT_OPTIONS["zoom"] = "10"
options = gdal2tiles.options_post_processing(self.DEFAULT_ATTRDICT_OPTIONS, "foo.tiff", "baz")
self.assertEqual(options.zoom, [10, 10])
def test_zoom_option_two_levels(self):
self.DEFAULT_ATTRDICT_OPTIONS["zoom"] = '14-24'
options = gdal2tiles.options_post_processing(self.DEFAULT_ATTRDICT_OPTIONS, "foo.tiff", "baz")
self.assertEqual(options.zoom, [14, 24])
def test_zoom_option_two_levels_automatic_max(self):
self.DEFAULT_ATTRDICT_OPTIONS["zoom"] = '14-'
options = gdal2tiles.options_post_processing(self.DEFAULT_ATTRDICT_OPTIONS, "foo.tiff", "baz")
self.assertEqual(options.zoom, [14, None])
|
11455156
|
import re
import sys
import traceback
from typing import NoReturn
import pytest
from .._util import (
bytesify,
LocalProtocolError,
ProtocolError,
RemoteProtocolError,
Sentinel,
validate,
)
def test_ProtocolError() -> None:
with pytest.raises(TypeError):
ProtocolError("abstract base class")
def test_LocalProtocolError() -> None:
try:
raise LocalProtocolError("foo")
except LocalProtocolError as e:
assert str(e) == "foo"
assert e.error_status_hint == 400
try:
raise LocalProtocolError("foo", error_status_hint=418)
except LocalProtocolError as e:
assert str(e) == "foo"
assert e.error_status_hint == 418
def thunk() -> NoReturn:
raise LocalProtocolError("a", error_status_hint=420)
try:
try:
thunk()
except LocalProtocolError as exc1:
orig_traceback = "".join(traceback.format_tb(sys.exc_info()[2]))
exc1._reraise_as_remote_protocol_error()
except RemoteProtocolError as exc2:
assert type(exc2) is RemoteProtocolError
assert exc2.args == ("a",)
assert exc2.error_status_hint == 420
new_traceback = "".join(traceback.format_tb(sys.exc_info()[2]))
assert new_traceback.endswith(orig_traceback)
def test_validate() -> None:
my_re = re.compile(br"(?P<group1>[0-9]+)\.(?P<group2>[0-9]+)")
with pytest.raises(LocalProtocolError):
validate(my_re, b"0.")
groups = validate(my_re, b"0.1")
assert groups == {"group1": b"0", "group2": b"1"}
# successful partial matches are an error - must match whole string
with pytest.raises(LocalProtocolError):
validate(my_re, b"0.1xx")
with pytest.raises(LocalProtocolError):
validate(my_re, b"0.1\n")
def test_validate_formatting() -> None:
my_re = re.compile(br"foo")
with pytest.raises(LocalProtocolError) as excinfo:
validate(my_re, b"", "oops")
assert "oops" in str(excinfo.value)
with pytest.raises(LocalProtocolError) as excinfo:
validate(my_re, b"", "oops {}")
assert "oops {}" in str(excinfo.value)
with pytest.raises(LocalProtocolError) as excinfo:
validate(my_re, b"", "oops {} xx", 10)
assert "oops 10 xx" in str(excinfo.value)
def test_make_sentinel() -> None:
class S(Sentinel, metaclass=Sentinel):
pass
assert repr(S) == "S"
assert S == S
assert type(S).__name__ == "S"
assert S in {S}
assert type(S) is S
class S2(Sentinel, metaclass=Sentinel):
pass
assert repr(S2) == "S2"
assert S != S2
assert S not in {S2}
assert type(S) is not type(S2)
def test_bytesify() -> None:
assert bytesify(b"123") == b"123"
assert bytesify(bytearray(b"123")) == b"123"
assert bytesify("123") == b"123"
with pytest.raises(UnicodeEncodeError):
bytesify("\u1234")
with pytest.raises(TypeError):
bytesify(10)
|
11455179
|
import time
import yaml
from pathlib import Path
import matplotlib
import numpy as np
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import torch
import torch.utils.data
from torch.utils import tensorboard
from liegroups.torch import SO3
from deeplio import datasets as ds
from deeplio.common import spatial
from deeplio.models import nets
from deeplio.models.misc import DataCombiCreater
from deeplio.models.worker import Worker, AverageMeter, ProgressMeter, worker_init_fn
from deeplio.losses import get_loss_function
class Tester(Worker):
ACTION = "test"
def __init__(self, args, cfg):
super(Tester, self).__init__(args, cfg)
args = self.args
# if self.batch_size != 1:
# self.logger.info("batch size in the testing mode should be set to one.")
# self.logger.info("setting batch size (batch-size = 1).")
# self.batch_size = 1
if self.seq_size != 1:
self.logger.info("setting sequence size (s=1)")
raise ValueError("Sequence size mus tbe equal 1 in test mode.")
# create the folder for saving training checkpoints
self.checkpoint_dir = self.out_dir
Path(self.checkpoint_dir).mkdir(parents=True, exist_ok=True)
        # prepare dataset and dataloaders
transform = None
self.model = nets.get_model(input_shape=(self.n_channels, self.im_height_model, self.im_width_model),
cfg=self.cfg, device=self.device)
self.criterion = get_loss_function(self.cfg, args.device)
self.has_lidar = True if self.model.lidar_feat_net is not None else False
self.has_imu = True if self.model.imu_feat_net is not None else False
self.test_dataset = ds.Kitti(config=self.cfg, transform=transform, ds_type='test',
has_imu=self.has_imu, has_lidar=self.has_lidar)
self.test_dataloader = torch.utils.data.DataLoader(self.test_dataset, batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=False,
worker_init_fn = worker_init_fn,
collate_fn = ds.deeplio_collate)
self.data_permuter = DataCombiCreater(combinations=self.combinations,
device=self.device)
self.tensor_writer = tensorboard.SummaryWriter(log_dir=self.runs_dir)
# debugging and visualizing
self.logger.print("System Training Configurations:")
self.logger.print("args: {}".
format(self.args))
self.logger.print(yaml.dump(self.cfg))
self.logger.print(self.test_dataset)
def run(self):
self.is_running = True
self.test()
self.logger.info("Testing done!")
self.close()
def test(self):
writer = self.tensor_writer
model = self.model
batch_time = AverageMeter('Time', ':6.3f')
inference_time = AverageMeter('Inf-Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
progress = ProgressMeter(
self.logger,
len(self.test_dataloader),
[batch_time, inference_time, losses],
prefix='Test: ')
seq_names = []
last_seq = None
curr_seq = None
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for idx, data in enumerate(self.test_dataloader):
# check if we can run or are we stopped
if not self.is_running:
return 0
# prepare data
self.data_permuter(data)
imgs = self.data_permuter.res_imgs
normals = self.data_permuter.res_normals
imus = self.data_permuter.res_imu
gts_f2f = self.data_permuter.res_gt_f2f
gts_f2g = self.data_permuter.res_gt_f2g
gts_global = self.data_permuter.res_gt_global
if torch.isnan(gts_f2f).any() or torch.isinf(gts_f2f).any():
raise ValueError("gt-f2f:\n{}".format(gts_f2f))
if torch.isnan(gts_f2g).any() or torch.isinf(gts_f2g).any():
raise ValueError("gt-f2g:\n{}".format(gts_f2g))
                # prepare ground-truth translational and rotational parts
gt_f2f_t = gts_f2f[:, :, 0:3]
gt_f2f_w = gts_f2f[:, :, 3:]
gt_f2g_p = gts_f2g[:, :, 0:3]
gt_f2g_q = gts_f2g[:, :, 3:7]
# compute model predictions and loss
start_inference = time.time()
pred_f2f_t, pred_f2f_w = self.model([[imgs, normals], imus])
inference_time.update(time.time() - start_inference)
pred_f2g_p, pred_f2g_q = self.se3_to_SE3(pred_f2f_t, pred_f2f_w)
loss = self.criterion(pred_f2f_t, pred_f2f_w,
pred_f2g_p, pred_f2g_q,
gt_f2f_t, gt_f2f_w,
gt_f2g_p, gt_f2g_q)
# measure accuracy and record loss
losses.update(loss.detach().item(), len(pred_f2f_t))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
batch_size = len(data['metas'])
# get meta information for saving the odom. results
for b in range(batch_size):
meta = data['metas'][b]
date, drive = meta['date'][0], meta['drive'][0]
velo_ts = meta['velo-timestamps']
gt_global = data['gts'][b].cpu().numpy() # gts_global[0].cpu().numpy()
seq_name = "{}_{}".format(date, drive)
if seq_name not in seq_names:
if last_seq is not None:
last_seq.write_to_file()
curr_seq = OdomSeqRes(date, drive, output_dir=self.out_dir)
T_glob = np.identity(4)
T_glob[:3, 3] = gt_global[0, 0:3] # t
T_glob[:3, :3] = gt_global[0, 3:12].reshape(3, 3) # R
curr_seq.add_local_prediction(velo_ts[0], 0., T_glob, T_glob)
# add the file name and file-pointer to the list
seq_names.append(seq_name)
losses.reset()
# global ground truth pose
T_glob = np.identity(4)
T_glob[:3, 3] = gt_global[1, 0:3] # t
T_glob[:3, :3] = gt_global[1, 3:12].reshape(3, 3) # R
gt_t = gt_f2f_t[b].detach().cpu().squeeze()
gt_w = gt_f2f_w[b].detach().cpu().squeeze()
pred_f2f_t_b = pred_f2f_t[b].detach().cpu().squeeze()
pred_f2f_w_b = pred_f2f_w[b].detach().cpu().squeeze()
#if self.has_imu and not np.all(data['valids']):
# pred_f2f_t_b = gt_t
# pred_f2f_w_b = gt_w
T_local = np.identity(4)
if self.args.param == 'xq':
T_local[:3, 3] = pred_f2f_t_b.numpy()
T_local[:3, :3] = SO3.exp(pred_f2f_w_b).as_matrix().numpy() # spatial.quaternion_to_rotation_matrix(pred_f2f_r).numpy()
elif self.args.param == 'x':
T_local[:3, 3] = pred_f2f_t_b.numpy()
T_local[:3, :3] = SO3.exp(gt_w).as_matrix().numpy() # spatial.quaternion_to_rotation_matrix(gt_q).numpy()
elif self.args.param == 'q':
T_local[:3, 3] = gt_t.numpy()
T_local[:3, :3] = SO3.exp(pred_f2f_w_b).as_matrix().numpy()
else:
T_local[:3, 3] = gt_t.numpy()
T_local[:3, :3] = spatial.quaternion_to_rotation_matrix(gt_w).numpy()
curr_seq.add_local_prediction(velo_ts[1], losses.avg, T_local, T_glob)
last_seq = curr_seq
if idx % self.args.print_freq == 0:
progress.display(idx)
# update tensorboard
step_val = idx
                    self.tensor_writer.add_scalar("Loss test", losses.avg, step_val)
self.tensor_writer.flush()
if curr_seq is not None:
curr_seq.write_to_file()
def se3_to_SE3(self, f2f_x, f2f_r):
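        # compose frame-to-frame motions into frame-to-global poses:
        # t_prev <- R_prev @ t_cur + t_prev and R_prev <- R_prev @ R_cur,
        # with each rotation recovered from its so(3) vector via SO3.exp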
batch_size, seq_size, _ = f2f_x.shape
f2g_q = torch.zeros((batch_size, seq_size, 4), dtype=f2f_x.dtype, device=f2f_x.device)
f2g_x = torch.zeros((batch_size, seq_size, 3), dtype=f2f_x.dtype, device=f2f_x.device)
for b in range(batch_size):
R_prev = torch.zeros((3, 3), dtype=f2f_x.dtype, device=f2f_x.device)
R_prev[:] = torch.eye(3, dtype=f2f_x.dtype, device=f2f_x.device)
t_prev = torch.zeros((3), dtype=f2f_x.dtype, device=f2f_x.device)
for s in range(seq_size):
t_cur = f2f_x[b, s]
#q_cur = spatial.euler_to_rotation_matrix (f2f_r[b, s])
w_cur = f2f_r[b, s]
R_cur = SO3.exp(w_cur).as_matrix() # spatial.quaternion_to_rotation_matrix(q_cur)
if not torch.isclose(torch.det(R_cur), torch.FloatTensor([1.]).to(self.device)).all():
raise ValueError("Det error:\nR\n{}\nq:\n{}".format(R_cur, w_cur))
t_prev = torch.matmul(R_prev, t_cur) + t_prev
R_prev = torch.matmul(R_prev, R_cur)
if not torch.isclose(torch.det(R_prev), torch.FloatTensor([1.]).to(self.device)).all():
raise ValueError("Det error:\nR\n{}".format(R_prev))
f2g_q[b, s] = spatial.rotation_matrix_to_quaternion(R_prev)
f2g_x[b, s] = t_prev
return f2g_x, f2g_q
class TesterDeepLIO(Tester):
ACTION = "test_deeplio"
class OdomSeqRes:
def __init__(self, date, drive, output_dir="."):
self.date = date
self.drive = drive
self.T_local_pred = []
self.T_global = []
self.timestamps = []
self.loss = []
self.out_dir = output_dir
def add_local_prediction(self, timestamp, loss, T_local, T_gt_global):
self.timestamps.append(timestamp)
self.loss.append(loss)
self.T_local_pred.append(T_local)
self.T_global.append(T_gt_global)
def write_to_file(self):
T_glob_pred = []
T_0i = self.T_local_pred[0]
T_glob_pred.append(T_0i)
for i in range(1, len(self.T_local_pred)):
T_i = self.T_local_pred[i]
T = np.matmul(T_0i, T_i)
T_glob_pred.append(T)
T_0i = np.copy(T)
T_global = np.array(self.T_global)
#q_gt_global = spatial.rotation_matrix_to_quaternion(torch.from_numpy(T_global[:, :3, :3]).contiguous()).numpy()
#p_gt_global = T_global[:, :3, 3]
T_glob_pred = np.array(T_glob_pred)
#q_pred_global = spatial.rotation_matrix_to_quaternion(torch.from_numpy(T_glob_pred[:, :3, :3]).contiguous()).numpy()
#p_pred_global = T_glob_pred[:, :3, 3]
timestamps = np.asarray(self.timestamps).reshape(-1, 1)
loss = np.asarray(self.loss).reshape(-1, 1)
# save as tum format
#gt_poses = np.hstack((timestamps, p_gt_global, q_gt_global))
#fname = "{}/gt_tum_{}_{}.txt".format(self.out_dir, self.date, self.drive)
#np.savetxt(fname, gt_poses, fmt='%.5f', delimiter=' ')
#pred_poses = np.hstack((timestamps, p_pred_global, q_pred_global))
#fname = "{}/pred_tum_{}_{}.txt".format(self.out_dir, self.date, self.drive)
#np.savetxt(fname, pred_poses, fmt='%.5f', delimiter=' ')
# save as KITTI format
gt_poses = T_global[:, :3, :].reshape(len(T_global), -1)
fname = "{}/gt_kitti_{}_{}.txt".format(self.out_dir, self.date, self.drive)
np.savetxt(fname, gt_poses, fmt='%.5f', delimiter=' ')
pred_poses = T_glob_pred[:, :3, :].reshape(len(T_glob_pred), -1)
fname = "{}/pred_kitti_{}_{}.txt".format(self.out_dir, self.date, self.drive)
np.savetxt(fname, pred_poses, fmt='%.5f', delimiter=' ')
fname = "{}/{}_{}.png".format(self.out_dir, self.date, self.drive)
plt.figure()
plt.plot(T_global[:, 0, 3], T_global[:, 1, 3], alpha=0.75, linewidth=1, label="GT")
#plt.scatter(T_global[:, 0, 3], T_global[:, 1, 3], alpha=0.7, s=0.5)
plt.plot(T_glob_pred[:, 0, 3], T_glob_pred[:, 1, 3], alpha=0.75, linewidth=1, label="DeepLIO")
#plt.scatter(T_glob_pred[:, 0, 3], T_glob_pred[:, 1, 3], alpha=0.7, s=0.5)
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.grid()
plt.legend()
plt.savefig(fname, figsize=(50, 50), dpi=600)
plt.close()
|
11455198
|
import sys
import argparse
import json
def main(args):
json.dump([line.strip('\n') for line in args.ifile], args.ofile)
if __name__=='__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('-i', '--ifile',
type = argparse.FileType('r'),
help = 'Input file. Default is stdin. ',
default = sys.stdin)
parser.add_argument('-o', '--ofile',
type = argparse.FileType('w'),
help = 'Output file. Default is stdout. ',
default = sys.stdout)
main(parser.parse_args())
|
11455290
|
from math import sqrt
from numba import njit
import numpy as np
import flare.kernels.cutoffs as cf
from flare.kernels.kernels import coordination_number, q_value_mc
@njit
def get_2_body_arrays(
positions,
atom: int,
cell,
r_cut,
cutoff_2,
species,
sweep,
nspecie,
species_mask,
twobody_mask,
):
"""Returns distances, coordinates, species of atoms, and indices of neighbors
in the 2-body local environment. This method is implemented outside
the AtomicEnvironment class to allow for njit acceleration with Numba.
:param positions: Positions of atoms in the structure.
:type positions: np.ndarray
:param atom: Index of the central atom of the local environment.
:type atom: int
:param cell: 3x3 array whose rows are the Bravais lattice vectors of the
cell.
:type cell: np.ndarray
:param cutoff_2: 2-body cutoff radius.
:type cutoff_2: np.ndarray
:param species: Numpy array of species represented by their atomic numbers.
:type species: np.ndarray
:param nspecie: number of atom types to define bonds
:type: int
:param species_mask: mapping from atomic number to atom types
:type: np.ndarray
:param twobody_mask: mapping from the types of end atoms to bond types
:type: np.ndarray
:return: Tuple of arrays describing pairs of atoms in the 2-body local
environment.
bond_array_2: Array containing the distances and relative
coordinates of atoms in the 2-body local environment. First column
contains distances, remaining columns contain Cartesian coordinates
divided by the distance (with the origin defined as the position of the
central atom). The rows are sorted by distance from the central atom.
bond_positions_2: Coordinates of atoms in the 2-body local environment.
etypes: Species of atoms in the 2-body local environment represented by
their atomic number.
bond_indices: Structure indices of atoms in the local environment.
:rtype: np.ndarray, np.ndarray, np.ndarray, np.ndarray
"""
noa = len(positions)
pos_atom = positions[atom]
super_count = sweep.shape[0] ** 3
coords = np.zeros((noa, 3, super_count), dtype=np.float64)
dists = np.zeros((noa, super_count), dtype=np.float64)
cutoff_count = 0
vec1 = cell[0]
vec2 = cell[1]
vec3 = cell[2]
sepcut = False
bcn = 0
if nspecie > 1 and cutoff_2 is not None:
sepcut = True
bc = species_mask[species[atom]]
bcn = nspecie * bc
# record distances and positions of images
for n in range(noa):
diff_curr = positions[n] - pos_atom
im_count = 0
if sepcut and (species_mask is not None) and (cutoff_2 is not None):
bn = species_mask[species[n]]
r_cut = cutoff_2[twobody_mask[bn + bcn]]
for s1 in sweep:
for s2 in sweep:
for s3 in sweep:
im = diff_curr + s1 * vec1 + s2 * vec2 + s3 * vec3
dist = sqrt(im[0] * im[0] + im[1] * im[1] + im[2] * im[2])
if (dist < r_cut) and (dist != 0):
dists[n, im_count] = dist
coords[n, :, im_count] = im
cutoff_count += 1
im_count += 1
# create 2-body bond array
bond_indices = np.zeros(cutoff_count, dtype=np.int8)
bond_array_2 = np.zeros((cutoff_count, 4), dtype=np.float64)
bond_positions_2 = np.zeros((cutoff_count, 3), dtype=np.float64)
etypes = np.zeros(cutoff_count, dtype=np.int8)
bond_count = 0
for m in range(noa):
spec_curr = species[m]
if sepcut and (species_mask is not None) and (cutoff_2 is not None):
bm = species_mask[species[m]]
r_cut = cutoff_2[twobody_mask[bm + bcn]]
for im_count in range(super_count):
dist_curr = dists[m, im_count]
if (dist_curr < r_cut) and (dist_curr != 0):
coord = coords[m, :, im_count]
bond_array_2[bond_count, 0] = dist_curr
bond_array_2[bond_count, 1:4] = coord / dist_curr
bond_positions_2[bond_count, :] = coord
etypes[bond_count] = spec_curr
bond_indices[bond_count] = m
bond_count += 1
# sort by distance
sort_inds = bond_array_2[:, 0].argsort()
bond_array_2 = bond_array_2[sort_inds]
bond_positions_2 = bond_positions_2[sort_inds]
bond_indices = bond_indices[sort_inds]
etypes = etypes[sort_inds]
return bond_array_2, bond_positions_2, etypes, bond_indices
@njit
def get_3_body_arrays(
bond_array_2,
bond_positions_2,
ctype,
etypes,
r_cut,
cutoff_3,
nspecie,
species_mask,
cut3b_mask,
):
"""Returns distances and coordinates of triplets of atoms in the
3-body local environment.
:param bond_array_2: 2-body bond array.
:type bond_array_2: np.ndarray
:param bond_positions_2: Coordinates of atoms in the 2-body local
environment.
:type bond_positions_2: np.ndarray
:param ctype: atomic number of the center atom
:type: int
:param cutoff_3: 3-body cutoff radius.
:type cutoff_3: np.ndarray
:param nspecie: number of atom types to define bonds
:type: int
:param species_mask: mapping from atomic number to atom types
:type: np.ndarray
:param cut3b_mask: mapping from the types of end atoms to bond types
:type: np.ndarray
:return: Tuple of 4 arrays describing triplets of atoms in the 3-body local
environment.
bond_array_3: Array containing the distances and relative
coordinates of atoms in the 3-body local environment. First column
contains distances, remaining columns contain Cartesian coordinates
divided by the distance (with the origin defined as the position of the
central atom). The rows are sorted by distance from the central atom.
cross_bond_inds: Two dimensional array whose row m contains the indices
of atoms n > m that are within a distance cutoff_3 of both atom n and the
central atom.
cross_bond_dists: Two dimensional array whose row m contains the
distances from atom m of atoms n > m that are within a distance cutoff_3
of both atom n and the central atom.
triplet_counts: One dimensional array of integers whose entry m is the
number of atoms that are within a distance cutoff_3 of atom m.
:rtype: (np.ndarray, np.ndarray, np.ndarray, np.ndarray)
"""
sepcut = False
if nspecie > 1 and cutoff_3 is not None:
bc = species_mask[ctype]
bcn = nspecie * bc
r_cut = np.max(cutoff_3)
sepcut = True
# get 3-body bond array
ind_3_l = np.where(bond_array_2[:, 0] > r_cut)[0]
if ind_3_l.shape[0] > 0:
ind_3 = ind_3_l[0]
else:
ind_3 = bond_array_2.shape[0]
bond_array_3 = bond_array_2[0:ind_3, :]
bond_positions_3 = bond_positions_2[0:ind_3, :]
cut_m = r_cut
cut_n = r_cut
cut_mn = r_cut
# get cross bond array
cross_bond_inds = np.zeros((ind_3, ind_3), dtype=np.int8) - 1
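    # entries stay -1 where no triplet partner exists (sentinel value)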
cross_bond_dists = np.zeros((ind_3, ind_3), dtype=np.float64)
triplet_counts = np.zeros(ind_3, dtype=np.int8)
for m in range(ind_3):
pos1 = bond_positions_3[m]
count = m + 1
trips = 0
if (
sepcut
and (species_mask is not None)
and (cut3b_mask is not None)
and (cutoff_3 is not None)
):
# choose bond dependent bond
bm = species_mask[etypes[m]]
btype_m = cut3b_mask[bm + bcn] # (m, c)
cut_m = cutoff_3[btype_m]
bmn = nspecie * bm # for cross_dist usage
for n in range(m + 1, ind_3):
if (
sepcut
and (species_mask is not None)
and (cut3b_mask is not None)
and (cutoff_3 is not None)
):
bn = species_mask[etypes[n]]
btype_n = cut3b_mask[bn + bcn] # (n, c)
cut_n = cutoff_3[btype_n]
# for cross_dist (m,n) pair
btype_mn = cut3b_mask[bn + bmn]
cut_mn = cutoff_3[btype_mn]
pos2 = bond_positions_3[n]
diff = pos2 - pos1
dist_curr = sqrt(diff[0] * diff[0] + diff[1] * diff[1] + diff[2] * diff[2])
if (
dist_curr < cut_mn
and bond_array_2[m, 0] < cut_m
and bond_array_2[n, 0] < cut_n
):
cross_bond_inds[m, count] = n
cross_bond_dists[m, count] = dist_curr
count += 1
trips += 1
triplet_counts[m] = trips
return bond_array_3, cross_bond_inds, cross_bond_dists, triplet_counts
@njit
def get_m2_body_arrays(
positions,
atom: int,
cell,
r_cut,
manybody_cutoff_list,
species,
sweep: np.ndarray,
nspec,
spec_mask,
manybody_mask,
cutoff_func=cf.quadratic_cutoff,
):
    # TODO: need to deal with the conflict of cutoff functions if other funcs are used
    # TODO: this can probably be improved by storing arrays to avoid redundant
    #       calls to get_2_body_arrays
"""
Args:
positions (np.ndarray): Positions of atoms in the structure.
atom (int): Index of the central atom of the local environment.
cell (np.ndarray): 3x3 array whose rows are the Bravais lattice vectors of the
cell.
        manybody_cutoff_list (np.ndarray): many-body cutoff radii, one per bond type.
species (np.ndarray): Numpy array of species represented by their atomic numbers.
    Return:
        Tuple (qs, qs_neigh, q_grads, q_neigh_grads, species_list, etypes)
        describing the many-body descriptors of the 2-body local environment.
    """
    # get distances, positions, species and indices of neighbouring atoms
bond_array_mb, _, etypes, bond_inds = get_2_body_arrays(
positions,
atom,
cell,
r_cut,
manybody_cutoff_list,
species,
sweep,
nspec,
spec_mask,
manybody_mask,
)
sepcut = False
if nspec > 1 and manybody_cutoff_list is not None:
bc = spec_mask[species[atom]]
bcn = bc * nspec
sepcut = True
species_list = np.array(list(set(species)), dtype=np.int8)
n_bonds = len(bond_inds)
n_specs = len(species_list)
qs = np.zeros(n_specs, dtype=np.float64)
qs_neigh = np.zeros((n_bonds, n_specs), dtype=np.float64)
q_neigh_grads = np.zeros((n_bonds, 3), dtype=np.float64)
# get coordination number of center atom for each species
for s in range(n_specs):
if (
sepcut
and (spec_mask is not None)
and (manybody_mask is not None)
and (manybody_cutoff_list is not None)
):
bs = spec_mask[species_list[s]]
mbtype = manybody_mask[bcn + bs]
r_cut = manybody_cutoff_list[mbtype]
qs[s] = q_value_mc(
bond_array_mb[:, 0], r_cut, species_list[s], etypes, cutoff_func
)
# get coordination number of all neighbor atoms for each species
for i in range(n_bonds):
if (
sepcut
and (spec_mask is not None)
and (manybody_mask is not None)
and (manybody_cutoff_list is not None)
):
be = spec_mask[etypes[i]]
ben = be * nspec
neigh_bond_array, __, neigh_etypes, ___ = get_2_body_arrays(
positions,
bond_inds[i],
cell,
r_cut,
manybody_cutoff_list,
species,
sweep,
nspec,
spec_mask,
manybody_mask,
)
for s in range(n_specs):
if (
sepcut
and (spec_mask is not None)
and (manybody_mask is not None)
and (manybody_cutoff_list is not None)
):
bs = spec_mask[species_list[s]]
mbtype = manybody_mask[bs + ben]
r_cut = manybody_cutoff_list[mbtype]
qs_neigh[i, s] = q_value_mc(
neigh_bond_array[:, 0],
r_cut,
species_list[s],
neigh_etypes,
cutoff_func,
)
# get grad from each neighbor atom
for i in range(n_bonds):
if (
sepcut
and (spec_mask is not None)
and (manybody_mask is not None)
and (manybody_cutoff_list is not None)
):
be = spec_mask[etypes[i]]
mbtype = manybody_mask[bcn + be]
r_cut = manybody_cutoff_list[mbtype]
ri = bond_array_mb[i, 0]
for d in range(3):
ci = bond_array_mb[i, d + 1]
____, q_neigh_grads[i, d] = coordination_number(ri, ci, r_cut, cutoff_func)
# get grads of the center atom
q_grads = q2_grads_mc(q_neigh_grads, species_list, etypes)
return qs, qs_neigh, q_grads, q_neigh_grads, species_list, etypes
@njit
def q2_grads_mc(neigh_grads, species_list, etypes):
n_specs = len(species_list)
n_neigh = neigh_grads.shape[0]
grads = np.zeros((n_specs, 3))
for i in range(n_neigh):
si = np.where(species_list == etypes[i])[0][0]
grads[si, :] += neigh_grads[i, :]
return grads
@njit
def get_m3_body_arrays(
positions,
atom: int,
cell,
cutoff: float,
species,
sweep,
cutoff_func=cf.quadratic_cutoff,
):
"""
Note: here we assume the cutoff is not too large,
i.e., 2 * cutoff < cell_size
"""
species_list = np.array(list(set(species)), dtype=np.int8)
q_func = coordination_number
    # call the masked helpers in their single-species form (nspecie=1, no masks),
    # since this function does not take the mask arguments itself
    bond_array, bond_positions, etypes, bond_inds = get_2_body_arrays(
        positions, atom, cell, cutoff, None, species, sweep, 1, None, None
    )
    bond_array_m3b, cross_bond_inds, cross_bond_dists, triplets = get_3_body_arrays(
        bond_array, bond_positions, species[atom], etypes, cutoff, None, 1, None, None
    )
# get descriptor of center atom for each species
m3b_array = q3_value_mc(
bond_array_m3b[:, 0],
cross_bond_inds,
cross_bond_dists,
triplets,
cutoff,
species_list,
etypes,
cutoff_func,
q_func,
)
# get descriptor of all neighbor atoms for each species
n_bonds = len(bond_array_m3b)
n_specs = len(species_list)
m3b_neigh_array = np.zeros((n_bonds, n_specs, n_specs))
for i in range(n_bonds):
        neigh_bond_array, neigh_positions, neigh_etypes, _ = get_2_body_arrays(
            positions, bond_inds[i], cell, cutoff, None, species, sweep, 1, None, None
        )
(
neigh_array_m3b,
neigh_cross_inds,
neigh_cross_dists,
neigh_triplets,
        ) = get_3_body_arrays(neigh_bond_array, neigh_positions, species[bond_inds[i]],
                              neigh_etypes, cutoff, None, 1, None, None)
m3b_neigh_array[i, :, :] = q3_value_mc(
neigh_array_m3b[:, 0],
neigh_cross_inds,
neigh_cross_dists,
neigh_triplets,
cutoff,
species_list,
neigh_etypes,
cutoff_func,
q_func,
)
# get grad from each neighbor atom, assume the cutoff is not too large
# such that 2 * cutoff < cell_size
m3b_neigh_grads = q3_neigh_grads_mc(
bond_array_m3b,
cross_bond_inds,
cross_bond_dists,
triplets,
cutoff,
species_list,
etypes,
cutoff_func,
q_func,
)
# get grads of the center atom
m3b_grads = q3_grads_mc(m3b_neigh_grads, species_list, etypes)
return m3b_array, m3b_neigh_array, m3b_grads, m3b_neigh_grads, species_list, etypes
@njit
def q3_grads_mc(neigh_grads, species_list, etypes):
n_specs = len(species_list)
n_neigh = neigh_grads.shape[0]
grads = np.zeros((n_specs, n_specs, 3))
for i in range(n_neigh):
si = np.where(species_list == etypes[i])[0][0]
for spec_j in species_list:
sj = np.where(species_list == spec_j)[0][0]
if si == sj:
grads[si, sj, :] += neigh_grads[i, sj, :] / 2
else:
grads[si, sj, :] += neigh_grads[i, sj, :]
return grads
@njit
def q3_neigh_grads_mc(
bond_array_m3b,
cross_bond_inds,
cross_bond_dists,
triplets,
r_cut,
species_list,
etypes,
cutoff_func,
q_func=coordination_number,
):
n_bonds = len(bond_array_m3b)
n_specs = len(species_list)
m3b_grads = np.zeros((n_bonds, n_specs, 3))
# get grad from each neighbor atom
for i in range(n_bonds):
# get grad of q_func
ri = bond_array_m3b[i, 0]
si = np.where(species_list == etypes[i])[0][0]
qi, _ = q_func(ri, 0, r_cut, cutoff_func)
qi_grads = np.zeros(3)
for d in range(3):
ci = bond_array_m3b[i, d + 1]
_, qi_grads[d] = q_func(ri, ci, r_cut, cutoff_func)
# go through all triplets with "atom" and "i"
for ind in range(triplets[i]):
j = cross_bond_inds[i, i + ind + 1]
rj = bond_array_m3b[j, 0]
sj = np.where(species_list == etypes[j])[0][0]
qj, _ = q_func(rj, 0, r_cut, cutoff_func)
qj_grads = np.zeros(3)
for d in range(3):
cj = bond_array_m3b[j, d + 1]
_, qj_grads[d] = q_func(rj, cj, r_cut, cutoff_func)
rij = cross_bond_dists[i, i + ind + 1]
qij, _ = q_func(rij, 0, r_cut, cutoff_func)
q_grad = (qi_grads * qj + qi * qj_grads) * qij
            # avoid double counting when si == sj (currently disabled):
# if si == sj:
# q_grad /= 2
m3b_grads[i, sj, :] += q_grad
m3b_grads[j, si, :] += q_grad
return m3b_grads
@njit
def q3_value_mc(
distances,
cross_bond_inds,
cross_bond_dists,
triplets,
r_cut,
species_list,
etypes,
cutoff_func,
q_func=coordination_number,
):
"""Compute value of many-body many components descriptor based
on distances of atoms in the local many-body environment.
Args:
distances (np.ndarray): distances between atoms i and j
r_cut (float): cutoff hyperparameter
        species_list (np.ndarray): species to consider when computing contributions
etypes (np.ndarray): atomic species of neighbours
cutoff_func (callable): cutoff function
        q_func (callable): many-body pairwise descriptor function
Return:
        np.ndarray: (n_specs, n_specs) matrix of many-body descriptor values
"""
n_specs = len(species_list)
mb3_array = np.zeros((n_specs, n_specs))
n_bonds = len(distances)
for m in range(n_bonds):
q1, _ = q_func(distances[m], 0, r_cut, cutoff_func)
s1 = np.where(species_list == etypes[m])[0][0]
for n in range(triplets[m]):
ind = cross_bond_inds[m, m + n + 1]
s2 = np.where(species_list == etypes[ind])[0][0]
q2, _ = q_func(distances[ind], 0, r_cut, cutoff_func)
r3 = cross_bond_dists[m, m + n + 1]
q3, _ = q_func(r3, 0, r_cut, cutoff_func)
mb3_array[s1, s2] += q1 * q2 * q3
if s1 != s2:
mb3_array[s2, s1] += q1 * q2 * q3
return mb3_array
|
11455311
|
def count_Binary_One(arr,low,high):
if high>=low:
mid = low + (high-low)//2
if ((mid == high or arr[mid+1]==0) and (arr[mid]==1)):
return mid+1
if arr[mid]==1:
return count_Binary_One(arr, (mid+1), high)
return count_Binary_One(arr, low, mid-1)
return 0
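# Example: for arr = [1, 1, 1, 0, 0] the function returns 3; the binary search
# assumes all 1's precede all 0's (array sorted in non-increasing order)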
arr = list(map(int, input("Enter numbers separated by spaces: ").split()))
print("Count of 1's in the given array is", count_Binary_One(arr, 0, len(arr) - 1))
|
11455317
|
from __future__ import absolute_import
import logging
import contextlib
import itertools
from collections import namedtuple
from huskar_api.models.auth import Application
from huskar_api.models.const import SELF_APPLICATION_NAME
from .const import (TYPE_SITE, TYPE_TEAM, TYPE_APPLICATION,
TYPE_CONFIG, TYPE_SWITCH, TYPE_SERVICE)
__all__ = ['action_types', 'action_creator']
logger = logging.getLogger(__name__)
INSTANCE_TYPE_MAP = {
'config': TYPE_CONFIG,
'switch': TYPE_SWITCH,
'service': TYPE_SERVICE
}
Action = namedtuple('Action', [
'action_type',
'action_data',
'action_indices'
])
class ActionCreator(object):
"""The factory method registry of actions."""
def __init__(self):
self._funcs = {}
def __call__(self, action_type):
"""Registers an action factory."""
def decorator(func):
assert action_type not in self._funcs
self._funcs[action_type] = func
return func
return decorator
def make_action(self, action_type, **extra):
"""Creates an action tuple."""
func = self._funcs[action_type]
return Action(action_type, *func(action_type, **extra))
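# Usage sketch: decorating a factory with ``@action_creator(action_types.X)``
# registers it, and ``action_creator.make_action(action_types.X, **extra)``
# later builds the Action(action_type, action_data, action_indices) tuple.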
class ActionType(object):
"""The immutable map for audit action types."""
def __init__(self, action_map):
self._action_map = {
name: ident for name, ident in action_map.items()
if not name.startswith('_')}
self._action_reversed_map = {
ident: name for name, ident in self._action_map.items()}
def __getitem__(self, ident):
return self._action_reversed_map[ident]
def __getattribute__(self, name):
if not name.isupper() or name not in self._action_map:
return object.__getattribute__(self, name)
return self._action_map[name]
def __setattr__(self, name, value):
if name.isupper():
raise AttributeError('can not set attribute')
object.__setattr__(self, name, value)
@property
def action_map(self):
return self._action_map
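# Usage sketch: ``action_types.CREATE_TEAM`` resolves to 1001 through
# __getattribute__, and ``action_types[1001]`` maps back to 'CREATE_TEAM'
# through the reversed map.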
# XXX NEVER REMOVE ANY ACTION TYPE HERE
# If you want to discard an action type, prefix it an underline instead.
# Don't forget to update settings.DANGEROUS_ACTION_TYPES_EXCLUDE_LIST if necessary.
action_types = ActionType({
'_DISCARD_TYPE': -1,
'CREATE_TEAM': 1001,
'DELETE_TEAM': 1002,
'ARCHIVE_TEAM': 1003,
'_UNARCHIVE_TEAM': 1004, # reserved
'CREATE_APPLICATION': 1101,
'DELETE_APPLICATION': 1102,
'ARCHIVE_APPLICATION': 1103,
'_UNARCHIVE_APPLICATION': 1104, # reserved
'CREATE_USER': 1201,
'DELETE_USER': 1202,
'ARCHIVE_USER': 1203,
'_UNARCHIVE_USER': 1204, # reserved
    'CHANGE_USER_PASSWORD': 1205,  # value assumed from the 12xx user-action numbering
    'FORGOT_USER_PASSWORD': 1206,  # value assumed from the 12xx user-action numbering
'GRANT_HUSKAR_ADMIN': 2001,
'DISMISS_HUSKAR_ADMIN': 2002,
'GRANT_TEAM_ADMIN': 2101,
'DISMISS_TEAM_ADMIN': 2102,
'GRANT_APPLICATION_AUTH': 2201,
'DISMISS_APPLICATION_AUTH': 2202,
'_CREATE_SERVICE': 3001, # reserved
'UPDATE_SERVICE': 3002,
'DELETE_SERVICE': 3003,
'CREATE_SERVICE_CLUSTER': 3004,
'DELETE_SERVICE_CLUSTER': 3005,
'IMPORT_SERVICE': 3006,
'_CREATE_SWITCH': 3101, # reserved
'UPDATE_SWITCH': 3102,
'DELETE_SWITCH': 3103,
'CREATE_SWITCH_CLUSTER': 3104,
'DELETE_SWITCH_CLUSTER': 3105,
'IMPORT_SWITCH': 3106,
'_CREATE_CONFIG': 3201, # reserved
'UPDATE_CONFIG': 3202,
'DELETE_CONFIG': 3203,
'CREATE_CONFIG_CLUSTER': 3204,
'DELETE_CONFIG_CLUSTER': 3205,
'IMPORT_CONFIG': 3206,
'UPDATE_INFRA_CONFIG': 3207,
'DELETE_INFRA_CONFIG': 3208,
'UPDATE_SERVICE_INFO': 3301,
'UPDATE_CLUSTER_INFO': 3302,
'ASSIGN_CLUSTER_LINK': 3303,
'DELETE_CLUSTER_LINK': 3304,
'OBTAIN_USER_TOKEN': 4001,
'OBTAIN_APPLICATION_TOKEN': 4002,
'UPDATE_ROUTE': 5001,
'DELETE_ROUTE': 5002,
'UPDATE_DEFAULT_ROUTE': 5003,
'DELETE_DEFAULT_ROUTE': 5004,
'PROGRAM_UPDATE_ROUTE_STAGE': 8001,
})
action_creator = ActionCreator()
@action_creator(action_types.CREATE_TEAM)
@action_creator(action_types.DELETE_TEAM)
@action_creator(action_types.ARCHIVE_TEAM)
def make_team_action(action_type, team):
data = {
'team_id': team.id,
'team_name': team.team_name,
'team_desc': team.team_desc,
}
return data, [(TYPE_SITE, 0)]
@action_creator(action_types.CREATE_APPLICATION)
@action_creator(action_types.DELETE_APPLICATION)
@action_creator(action_types.ARCHIVE_APPLICATION)
def make_application_action(action_type, application, team):
data = {
'application_id': application.id,
'application_name': application.application_name,
'team_id': team.id,
'team_name': team.team_name,
'team_desc': team.team_desc,
}
return data, [(TYPE_SITE, 0), (TYPE_TEAM, team.id)]
@action_creator(action_types.CREATE_USER)
@action_creator(action_types.DELETE_USER)
@action_creator(action_types.ARCHIVE_USER)
@action_creator(action_types.CHANGE_USER_PASSWORD)
@action_creator(action_types.FORGOT_USER_PASSWORD)
@action_creator(action_types.GRANT_HUSKAR_ADMIN)
@action_creator(action_types.DISMISS_HUSKAR_ADMIN)
@action_creator(action_types.OBTAIN_USER_TOKEN)
def make_user_action(action_type, user):
data = {'username': user.username, 'user_id': user.id}
return data, [(TYPE_SITE, 0)]
@action_creator(action_types.GRANT_TEAM_ADMIN)
@action_creator(action_types.DISMISS_TEAM_ADMIN)
def make_team_admin_action(action_type, user, team):
data = {'user_id': user.id, 'username': user.username,
'team_id': team.id, 'team_name': team.team_name,
'team_desc': team.team_desc}
return data, [(TYPE_SITE, 0), (TYPE_TEAM, team.id)]
@action_creator(action_types.GRANT_APPLICATION_AUTH)
@action_creator(action_types.DISMISS_APPLICATION_AUTH)
def make_application_auth_action(action_type, user, application, authority):
data = {'application_id': application.id,
'application_name': application.application_name,
'user_id': user.id, 'username': user.username,
'authority': authority}
return data, [(TYPE_SITE, 0),
(TYPE_APPLICATION, application.id),
(TYPE_TEAM, application.team.id)]
@action_creator(action_types.UPDATE_SERVICE)
@action_creator(action_types.DELETE_SERVICE)
@action_creator(action_types.UPDATE_SWITCH)
@action_creator(action_types.DELETE_SWITCH)
@action_creator(action_types.UPDATE_CONFIG)
@action_creator(action_types.DELETE_CONFIG)
def make_configuration_action(
action_type, application_name, cluster_name, key,
old_data=None, new_data=None):
data = {'old': old_data, 'new': new_data}
data = {'application_name': application_name,
'cluster_name': cluster_name, 'key': key, 'data': data}
indices = itertools.chain(
_optional_indices(application_name),
_optional_instance_indices(
application_name, cluster_name, key, action_type)
)
return data, list(indices)
@action_creator(action_types.IMPORT_SERVICE)
@action_creator(action_types.IMPORT_SWITCH)
@action_creator(action_types.IMPORT_CONFIG)
def make_import_action(action_type, datalist, overwrite, affected):
nested = {}
for item in datalist:
_application = nested.setdefault(item['application'], {})
_cluster = _application.setdefault(item['cluster'], {})
_cluster[item['key']] = item['value']
    application_names = list(nested)
data = {'data': {'nested': nested}, 'stored': True,
'overwrite': overwrite, 'affected': affected,
'application_names': application_names}
indices = itertools.chain.from_iterable(
_optional_indices(application_name)
for application_name in application_names)
return data, list(indices)
@action_creator(action_types.UPDATE_INFRA_CONFIG)
@action_creator(action_types.DELETE_INFRA_CONFIG)
def make_infra_config_action(action_type, application_name, infra_type,
infra_name, scope_type, scope_name,
old_value=None, new_value=None):
data = {'old': old_value, 'new': new_value}
data = {'application_name': application_name, 'infra_type': infra_type,
'infra_name': infra_name, 'scope_type': scope_type,
'scope_name': scope_name, 'value': new_value, 'data': data}
indices = _optional_indices(application_name)
return data, list(indices)
@action_creator(action_types.CREATE_SERVICE_CLUSTER)
@action_creator(action_types.DELETE_SERVICE_CLUSTER)
@action_creator(action_types.CREATE_SWITCH_CLUSTER)
@action_creator(action_types.DELETE_SWITCH_CLUSTER)
@action_creator(action_types.CREATE_CONFIG_CLUSTER)
@action_creator(action_types.DELETE_CONFIG_CLUSTER)
def make_cluster_action(action_type, application_name, cluster_name):
data = {'application_name': application_name, 'cluster_name': cluster_name}
indices = _optional_indices(application_name)
return data, list(indices)
@action_creator(action_types.ASSIGN_CLUSTER_LINK)
@action_creator(action_types.DELETE_CLUSTER_LINK)
def make_cluster_link_action(action_type, application_name, cluster_name,
physical_name=None):
data = {'application_name': application_name, 'cluster_name': cluster_name,
'physical_name': physical_name}
indices = _optional_indices(application_name)
return data, list(indices)
@action_creator(action_types.UPDATE_SERVICE_INFO)
@action_creator(action_types.UPDATE_CLUSTER_INFO)
def make_service_info_action(action_type, application_name, cluster_name=None,
old_data=None, new_data=None):
    changes = {'old': old_data, 'new': new_data}
    data = {'application_name': application_name,
            'cluster_name': cluster_name, 'data': changes}
indices = _optional_indices(application_name)
return data, list(indices)
@action_creator(action_types.UPDATE_ROUTE)
@action_creator(action_types.DELETE_ROUTE)
def make_route_action(action_type, application_name, cluster_name,
dest_application_name, dest_cluster_name,
intent, **kwargs):
data = {'application_name': application_name, 'cluster_name': cluster_name,
'intent': intent, 'dest_application_name': dest_application_name,
'dest_cluster_name': dest_cluster_name}
indices = _optional_indices(application_name)
return data, list(indices)
@action_creator(action_types.UPDATE_DEFAULT_ROUTE)
@action_creator(action_types.DELETE_DEFAULT_ROUTE)
def make_default_route_action(action_type, application_name, ezone, intent,
cluster_name=None):
data = {'application_name': application_name, 'ezone': ezone,
'intent': intent, 'cluster_name': cluster_name}
indices = _optional_indices(application_name)
return data, list(indices)
@action_creator(action_types.PROGRAM_UPDATE_ROUTE_STAGE)
def make_program_route_stage_action(action_type, application_name, old_stage,
new_stage):
data = {'application_name': application_name,
'old_stage': old_stage, 'new_stage': new_stage}
indices = itertools.chain(
_optional_indices(application_name),
_optional_indices(SELF_APPLICATION_NAME))
return data, list(indices)
def _optional_indices(application_name):
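    # Best-effort lookup of audit index tuples: yield (TYPE_APPLICATION, id)
    # and (TYPE_TEAM, id) when they can be resolved, otherwise yield nothing.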
application = None
team = None
with _suppress_exception(application_name=application_name):
application = Application.get_by_name(application_name)
with _suppress_exception(application_name=application_name):
team = application and application.team
if application:
yield (TYPE_APPLICATION, application.id)
if team:
yield (TYPE_TEAM, team.id)
def _optional_instance_indices(
application_name, cluster_name, instance_key, action_type):
_, action_name = action_types[action_type].split('_', 1)
instance_type = INSTANCE_TYPE_MAP[action_name.lower()]
application = None
with _suppress_exception(application_name=application_name):
application = Application.get_by_name(application_name)
if application:
yield (instance_type, application.id, cluster_name, instance_key)
@contextlib.contextmanager
def _suppress_exception(**kwargs):
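    # Audit indexing is best-effort: log and swallow any error so that the
    # operation being audited is never broken by index creation.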
try:
yield
except Exception as e:
logger.error('Failed to create audit index: %r %r', e, kwargs)
11455344
import pytest
from openbb_terminal.settings_controller import SettingsController
# pylint: disable=W0621
@pytest.fixture()
def controller(mocker):
mocker.patch(
"openbb_terminal.settings_controller.obbff.USE_PROMPT_TOOLKIT",
True,
)
mocker.patch("openbb_terminal.settings_controller.session", True)
mocker.patch("openbb_terminal.settings_controller.set_key")
mocker.patch("openbb_terminal.settings_controller.obbff")
return SettingsController()
def test_print_help(controller):
controller.print_help()
def test_call_dt(controller):
controller.call_dt(None)
def test_call_autoscaling(controller):
controller.call_autoscaling(None)
@pytest.mark.parametrize("other", [["45"], ["-v", "45"]])
def test_call_dpi(controller, other):
controller.call_dpi(other)
@pytest.mark.parametrize("other", [["45"], ["-v", "45"]])
def test_call_height(controller, other):
controller.call_height(other)
@pytest.mark.parametrize("other", [["45"], ["-v", "45"]])
def test_call_width(controller, other):
controller.call_width(other)
@pytest.mark.parametrize("other", [["45"], ["-v", "45"]])
def test_call_pheight(controller, other):
controller.call_pheight(other)
@pytest.mark.parametrize("other", [["45"], ["-v", "45"]])
def test_call_pwidth(controller, other):
controller.call_pwidth(other)
@pytest.mark.parametrize("other", [["45"], ["-v", "45"]])
def test_call_monitor(controller, other):
controller.call_monitor(other)
@pytest.mark.parametrize("other", [["GTK3Agg"], ["-v", "GTK3Agg"], ["None"]])
def test_call_backend(controller, other):
controller.call_backend(other)
11455345
import pytest
from brownie import chain, history
from brownie.test import given, strategy
from hypothesis import settings
WEEK = 86400 * 7
YEAR = 86400 * 365
@pytest.fixture(scope="module", autouse=True)
def setup(gauge_controller, accounts, three_gauges, token, voting_escrow):
# We handle setup logic in a fixture to avoid repeating it in each test run
# Set up gauges and types
gauge_controller.add_type(b"Liquidity", 10 ** 18, {"from": accounts[0]})
for gauge in three_gauges:
gauge_controller.add_gauge(gauge, 0, {"from": accounts[0]})
# Distribute coins
for acct in accounts[:3]:
token.transfer(acct, 10 ** 24, {"from": accounts[0]})
token.approve(voting_escrow, 10 ** 24, {"from": acct})
@given(
st_deposits=strategy("uint256[3]", min_value=10 ** 21, max_value=10 ** 23),
st_length=strategy("uint256[3]", min_value=52, max_value=100),
st_votes=strategy("uint[2][3]", min_value=0, max_value=5),
)
@settings(max_examples=10)
def test_gauge_weight_vote(
accounts, gauge_controller, three_gauges, voting_escrow, st_deposits, st_length, st_votes
):
"""
Test that gauge weights correctly adjust over time.
Strategies
---------
st_deposits : [int, int, int]
Number of coins to be deposited per account
st_length : [int, int, int]
Policy duration in weeks
st_votes : [(int, int), (int, int), (int, int)]
(vote for gauge 0, vote for gauge 1) for each account, in units of 10%
"""
# Init 10 s before the week change
t0 = chain.time()
t1 = (t0 + 2 * WEEK) // WEEK * WEEK - 10
chain.sleep(t1 - t0)
# Deposit for voting
timestamp = t1
for i, acct in enumerate(accounts[:3]):
voting_escrow.create_lock(st_deposits[i], timestamp + (st_length[i] * WEEK), {"from": acct})
# Place votes
votes = []
for i, acct in enumerate(accounts[:3]):
votes.append([x * 1000 for x in st_votes[i]])
votes[-1].append(10000 - sum(votes[-1])) # XXX what if votes are not used up to 100%?
# Now votes are [[vote_gauge_0, vote_gauge_1, vote_gauge_2], ...]
for x in range(3):
gauge_controller.vote_for_gauge_weights(three_gauges[x], votes[-1][x], {"from": acct})
# Vote power assertions - everyone used all voting power
for acct in accounts[:3]:
assert gauge_controller.vote_user_power(acct) == 10000
# Calculate slope data, build model functions
slope_data = []
for i, acct in enumerate(accounts[:3]):
initial_bias = voting_escrow.get_last_user_slope(acct) * (
voting_escrow.locked(acct)[1] - timestamp
)
duration = (
timestamp + st_length[i] * WEEK
) // WEEK * WEEK - timestamp # <- endtime rounded to whole weeks
slope_data.append((initial_bias, duration))
max_duration = max(duration for bias, duration in slope_data)
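    # Each lock's voting bias decays linearly, reaching zero at lock expiry;
    # relative_time is normalized by the longest lock, so a lock of length
    # `duration` hits zero bias at relative_time = duration / max_duration.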
def models(idx, relative_time):
bias, duration = slope_data[idx]
return max(bias * (1 - relative_time * max_duration / duration), 0)
chain.sleep(WEEK * 4)
chain.mine()
# advance clock a month at a time and compare theoretical weight to actual weights
while history[-1].timestamp < timestamp + 1.5 * max_duration:
for i in range(3):
gauge_controller.checkpoint_gauge(three_gauges[i], {"from": accounts[4]})
relative_time = (history[-1].timestamp // WEEK * WEEK - timestamp) / max_duration
weights = [gauge_controller.gauge_relative_weight(three_gauges[i]) / 1e18 for i in range(3)]
if relative_time < 1:
theoretical_weights = [
sum((votes[i][0] / 10000) * models(i, relative_time) for i in range(3)),
sum((votes[i][1] / 10000) * models(i, relative_time) for i in range(3)),
sum((votes[i][2] / 10000) * models(i, relative_time) for i in range(3)),
]
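            # `w and ...` leaves zero weights untouched, which also avoids a
            # zero division when every theoretical weight is zero.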
theoretical_weights = [
w and (w / sum(theoretical_weights)) for w in theoretical_weights
]
else:
theoretical_weights = [0] * 3
print(relative_time, weights, theoretical_weights)
if relative_time != 1: # XXX 1 is odd: let's look at it separately
for i in range(3):
assert (
abs(weights[i] - theoretical_weights[i])
<= (history[-1].timestamp - timestamp) / WEEK + 1
) # 1 s per week?
chain.sleep(WEEK * 4)
chain.mine()
11455388
from __future__ import print_function
import six
from tempfile import mkdtemp
from shutil import rmtree
from os.path import join
from subprocess import CalledProcessError
from dark.diamond.conversion import FIELDS, DiamondTabularFormat
from dark.process import Executor
from dark.utils import cd
def diamondInstalled():
"""
Test if DIAMOND is installed.
@return: A C{bool}, which is C{True} if DIAMOND seems to be installed.
"""
try:
Executor().execute('diamond help')
except CalledProcessError:
return False
else:
return True
class DiamondExecutor(object):
"""
@param dryRun: If C{True} do not actually execute the DIAMOND commands.
"""
SUBJECTS_FILENAME = 'subjects.fasta'
QUERIES_FILENAME = 'queries.fasta'
OUTPUT_FILENAME = 'diamond.tsv'
def __init__(self, dryRun=False):
self._dirty = False
self._dir = mkdtemp()
self._subjectsFp = None
self._subjectsExist = False
self._executor = Executor(dryRun)
def addSubject(self, subject):
"""
Add a subject sequence to the database.
@param subject: A C{dark.reads.Read} instance.
"""
if self._subjectsFp is None:
if six.PY3:
self._subjectsFp = open(
join(self._dir, self.SUBJECTS_FILENAME), 'a',
encoding='utf-8')
else:
self._subjectsFp = open(
join(self._dir, self.SUBJECTS_FILENAME), 'a')
print(subject.toString('fasta'), end='', file=self._subjectsFp)
self._subjectsExist = self._dirty = True
def cleanup(self):
"""
Remove the temporary directory we made.
"""
if self._subjectsFp:
self._subjectsFp.close()
self._subjectsFp = None
rmtree(self._dir)
def search(self, reads, fieldNames=None):
"""
Match reads against the database.
@param reads: An instance of C{dark.reads.Reads}.
@param fieldNames: An iterable of C{str} field names for DIAMOND
tabular output (format 6). See diamond help for the names of all
available fields.
@return: A generator that yields C{dict}s with keys as in
C{fieldNames}.
"""
if not self._subjectsExist:
raise ValueError('No subject sequences in the database')
with cd(self._dir):
if self._dirty:
self._subjectsFp.close()
self._subjectsFp = None
self._executor.execute('diamond makedb --db database --in %s' %
self.SUBJECTS_FILENAME)
with open(self.QUERIES_FILENAME, 'w') as fp:
count = reads.save(fp, format_='fastq')
if count == 0:
raise ValueError('No query sequences were passed')
fieldNames = fieldNames or FIELDS.split()
self._executor.execute(
'diamond blastx --db database --query %s --outfmt 6 %s > %s' %
(self.QUERIES_FILENAME, ' '.join(fieldNames),
self.OUTPUT_FILENAME))
dtf = DiamondTabularFormat(fieldNames)
for diamondDict in dtf.diamondTabularFormatToDicts(
self.OUTPUT_FILENAME):
yield diamondDict
def __enter__(self):
return self
def __exit__(self, excType, excValue, traceback):
self.cleanup()
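# Example usage (a minimal sketch; assumes DIAMOND is installed and that
# `subject` and `reads` are dark.reads Read/Reads instances):
#
#     with DiamondExecutor() as de:
#         de.addSubject(subject)
#         for match in de.search(reads):
#             print(match)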
11455406
from verta import Client
client = Client('https://dev.verta.ai')
client.set_project('Demo - Jenkins+Prometheus')
client.set_experiment('Demo')
run = client.set_experiment_run()
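# A trivial identity predictor: just enough structure to log a model.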
class Predictor(object):
def __init__(self):
pass
def predict(self, X):
return X
run.log_model(Predictor())
11455407
import random
import typing
from typing import List, Callable
from hearthstone.simulator.agent.actions import StandardAction, generate_standard_actions, BuyAction, EndPhaseAction, \
SummonAction, DiscoverChoiceAction, RearrangeCardsAction, FreezeDecision, RerollAction, \
SellAction, TavernUpgradeAction, HeroPowerAction
from hearthstone.simulator.agent.agent import Agent
from hearthstone.simulator.core.player import Player, StoreIndex
if typing.TYPE_CHECKING:
from hearthstone.simulator.core.cards import MonsterCard
class HeroBot(Agent):
def __init__(self, authors: List[str], priority: Callable[['Player', 'MonsterCard'], float], seed: int):
if not authors:
authors = ["<NAME>", "<NAME>", "<NAME>"]
self.authors = authors
self.priority = priority
self.local_random = random.Random(seed)
async def rearrange_cards(self, player: 'Player') -> RearrangeCardsAction:
permutation = list(range(len(player.in_play)))
self.local_random.shuffle(permutation)
return RearrangeCardsAction(permutation)
async def buy_phase_action(self, player: 'Player') -> StandardAction:
all_actions = list(generate_standard_actions(player))
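        # Strategy: rush tavern tier 2 first, spend the hero power when the
        # board is full, then summon/buy the highest-priority card, selling
        # the lowest-priority board minion to make room when it is weaker.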
if player.tavern_tier < 2:
upgrade_action = TavernUpgradeAction()
if upgrade_action.valid(player):
return upgrade_action
if not player.room_on_board():
hero_actions = [action for action in all_actions if type(action) is HeroPowerAction]
if hero_actions:
return self.local_random.choice(hero_actions)
top_hand_priority = max([self.priority(player, card) for card in player.hand], default=None)
top_store_priority = max([self.priority(player, card) for card in player.store], default=None)
bottom_board_priority = min([self.priority(player, card) for card in player.in_play], default=None)
        if top_hand_priority is not None:
if player.room_on_board():
return [
action for action in all_actions
if type(action) is SummonAction and self.priority(player,
player.hand[action.index]) == top_hand_priority
][0]
else:
if top_hand_priority > bottom_board_priority:
return [
action for action in all_actions
if type(action) is SellAction and self.priority(player, player.in_play[
action.index]) == bottom_board_priority
][0]
        if top_store_priority is not None:
if player.room_on_board() or bottom_board_priority < top_store_priority:
buy_action = BuyAction(
[StoreIndex(index) for index, card in enumerate(player.store) if
self.priority(player, card) == top_store_priority][0]
)
if buy_action.valid(player):
return buy_action
reroll_action = RerollAction()
if reroll_action.valid(player):
return reroll_action
return EndPhaseAction(FreezeDecision.NO_FREEZE)
async def discover_choice_action(self, player: 'Player') -> DiscoverChoiceAction:
discover_cards = player.discover_queue[0].items
discover_cards = sorted(discover_cards, key=lambda card: self.priority(player, card), reverse=True)
return DiscoverChoiceAction(player.discover_queue[0].items.index(discover_cards[0]))
11455497
from .client import DockerClient
from .container import DockerContainer
from .image import DockerImage
__all__ = [
"DockerClient",
"DockerContainer",
"DockerImage",
]
11455508
import os
import sys
import tensorflow as tf
sys.path.append('./TFext/models/slim')
from datasets import dataset_utils
from nets import inception
from preprocessing import inception_preprocessing
from coco_loss import coco_loss_layer
slim = tf.contrib.slim
url = 'http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz'
checkpoints_dir = '/home/mscvproject/users/yumao/humanRecog/HumanRecognition/pretrained_model_upperbody'
checkpoint_name = 'model.ckpt-104956'
original_variable_namescope = 'InceptionV3'
feature_length = 1024
num_identity = 1409
image_size = inception.inception_v3.default_image_size
def build_network(batch_size, is_training):
# input
tf_raw_image_data = tf.placeholder(tf.string, shape=(batch_size,))
tf_body_bbox = tf.placeholder(tf.int32, shape=(batch_size, 4))
tf_labels = tf.placeholder(tf.int32, shape=(batch_size,))
# pre-processing pipeline
crops = []
for i in range(batch_size):
image = tf.image.decode_jpeg(tf_raw_image_data[i], channels=3)
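        # crop_to_bounding_box takes (offset_height, offset_width,
        # target_height, target_width); the bbox rows here appear to be
        # laid out as (x, y, width, height).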
body_crop = tf.image.crop_to_bounding_box(image, tf_body_bbox[i, 1], tf_body_bbox[i, 0], tf_body_bbox[i, 3],
tf_body_bbox[i, 2])
processed_crop = inception_preprocessing.preprocess_image(body_crop, image_size, image_size,
is_training=is_training)
crops.append(processed_crop)
processed_images = tf.stack(crops)
# training pipeline
with slim.arg_scope(inception.inception_v3_arg_scope()):
_, endpoints = inception.inception_v3(processed_images, num_classes=num_identity, is_training=is_training)
# load model parameters
init_fn = slim.assign_from_checkpoint_fn(os.path.join(checkpoints_dir, checkpoint_name),
slim.get_model_variables(original_variable_namescope))
net_before_pool = tf.reshape(endpoints['Mixed_7c'], shape=(batch_size, -1))
net_before_pool_frozen = tf.stop_gradient(net_before_pool)
tf_features = slim.fully_connected(net_before_pool_frozen, feature_length, activation_fn=None)
tf_features_normalized = tf.nn.l2_normalize(tf_features, dim=1)
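    # COCO (congenerous cosine) loss operates on the L2-normalized features,
    # pulling same-identity features together on the unit hypersphere.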
tf_loss = coco_loss_layer(tf_features_normalized, tf_labels, batch_size)
# optimizer
tf_lr = tf.placeholder(dtype=tf.float32, shape=(), name='learning_rate')
    optimizer = tf.train.AdamOptimizer(learning_rate=tf_lr)  # use the fed learning rate, not a hard-coded one
train = optimizer.minimize(tf_loss)
# summary
tf.summary.scalar('coco_loss', tf_loss)
summary_op = tf.summary.merge_all()
return (tf_raw_image_data, tf_body_bbox, tf_labels), (init_fn, tf_loss, tf_lr, train, summary_op), tf_features
def download_pretrained_model():
    # A minimal sketch: fetch and unpack the public Inception V3 checkpoint
    # from `url` into checkpoints_dir, using TF-slim's dataset_utils helper
    # imported above.
    if not tf.gfile.Exists(checkpoints_dir):
        tf.gfile.MakeDirs(checkpoints_dir)
    dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)
11455521
import pytest
import numpy as np
import os.path
import sofa
import scipy.io.wavfile as wavfile
from pyfar.samplings import SphericalVoronoi
from pyfar import Orientations
from pyfar import Coordinates
from pyfar import FrequencyData, TimeData
import pyfar.classes.filter as fo
import pyfar.signals
from pyfar.testing import stub_utils
@pytest.fixture
def sine_stub():
"""Sine signal stub.
    To be used in cases when a dependency on the Signal class is prohibited
    but a correct, fixed relation between the time signal and the spectrum
    is needed.
Returns
-------
signal : Signal
Stub of sine signal
"""
frequency = 441
sampling_rate = 44100
n_samples = 10000
fft_norm = 'rms'
cshape = (1,)
time, freq, frequency = stub_utils.sine_func(
frequency, sampling_rate, n_samples, fft_norm, cshape)
signal = stub_utils.signal_stub(
time, freq, sampling_rate, fft_norm)
return signal
@pytest.fixture
def sine_stub_odd():
"""Sine signal stub, odd number of samples
To be used in cases, when a dependence on the Signal class is prohibited,
but a correct, fixed relation of the time signal and the spectrum is
needed.
Returns
-------
signal : Signal
Stub of sine signal
"""
frequency = 441
sampling_rate = 44100
n_samples = 9999
fft_norm = 'rms'
cshape = (1,)
time, freq, frequency = stub_utils.sine_func(
frequency, sampling_rate, n_samples, fft_norm, cshape)
signal = stub_utils.signal_stub(
time, freq, sampling_rate, fft_norm)
return signal
@pytest.fixture
def impulse_stub():
"""Delta impulse signal stub.
    To be used in cases when a dependency on the Signal class is prohibited
    but a correct, fixed relation between the time signal and the spectrum
    is needed.
Returns
-------
signal : Signal
Stub of impulse signal
"""
delay = 0
sampling_rate = 44100
n_samples = 10000
fft_norm = 'none'
cshape = (1,)
time, freq = stub_utils.impulse_func(
delay, n_samples, fft_norm, cshape)
signal = stub_utils.signal_stub(
time, freq, sampling_rate, fft_norm)
return signal
@pytest.fixture
def noise_stub():
"""Gaussian white noise signal stub.
    To be used in cases when a dependency on the Signal class is prohibited
    but a correct, fixed relation between the time signal and the spectrum
    is needed.
Returns
-------
signal : Signal
Stub of noise signal
"""
sigma = 1
n_samples = int(1e5)
cshape = (1,)
sampling_rate = 44100
fft_norm = 'rms'
time, freq = stub_utils.noise_func(sigma, n_samples, cshape)
signal = stub_utils.signal_stub(
time, freq, sampling_rate, fft_norm)
return signal
@pytest.fixture
def noise_stub_odd():
"""Gaussian white noise signal stub, odd number of samples.
To be used in cases, when a dependence on the Signal class is prohibited,
but a correct, fixed relation of the time signal and the spectrum is
needed.
Returns
-------
signal : Signal
Stub of noise signal
"""
sigma = 1
n_samples = int(1e5 - 1)
cshape = (1,)
sampling_rate = 44100
fft_norm = 'rms'
time, freq = stub_utils.noise_func(sigma, n_samples, cshape)
signal = stub_utils.signal_stub(
time, freq, sampling_rate, fft_norm)
return signal
@pytest.fixture
def sine():
"""Sine signal.
Returns
-------
signal : Signal
Sine signal
"""
frequency = 441
n_samples = 10000
sampling_rate = 44100
amplitude = 1
signal = pyfar.signals.sine(
frequency, n_samples, amplitude=amplitude,
sampling_rate=sampling_rate)
return signal
@pytest.fixture
def sine_short():
"""Short sine signal where the first frequency is > 20 Hz.
This is used for testing plot._line._lower_frequency_limit.
Returns
-------
signal : Signal
Sine signal
"""
frequency = 441
n_samples = 100
sampling_rate = 44100
amplitude = 1
signal = pyfar.signals.sine(
frequency, n_samples, amplitude=amplitude,
sampling_rate=sampling_rate)
return signal
@pytest.fixture
def impulse():
"""Delta impulse signal.
Returns
-------
signal : Signal
Impulse signal
"""
n_samples = 10000
delay = 0
amplitude = 1
sampling_rate = 44100
signal = pyfar.signals.impulse(
n_samples, delay=delay, amplitude=amplitude,
sampling_rate=sampling_rate)
return signal
@pytest.fixture
def impulse_group_delay():
"""Delayed delta impulse signal with analytical group delay.
Returns
-------
signal : Signal
Impulse signal
group_delay : ndarray
Group delay of impulse signal
"""
n_samples = 10000
delay = 0
amplitude = 1
sampling_rate = 44100
signal = pyfar.signals.impulse(
n_samples, delay=delay, amplitude=amplitude,
sampling_rate=sampling_rate)
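    # An impulse delayed by d samples has a constant group delay of d at
    # every frequency bin.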
group_delay = delay * np.ones_like(signal.freq, dtype=float)
return signal, group_delay
@pytest.fixture
def impulse_group_delay_two_channel():
"""Delayed 2 channel delta impulse signal with analytical group delay.
Returns
-------
signal : Signal
Impulse signal
group_delay : ndarray
Group delay of impulse signal
"""
n_samples = 10000
delay = np.atleast_1d([1000, 2000])
amplitude = np.atleast_1d([1, 1])
sampling_rate = 44100
signal = pyfar.signals.impulse(
n_samples, delay=delay, amplitude=amplitude,
sampling_rate=sampling_rate)
group_delay = delay[..., np.newaxis] * np.ones_like(
signal.freq, dtype=float)
return signal, group_delay
@pytest.fixture
def impulse_group_delay_two_by_two_channel():
"""Delayed 2-by-2 channel delta impulse signal with analytical group delay.
Returns
-------
signal : Signal
Impulse signal
group_delay : ndarray
Group delay of impulse signal
"""
n_samples = 10000
delay = np.array([[1000, 2000], [3000, 4000]])
amplitude = np.atleast_1d([[1, 1], [1, 1]])
sampling_rate = 44100
signal = pyfar.signals.impulse(
n_samples, delay=delay, amplitude=amplitude,
sampling_rate=sampling_rate)
group_delay = delay[..., np.newaxis] * np.ones_like(
signal.freq, dtype=float)
return signal, group_delay
@pytest.fixture
def sine_plus_impulse():
"""Added sine and delta impulse signals.
Returns
-------
signal : Signal
Combined signal
"""
frequency = 441
delay = 100
n_samples = 10000
sampling_rate = 44100
amplitude = 1
sine_signal = pyfar.signals.sine(
frequency, n_samples, amplitude=amplitude,
sampling_rate=sampling_rate)
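    # pyfar's impulse is an energy signal (fft_norm 'none'); matching the
    # sine's norm here presumably lets the two signals be added directly.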
sine_signal.fft_norm = 'none'
impulse_signal = pyfar.signals.impulse(
n_samples, delay=delay, amplitude=amplitude,
sampling_rate=sampling_rate)
signal = sine_signal + impulse_signal
return signal
@pytest.fixture
def noise():
"""Gaussian white noise signal.
Returns
-------
signal : Signal
Noise signal
"""
n_samples = 10000
rms = 1
sampling_rate = 44100
seed = 1234
signal = pyfar.signals.noise(
n_samples, spectrum="white", rms=rms, sampling_rate=sampling_rate,
seed=seed)
return signal
@pytest.fixture
def noise_two_by_three_channel():
""" 2-by-3 channel gaussian white noise signal.
Returns
-------
signal : Signal
Noise signal
"""
n_samples = 10000
rms = np.ones((2, 3))
sampling_rate = 44100
seed = 1234
signal = pyfar.signals.noise(
n_samples, spectrum="white", rms=rms, sampling_rate=sampling_rate,
seed=seed)
return signal
@pytest.fixture
def time_data():
"""
TimeData object with three data points.
Returns
-------
time_data TimeData
Data
"""
time_data = TimeData([1, 0, -1], [0, .1, .4])
return time_data
@pytest.fixture
def frequency_data():
"""
FrequencyData object with three data points.
Returns
-------
frequency_data FrequencyData
Data
"""
frequency_data = FrequencyData([2, .25, .5], [100, 1000, 20000])
return frequency_data
@pytest.fixture
def frequency_data_one_point():
"""
FrequencyData object with one data point.
Returns
-------
frequency_data FrequencyData
Data
"""
frequency_data = FrequencyData([2], [0])
return frequency_data
@pytest.fixture
def fft_lib_np(monkeypatch):
"""Set numpy.fft as fft library.
"""
import pyfar.dsp.fft
monkeypatch.setattr(pyfar.dsp.fft, 'fft_lib', np.fft)
return np.fft.__name__
@pytest.fixture
def fft_lib_pyfftw(monkeypatch):
"""Set pyfftw as fft library.
"""
import pyfar.dsp.fft
from pyfftw.interfaces import numpy_fft as npi_fft
monkeypatch.setattr(pyfar.dsp.fft, 'fft_lib', npi_fft)
return npi_fft.__name__
@pytest.fixture
def generate_wav_file(tmpdir, noise):
"""Create wav file in temporary folder.
"""
filename = os.path.join(tmpdir, 'test_wav.wav')
wavfile.write(filename, noise.sampling_rate, noise.time.T)
return filename
@pytest.fixture
def sofa_reference_coordinates(noise_two_by_three_channel):
"""Define coordinates to write in reference files.
"""
n_measurements = noise_two_by_three_channel.cshape[0]
n_receivers = noise_two_by_three_channel.cshape[1]
source_coordinates = np.random.rand(n_measurements, 3)
receiver_coordinates = np.random.rand(n_receivers, n_measurements, 3)
return source_coordinates, receiver_coordinates
@pytest.fixture
def generate_sofa_GeneralFIR(
tmpdir, noise_two_by_three_channel, sofa_reference_coordinates):
""" Generate the reference sofa files of type GeneralFIR.
"""
sofatype = 'GeneralFIR'
n_measurements = noise_two_by_three_channel.cshape[0]
n_receivers = noise_two_by_three_channel.cshape[1]
n_samples = noise_two_by_three_channel.n_samples
dimensions = {"M": n_measurements, "R": n_receivers, "N": n_samples}
filename = os.path.join(tmpdir, (sofatype + '.sofa'))
sofafile = sofa.Database.create(filename, sofatype, dimensions=dimensions)
sofafile.Listener.initialize(fixed=["Position", "View", "Up"])
sofafile.Source.initialize(variances=["Position"], fixed=["View", "Up"])
sofafile.Source.Position.set_values(sofa_reference_coordinates[0])
sofafile.Receiver.initialize(variances=["Position"], fixed=["View", "Up"])
r_coords = np.transpose(sofa_reference_coordinates[1], (0, 2, 1))
sofafile.Receiver.Position.set_values(r_coords)
sofafile.Emitter.initialize(fixed=["Position", "View", "Up"], count=1)
sofafile.Data.Type = 'FIR'
sofafile.Data.initialize()
sofafile.Data.IR = noise_two_by_three_channel.time
sofafile.Data.SamplingRate = noise_two_by_three_channel.sampling_rate
sofafile.close()
return filename
@pytest.fixture
def generate_sofa_GeneralTF(
tmpdir, noise_two_by_three_channel, sofa_reference_coordinates):
""" Generate the reference sofa files of type GeneralTF.
"""
sofatype = 'GeneralTF'
n_measurements = noise_two_by_three_channel.cshape[0]
n_receivers = noise_two_by_three_channel.cshape[1]
n_bins = noise_two_by_three_channel.n_bins
dimensions = {"M": n_measurements, "R": n_receivers, "N": n_bins}
filename = os.path.join(tmpdir, (sofatype + '.sofa'))
sofafile = sofa.Database.create(filename, sofatype, dimensions=dimensions)
sofafile.Listener.initialize(fixed=["Position", "View", "Up"])
sofafile.Source.initialize(variances=["Position"], fixed=["View", "Up"])
sofafile.Source.Position.set_values(sofa_reference_coordinates[0])
sofafile.Receiver.initialize(variances=["Position"], fixed=["View", "Up"])
r_coords = np.transpose(sofa_reference_coordinates[1], (0, 2, 1))
sofafile.Receiver.Position.set_values(r_coords)
sofafile.Emitter.initialize(fixed=["Position", "View", "Up"], count=1)
sofafile.Data.Type = 'TF'
sofafile.Data.initialize()
sofafile.Data.Real.set_values(np.real(noise_two_by_three_channel.freq))
sofafile.Data.Imag.set_values(np.imag(noise_two_by_three_channel.freq))
sofafile.close()
return filename
@pytest.fixture
def generate_sofa_postype_spherical(
tmpdir, noise_two_by_three_channel, sofa_reference_coordinates):
""" Generate the reference sofa files of type GeneralFIR,
spherical position type.
"""
sofatype = 'GeneralFIR'
n_measurements = noise_two_by_three_channel.cshape[0]
n_receivers = noise_two_by_three_channel.cshape[1]
n_samples = noise_two_by_three_channel.n_samples
dimensions = {"M": n_measurements, "R": n_receivers, "N": n_samples}
filename = os.path.join(tmpdir, (sofatype + '.sofa'))
sofafile = sofa.Database.create(filename, sofatype, dimensions=dimensions)
sofafile.Listener.initialize(fixed=["Position", "View", "Up"])
sofafile.Source.initialize(
variances=["Position"], fixed=["View", "Up"])
sofafile.Source.Position.set_system('spherical')
sofafile.Source.Position.set_values(sofa_reference_coordinates[0])
sofafile.Receiver.initialize(
variances=["Position"], fixed=["View", "Up"])
sofafile.Receiver.Position.set_system('spherical')
r_coords = np.transpose(sofa_reference_coordinates[1], (0, 2, 1))
sofafile.Receiver.Position.set_values(r_coords)
sofafile.Emitter.initialize(fixed=["Position", "View", "Up"], count=1)
sofafile.Data.Type = 'FIR'
sofafile.Data.initialize()
sofafile.Data.IR = noise_two_by_three_channel.time
sofafile.Data.SamplingRate = noise_two_by_three_channel.sampling_rate
sofafile.close()
return filename
@pytest.fixture
def generate_sofa_unit_error(
tmpdir, noise_two_by_three_channel, sofa_reference_coordinates):
""" Generate the reference sofa files of type GeneralFIR
with incorrect sampling rate unit.
"""
sofatype = 'GeneralFIR'
n_measurements = noise_two_by_three_channel.cshape[0]
n_receivers = noise_two_by_three_channel.cshape[1]
n_samples = noise_two_by_three_channel.n_samples
dimensions = {"M": n_measurements, "R": n_receivers, "N": n_samples}
filename = os.path.join(tmpdir, (sofatype + '.sofa'))
sofafile = sofa.Database.create(filename, sofatype, dimensions=dimensions)
sofafile.Listener.initialize(fixed=["Position", "View", "Up"])
sofafile.Source.initialize(variances=["Position"], fixed=["View", "Up"])
sofafile.Source.Position.set_values(sofa_reference_coordinates[0])
sofafile.Receiver.initialize(variances=["Position"], fixed=["View", "Up"])
r_coords = np.transpose(sofa_reference_coordinates[1], (0, 2, 1))
sofafile.Receiver.Position.set_values(r_coords)
sofafile.Emitter.initialize(fixed=["Position", "View", "Up"], count=1)
sofafile.Data.Type = 'FIR'
sofafile.Data.initialize()
sofafile.Data.IR = noise_two_by_three_channel.time
sofafile.Data.SamplingRate = noise_two_by_three_channel.sampling_rate
sofafile.Data.SamplingRate.Units = 'not_hertz'
sofafile.close()
return filename
@pytest.fixture
def generate_sofa_postype_error(
tmpdir, noise_two_by_three_channel, sofa_reference_coordinates):
""" Generate the reference sofa files of type GeneralFIR
with incorrect position type.
"""
sofatype = 'GeneralFIR'
n_measurements = noise_two_by_three_channel.cshape[0]
n_receivers = noise_two_by_three_channel.cshape[1]
n_samples = noise_two_by_three_channel.n_samples
dimensions = {"M": n_measurements, "R": n_receivers, "N": n_samples}
filename = os.path.join(tmpdir, (sofatype + '.sofa'))
sofafile = sofa.Database.create(filename, sofatype, dimensions=dimensions)
sofafile.Listener.initialize(fixed=["Position", "View", "Up"])
sofafile.Source.initialize(variances=["Position"], fixed=["View", "Up"])
sofafile.Source.Position.set_values(sofa_reference_coordinates[0])
sofafile.Receiver.initialize(variances=["Position"], fixed=["View", "Up"])
r_coords = np.transpose(sofa_reference_coordinates[1], (0, 2, 1))
sofafile.Receiver.Position.set_values(r_coords)
sofafile.Emitter.initialize(fixed=["Position", "View", "Up"], count=1)
sofafile.Data.Type = 'FIR'
sofafile.Data.initialize()
sofafile.Data.IR = noise_two_by_three_channel.time
sofafile.Data.SamplingRate = noise_two_by_three_channel.sampling_rate
sofafile.Source.Position.Type = 'wrong_type'
sofafile.close()
return filename
@pytest.fixture
def views():
""" Used for the creation of Orientation objects with
`Orientations.from_view_up`
"""
return [[1, 0, 0], [2, 0, 0], [-1, 0, 0]]
@pytest.fixture
def ups():
""" Used for the creation of Orientation objects with
`Orientations.from_view_up`
"""
return [[0, 1, 0], [0, -2, 0], [0, 1, 0]]
@pytest.fixture
def positions():
""" Used for the visualization of Orientation objects with
`Orientations.show`
"""
return [[0, 0.5, 0], [0, -0.5, 0], [1, 1, 1]]
@pytest.fixture
def orientations(views, ups):
""" Orientations object uses fixtures `views` and `ups`.
"""
return Orientations.from_view_up(views, ups)
@pytest.fixture
def coordinates():
""" Coordinates object.
"""
return Coordinates([0, 1], [2, 3], [4, 5])
@pytest.fixture
def coeffs():
return np.array([[[1, 0, 0], [1, 0, 0]]])
@pytest.fixture
def state():
return np.array([[[1, 0]]])
@pytest.fixture
def filter(coeffs, state):
""" Filter object.
"""
return fo.Filter(coefficients=coeffs, state=state)
@pytest.fixture
def filterFIR():
""" FilterFIR objectr.
"""
coeff = np.array([
[1, 1 / 2, 0],
[1, 1 / 4, 1 / 8]])
return fo.FilterFIR(coeff, sampling_rate=2*np.pi)
@pytest.fixture
def filterIIR():
""" FilterIIR object.
"""
coeff = np.array([[1, 1 / 2, 0], [1, 0, 0]])
return fo.FilterIIR(coeff, sampling_rate=2 * np.pi)
@pytest.fixture
def filterSOS():
""" FilterSOS objectr.
"""
sos = np.array([[1, 1 / 2, 0, 1, 0, 0]])
return fo.FilterSOS(sos, sampling_rate=2 * np.pi)
@pytest.fixture
def sphericalvoronoi():
""" SphericalVoronoi object.
"""
points = np.array(
[[0, 0, 1], [0, 0, -1], [1, 0, 0], [0, 1, 0], [0, -1, 0], [-1, 0, 0]])
sampling = Coordinates(points[:, 0], points[:, 1], points[:, 2])
return SphericalVoronoi(sampling)
@pytest.fixture
def any_obj():
""" Any object acting as placeholder for non-PyFar-objects.
"""
return stub_utils.AnyClass()
@pytest.fixture
def no_encode_obj():
""" Any object acting as placeholder for non-PyFar-objects.
"""
return stub_utils.NoEncodeClass()
@pytest.fixture
def no_decode_obj():
""" Any object acting as placeholder for non-PyFar-objects.
"""
return stub_utils.NoDecodeClass()
@pytest.fixture
def flat_data():
""" Class being primarily used as a subclass of the nested data object.
"""
return stub_utils.FlatData()
@pytest.fixture
def nested_data():
""" General nested data structure primarily used to illustrate mechanism of
`io.write` and `io.read`.
"""
return stub_utils.NestedData.create()