hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f726d15285b7c2f6ec452d3585c75aaddfd2bc1d | 594 | py | Python | examples/vagrant_todo/provision/recipes/project.py | avladev/pypro | 7eb98c5ebd9830104689d105c36424b24c72b475 | [
"MIT"
] | null | null | null | examples/vagrant_todo/provision/recipes/project.py | avladev/pypro | 7eb98c5ebd9830104689d105c36424b24c72b475 | [
"MIT"
] | null | null | null | examples/vagrant_todo/provision/recipes/project.py | avladev/pypro | 7eb98c5ebd9830104689d105c36424b24c72b475 | [
"MIT"
] | 1 | 2019-07-15T21:35:03.000Z | 2019-07-15T21:35:03.000Z | import pypro.core
import os
class CreateConfig(pypro.core.Recipe):
def __init__(self, source, destination):
self.source = source
self.destination = destination
def run(self, runner, arguments=None):
# Read the template file
content = ''
with open(self.source, 'r') as f:
content = f.read(os.path.getsize(self.source))
# Replace notations with actual values
content = pypro.core.Variables.replace(content)
# Write the config file
with open(self.destination, 'w') as f:
f.write(content) | 27 | 58 | 0.622896 | import pypro.core
import os
class CreateConfig(pypro.core.Recipe):
def __init__(self, source, destination):
self.source = source
self.destination = destination
def run(self, runner, arguments=None):
content = ''
with open(self.source, 'r') as f:
content = f.read(os.path.getsize(self.source))
content = pypro.core.Variables.replace(content)
with open(self.destination, 'w') as f:
f.write(content) | true | true |
f726d1ac18979248f061387ecccea5858da651fb | 974 | py | Python | for python/data/ggiramahor/pframe.py | aerolalit/Auto-Testing-Python-Programs | dd49ab266c9f0fd8e34278f68f8af017711942e3 | [
"MIT"
] | 4 | 2019-10-03T21:16:51.000Z | 2019-10-04T01:28:08.000Z | for python/data/ggiramahor/pframe.py | aerolalit/Auto-Testing | dd49ab266c9f0fd8e34278f68f8af017711942e3 | [
"MIT"
] | null | null | null | for python/data/ggiramahor/pframe.py | aerolalit/Auto-Testing | dd49ab266c9f0fd8e34278f68f8af017711942e3 | [
"MIT"
] | null | null | null | #350111
#a3-p10.py
#Gloria Giramahoro
#g.giramahoro@jacobs-university.de
#1.defining a function that prints a rectangle made of a character
def print_frame(n,m,c):
count = 1
if (n >= m):
product1 = n*c
print (product1)
for count in range(1,m-1):
words1 = str(' ')
words = (n-4)*words1
print (c,words,c)
count = count+1
print (product1)
else :
product2 = m*c
print (product2)
for count in range(1,n-1):
words2 = str(' ')
words = (m-4)*words2
print (c,words,c)
count = count+1
print (product2)
#2.inputing 2 integers n and m and a character c
print("enter an integer value of n")
integer1 = input()
n = 4
print("enter an integer value of m")
integer2 = input()
m = 7
print("enter a character value of c")
character = input()
c = '$'
print_frame(n,m,c)
| 22.651163 | 67 | 0.532854 |
def print_frame(n,m,c):
count = 1
if (n >= m):
product1 = n*c
print (product1)
for count in range(1,m-1):
words1 = str(' ')
words = (n-4)*words1
print (c,words,c)
count = count+1
print (product1)
else :
product2 = m*c
print (product2)
for count in range(1,n-1):
words2 = str(' ')
words = (m-4)*words2
print (c,words,c)
count = count+1
print (product2)
print("enter an integer value of n")
integer1 = input()
n = 4
print("enter an integer value of m")
integer2 = input()
m = 7
print("enter a character value of c")
character = input()
c = '$'
print_frame(n,m,c)
| true | true |
f726d2195174ef150cf9c6dca642b46141ce4e9e | 13,720 | py | Python | demystifying/feature_extraction/mlp_feature_extractor.py | delemottelab/demystifying | e8527b52d5fbe0570cd391921ecda5aefceb797a | [
"MIT"
] | 16 | 2020-01-04T14:46:03.000Z | 2021-07-10T05:54:05.000Z | demystifying/feature_extraction/mlp_feature_extractor.py | delemottelab/demystifying | e8527b52d5fbe0570cd391921ecda5aefceb797a | [
"MIT"
] | 11 | 2020-01-10T16:18:17.000Z | 2022-03-20T09:53:33.000Z | demystifying/feature_extraction/mlp_feature_extractor.py | delemottelab/demystifying | e8527b52d5fbe0570cd391921ecda5aefceb797a | [
"MIT"
] | 3 | 2020-03-16T04:35:01.000Z | 2022-02-10T12:39:01.000Z | from __future__ import absolute_import, division, print_function
import logging
import sys
logging.basicConfig(
stream=sys.stdout,
format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
import numpy as np
from sklearn.neural_network import MLPClassifier, MLPRegressor
from .. import relevance_propagation as relprop
from .feature_extractor import FeatureExtractor
from ..postprocessing import PerFrameImportancePostProcessor
logger = logging.getLogger("mlp")
class MlpFeatureExtractor(FeatureExtractor):
def __init__(self,
name="MLP",
activation=relprop.relu,
randomize=True,
supervised=True,
one_vs_rest=False,
per_frame_importance_outfile=None,
per_frame_importance_samples=None,
per_frame_importance_labels=None,
classifier_kwargs={},
**kwargs):
FeatureExtractor.__init__(self,
name=name,
supervised=supervised,
**kwargs)
self.backend = "scikit-learn" # Only available option for now, more to come probably
if activation not in [relprop.relu, relprop.logistic_sigmoid]:
Exception("Relevance propagation currently only supported for relu or logistic")
self.activation = activation
self.randomize = randomize
self.classifier_kwargs = classifier_kwargs.copy()
if classifier_kwargs.get('activation', None) is not None and \
classifier_kwargs.get('activation') != self.activation:
logger.warn("Conflicting activation properiies. '%s' will be overwritten with '%s'",
classifier_kwargs.get('activation'),
self.activation)
self.classifier_kwargs['activation'] = self.activation
if not self.randomize:
self.classifier_kwargs['random_state'] = 89274
self.frame_importances = None
self.per_frame_importance_outfile = per_frame_importance_outfile
self.per_frame_importance_samples = per_frame_importance_samples
self.per_frame_importance_labels = per_frame_importance_labels
if self.use_regression:
self.one_vs_rest = False
else:
self.one_vs_rest = one_vs_rest
logger.debug("Initializing MLP with the following parameters:"
" activation function %s, randomize %s, classifier_kwargs %s,"
" per_frame_importance_outfile %s, backend %s, per_frame_importance_samples %s, one_vs_rest %s",
activation, randomize, classifier_kwargs, per_frame_importance_outfile, self.backend,
None if per_frame_importance_samples is None else per_frame_importance_samples.shape,
self.one_vs_rest)
def _train_one_vs_rest(self, data, labels):
n_clusters = labels.shape[1]
n_points = data.shape[0]
classifiers = []
for i_cluster in range(n_clusters):
classifiers.append(self._create_classifier())
binary_labels = np.zeros((n_points, 2))
binary_labels[labels[:, i_cluster] == 1, 0] = 1
binary_labels[labels[:, i_cluster] != 1, 1] = 1
classifiers[i_cluster].fit(data, binary_labels)
return classifiers
def train(self, train_set, train_labels):
"""
TODO code duplication below for on_vs_the_rest logic, refactor with KL and RF into common superclass
:param train_set:
:param train_labels:
:return:
"""
# Construct and train classifier
logger.debug("Training %s with %s samples and %s features ...", self.name, train_set.shape[0],
train_set.shape[1])
if self.one_vs_rest:
return self._train_one_vs_rest(train_set, train_labels)
else:
classifier = self._create_classifier()
classifier.fit(train_set, train_labels)
return classifier
def _normalize_relevance_per_frame(self, relevance_per_frame):
for i in range(relevance_per_frame.shape[0]):
# Not removing negative relevance in per frame analysis
# ind_negative = np.where(relevance_per_frame[i, :] < 0)[0]
# relevance_per_frame[i, ind_negative] = 0
relevance_per_frame[i, :] = (relevance_per_frame[i, :] - np.min(relevance_per_frame[i, :])) / \
(np.max(relevance_per_frame[i, :]) - np.min(relevance_per_frame[i, :]) + 1e-9)
return relevance_per_frame
def _perform_lrp(self, classifier, data, labels):
nclusters = labels.shape[1] if self.supervised else 1
nfeatures = data.shape[1]
relevance_per_cluster = np.zeros((nfeatures, nclusters))
per_frame_relevance = np.zeros(data.shape)
for c_idx in range(nclusters):
# Get all frames belonging to a cluster
if self.supervised:
frame_indices = labels[:, c_idx] == 1
cluster_data = data[frame_indices]
cluster_labels = np.zeros((len(cluster_data), nclusters))
cluster_labels[:, c_idx] = 1 # Only look at one class at the time
else:
# TODO refactor to break unsupervised code out of here. Unsupervised method have no concept of clusters/labels
cluster_labels = labels
frame_indices = [i for i in range(len(data))]
cluster_data = data
if len(cluster_data) == 0:
continue
# Now see what makes these frames belong to that class
# Time for LRP
layers = self._create_layers(classifier)
propagator = relprop.RelevancePropagator(layers)
cluster_frame_relevance = propagator.propagate(cluster_data, cluster_labels)
# Rescale relevance according to min and max relevance in each frame
cluster_frame_relevance = self._normalize_relevance_per_frame(cluster_frame_relevance)
relevance_per_cluster[:, c_idx] = cluster_frame_relevance.mean(axis=0)
per_frame_relevance[frame_indices] += cluster_frame_relevance
per_frame_relevance = self._normalize_relevance_per_frame(per_frame_relevance)
return per_frame_relevance, relevance_per_cluster
def get_feature_importance(self, classifier, data, labels):
logger.debug("Extracting feature importance using MLP ...")
if self.one_vs_rest:
return self._get_feature_importance_binaryclass(classifier, data, labels)
else:
return self._get_feature_importance_multiclass(classifier, data, labels)
def _get_feature_importance_binaryclass(self, classifiers, data, labels):
n_features = data.shape[1]
n_frames = data.shape[0]
n_states = labels.shape[1] if len(labels.shape) > 1 else 1
feature_importances = np.zeros((n_features, self.n_clusters))
for i_cluster in range(n_states):
# TODO a bit inefficent approach below where we consistenly compute LRP for all other clusters and don't use those results.
cluster_frames = labels[:, i_cluster] == 1
binary_labels = np.zeros((n_frames, 2))
binary_labels[cluster_frames, 0] = 1
binary_labels[~cluster_frames, 1] = 1
relevance_per_frame, relevance_per_cluster = self._perform_lrp(classifiers[i_cluster], data, binary_labels)
feature_importances[:, i_cluster] = relevance_per_cluster[:, 0]
if self.per_frame_importance_outfile is not None:
cluster_frame_importances, other_labels = self._compute_frame_relevance(classifiers[i_cluster],
relevance_per_frame,
data,
labels)
if self.frame_importances is None:
self.frame_importances = np.zeros((len(other_labels), cluster_frame_importances.shape[1]))
other_cluster_frames = other_labels[:, 0] == 1
if len(other_labels[other_cluster_frames]) == 0:
# No frames in this state, just move on
continue
nclusters_per_frame = other_labels[other_cluster_frames].sum(axis=1)[:, np.newaxis]
self.frame_importances[other_cluster_frames, :] += cluster_frame_importances[
other_cluster_frames] / nclusters_per_frame
return feature_importances
def _get_feature_importance_multiclass(self, classifier, data, labels):
relevance_per_frame, relevance_per_cluster = self._perform_lrp(classifier, data, labels)
if self.per_frame_importance_outfile is not None:
frame_importances, _ = self._compute_frame_relevance(classifier, relevance_per_frame, data, labels)
self.frame_importances = frame_importances if self.frame_importances is None else self.frame_importances + frame_importances
return relevance_per_cluster
def _compute_frame_relevance(self, classifier, relevance_per_frame, data, labels):
if self.per_frame_importance_samples is not None:
if self.indices_for_filtering is None:
other_samples = self.per_frame_importance_samples
else:
other_samples = self.per_frame_importance_samples[:, self.indices_for_filtering]
if self.per_frame_importance_labels is None:
other_labels = classifier.predict(other_samples)
else:
other_labels = self.per_frame_importance_labels
other_samples = self.scaler.transform(other_samples)
frame_relevance, _ = self._perform_lrp(classifier, other_samples, other_labels)
else:
logger.info("Using same trajectory for per frame importance as was used for training.")
if self.n_splits != 1:
logger.error(
"Cannot average frame importance to outfile if n_splits != 1. n_splits is now set to %s",
self.n_splits)
if self.shuffle_datasets:
logger.error("Data set has been shuffled, per frame importance will not be properly mapped")
frame_relevance = relevance_per_frame
other_labels = labels
# for every feature in every frame...
frame_importances = np.zeros(
(data if self.per_frame_importance_samples is None else self.per_frame_importance_samples).shape) - 1
if self.indices_for_filtering is not None:
frame_importances[:, self.indices_for_filtering] = 0
niters = self.n_iterations * self.n_splits
for frame_idx, rel in enumerate(frame_relevance):
if self.indices_for_filtering is None:
frame_importances[frame_idx] += rel / niters
else:
frame_importances[frame_idx, self.indices_for_filtering] += rel / niters
return frame_importances, other_labels
def _create_layers(self, classifier):
weights = classifier.coefs_
biases = classifier.intercepts_
layers = []
for idx, weight in enumerate(weights):
if idx == 0:
l = relprop.FirstLinear(min_val=0, max_val=1, weight=weight, bias=biases[idx])
else:
l = relprop.layer_for_string(self.activation, weight=weight, bias=biases[idx])
if l is None:
raise Exception(
"Cannot create layer at index {} for activation function {}".format(idx, self.activation))
layers.append(l)
if idx < len(weights) - 1:
# Add activation to all except output layer
activation = relprop.layer_activation_for_string(self.activation)
if activation is None:
raise Exception("Unknown activation function {}".format(self.activation))
layers.append(activation)
else:
if self.backend == 'scikit-learn':
# For scikit implementation see # https://stats.stackexchange.com/questions/243588/how-to-apply-softmax-as-activation-function-in-multi-layer-perceptron-in-scikit
# or https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/multilayer_perceptron.py
out_activation = relprop.layer_activation_for_string(classifier.out_activation_)
if out_activation is None:
raise Exception("Unknown activation function {}".format(self.activation))
layers.append(out_activation)
else:
raise Exception("Unsupported MLP backend {}".format(self.backend))
return layers
def _create_classifier(self):
return MLPRegressor(**self.classifier_kwargs) if self.use_regression \
else MLPClassifier(**self.classifier_kwargs)
def postprocessing(self, **kwargs):
return PerFrameImportancePostProcessor(extractor=self,
per_frame_importance_outfile=self.per_frame_importance_outfile,
frame_importances=self.frame_importances,
**kwargs)
| 52.769231 | 183 | 0.624344 | from __future__ import absolute_import, division, print_function
import logging
import sys
logging.basicConfig(
stream=sys.stdout,
format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
import numpy as np
from sklearn.neural_network import MLPClassifier, MLPRegressor
from .. import relevance_propagation as relprop
from .feature_extractor import FeatureExtractor
from ..postprocessing import PerFrameImportancePostProcessor
logger = logging.getLogger("mlp")
class MlpFeatureExtractor(FeatureExtractor):
def __init__(self,
name="MLP",
activation=relprop.relu,
randomize=True,
supervised=True,
one_vs_rest=False,
per_frame_importance_outfile=None,
per_frame_importance_samples=None,
per_frame_importance_labels=None,
classifier_kwargs={},
**kwargs):
FeatureExtractor.__init__(self,
name=name,
supervised=supervised,
**kwargs)
self.backend = "scikit-learn"
if activation not in [relprop.relu, relprop.logistic_sigmoid]:
Exception("Relevance propagation currently only supported for relu or logistic")
self.activation = activation
self.randomize = randomize
self.classifier_kwargs = classifier_kwargs.copy()
if classifier_kwargs.get('activation', None) is not None and \
classifier_kwargs.get('activation') != self.activation:
logger.warn("Conflicting activation properiies. '%s' will be overwritten with '%s'",
classifier_kwargs.get('activation'),
self.activation)
self.classifier_kwargs['activation'] = self.activation
if not self.randomize:
self.classifier_kwargs['random_state'] = 89274
self.frame_importances = None
self.per_frame_importance_outfile = per_frame_importance_outfile
self.per_frame_importance_samples = per_frame_importance_samples
self.per_frame_importance_labels = per_frame_importance_labels
if self.use_regression:
self.one_vs_rest = False
else:
self.one_vs_rest = one_vs_rest
logger.debug("Initializing MLP with the following parameters:"
" activation function %s, randomize %s, classifier_kwargs %s,"
" per_frame_importance_outfile %s, backend %s, per_frame_importance_samples %s, one_vs_rest %s",
activation, randomize, classifier_kwargs, per_frame_importance_outfile, self.backend,
None if per_frame_importance_samples is None else per_frame_importance_samples.shape,
self.one_vs_rest)
def _train_one_vs_rest(self, data, labels):
n_clusters = labels.shape[1]
n_points = data.shape[0]
classifiers = []
for i_cluster in range(n_clusters):
classifiers.append(self._create_classifier())
binary_labels = np.zeros((n_points, 2))
binary_labels[labels[:, i_cluster] == 1, 0] = 1
binary_labels[labels[:, i_cluster] != 1, 1] = 1
classifiers[i_cluster].fit(data, binary_labels)
return classifiers
def train(self, train_set, train_labels):
logger.debug("Training %s with %s samples and %s features ...", self.name, train_set.shape[0],
train_set.shape[1])
if self.one_vs_rest:
return self._train_one_vs_rest(train_set, train_labels)
else:
classifier = self._create_classifier()
classifier.fit(train_set, train_labels)
return classifier
def _normalize_relevance_per_frame(self, relevance_per_frame):
for i in range(relevance_per_frame.shape[0]):
relevance_per_frame[i, :] = (relevance_per_frame[i, :] - np.min(relevance_per_frame[i, :])) / \
(np.max(relevance_per_frame[i, :]) - np.min(relevance_per_frame[i, :]) + 1e-9)
return relevance_per_frame
def _perform_lrp(self, classifier, data, labels):
nclusters = labels.shape[1] if self.supervised else 1
nfeatures = data.shape[1]
relevance_per_cluster = np.zeros((nfeatures, nclusters))
per_frame_relevance = np.zeros(data.shape)
for c_idx in range(nclusters):
if self.supervised:
frame_indices = labels[:, c_idx] == 1
cluster_data = data[frame_indices]
cluster_labels = np.zeros((len(cluster_data), nclusters))
cluster_labels[:, c_idx] = 1
else:
cluster_labels = labels
frame_indices = [i for i in range(len(data))]
cluster_data = data
if len(cluster_data) == 0:
continue
layers = self._create_layers(classifier)
propagator = relprop.RelevancePropagator(layers)
cluster_frame_relevance = propagator.propagate(cluster_data, cluster_labels)
cluster_frame_relevance = self._normalize_relevance_per_frame(cluster_frame_relevance)
relevance_per_cluster[:, c_idx] = cluster_frame_relevance.mean(axis=0)
per_frame_relevance[frame_indices] += cluster_frame_relevance
per_frame_relevance = self._normalize_relevance_per_frame(per_frame_relevance)
return per_frame_relevance, relevance_per_cluster
def get_feature_importance(self, classifier, data, labels):
logger.debug("Extracting feature importance using MLP ...")
if self.one_vs_rest:
return self._get_feature_importance_binaryclass(classifier, data, labels)
else:
return self._get_feature_importance_multiclass(classifier, data, labels)
def _get_feature_importance_binaryclass(self, classifiers, data, labels):
n_features = data.shape[1]
n_frames = data.shape[0]
n_states = labels.shape[1] if len(labels.shape) > 1 else 1
feature_importances = np.zeros((n_features, self.n_clusters))
for i_cluster in range(n_states):
cluster_frames = labels[:, i_cluster] == 1
binary_labels = np.zeros((n_frames, 2))
binary_labels[cluster_frames, 0] = 1
binary_labels[~cluster_frames, 1] = 1
relevance_per_frame, relevance_per_cluster = self._perform_lrp(classifiers[i_cluster], data, binary_labels)
feature_importances[:, i_cluster] = relevance_per_cluster[:, 0]
if self.per_frame_importance_outfile is not None:
cluster_frame_importances, other_labels = self._compute_frame_relevance(classifiers[i_cluster],
relevance_per_frame,
data,
labels)
if self.frame_importances is None:
self.frame_importances = np.zeros((len(other_labels), cluster_frame_importances.shape[1]))
other_cluster_frames = other_labels[:, 0] == 1
if len(other_labels[other_cluster_frames]) == 0:
# No frames in this state, just move on
continue
nclusters_per_frame = other_labels[other_cluster_frames].sum(axis=1)[:, np.newaxis]
self.frame_importances[other_cluster_frames, :] += cluster_frame_importances[
other_cluster_frames] / nclusters_per_frame
return feature_importances
def _get_feature_importance_multiclass(self, classifier, data, labels):
relevance_per_frame, relevance_per_cluster = self._perform_lrp(classifier, data, labels)
if self.per_frame_importance_outfile is not None:
frame_importances, _ = self._compute_frame_relevance(classifier, relevance_per_frame, data, labels)
self.frame_importances = frame_importances if self.frame_importances is None else self.frame_importances + frame_importances
return relevance_per_cluster
def _compute_frame_relevance(self, classifier, relevance_per_frame, data, labels):
if self.per_frame_importance_samples is not None:
if self.indices_for_filtering is None:
other_samples = self.per_frame_importance_samples
else:
other_samples = self.per_frame_importance_samples[:, self.indices_for_filtering]
if self.per_frame_importance_labels is None:
other_labels = classifier.predict(other_samples)
else:
other_labels = self.per_frame_importance_labels
other_samples = self.scaler.transform(other_samples)
frame_relevance, _ = self._perform_lrp(classifier, other_samples, other_labels)
else:
logger.info("Using same trajectory for per frame importance as was used for training.")
if self.n_splits != 1:
logger.error(
"Cannot average frame importance to outfile if n_splits != 1. n_splits is now set to %s",
self.n_splits)
if self.shuffle_datasets:
logger.error("Data set has been shuffled, per frame importance will not be properly mapped")
frame_relevance = relevance_per_frame
other_labels = labels
# for every feature in every frame...
frame_importances = np.zeros(
(data if self.per_frame_importance_samples is None else self.per_frame_importance_samples).shape) - 1
if self.indices_for_filtering is not None:
frame_importances[:, self.indices_for_filtering] = 0
niters = self.n_iterations * self.n_splits
for frame_idx, rel in enumerate(frame_relevance):
if self.indices_for_filtering is None:
frame_importances[frame_idx] += rel / niters
else:
frame_importances[frame_idx, self.indices_for_filtering] += rel / niters
return frame_importances, other_labels
def _create_layers(self, classifier):
weights = classifier.coefs_
biases = classifier.intercepts_
layers = []
for idx, weight in enumerate(weights):
if idx == 0:
l = relprop.FirstLinear(min_val=0, max_val=1, weight=weight, bias=biases[idx])
else:
l = relprop.layer_for_string(self.activation, weight=weight, bias=biases[idx])
if l is None:
raise Exception(
"Cannot create layer at index {} for activation function {}".format(idx, self.activation))
layers.append(l)
if idx < len(weights) - 1:
# Add activation to all except output layer
activation = relprop.layer_activation_for_string(self.activation)
if activation is None:
raise Exception("Unknown activation function {}".format(self.activation))
layers.append(activation)
else:
if self.backend == 'scikit-learn':
# For scikit implementation see # https://stats.stackexchange.com/questions/243588/how-to-apply-softmax-as-activation-function-in-multi-layer-perceptron-in-scikit
# or https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/multilayer_perceptron.py
out_activation = relprop.layer_activation_for_string(classifier.out_activation_)
if out_activation is None:
raise Exception("Unknown activation function {}".format(self.activation))
layers.append(out_activation)
else:
raise Exception("Unsupported MLP backend {}".format(self.backend))
return layers
def _create_classifier(self):
return MLPRegressor(**self.classifier_kwargs) if self.use_regression \
else MLPClassifier(**self.classifier_kwargs)
def postprocessing(self, **kwargs):
return PerFrameImportancePostProcessor(extractor=self,
per_frame_importance_outfile=self.per_frame_importance_outfile,
frame_importances=self.frame_importances,
**kwargs)
| true | true |
f726d5a556fdd6e0a03f5723767785d9c1e98fa3 | 2,358 | py | Python | parallelization/collect.py | allisonChilton/Reed-Solomon | 62c367ba44940df24c7dfa23331e491f35607abc | [
"MIT"
] | null | null | null | parallelization/collect.py | allisonChilton/Reed-Solomon | 62c367ba44940df24c7dfa23331e491f35607abc | [
"MIT"
] | null | null | null | parallelization/collect.py | allisonChilton/Reed-Solomon | 62c367ba44940df24c7dfa23331e491f35607abc | [
"MIT"
] | null | null | null | import sys
import os
import subprocess
import re
import time
from dataclasses import dataclass
from typing import List
import pandas
time_reg = re.compile("Checkpoint \d: ([\d\\.]{1,})")
def run_cmd(cmd):
print(f"Running {cmd}")
proc = subprocess.run(cmd, shell=True, capture_output=True)
stdout = proc.stdout.decode()
stderr = proc.stderr.decode()
return stdout, stderr
@dataclass
class Result:
program: str
checkpoints: List[float]
threads: int
filesize: float
@property
def encoding_time(self):
return self.checkpoints[2]
@property
def decoding_time(self):
return self.checkpoints[4]
def asdict(self):
d = self.__dict__
d['encoding_time'] = self.encoding_time
d['decoding_time'] = self.decoding_time
del d['checkpoints']
return d
if __name__ == "__main__":
in_dir = "../../inputs"
inputs = sorted(os.listdir(in_dir))
program = ["mpi.sh", "baseline", "baseline-8ecc", "omp", "omp-8ecc"]
results = []
for p in program:
for i in inputs:
if "7.txt" in i and "mpi" in p:
continue
for threads in range(1,17):
if "baseline" in p and threads > 1:
break
if p == "omp":
os.environ['OMP_NUM_THREADS'] = str(threads)
infile = os.path.join(in_dir,i)
filesize = os.stat(infile).st_size / 1000000
count = f" {threads}" if "mpi" in p else ""
stdout, stderr = run_cmd(f"./{p} {infile}{count}")
checkpoint_times = [float(x) for x in time_reg.findall(stdout)]
results.append(Result(p, checkpoint_times, threads, filesize))
if "mpi" in p:
for threads in [32,48,64,96]:
infile = os.path.join(in_dir,i)
filesize = os.stat(infile).st_size / 1000000
count = f" {threads}" if "mpi" in p else ""
stdout, stderr = run_cmd(f"./{p} {infile}{count}")
checkpoint_times = [float(x) for x in time_reg.findall(stdout)]
results.append(Result(p, checkpoint_times, threads, filesize))
df = pandas.DataFrame([x.asdict() for x in results])
df.to_csv("results.csv")
print(df)
| 31.026316 | 83 | 0.563189 | import sys
import os
import subprocess
import re
import time
from dataclasses import dataclass
from typing import List
import pandas
time_reg = re.compile("Checkpoint \d: ([\d\\.]{1,})")
def run_cmd(cmd):
print(f"Running {cmd}")
proc = subprocess.run(cmd, shell=True, capture_output=True)
stdout = proc.stdout.decode()
stderr = proc.stderr.decode()
return stdout, stderr
@dataclass
class Result:
program: str
checkpoints: List[float]
threads: int
filesize: float
@property
def encoding_time(self):
return self.checkpoints[2]
@property
def decoding_time(self):
return self.checkpoints[4]
def asdict(self):
d = self.__dict__
d['encoding_time'] = self.encoding_time
d['decoding_time'] = self.decoding_time
del d['checkpoints']
return d
if __name__ == "__main__":
in_dir = "../../inputs"
inputs = sorted(os.listdir(in_dir))
program = ["mpi.sh", "baseline", "baseline-8ecc", "omp", "omp-8ecc"]
results = []
for p in program:
for i in inputs:
if "7.txt" in i and "mpi" in p:
continue
for threads in range(1,17):
if "baseline" in p and threads > 1:
break
if p == "omp":
os.environ['OMP_NUM_THREADS'] = str(threads)
infile = os.path.join(in_dir,i)
filesize = os.stat(infile).st_size / 1000000
count = f" {threads}" if "mpi" in p else ""
stdout, stderr = run_cmd(f"./{p} {infile}{count}")
checkpoint_times = [float(x) for x in time_reg.findall(stdout)]
results.append(Result(p, checkpoint_times, threads, filesize))
if "mpi" in p:
for threads in [32,48,64,96]:
infile = os.path.join(in_dir,i)
filesize = os.stat(infile).st_size / 1000000
count = f" {threads}" if "mpi" in p else ""
stdout, stderr = run_cmd(f"./{p} {infile}{count}")
checkpoint_times = [float(x) for x in time_reg.findall(stdout)]
results.append(Result(p, checkpoint_times, threads, filesize))
df = pandas.DataFrame([x.asdict() for x in results])
df.to_csv("results.csv")
print(df)
| true | true |
f726d81ab8d4dbd5bfa8f4889d90ea24f3a749f0 | 6,230 | py | Python | ckanext/reclineview/tests/test_view.py | florianm/ckan | 1cfd98d591ac70b4eb81048bcd227b6c1354b1bf | [
"Apache-2.0"
] | 2 | 2015-07-17T19:09:52.000Z | 2017-08-30T20:23:44.000Z | ckanext/reclineview/tests/test_view.py | florianm/ckan | 1cfd98d591ac70b4eb81048bcd227b6c1354b1bf | [
"Apache-2.0"
] | 12 | 2015-01-19T18:03:56.000Z | 2016-04-11T16:40:33.000Z | ckanext/reclineview/tests/test_view.py | florianm/ckan | 1cfd98d591ac70b4eb81048bcd227b6c1354b1bf | [
"Apache-2.0"
] | 3 | 2015-03-31T06:19:42.000Z | 2016-06-27T15:32:28.000Z | import paste.fixture
import pylons.config as config
import ckan.model as model
import ckan.tests.legacy as tests
import ckan.plugins as p
import ckan.lib.helpers as h
import ckanext.reclineview.plugin as plugin
import ckan.lib.create_test_data as create_test_data
import ckan.config.middleware as middleware
from ckan.tests import helpers, factories
class BaseTestReclineViewBase(tests.WsgiAppCase):
@classmethod
def setup_class(cls):
cls.config_templates = config['ckan.legacy_templates']
config['ckan.legacy_templates'] = 'false'
wsgiapp = middleware.make_app(config['global_conf'], **config)
p.load(cls.view_type)
cls.app = paste.fixture.TestApp(wsgiapp)
cls.p = cls.view_class()
create_test_data.CreateTestData.create()
cls.resource_view, cls.package, cls.resource_id = \
_create_test_view(cls.view_type)
@classmethod
def teardown_class(cls):
config['ckan.legacy_templates'] = cls.config_templates
p.unload(cls.view_type)
model.repo.rebuild_db()
def test_can_view(self):
data_dict = {'resource': {'datastore_active': True}}
assert self.p.can_view(data_dict)
data_dict = {'resource': {'datastore_active': False}}
assert not self.p.can_view(data_dict)
def test_title_description_iframe_shown(self):
url = h.url_for(controller='package', action='resource_read',
id=self.package.name, resource_id=self.resource_id)
result = self.app.get(url)
assert self.resource_view['title'] in result
assert self.resource_view['description'] in result
assert 'data-module="data-viewer"' in result.body
class TestReclineView(BaseTestReclineViewBase):
    """Tests for the generic Recline view plugin."""
    view_type = 'recline_view'
    view_class = plugin.ReclineView
    def test_it_has_no_schema(self):
        """The base recline view declares no configuration schema."""
        info_schema = self.p.info().get('schema')
        assert info_schema is None, info_schema
    def test_can_view_format_no_datastore(self):
        """Tabular formats are viewable with datastore_active False
        (the DataProxy is used instead of the datastore)."""
        for fmt in ('CSV', 'XLS', 'TSV', 'csv', 'xls', 'tsv'):
            resource = {'datastore_active': False, 'format': fmt}
            assert self.p.can_view({'resource': resource})
    def test_can_view_bad_format_no_datastore(self):
        """Non-tabular formats are rejected with datastore_active False."""
        for fmt in ('TXT', 'txt', 'doc', 'JSON'):
            resource = {'datastore_active': False, 'format': fmt}
            assert not self.p.can_view({'resource': resource})
class TestReclineViewDatastoreOnly(helpers.FunctionalTestBase):
    """Recline view for resources that exist only in the datastore
    (no uploaded file), with recline_view as the default view."""
    @classmethod
    def setup_class(cls):
        # Load both plugins idempotently; they are unloaded in
        # teardown_class only if still loaded.
        if not p.plugin_loaded('recline_view'):
            p.load('recline_view')
        if not p.plugin_loaded('datastore'):
            p.load('datastore')
        # Work on a copy so the global config is not polluted.
        app_config = config.copy()
        app_config['ckan.legacy_templates'] = 'false'
        app_config['ckan.plugins'] = 'recline_view datastore'
        app_config['ckan.views.default_views'] = 'recline_view'
        wsgiapp = middleware.make_app(config['global_conf'], **app_config)
        cls.app = paste.fixture.TestApp(wsgiapp)
    @classmethod
    def teardown_class(cls):
        if p.plugin_loaded('recline_view'):
            p.unload('recline_view')
        if p.plugin_loaded('datastore'):
            p.unload('datastore')
    def test_create_datastore_only_view(self):
        """Pushing records via datastore_create yields a resource page
        that embeds the recline data viewer."""
        dataset = factories.Dataset()
        data = {
            'resource': {'package_id': dataset['id']},
            'fields': [{'id': 'a'}, {'id': 'b'}],
            'records': [{'a': 1, 'b': 'xyz'}, {'a': 2, 'b': 'zzz'}]
        }
        result = helpers.call_action('datastore_create', **data)
        resource_id = result['resource_id']
        url = h.url_for(controller='package', action='resource_read',
                        id=dataset['id'], resource_id=resource_id)
        result = self.app.get(url)
        assert 'data-module="data-viewer"' in result.body
class TestReclineGridView(BaseTestReclineViewBase):
    """Tests for the Recline grid view plugin."""
    view_type = 'recline_grid_view'
    view_class = plugin.ReclineGridView
    def test_it_has_no_schema(self):
        """The grid view declares no configuration schema."""
        info_schema = self.p.info().get('schema')
        assert info_schema is None, info_schema
class TestReclineGraphView(BaseTestReclineViewBase):
    """Tests for the Recline graph view plugin."""
    view_type = 'recline_graph_view'
    view_class = plugin.ReclineGraphView
    def test_it_has_the_correct_schema_keys(self):
        """The graph view schema exposes exactly the expected options."""
        _assert_schema_exists_and_has_keys(
            self.p.info().get('schema'),
            ['offset', 'limit', 'graph_type', 'group', 'series'])
class TestReclineMapView(BaseTestReclineViewBase):
    """Tests for the Recline map view plugin."""
    view_type = 'recline_map_view'
    view_class = plugin.ReclineMapView
    def test_it_has_the_correct_schema_keys(self):
        """The map view schema exposes exactly the expected options."""
        wanted = ['offset', 'limit', 'map_field_type', 'latitude_field',
                  'longitude_field', 'geojson_field', 'auto_zoom',
                  'cluster_markers']
        _assert_schema_exists_and_has_keys(self.p.info().get('schema'), wanted)
def _create_test_view(view_type):
    """Create a resource view of *view_type* on a fixture resource.

    Acts as the 'testsysadmin' user on the second resource of the
    'annakarenina' test package.

    Returns a (resource_view_dict, package, resource_id) tuple.
    """
    context = {'model': model,
               'session': model.Session,
               'user': model.User.get('testsysadmin').name}
    package = model.Package.get('annakarenina')
    resource_id = package.resources[1].id
    resource_view = {'resource_id': resource_id,
                     'view_type': view_type,
                     'title': u'Test View',
                     'description': u'A nice test view'}
    resource_view = p.toolkit.get_action('resource_view_create')(
        context, resource_view)
    return resource_view, package, resource_id
def _assert_schema_exists_and_has_keys(schema, expected_keys):
assert schema is not None, schema
keys = schema.keys()
keys.sort()
expected_keys.sort()
assert keys == expected_keys, '%s != %s' % (keys, expected_keys)
| 34.804469 | 78 | 0.643499 | import paste.fixture
import pylons.config as config
import ckan.model as model
import ckan.tests.legacy as tests
import ckan.plugins as p
import ckan.lib.helpers as h
import ckanext.reclineview.plugin as plugin
import ckan.lib.create_test_data as create_test_data
import ckan.config.middleware as middleware
from ckan.tests import helpers, factories
class BaseTestReclineViewBase(tests.WsgiAppCase):
@classmethod
def setup_class(cls):
cls.config_templates = config['ckan.legacy_templates']
config['ckan.legacy_templates'] = 'false'
wsgiapp = middleware.make_app(config['global_conf'], **config)
p.load(cls.view_type)
cls.app = paste.fixture.TestApp(wsgiapp)
cls.p = cls.view_class()
create_test_data.CreateTestData.create()
cls.resource_view, cls.package, cls.resource_id = \
_create_test_view(cls.view_type)
@classmethod
def teardown_class(cls):
config['ckan.legacy_templates'] = cls.config_templates
p.unload(cls.view_type)
model.repo.rebuild_db()
def test_can_view(self):
data_dict = {'resource': {'datastore_active': True}}
assert self.p.can_view(data_dict)
data_dict = {'resource': {'datastore_active': False}}
assert not self.p.can_view(data_dict)
def test_title_description_iframe_shown(self):
url = h.url_for(controller='package', action='resource_read',
id=self.package.name, resource_id=self.resource_id)
result = self.app.get(url)
assert self.resource_view['title'] in result
assert self.resource_view['description'] in result
assert 'data-module="data-viewer"' in result.body
class TestReclineView(BaseTestReclineViewBase):
view_type = 'recline_view'
view_class = plugin.ReclineView
def test_it_has_no_schema(self):
schema = self.p.info().get('schema')
assert schema is None, schema
def test_can_view_format_no_datastore(self):
formats = ['CSV', 'XLS', 'TSV', 'csv', 'xls', 'tsv']
for resource_format in formats:
data_dict = {'resource': {'datastore_active': False,
'format': resource_format}}
assert self.p.can_view(data_dict)
def test_can_view_bad_format_no_datastore(self):
formats = ['TXT', 'txt', 'doc', 'JSON']
for resource_format in formats:
data_dict = {'resource': {'datastore_active': False,
'format': resource_format}}
assert not self.p.can_view(data_dict)
class TestReclineViewDatastoreOnly(helpers.FunctionalTestBase):
@classmethod
def setup_class(cls):
if not p.plugin_loaded('recline_view'):
p.load('recline_view')
if not p.plugin_loaded('datastore'):
p.load('datastore')
app_config = config.copy()
app_config['ckan.legacy_templates'] = 'false'
app_config['ckan.plugins'] = 'recline_view datastore'
app_config['ckan.views.default_views'] = 'recline_view'
wsgiapp = middleware.make_app(config['global_conf'], **app_config)
cls.app = paste.fixture.TestApp(wsgiapp)
@classmethod
def teardown_class(cls):
if p.plugin_loaded('recline_view'):
p.unload('recline_view')
if p.plugin_loaded('datastore'):
p.unload('datastore')
def test_create_datastore_only_view(self):
dataset = factories.Dataset()
data = {
'resource': {'package_id': dataset['id']},
'fields': [{'id': 'a'}, {'id': 'b'}],
'records': [{'a': 1, 'b': 'xyz'}, {'a': 2, 'b': 'zzz'}]
}
result = helpers.call_action('datastore_create', **data)
resource_id = result['resource_id']
url = h.url_for(controller='package', action='resource_read',
id=dataset['id'], resource_id=resource_id)
result = self.app.get(url)
assert 'data-module="data-viewer"' in result.body
class TestReclineGridView(BaseTestReclineViewBase):
view_type = 'recline_grid_view'
view_class = plugin.ReclineGridView
def test_it_has_no_schema(self):
schema = self.p.info().get('schema')
assert schema is None, schema
class TestReclineGraphView(BaseTestReclineViewBase):
view_type = 'recline_graph_view'
view_class = plugin.ReclineGraphView
def test_it_has_the_correct_schema_keys(self):
schema = self.p.info().get('schema')
expected_keys = ['offset', 'limit', 'graph_type', 'group', 'series']
_assert_schema_exists_and_has_keys(schema, expected_keys)
class TestReclineMapView(BaseTestReclineViewBase):
view_type = 'recline_map_view'
view_class = plugin.ReclineMapView
def test_it_has_the_correct_schema_keys(self):
schema = self.p.info().get('schema')
expected_keys = ['offset', 'limit', 'map_field_type',
'latitude_field', 'longitude_field', 'geojson_field',
'auto_zoom', 'cluster_markers']
_assert_schema_exists_and_has_keys(schema, expected_keys)
def _create_test_view(view_type):
context = {'model': model,
'session': model.Session,
'user': model.User.get('testsysadmin').name}
package = model.Package.get('annakarenina')
resource_id = package.resources[1].id
resource_view = {'resource_id': resource_id,
'view_type': view_type,
'title': u'Test View',
'description': u'A nice test view'}
resource_view = p.toolkit.get_action('resource_view_create')(
context, resource_view)
return resource_view, package, resource_id
def _assert_schema_exists_and_has_keys(schema, expected_keys):
assert schema is not None, schema
keys = schema.keys()
keys.sort()
expected_keys.sort()
assert keys == expected_keys, '%s != %s' % (keys, expected_keys)
| true | true |
f726d92eda80cb6386391bf319320971dd446ebc | 3,824 | py | Python | Algorithmic Methods of Data Mining/Final_project/graph_partitioning1.py | JayWu7/Machine-Learning-Courses-Study-Record | 7586c3429514bc21c7cfe42f85ca8c0fcf8f072b | [
"Apache-2.0"
] | 1 | 2019-12-04T12:03:11.000Z | 2019-12-04T12:03:11.000Z | Algorithmic Methods of Data Mining/Final_project/graph_partitioning1.py | JayWu7/Machine-Learning-Courses-Study-Record | 7586c3429514bc21c7cfe42f85ca8c0fcf8f072b | [
"Apache-2.0"
] | null | null | null | Algorithmic Methods of Data Mining/Final_project/graph_partitioning1.py | JayWu7/Machine-Learning-Courses-Study-Record | 7586c3429514bc21c7cfe42f85ca8c0fcf8f072b | [
"Apache-2.0"
] | 1 | 2019-11-18T11:20:58.000Z | 2019-11-18T11:20:58.000Z | import numpy as np
from sklearn.cluster import KMeans
import time
from scipy.sparse.linalg import eigs
from scipy.sparse import csr_matrix
class Graph:
    """Spectral graph partitioning pipeline.

    Loads an edge list from ./data/<filename>, builds the (Frobenius-
    normalized) Laplacian, embeds vertices via the k eigenvectors nearest
    eigenvalue 0, clusters the embedding with k-means and writes the
    labels to ./result/<name>_res.txt.

    Input format (assumed from the parsing below): the first line is a
    header whose 3rd and last space-separated fields are the vertex count
    n and cluster count k; every following line is one "src dst" edge.
    """
    def __init__(self, data_name):
        self.filename = data_name
        self.n = None  # number of vertices (read from the file header)
        self.k = None  # number of clusters (read from the file header)
        self.edges = self.form_graph()  # lazy generator over the file
        self.adj = None  # adjacency list (list of neighbor sets)
        self.lap = None  # normalized Laplacian
        self.U = None  # spectral embedding matrix
        self.labels = None  # cluster label per vertex
    def form_graph(self):
        '''
        Stream the graph file as a generator: the first item yielded is
        the (n, k) header pair, then one (source, target) int pair per
        edge line.
        '''
        with open('./data/{}'.format(self.filename), 'r') as f:
            first_line = f.readline()[:-1]  # drop the trailing '\n'
            meta = first_line.split(' ')
            # Header fields 2 and -1 hold vertex count and cluster count.
            yield int(meta[2]), int(meta[-1])
            for i, edge in enumerate(f.readlines()):
                s, t = edge[:-1].split(' ')
                yield int(s), int(t)
    def generate_adj(self):
        '''
        Consume the edge generator and build an undirected adjacency
        list: adj[v] is the set of neighbors of vertex v.  Also reads
        the (n, k) header pair off the generator first.
        :return: adjacency list (list of sets)
        '''
        a = time.time()
        self.n, self.k = next(self.edges)
        adj = [set() for _ in range(self.n)]
        for s, t in self.edges:
            adj[s].add(t)
            adj[t].add(s)
        b = time.time()
        print('Generate adjacency matrix cost: {}s'.format(b-a))
        return adj
    def generate_lap(self):
        '''
        Build the Laplacian L = D - A row by row from self.adj into a
        dense (n, n) array, then scale it by its Frobenius norm.
        Result is stored in self.lap.
        '''
        a = time.time()
        self.lap = np.ndarray((self.n, self.n))
        for i, row in enumerate(self.adj):
            row_dia = np.zeros(self.n)
            row_dia[i] = len(row)  # vertex degree on the diagonal
            row_adj = [1 if j in row else 0 for j in range(self.n)]
            self.lap[i] = row_dia - row_adj
        x = np.linalg.norm(self.lap)
        self.lap = self.lap / x
        b = time.time()
        print('Genearte Laplacian matrix cost: {}s'.format(b-a))
    def get_U(self):
        '''
        Compute the k eigenvectors of the (sparse) Laplacian nearest
        eigenvalue 0 (shift-invert, sigma=0) and return their real part
        scaled by its Frobenius norm — the rows are the k-means inputs.
        :return: (n, k) embedding matrix U
        '''
        s = time.time()
        self.lap = csr_matrix(self.lap)
        _, first_k = eigs(self.lap, self.k, sigma=0)
        U = first_k.real
        # normalize U by its Frobenius norm
        x = np.linalg.norm(U)
        U = U / x
        t = time.time()
        print('Generate U cost: {}s'.format(t - s))
        return U
    def k_means(self):
        '''
        Cluster the rows of the spectral embedding self.U into self.k
        groups with scikit-learn's KMeans.
        :return: array of cluster labels, one per vertex
        '''
        s = time.time()
        kmeans = KMeans(n_clusters=self.k, algorithm='auto')
        kmeans.fit(self.U)
        t = time.time()
        print('Run k-means algorithm cost: {}s'.format(t - s))
        return kmeans.labels_
    def write_clusters(self):
        '''
        Write one "<vertex_id> <cluster_label>" line per vertex to
        ./result/<filename-without-extension>_res.txt.
        '''
        with open('./result/{}_res.txt'.format(self.filename[:-4]), 'w') as f:
            for i, l in enumerate(self.labels):
                f.write('{} {}\n'.format(i, l))
    def main(self):
        '''
        Run the full pipeline:
        adjacency -> Laplacian -> embedding -> k-means -> output file.
        '''
        self.adj = self.generate_adj()
        self.generate_lap()
        self.U = self.get_U()
        self.labels = self.k_means()
        self.write_clusters()
if __name__ == '__main__':
    # Run spectral clustering end-to-end on the Epinions social graph.
    Graph('soc-Epinions1.txt').main()
| 30.110236 | 94 | 0.537918 | import numpy as np
from sklearn.cluster import KMeans
import time
from scipy.sparse.linalg import eigs
from scipy.sparse import csr_matrix
class Graph:
def __init__(self, data_name):
self.filename = data_name
self.n = None
self.k = None
self.edges = self.form_graph()
= None
self.lap = None
self.U = None
self.labels = None
def form_graph(self):
with open('./data/{}'.format(self.filename), 'r') as f:
first_line = f.readline()[:-1]
meta = first_line.split(' ')
yield int(meta[2]), int(meta[-1])
for i, edge in enumerate(f.readlines()):
s, t = edge[:-1].split(' ')
yield int(s), int(t)
def generate_adj(self):
a = time.time()
self.n, self.k = next(self.edges)
adj = [set() for _ in range(self.n)]
for s, t in self.edges:
adj[s].add(t)
adj[t].add(s)
b = time.time()
print('Generate adjacency matrix cost: {}s'.format(b-a))
return adj
def generate_lap(self):
a = time.time()
self.lap = np.ndarray((self.n, self.n))
for i, row in enumerate(self.adj):
row_dia = np.zeros(self.n)
row_dia[i] = len(row)
row_adj = [1 if j in row else 0 for j in range(self.n)]
self.lap[i] = row_dia - row_adj
x = np.linalg.norm(self.lap)
self.lap = self.lap / x
b = time.time()
print('Genearte Laplacian matrix cost: {}s'.format(b-a))
def get_U(self):
s = time.time()
self.lap = csr_matrix(self.lap)
_, first_k = eigs(self.lap, self.k, sigma=0)
U = first_k.real
x = np.linalg.norm(U)
U = U / x
t = time.time()
print('Generate U cost: {}s'.format(t - s))
return U
def k_means(self):
s = time.time()
kmeans = KMeans(n_clusters=self.k, algorithm='auto')
kmeans.fit(self.U)
t = time.time()
print('Run k-means algorithm cost: {}s'.format(t - s))
return kmeans.labels_
def write_clusters(self):
with open('./result/{}_res.txt'.format(self.filename[:-4]), 'w') as f:
for i, l in enumerate(self.labels):
f.write('{} {}\n'.format(i, l))
def main(self):
self.adj = self.generate_adj()
self.generate_lap()
self.U = self.get_U()
self.labels = self.k_means()
self.write_clusters()
if __name__ == '__main__':
graph = Graph('soc-Epinions1.txt')
graph.main()
| true | true |
f726d9f05387af7ecf63d8618efca4e9f2591141 | 1,539 | py | Python | python/test/crawl_stocks/crawlstocks/spiders/GuchengBlockCodes.py | qrsforever/workspace | 53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f | [
"MIT"
] | 2 | 2017-06-07T03:20:42.000Z | 2020-01-07T09:14:26.000Z | python/test/crawl_stocks/crawlstocks/spiders/GuchengBlockCodes.py | qrsforever/workspace | 53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f | [
"MIT"
] | null | null | null | python/test/crawl_stocks/crawlstocks/spiders/GuchengBlockCodes.py | qrsforever/workspace | 53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import scrapy
from crawlstocks.items import GuchengStockCodeItem
class GuchengblockcodesSpider(scrapy.Spider):
    """Crawl stock names and codes for one gucheng.com concept block.

    ``blockname`` selects which block to crawl; pagination is followed
    via the "next page" link until exhausted.
    """
    name = 'GuchengBlockCodes'
    allowed_domains = ['hq.gucheng.com']
    custom_settings = {
        'ITEM_PIPELINES' : {'crawlstocks.pipelines.file.GuchengCrawlListPipeline':200}
    }
    # Supported block names mapped to gucheng block ids.
    BLOCK_IDS = {
        'xiongan': '003813',    # Xiong'an New Area
        'jingjinyi': '003684',  # Beijing-Tianjin-Hebei integration
    }
    def __init__(self, blockname='xiongan', **kwargs):
        # Forward extra spider arguments to scrapy.Spider (the old
        # version skipped super().__init__ entirely).
        super(GuchengblockcodesSpider, self).__init__(**kwargs)
        # Unknown block names fall back to Xiong'an, matching the old
        # if/elif chain's else branch.
        blid = self.BLOCK_IDS.get(blockname, '003813')
        self.start_urls = ['https://hq.gucheng.com/blockInfo/' + blid + '/']
    def parse(self, response):
        """Yield one GuchengStockCodeItem per listed stock, then follow
        the pagination link."""
        # e.g. <td class="stock_phone stock_textLeft"><a href="/SZ300353/">...</a></td>
        for css in response.css('tbody tr td.stock_phone.stock_textLeft a'):
            # Build a fresh item per row: the previous code reused one
            # mutable item and yielded it repeatedly, so every queued
            # reference ended up reflecting the last row scraped.
            item = GuchengStockCodeItem()
            item['name'] = re.sub(r'\s+', '', css.xpath('./text()').get())
            item['code'] = css.xpath('./@href').get()[1:-1]  # strip surrounding '/'
            yield item
        next_page = response.xpath('//div[contains(@class, \
            "stock_page")]/span/a[contains(.//text(), "下一页")]/@href').get()
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse)
| 38.475 | 100 | 0.578298 |
import re
import scrapy
from crawlstocks.items import GuchengStockCodeItem
class GuchengblockcodesSpider(scrapy.Spider):
name = 'GuchengBlockCodes'
allowed_domains = ['hq.gucheng.com']
custom_settings = {
'ITEM_PIPELINES' : {'crawlstocks.pipelines.file.GuchengCrawlListPipeline':200}
}
def __init__(self, blockname='xiongan'):
if blockname == 'xiongan':
blid = '003813'
elif blockname == 'jingjinyi':
blid = '003684'
else:
blid = '003813'
self.start_urls = ['https://hq.gucheng.com/blockInfo/' + blid + '/']
def parse(self, response):
item = GuchengStockCodeItem()
for css in response.css('tbody tr td.stock_phone.stock_textLeft a'):
item['name'] = re.sub(r'\s+', '', css.xpath('./text()').get())
item['code'] = css.xpath('./@href').get()[1:-1]
yield item
next_page = response.xpath('//div[contains(@class, \
"stock_page")]/span/a[contains(.//text(), "下一页")]/@href').get()
if next_page is not None:
yield response.follow(next_page, callback=self.parse)
| true | true |
f726da1790877622a36dac64245198de83414f60 | 2,545 | py | Python | bigml/tests/create_projection_steps.py | devs-cloud/python_ml | 05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c | [
"Apache-2.0"
] | null | null | null | bigml/tests/create_projection_steps.py | devs-cloud/python_ml | 05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c | [
"Apache-2.0"
] | null | null | null | bigml/tests/create_projection_steps.py | devs-cloud/python_ml | 05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2018-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
from nose.tools import assert_almost_equals, eq_, assert_is_not_none
from datetime import datetime, timedelta
from world import world
from bigml.api import HTTP_CREATED
from bigml.api import FINISHED, FAULTY
from bigml.api import get_status
from read_projection_steps import i_get_the_projection
def i_create_a_projection(step, data=None):
    """Create a projection for the current PCA and record it on `world`.

    *data* is a JSON string of input fields (defaults to "{}").  Asserts
    the API answered HTTP 201 and appends the new projection's resource
    id to world.projections for later cleanup.
    """
    if data is None:
        data = "{}"
    pca = world.pca['resource']
    data = json.loads(data)
    resource = world.api.create_projection(pca, data)
    world.status = resource['code']
    eq_(world.status, HTTP_CREATED)
    world.location = resource['location']
    world.projection = resource['object']
    world.projections.append(resource['resource'])
def the_projection_is(step, projection):
    """Check the stored projection result matches the expected JSON mapping,
    both in key count and per-key values."""
    expected = json.loads(projection if projection is not None else "{}")
    result = world.projection['projection']['result']
    eq_(len(expected.keys()), len(result.keys()))
    for name in expected:
        eq_(result[name], expected[name],
            "remote: %s, %s - expected: %s" % (name, result[name],
                                               expected[name]))
def wait_until_projection_status_code_is(step, code1, code2, secs):
    """Poll the projection until its status is *code1* or *code2*.

    Re-reads the remote projection every 3 seconds.  Fails if more than
    ``int(secs) * world.delta`` seconds elapse, and finally asserts the
    terminal status equals *code1*.
    """
    start = datetime.utcnow()
    delta = int(secs) * world.delta
    i_get_the_projection(step, world.projection['resource'])
    status = get_status(world.projection)
    while (status['code'] != int(code1) and
           status['code'] != int(code2)):
        time.sleep(3)
        # The old code called `assert_less`, which was never imported in
        # this module, so hitting this branch raised NameError instead of
        # a timeout failure.  Use total_seconds() so waits longer than a
        # day are not silently truncated by timedelta.seconds.
        elapsed = (datetime.utcnow() - start).total_seconds()
        assert elapsed < delta, \
            "Timed out after %ss waiting for projection status" % delta
        i_get_the_projection(step, world.projection['resource'])
        status = get_status(world.projection)
    eq_(status['code'], int(code1))
def the_projection_is_finished_in_less_than(step, secs):
    """Wait for the projection to reach FINISHED (or FAULTY) within *secs*
    scaled time units."""
    wait_until_projection_status_code_is(step, FINISHED, FAULTY, secs)
| 35.84507 | 77 | 0.703733 |
import json
import time
from nose.tools import assert_almost_equals, eq_, assert_is_not_none
from datetime import datetime, timedelta
from world import world
from bigml.api import HTTP_CREATED
from bigml.api import FINISHED, FAULTY
from bigml.api import get_status
from read_projection_steps import i_get_the_projection
def i_create_a_projection(step, data=None):
if data is None:
data = "{}"
pca = world.pca['resource']
data = json.loads(data)
resource = world.api.create_projection(pca, data)
world.status = resource['code']
eq_(world.status, HTTP_CREATED)
world.location = resource['location']
world.projection = resource['object']
world.projections.append(resource['resource'])
def the_projection_is(step, projection):
if projection is None:
projection = "{}"
projection = json.loads(projection)
eq_(len(projection.keys()),
len(world.projection['projection']['result'].keys()))
for name, value in projection.items():
eq_(world.projection['projection']['result'][name], projection[name],
"remote: %s, %s - expected: %s" % ( \
name, world.projection['projection']['result'][name],
projection[name]))
def wait_until_projection_status_code_is(step, code1, code2, secs):
start = datetime.utcnow()
delta = int(secs) * world.delta
i_get_the_projection(step, world.projection['resource'])
status = get_status(world.projection)
while (status['code'] != int(code1) and
status['code'] != int(code2)):
time.sleep(3)
assert_less((datetime.utcnow() - start).seconds, delta)
i_get_the_projection(step, world.projection['resource'])
status = get_status(world.projection)
eq_(status['code'], int(code1))
def the_projection_is_finished_in_less_than(step, secs):
wait_until_projection_status_code_is(step, FINISHED, FAULTY, secs)
| true | true |
f726da9544773e11f11ee7b9f04bc69fd7f46c4b | 8,615 | py | Python | EOD_api/test_EOD_api.py | webclinic017/time-series-pipeline | 5ac418b91e395a48cba397f95d25d221adfff9bd | [
"MIT"
] | 3 | 2021-08-28T10:55:12.000Z | 2021-12-01T20:42:38.000Z | EOD_api/test_EOD_api.py | webclinic017/time-series-pipeline | 5ac418b91e395a48cba397f95d25d221adfff9bd | [
"MIT"
] | null | null | null | EOD_api/test_EOD_api.py | webclinic017/time-series-pipeline | 5ac418b91e395a48cba397f95d25d221adfff9bd | [
"MIT"
] | 1 | 2021-09-26T16:07:24.000Z | 2021-09-26T16:07:24.000Z | import os
import re
import datetime
import unittest
from io import StringIO
from unittest.mock import patch
import pandas as pd
import EOD_api as eod
TOKEN = os.environ["EOD_TOKEN"]
def date_parser(string):
    """Insert a 'T' between the date and time parts of ISO-like stamps.

    Rewrites every ``YYYY-MM-DD<space>`` occurrence as ``YYYY-MM-DDT`` so
    the CSV fixture text parses as ISO-8601 datetimes.
    """
    return re.sub(r"([0-9]{4}-[0-9]{2}-[0-9]{2}) ", r"\1T", string)
class TestGetEod(unittest.TestCase):
    """Integration tests for the EOD_api wrappers.

    NOTE(review): these hit the live eodhistoricaldata.com API using the
    EOD_TOKEN environment variable, except where pd.read_csv is mocked.
    """
    def test_idempotent__addtickers(self):
        """Adding the same ticker twice equals adding it once."""
        d1 = eod.OhlcvIntraday(
            ["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="5m"
        ).add_tickers(["MSFT.US"])
        d2 = (
            eod.OhlcvIntraday(
                ["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="5m"
            )
            .add_tickers(["MSFT.US"])
            .add_tickers(["MSFT.US"])
        )
        self.assertEqual(d1, d2)
    def test_idempotent_truncate_dates(self):
        """Truncating to the same date range twice equals truncating once."""
        d1 = eod.Fundamental(
            ["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17"
        ).truncate_dates("2020-10-14", "2020-10-16")
        d2 = (
            eod.Fundamental(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17")
            .truncate_dates("2020-10-14", "2020-10-16")
            .truncate_dates("2020-10-14", "2020-10-16")
        )
        self.assertEqual(d1, d2)
    def test_idempotent_remove_tickers(self):
        """Removing an already-removed ticker is a no-op."""
        d1 = eod.Fundamental(
            ["AAPL.US", "MSFT.US"], TOKEN, "2020-10-13", "2020-10-17"
        ).remove_tickers(["MSFT.US"])
        d2 = (
            eod.Fundamental(["AAPL.US", "MSFT.US"], TOKEN, "2020-10-13", "2020-10-17")
            .remove_tickers(["MSFT.US"])
            .remove_tickers(["MSFT.US"])
        )
        self.assertEqual(d1, d2)
    def test_add_remove(self):
        """Adding then removing a ticker restores the original object."""
        d1 = eod.OhlcvIntraday(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", "1m")
        d2 = (
            eod.OhlcvIntraday(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", "1m")
            .add_tickers(["MSFT.US"])
            .remove_tickers(["MSFT.US"])
        )
        self.assertEqual(d1, d2)
    def test_remove_all_tickers(self):
        """Retrieving data after removing every ticker must raise."""
        with self.assertRaises(Exception):
            eod.Ohlcv(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17").remove_tickers(
                ["AAPL.US"]
            ).retrieve_data()
    def test_misspelled_input(self):
        """An unknown intraday frequency must raise at construction time."""
        with self.assertRaises(Exception):
            eod.OhlcvIntraday(
                ["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="Daoly"
            )
    def test_ohlcv_data_format_hasnt_changed(
        self,
    ):  # TODO(review): change to compare pre- vs post-formatting data
        """The raw EOD endpoint still serves the columns we rely on,
        with values close to the recorded fixture."""
        expected_aapl = pd.read_csv(
            StringIO(
                """
                         Date    Open     High     Low   Close  Adjusted_close       Volume
                   2020-10-13  125.27  125.390  119.65  121.10        120.7110  262330500.0
                   2020-10-14  121.00  123.030  119.62  121.19        120.8008  151062297.0
                   2020-10-15  118.72  121.200  118.15  120.71        120.3223  112559203.0
                   2020-10-16  121.28  121.548  118.81  119.02        118.6377  115393797.0
                          275     NaN      NaN     NaN     NaN             NaN          NaN
                """
            ),
            sep="\\s+",
        )
        url = "https://eodhistoricaldata.com/api/eod/AAPL.US?api_token={}&from=2020-10-13&to=2020-10-17&period=d".format(
            TOKEN
        )
        # `actual` is downloaded for real; `expected` goes through the
        # mocked pd.read_csv and therefore returns the fixture above.
        actual = pd.read_csv(
            url,
            usecols=[
                "Date",
                "Volume",
                "Open",
                "Close",
                "High",
                "Low",
                "Adjusted_close",
            ],
        )
        with patch.object(pd, "read_csv") as mock_read:
            mock_read.autospec = True
            mock_read.return_value = expected_aapl
            expected = pd.read_csv(
                url,
                usecols=[
                    "Date",
                    "Volume",
                    "Open",
                    "Close",
                    "High",
                    "Low",
                    "Adjusted_close",
                ],
            )
        pd.testing.assert_frame_equal(actual, expected, rtol=5e-3)
    def test_index_formatting(self):
        """Ohlcv.retrieve_data reshapes the raw CSV into a (Stock, Date)
        multi-indexed frame with tz-aware dates."""
        expected_aapl = pd.read_csv(
            StringIO(
                """
                         Date    Open     High     Low   Close  Adjusted_close       Volume
                   2020-10-13  125.27  125.390  119.65  121.10        120.7110  262330500.0
                   2020-10-14  121.00  123.030  119.62  121.19        120.8008  151062297.0
                   2020-10-15  118.72  121.200  118.15  120.71        120.3223  112559203.0
                   2020-10-16  121.28  121.548  118.81  119.02        118.6377  115393797.0
                          275     NaN      NaN     NaN     NaN             NaN          NaN
                """
            ),
            sep="\\s+",
        )
        expected_aapl_formatted = pd.read_csv(
            StringIO(
                date_parser(
                    """
                    Stock    Date                       Open    High     Low     Close   Adjusted_close  Volume
                    AAPL.US  2020-10-13 00:00:00+00:00  125.27  125.390  119.65  121.10  120.7110        262330500.0
                    AAPL.US  2020-10-14 00:00:00+00:00  121.00  123.030  119.62  121.19  120.8008        151062297.0
                    AAPL.US  2020-10-15 00:00:00+00:00  118.72  121.200  118.15  120.71  120.3223        112559203.0
                    AAPL.US  2020-10-16 00:00:00+00:00  121.28  121.548  118.81  119.02  118.6377        115393797.0
                    """
                )
            ),
            sep="\\s+",
            index_col=[0, 1],
            converters={"Date": lambda col: datetime.datetime.fromisoformat(col)},
        )
        with patch.object(pd, "read_csv") as mock_read:
            mock_read.autospec = True
            mock_read.return_value = expected_aapl
            formatted_mock = eod.Ohlcv(
                ["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17"
            ).retrieve_data()
            pd.testing.assert_frame_equal(
                formatted_mock, expected_aapl_formatted, rtol=5e-3
            )
# TODO? Write more tests:
# Check that the data is concated/merged/joined properly, particularly when the indexes come with Nans
# Check except clauses
# Check duplicate df values
# Assert errors with wrong args
# etc
# expected_ohlcv_concatted = pd.read_csv( StringIO( date_parser( """
# Stock Date Gmtoffset Datetime Open High Low Close Volume Returns
# BP.LSE 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# BP.LSE 2020-10-14 00:00:00+00:00 0.0 2020-10-13 15:25:00 213.649993 214.000000 213.550003 213.856994 1210380.0 -0.001601
# BP.LSE 2020-10-15 00:00:00+00:00 0.0 2020-10-14 15:25:00 213.000000 213.149993 212.600006 212.649993 1182246.0 0.019660
# BP.LSE 2020-10-16 00:00:00+00:00 0.0 2020-10-15 15:25:00 207.149993 207.199996 206.500000 206.850006 1626720.0 -0.013826
# AAPL.US 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# AAPL.US 2020-10-14 00:00:00+00:00 0.0 2020-10-13 19:55:00 121.139999 121.279998 121.029998 121.050003 4585723.0 0.003648
# AAPL.US 2020-10-15 00:00:00+00:00 0.0 2020-10-14 19:55:00 121.580001 121.709999 121.139999 121.180000 3420583.0 0.015419
# AAPL.US 2020-10-16 00:00:00+00:00 0.0 2020-10-15 19:55:00 120.790000 120.849998 120.580001 120.699996 3436603.0 -0.003550
# MSFT.US 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# MSFT.US 2020-10-14 00:00:00+00:00 0.0 2020-10-13 19:55:00 223.320007 223.389999 222.750000 222.830001 1457493.0 0.000651
# MSFT.US 2020-10-15 00:00:00+00:00 0.0 2020-10-14 19:55:00 221.199996 221.414993 220.600006 220.759994 1122912.0 0.012377
# MSFT.US 2020-10-16 00:00:00+00:00 0.0 2020-10-15 19:55:00 219.639999 219.880004 219.490005 219.660003 1201342.0 -0.003900
# """ ) ), sep="\\s+", index_col=[0,1,2], converters = {'Date' : lambda col: datetime.datetime.fromisoformat( col ) \
# , 'Datetime' : lambda col: pd.to_datetime(col, format='%Y-%m-%dT%H:%M:%S', utc=True) } )
# Allow running the suite directly with `python <this file>`.
if __name__ == "__main__":
    unittest.main()
| 43.075 | 165 | 0.51863 | import os
import re
import datetime
import unittest
from io import StringIO
from unittest.mock import patch
import pandas as pd
import EOD_api as eod
TOKEN = os.environ["EOD_TOKEN"]
def date_parser(string):
date_pattern = re.compile("([0-9]{4}-[0-9]{2}-[0-9]{2})[ ]", re.VERBOSE)
return date_pattern.sub(r"\1T", string)
class TestGetEod(unittest.TestCase):
def test_idempotent__addtickers(self):
d1 = eod.OhlcvIntraday(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="5m"
).add_tickers(["MSFT.US"])
d2 = (
eod.OhlcvIntraday(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="5m"
)
.add_tickers(["MSFT.US"])
.add_tickers(["MSFT.US"])
)
self.assertEqual(d1, d2)
def test_idempotent_truncate_dates(self):
d1 = eod.Fundamental(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17"
).truncate_dates("2020-10-14", "2020-10-16")
d2 = (
eod.Fundamental(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17")
.truncate_dates("2020-10-14", "2020-10-16")
.truncate_dates("2020-10-14", "2020-10-16")
)
self.assertEqual(d1, d2)
def test_idempotent_remove_tickers(self):
d1 = eod.Fundamental(
["AAPL.US", "MSFT.US"], TOKEN, "2020-10-13", "2020-10-17"
).remove_tickers(["MSFT.US"])
d2 = (
eod.Fundamental(["AAPL.US", "MSFT.US"], TOKEN, "2020-10-13", "2020-10-17")
.remove_tickers(["MSFT.US"])
.remove_tickers(["MSFT.US"])
)
self.assertEqual(d1, d2)
def test_add_remove(self):
d1 = eod.OhlcvIntraday(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", "1m")
d2 = (
eod.OhlcvIntraday(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", "1m")
.add_tickers(["MSFT.US"])
.remove_tickers(["MSFT.US"])
)
self.assertEqual(d1, d2)
def test_remove_all_tickers(self):
with self.assertRaises(Exception):
eod.Ohlcv(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17").remove_tickers(
["AAPL.US"]
).retrieve_data()
def test_misspelled_input(self):
with self.assertRaises(Exception):
eod.OhlcvIntraday(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="Daoly"
)
def test_ohlcv_data_format_hasnt_changed(
self,
):
expected_aapl = pd.read_csv(
StringIO(
"""
Date Open High Low Close Adjusted_close Volume
2020-10-13 125.27 125.390 119.65 121.10 120.7110 262330500.0
2020-10-14 121.00 123.030 119.62 121.19 120.8008 151062297.0
2020-10-15 118.72 121.200 118.15 120.71 120.3223 112559203.0
2020-10-16 121.28 121.548 118.81 119.02 118.6377 115393797.0
275 NaN NaN NaN NaN NaN NaN
"""
),
sep="\\s+",
)
url = "https://eodhistoricaldata.com/api/eod/AAPL.US?api_token={}&from=2020-10-13&to=2020-10-17&period=d".format(
TOKEN
)
actual = pd.read_csv(
url,
usecols=[
"Date",
"Volume",
"Open",
"Close",
"High",
"Low",
"Adjusted_close",
],
)
with patch.object(pd, "read_csv") as mock_read:
mock_read.autospec = True
mock_read.return_value = expected_aapl
expected = pd.read_csv(
url,
usecols=[
"Date",
"Volume",
"Open",
"Close",
"High",
"Low",
"Adjusted_close",
],
)
pd.testing.assert_frame_equal(actual, expected, rtol=5e-3)
    def test_index_formatting(self):
        """The retrieved frame must be indexed by (Stock, tz-aware Date).

        ``pd.read_csv`` is patched to return a raw snapshot (including the
        junk '275' trailer row); the formatted output of ``retrieve_data`` is
        then compared against the expected MultiIndex frame.
        """
        # Raw snapshot as the API returns it.
        expected_aapl = pd.read_csv(
            StringIO(
                """
            Date Open High Low Close Adjusted_close Volume
            2020-10-13 125.27 125.390 119.65 121.10 120.7110 262330500.0
            2020-10-14 121.00 123.030 119.62 121.19 120.8008 151062297.0
            2020-10-15 118.72 121.200 118.15 120.71 120.3223 112559203.0
            2020-10-16 121.28 121.548 118.81 119.02 118.6377 115393797.0
            275 NaN NaN NaN NaN NaN NaN
            """
            ),
            sep="\\s+",
        )
        # Same data after formatting: trailer row dropped, a (Stock, Date)
        # MultiIndex, and UTC-aware dates.
        expected_aapl_formatted = pd.read_csv(
            StringIO(
                date_parser(
                    """
            Stock Date Open High Low Close Adjusted_close Volume
            AAPL.US 2020-10-13 00:00:00+00:00 125.27 125.390 119.65 121.10 120.7110 262330500.0
            AAPL.US 2020-10-14 00:00:00+00:00 121.00 123.030 119.62 121.19 120.8008 151062297.0
            AAPL.US 2020-10-15 00:00:00+00:00 118.72 121.200 118.15 120.71 120.3223 112559203.0
            AAPL.US 2020-10-16 00:00:00+00:00 121.28 121.548 118.81 119.02 118.6377 115393797.0
            """
                )
            ),
            sep="\\s+",
            index_col=[0, 1],
            converters={"Date": lambda col: datetime.datetime.fromisoformat(col)},
        )
        with patch.object(pd, "read_csv") as mock_read:
            mock_read.autospec = True
            mock_read.return_value = expected_aapl
            formatted_mock = eod.Ohlcv(
                ["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17"
            ).retrieve_data()
        pd.testing.assert_frame_equal(
            formatted_mock, expected_aapl_formatted, rtol=5e-3
        )
# Stock Date Gmtoffset Datetime Open High Low Close Volume Returns
# BP.LSE 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# BP.LSE 2020-10-14 00:00:00+00:00 0.0 2020-10-13 15:25:00 213.649993 214.000000 213.550003 213.856994 1210380.0 -0.001601
# BP.LSE 2020-10-15 00:00:00+00:00 0.0 2020-10-14 15:25:00 213.000000 213.149993 212.600006 212.649993 1182246.0 0.019660
# BP.LSE 2020-10-16 00:00:00+00:00 0.0 2020-10-15 15:25:00 207.149993 207.199996 206.500000 206.850006 1626720.0 -0.013826
# AAPL.US 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# AAPL.US 2020-10-14 00:00:00+00:00 0.0 2020-10-13 19:55:00 121.139999 121.279998 121.029998 121.050003 4585723.0 0.003648
# AAPL.US 2020-10-15 00:00:00+00:00 0.0 2020-10-14 19:55:00 121.580001 121.709999 121.139999 121.180000 3420583.0 0.015419
# AAPL.US 2020-10-16 00:00:00+00:00 0.0 2020-10-15 19:55:00 120.790000 120.849998 120.580001 120.699996 3436603.0 -0.003550
# MSFT.US 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# MSFT.US 2020-10-14 00:00:00+00:00 0.0 2020-10-13 19:55:00 223.320007 223.389999 222.750000 222.830001 1457493.0 0.000651
# MSFT.US 2020-10-15 00:00:00+00:00 0.0 2020-10-14 19:55:00 221.199996 221.414993 220.600006 220.759994 1122912.0 0.012377
# MSFT.US 2020-10-16 00:00:00+00:00 0.0 2020-10-15 19:55:00 219.639999 219.880004 219.490005 219.660003 1201342.0 -0.003900
# """ ) ), sep="\\s+", index_col=[0,1,2], converters = {'Date' : lambda col: datetime.datetime.fromisoformat( col ) \
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| true | true |
f726daae43a8790a611a80a7e3876da1fd12b7ee | 2,804 | py | Python | var/spack/repos/builtin/packages/r-pmcmrplus/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/r-pmcmrplus/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/r-pmcmrplus/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RPmcmrplus(RPackage):
    """Calculate Pairwise Multiple Comparisons of Mean Rank Sums Extended.
    For one-way layout experiments the one-way ANOVA can be performed as an
    omnibus test. All-pairs multiple comparisons tests (Tukey-Kramer test,
    Scheffe test, LSD-test) and many-to-one tests (Dunnett test) for normally
    distributed residuals and equal within variance are available. Furthermore,
    all-pairs tests (Games-Howell test, Tamhane's T2 test, Dunnett T3 test,
    Ury-Wiggins-Hochberg test) and many-to-one (Tamhane-Dunnett Test) for
    normally distributed residuals and heterogeneous variances are provided.
    Van der Waerden's normal scores test for omnibus, all-pairs and many-to-one
    tests is provided for non-normally distributed residuals and homogeneous
    variances. The Kruskal-Wallis, BWS and Anderson-Darling omnibus test and
    all-pairs tests (Nemenyi test, Dunn test, Conover test,
    Dwass-Steele-Critchlow- Fligner test) as well as many-to-one (Nemenyi test,
    Dunn test, U-test) are given for the analysis of variance by ranks.
    Non-parametric trend tests (Jonckheere test, Cuzick test, Johnson-Mehrotra
    test, Spearman test) are included. In addition, a Friedman-test for
    one-way ANOVA with repeated measures on ranks (CRBD) and Skillings-Mack
    test for unbalanced CRBD is provided with consequent all-pairs tests
    (Nemenyi test, Siegel test, Miller test, Conover test, Exact test) and
    many-to-one tests (Nemenyi test, Demsar test, Exact test). A trend can be
    tested with Pages's test. Durbin's test for a two-way balanced incomplete
    block design (BIBD) is given in this package as well as Gore's test for
    CRBD with multiple observations per cell is given. Outlier tests, Mandel's
    k- and h statistic as well as functions for Type I error and Power analysis
    as well as generic summary, print and plot methods are provided."""
    # CRAN package name for this Spack recipe.
    cran = "PMCMRplus"
    version('1.9.3', sha256='76baba60f57343fa5bb6f6d2ea27aab77178e02b0d2f9d5d74abde7d18994f03')
    # R package dependencies, needed at both build and run time.
    depends_on('r@3.5.0:', type=('build', 'run'))
    depends_on('r-mvtnorm@1.0:', type=('build', 'run'))
    depends_on('r-multcompview', type=('build', 'run'))
    depends_on('r-gmp', type=('build', 'run'))
    depends_on('r-rmpfr', type=('build', 'run'))
    depends_on('r-suppdists', type=('build', 'run'))
    depends_on('r-ksamples@1.2.7:', type=('build', 'run'))
    depends_on('r-bwstest@0.2.1:', type=('build', 'run'))
    depends_on('r-mass', type=('build', 'run'))
    # Native libraries backing the r-gmp / r-rmpfr bindings.
    depends_on('gmp@4.2.3:')
    depends_on('mpfr@3.0.0:')
| 53.923077 | 95 | 0.722183 |
from spack import *
class RPmcmrplus(RPackage):
cran = "PMCMRplus"
version('1.9.3', sha256='76baba60f57343fa5bb6f6d2ea27aab77178e02b0d2f9d5d74abde7d18994f03')
depends_on('r@3.5.0:', type=('build', 'run'))
depends_on('r-mvtnorm@1.0:', type=('build', 'run'))
depends_on('r-multcompview', type=('build', 'run'))
depends_on('r-gmp', type=('build', 'run'))
depends_on('r-rmpfr', type=('build', 'run'))
depends_on('r-suppdists', type=('build', 'run'))
depends_on('r-ksamples@1.2.7:', type=('build', 'run'))
depends_on('r-bwstest@0.2.1:', type=('build', 'run'))
depends_on('r-mass', type=('build', 'run'))
depends_on('gmp@4.2.3:')
depends_on('mpfr@3.0.0:')
| true | true |
f726dce4683e7d5956b6554b0e5f04d2913f0e26 | 4,225 | py | Python | session4/e_animations_2axis.py | Leylasaadi/MACT20.21_Digital_tools_Big_Data_part_2 | 94cafa0581ec36a305867ebfdcb91c787aa77a16 | [
"Apache-2.0"
] | null | null | null | session4/e_animations_2axis.py | Leylasaadi/MACT20.21_Digital_tools_Big_Data_part_2 | 94cafa0581ec36a305867ebfdcb91c787aa77a16 | [
"Apache-2.0"
] | null | null | null | session4/e_animations_2axis.py | Leylasaadi/MACT20.21_Digital_tools_Big_Data_part_2 | 94cafa0581ec36a305867ebfdcb91c787aa77a16 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
##################################################
# This script shows how to create animated plots using matplotlib and a basic dataset
# Multiple tutorials inspired the current design but they mostly came from:
# hhttps://towardsdatascience.com/how-to-create-animated-graphs-in-python-bb619cc2dec1
# Note: the project keeps updating every course almost yearly
##################################################
#
##################################################
# Author: Diego Pajarito
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# License: Apache License Version 2.0
# Version: 1.0.0
# Maintainer: Diego Pajarito
# Email: diego.pajarito@iaac.net
# Status: development
##################################################
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
# We need to import numpy and matplotlib library
# importing libraries
import pandas as pd
import seaborn as sns
# Read files and prepare data
data = pd.read_csv('../data/2021_seguiment-covid19-bcn.csv')
#data = pd.read_csv('https://opendata-ajuntament.barcelona.cat/data/dataset/4f3ffbda-d5be-4f2a-a836-26a77be6df1a/resource/f627ac0a-d05f-416d-9773-eeb464a3fc44/download')
# Rename the open-data columns to stable English identifiers.
data.columns = ['date_indicator', 'frequency_indicator', 'place', 'name_indicator',
                'name_variable', 'value', 'unit', 'source']
# We will use two datasets to generate plots: daily new cases and the
# accumulated case count.
data_daily = data[data['name_indicator'] == 'Casos de COVID-19 a Barcelona (diari)']
data_accumulated = data[data['name_indicator'] == 'Casos de COVID-19 a Barcelona (acumulat)']
# We need the data to be in time format to calculate values in days after day zero
data_daily.loc[:, 'date_indicator'] = pd.to_datetime(data_daily['date_indicator'])
initial_day = data_daily['date_indicator'].min()
data_daily.loc[:, 'day_after_zero'] = data_daily['date_indicator'] - initial_day
# Convert the timedelta into a float number of days.
data_daily.loc[:, 'day_after_zero'] = data_daily['day_after_zero']/np.timedelta64(1, 'D')
# Same conversion for the accumulated series, relative to the same day zero.
data_accumulated.loc[:, 'date_indicator'] = pd.to_datetime(data_accumulated['date_indicator'])
data_accumulated.loc[:, 'day_after_zero'] = data_accumulated['date_indicator'] - initial_day
data_accumulated.loc[:, 'day_after_zero'] = data_accumulated['day_after_zero']/np.timedelta64(1, 'D')
# we also extract some values to set the plot limits
max_day = data_daily['day_after_zero'].max().astype(int)
max_cases_daily = data_daily['value'].max()
max_cases_accumulated = data_accumulated['value'].max()
title = 'Barcelona: '
# We then prepare the writer and animation file options
Writer = animation.writers['ffmpeg']
writer = Writer(fps=20, metadata=dict(artist='MaCTResearcher'), bitrate=1800)
# If error using anaconda try to install ffmpeg
# conda install -c conda-forge ffmpeg
# We create an initial plot with basic configuration a single line
fig, ax1 = plt.subplots()
fig.set_size_inches(10, 6)
plt.title(title + 'Covid-19 cases', fontsize=18)
plt.xlabel('Day after case 1', fontsize=14)
plt.ylim(0, max_cases_accumulated)
plt.ylabel('Accumulated', fontsize=18)
# # now we configure the secondary axis
ax2 = ax1.twinx()
plt.ylim(0, max_cases_daily*2)
cases_ticks = np.arange(0, max_day, 50)
# We need to set an animation function to handle individual behaviour per frame
# variable "i" is the frame id that can be used to handle queries or filters for your data
def animate(i):
    """Draw frame *i*: every observation up to day *i* after the first case."""
    daily_so_far = data_daily[data_daily['day_after_zero'] <= i]
    accumulated_so_far = data_accumulated[data_accumulated['day_after_zero'] <= i]
    # Accumulated series as a line on the left axis, daily cases as bars on
    # the right axis.
    sns.lineplot(x='day_after_zero', y='value', data=accumulated_so_far, color="r", ax=ax1)
    sns.barplot(x='day_after_zero', y='value', data=daily_so_far, color='b', ax=ax2)
    plt.ylabel('Daily', fontsize=18)
    plt.xlim(0, max_day)
    plt.xticks(cases_ticks)
    plt.xlabel('Day after case 1', fontsize=18)
ani = matplotlib.animation.FuncAnimation(fig, animate, frames=max_day, repeat=True)
ani.save('covid_cases_bcn_2axis.mp4', writer=writer)
print('end')
| 46.428571 | 169 | 0.725444 | true | true | |
f726ddf1c1dac0d3d3a8df65efc42e4d30590ce6 | 9,073 | py | Python | mars/lib/nvutils.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | 2,413 | 2018-12-06T09:37:11.000Z | 2022-03-30T15:47:39.000Z | mars/lib/nvutils.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | 1,335 | 2018-12-07T03:06:18.000Z | 2022-03-31T11:45:57.000Z | mars/lib/nvutils.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | 329 | 2018-12-07T03:12:41.000Z | 2022-03-29T21:49:57.000Z | # -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import uuid
from collections import namedtuple
from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref,\
create_string_buffer, Structure, POINTER, CDLL
logger = logging.getLogger(__name__)
# Some constants taken from cuda.h
CUDA_SUCCESS = 0
CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16
CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = 39
CU_DEVICE_ATTRIBUTE_CLOCK_RATE = 13
CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33
CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34
CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36
# Error code returned by cuInit when the machine has no CUDA-capable GPU.
CU_NO_CUDA_CAPABLE_DEVICE_DETECTED = 100
# nvml constants
NVML_SUCCESS = 0
NVML_TEMPERATURE_GPU = 0
# Error code returned by nvmlInit_v2 when the NVIDIA driver is not loaded.
NVML_DRIVER_NOT_LOADED = 9
class _CUuuid_t(Structure):
    # Mirrors CUDA's CUuuid: an opaque 16-byte device identifier.
    _fields_ = [
        ('bytes', c_char * 16)
    ]
class _nvmlUtilization_t(Structure):
    # Mirrors nvmlUtilization_t: GPU and memory utilization percentages.
    _fields_ = [
        ('gpu', c_uint),
        ('memory', c_uint),
    ]
class _struct_nvmlDevice_t(Structure):
    pass  # opaque handle
# NVML device handles are passed around as opaque pointers.
_nvmlDevice_t = POINTER(_struct_nvmlDevice_t)
class _nvmlBAR1Memory_t(Structure):
    # Mirrors nvmlBAR1Memory_t: total/free/used byte counts.
    _fields_ = [
        ('total', c_ulonglong),
        ('free', c_ulonglong),
        ('used', c_ulonglong),
    ]
# True on Windows, where NVML may live under the driver install directory.
_is_windows: bool = sys.platform.startswith('win')
def _load_nv_library(*libnames):
for lib in libnames:
try:
return CDLL(lib)
except OSError:
continue
# Library handles, loaded lazily by _init_cp / _init_nvml.
_cuda_lib = _nvml_lib = None
# Lightweight record types returned by the public getters below.
_cu_device_info = namedtuple('_cu_device_info', 'index uuid name multiprocessors cuda_cores threads')
_nvml_driver_info = namedtuple('_nvml_driver_info', 'driver_version cuda_version')
_nvml_device_status = namedtuple(
    '_nvml_device_status', 'gpu_util mem_util temperature fb_total_mem fb_used_mem fb_free_mem')
# Module-level caches; _init_pid records the PID whose initialisation
# completed, so each process (e.g. after fork) re-initialises the libraries.
_init_pid = None
_gpu_count = None
_driver_info = None
_device_infos = dict()
_no_device_warned = False
class NVError(Exception):
    """Base error for CUDA / NVML failures, carrying the native error code."""
    def __init__(self, msg, *args, errno=None):
        self._errno = errno
        message = msg or 'Unknown error'
        super().__init__(message, *args)
    def __str__(self):
        return '({}) {}'.format(self._errno, super().__str__())
    @property
    def errno(self):
        """Native error code reported by the driver library (may be None)."""
        return self._errno
    @property
    def message(self):
        """Error text without the errno prefix."""
        return super().__str__()
class NVDeviceAPIError(NVError):
    """Raised when a CUDA driver API call fails."""
    pass
class NVMLAPIError(NVError):
    """Raised when an NVML library call fails."""
    pass
def _cu_check_error(result):
    """Raise NVDeviceAPIError unless *result* is CUDA_SUCCESS."""
    if result == CUDA_SUCCESS:
        return
    error_text = c_char_p()
    # Ask the driver for a human-readable description of the code.
    _cuda_lib.cuGetErrorString(result, byref(error_text))
    raise NVDeviceAPIError(error_text.value.decode(), errno=result)
_nvmlErrorString = None
def _nvml_check_error(result):
    """Raise NVMLAPIError unless *result* is NVML_SUCCESS."""
    global _nvmlErrorString
    if _nvmlErrorString is None:
        # Resolve and cache the nvmlErrorString entry point on first use.
        _nvmlErrorString = _nvml_lib.nvmlErrorString
        _nvmlErrorString.restype = c_char_p
    if result == NVML_SUCCESS:
        return
    raise NVMLAPIError(_nvmlErrorString(result).decode(), errno=result)
# CUDA cores per multiprocessor, keyed by compute capability (major, minor).
# Capabilities not listed here fall back to 192 (see _cu_get_processor_cores).
_cu_process_var_to_cores = {
    (1, 0): 8,
    (1, 1): 8,
    (1, 2): 8,
    (1, 3): 8,
    (2, 0): 32,
    (2, 1): 48,
}
def _cu_get_processor_cores(major, minor):
    """Return the CUDA cores per multiprocessor for capability (major, minor)."""
    capability = (major, minor)
    # Capabilities missing from the table default to 192 cores.
    return _cu_process_var_to_cores.get(capability, 192)
def _init_cp():
    """Load libcuda and call cuInit once per process (idempotent per PID)."""
    global _cuda_lib, _no_device_warned
    if _init_pid == os.getpid():
        # Already fully initialised in this process.
        return
    _cuda_lib = _load_nv_library('libcuda.so', 'libcuda.dylib', 'cuda.dll', 'nvcuda.dll')
    if _cuda_lib is None:
        return
    try:
        _cu_check_error(_cuda_lib.cuInit(0))
    except NVDeviceAPIError as ex:
        if ex.errno == CU_NO_CUDA_CAPABLE_DEVICE_DETECTED:
            # No GPU present: disable the CUDA path and warn only once
            # per process.
            _cuda_lib = None
            if not _no_device_warned:
                logger.warning('No CUDA device detected')
                _no_device_warned = True
        else:
            logger.exception('Failed to initialize libcuda.')
        return
def _init_nvml():
    """Load the NVML library and call nvmlInit_v2 once per process."""
    global _nvml_lib, _no_device_warned
    if _init_pid == os.getpid():
        # Already fully initialised in this process.
        return
    nvml_paths = ['libnvidia-ml.so', 'libnvidia-ml.so.1', 'libnvidia-ml.dylib', 'nvml.dll']
    if _is_windows:
        # Default install location of NVML in NVIDIA's Windows driver package.
        nvml_paths.append(os.path.join(os.getenv("ProgramFiles", "C:/Program Files"),
                                       "NVIDIA Corporation/NVSMI/nvml.dll"))
    _nvml_lib = _load_nv_library(*nvml_paths)
    if _nvml_lib is None:
        return
    try:
        _nvml_check_error(_nvml_lib.nvmlInit_v2())
    except NVMLAPIError as ex:
        if ex.errno == NVML_DRIVER_NOT_LOADED:
            # No NVIDIA driver loaded: disable NVML and warn only once.
            _nvml_lib = None
            if not _no_device_warned:
                logger.warning('Failed to load libnvidia-ml: %s, no CUDA device will be enabled', ex.message)
                _no_device_warned = True
        else:
            logger.exception('Failed to initialize libnvidia-ml.')
        return
def _init():
    """Initialise both libcuda and NVML; record the PID once both succeed."""
    global _init_pid
    _init_cp()
    _init_nvml()
    # Only mark this process initialised when both libraries are usable.
    if _cuda_lib is not None and _nvml_lib is not None:
        _init_pid = os.getpid()
def get_device_count():
    """Return the number of visible CUDA devices, or None if NVML is absent.

    Honors CUDA_VISIBLE_DEVICES when it is set (an empty value means zero
    devices).  The result is cached for the lifetime of the process.
    """
    global _gpu_count
    if _gpu_count is not None:
        return _gpu_count
    _init_nvml()
    if _nvml_lib is None:
        return None
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        devices = os.environ['CUDA_VISIBLE_DEVICES'].strip()
        if not devices:
            _gpu_count = 0
        else:
            _gpu_count = len(devices.split(','))
    else:
        n_gpus = c_uint()
        # nvmlDeviceGetCount is an NVML call, so check it with the NVML
        # error checker (the previous code used _cu_check_error here by
        # mistake, which would decode failures with the wrong library).
        _nvml_check_error(_nvml_lib.nvmlDeviceGetCount(byref(n_gpus)))
        _gpu_count = n_gpus.value
    return _gpu_count
def get_driver_info():
    """Return a cached _nvml_driver_info, or None when NVML is unavailable."""
    global _driver_info
    _init_nvml()
    if _nvml_lib is None:
        return None
    if _driver_info is not None:
        return _driver_info
    driver_buf = create_string_buffer(100)
    cuda_ver = c_uint()
    _nvml_check_error(_nvml_lib.nvmlSystemGetDriverVersion(driver_buf, len(driver_buf)))
    _nvml_check_error(_nvml_lib.nvmlSystemGetCudaDriverVersion(byref(cuda_ver)))
    # NVML encodes the CUDA version as major * 1000 + minor.
    major_minor = divmod(cuda_ver.value, 1000)
    _driver_info = _nvml_driver_info(
        driver_version=driver_buf.value.decode(),
        cuda_version='.'.join(str(part) for part in major_minor),
    )
    return _driver_info
def get_device_info(dev_index):
    """Return a cached _cu_device_info for logical device *dev_index*.

    When CUDA_VISIBLE_DEVICES is set, *dev_index* indexes into that list and
    the reported ``index`` field is mapped back to the physical device
    number.  Returns None when the CUDA/NVML libraries are unavailable.
    """
    try:
        return _device_infos[dev_index]
    except KeyError:
        pass
    _init()
    if _init_pid is None:
        # Library initialisation failed; no device info is available.
        return None
    # ctypes out-parameters for the CUDA driver queries below.
    device = c_int()
    name_buf = create_string_buffer(100)
    uuid_t = _CUuuid_t()
    cc_major = c_int()
    cc_minor = c_int()
    cores = c_int()
    threads_per_core = c_int()
    _cu_check_error(_cuda_lib.cuDeviceGet(byref(device), c_int(dev_index)))
    _cu_check_error(_cuda_lib.cuDeviceGetName(name_buf, len(name_buf), device))
    _cu_check_error(_cuda_lib.cuDeviceGetUuid(byref(uuid_t), device))
    _cu_check_error(_cuda_lib.cuDeviceComputeCapability(
        byref(cc_major), byref(cc_minor), device))
    _cu_check_error(_cuda_lib.cuDeviceGetAttribute(
        byref(cores), CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device))
    _cu_check_error(_cuda_lib.cuDeviceGetAttribute(
        byref(threads_per_core), CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR, device))
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        # Map the logical index back to the physical device index.
        real_dev_index = [int(s) for s in os.environ['CUDA_VISIBLE_DEVICES'].split(',')][dev_index]
    else:
        real_dev_index = dev_index
    info = _device_infos[dev_index] = _cu_device_info(
        index=real_dev_index,
        uuid=uuid.UUID(bytes=uuid_t.bytes),
        name=name_buf.value.decode(),
        multiprocessors=cores.value,
        # Total core count = multiprocessors * cores-per-SM for this
        # compute capability.
        cuda_cores=cores.value * _cu_get_processor_cores(cc_major.value, cc_minor.value),
        threads=cores.value * threads_per_core.value,
    )
    return info
def get_device_status(dev_index):
    """Return a _nvml_device_status snapshot for logical device *dev_index*.

    Returns None when the CUDA/NVML libraries are unavailable.
    """
    _init()
    if _init_pid is None:
        return None
    # ctypes out-parameters for the NVML queries below.
    device = _nvmlDevice_t()
    utils = _nvmlUtilization_t()
    temperature = c_uint()
    memory_info = _nvmlBAR1Memory_t()
    # NVML resolves the device handle from its 'GPU-<uuid>' string.
    dev_uuid = get_device_info(dev_index).uuid
    uuid_str = ('GPU-' + str(dev_uuid)).encode()
    _nvml_check_error(_nvml_lib.nvmlDeviceGetHandleByUUID(uuid_str, byref(device)))
    _nvml_check_error(_nvml_lib.nvmlDeviceGetUtilizationRates(device, byref(utils)))
    _nvml_check_error(_nvml_lib.nvmlDeviceGetTemperature(
        device, NVML_TEMPERATURE_GPU, byref(temperature)))
    # NOTE(review): the fields are named fb_* but the values come from the
    # BAR1 memory query — confirm this is intended.
    _nvml_check_error(_nvml_lib.nvmlDeviceGetBAR1MemoryInfo(device, byref(memory_info)))
    return _nvml_device_status(
        gpu_util=utils.gpu,
        mem_util=utils.memory,
        temperature=temperature.value,
        fb_total_mem=memory_info.total,
        fb_free_mem=memory_info.free,
        fb_used_mem=memory_info.used,
    )
| 27.831288 | 109 | 0.689518 |
import logging
import os
import sys
import uuid
from collections import namedtuple
from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref,\
create_string_buffer, Structure, POINTER, CDLL
logger = logging.getLogger(__name__)
CUDA_SUCCESS = 0
CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16
CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = 39
CU_DEVICE_ATTRIBUTE_CLOCK_RATE = 13
CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33
CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34
CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36
CU_NO_CUDA_CAPABLE_DEVICE_DETECTED = 100
NVML_SUCCESS = 0
NVML_TEMPERATURE_GPU = 0
NVML_DRIVER_NOT_LOADED = 9
class _CUuuid_t(Structure):
_fields_ = [
('bytes', c_char * 16)
]
class _nvmlUtilization_t(Structure):
_fields_ = [
('gpu', c_uint),
('memory', c_uint),
]
class _struct_nvmlDevice_t(Structure):
pass
_nvmlDevice_t = POINTER(_struct_nvmlDevice_t)
class _nvmlBAR1Memory_t(Structure):
_fields_ = [
('total', c_ulonglong),
('free', c_ulonglong),
('used', c_ulonglong),
]
_is_windows: bool = sys.platform.startswith('win')
def _load_nv_library(*libnames):
for lib in libnames:
try:
return CDLL(lib)
except OSError:
continue
_cuda_lib = _nvml_lib = None
_cu_device_info = namedtuple('_cu_device_info', 'index uuid name multiprocessors cuda_cores threads')
_nvml_driver_info = namedtuple('_nvml_driver_info', 'driver_version cuda_version')
_nvml_device_status = namedtuple(
'_nvml_device_status', 'gpu_util mem_util temperature fb_total_mem fb_used_mem fb_free_mem')
_init_pid = None
_gpu_count = None
_driver_info = None
_device_infos = dict()
_no_device_warned = False
class NVError(Exception):
def __init__(self, msg, *args, errno=None):
self._errno = errno
super().__init__(msg or 'Unknown error', *args)
def __str__(self):
return f'({self._errno}) {super().__str__()}'
@property
def errno(self):
return self._errno
@property
def message(self):
return super().__str__()
class NVDeviceAPIError(NVError):
pass
class NVMLAPIError(NVError):
pass
def _cu_check_error(result):
if result != CUDA_SUCCESS:
_error_str = c_char_p()
_cuda_lib.cuGetErrorString(result, byref(_error_str))
raise NVDeviceAPIError(_error_str.value.decode(), errno=result)
_nvmlErrorString = None
def _nvml_check_error(result):
global _nvmlErrorString
if _nvmlErrorString is None:
_nvmlErrorString = _nvml_lib.nvmlErrorString
_nvmlErrorString.restype = c_char_p
if result != NVML_SUCCESS:
_error_str = _nvmlErrorString(result)
raise NVMLAPIError(_error_str.decode(), errno=result)
_cu_process_var_to_cores = {
(1, 0): 8,
(1, 1): 8,
(1, 2): 8,
(1, 3): 8,
(2, 0): 32,
(2, 1): 48,
}
def _cu_get_processor_cores(major, minor):
return _cu_process_var_to_cores.get((major, minor), 192)
def _init_cp():
global _cuda_lib, _no_device_warned
if _init_pid == os.getpid():
return
_cuda_lib = _load_nv_library('libcuda.so', 'libcuda.dylib', 'cuda.dll', 'nvcuda.dll')
if _cuda_lib is None:
return
try:
_cu_check_error(_cuda_lib.cuInit(0))
except NVDeviceAPIError as ex:
if ex.errno == CU_NO_CUDA_CAPABLE_DEVICE_DETECTED:
_cuda_lib = None
if not _no_device_warned:
logger.warning('No CUDA device detected')
_no_device_warned = True
else:
logger.exception('Failed to initialize libcuda.')
return
def _init_nvml():
global _nvml_lib, _no_device_warned
if _init_pid == os.getpid():
return
nvml_paths = ['libnvidia-ml.so', 'libnvidia-ml.so.1', 'libnvidia-ml.dylib', 'nvml.dll']
if _is_windows:
nvml_paths.append(os.path.join(os.getenv("ProgramFiles", "C:/Program Files"),
"NVIDIA Corporation/NVSMI/nvml.dll"))
_nvml_lib = _load_nv_library(*nvml_paths)
if _nvml_lib is None:
return
try:
_nvml_check_error(_nvml_lib.nvmlInit_v2())
except NVMLAPIError as ex:
if ex.errno == NVML_DRIVER_NOT_LOADED:
_nvml_lib = None
if not _no_device_warned:
logger.warning('Failed to load libnvidia-ml: %s, no CUDA device will be enabled', ex.message)
_no_device_warned = True
else:
logger.exception('Failed to initialize libnvidia-ml.')
return
def _init():
global _init_pid
_init_cp()
_init_nvml()
if _nvml_lib is not None and _cuda_lib is not None:
_init_pid = os.getpid()
def get_device_count():
global _gpu_count
if _gpu_count is not None:
return _gpu_count
_init_nvml()
if _nvml_lib is None:
return None
if 'CUDA_VISIBLE_DEVICES' in os.environ:
devices = os.environ['CUDA_VISIBLE_DEVICES'].strip()
if not devices:
_gpu_count = 0
else:
_gpu_count = len(devices.split(','))
else:
n_gpus = c_uint()
_cu_check_error(_nvml_lib.nvmlDeviceGetCount(byref(n_gpus)))
_gpu_count = n_gpus.value
return _gpu_count
def get_driver_info():
global _driver_info
_init_nvml()
if _nvml_lib is None:
return None
if _driver_info is not None:
return _driver_info
version_buf = create_string_buffer(100)
cuda_version = c_uint()
_nvml_check_error(_nvml_lib.nvmlSystemGetDriverVersion(version_buf, len(version_buf)))
_nvml_check_error(_nvml_lib.nvmlSystemGetCudaDriverVersion(byref(cuda_version)))
_driver_info = _nvml_driver_info(
driver_version=version_buf.value.decode(),
cuda_version='.'.join(str(v) for v in divmod(cuda_version.value, 1000))
)
return _driver_info
def get_device_info(dev_index):
try:
return _device_infos[dev_index]
except KeyError:
pass
_init()
if _init_pid is None:
return None
device = c_int()
name_buf = create_string_buffer(100)
uuid_t = _CUuuid_t()
cc_major = c_int()
cc_minor = c_int()
cores = c_int()
threads_per_core = c_int()
_cu_check_error(_cuda_lib.cuDeviceGet(byref(device), c_int(dev_index)))
_cu_check_error(_cuda_lib.cuDeviceGetName(name_buf, len(name_buf), device))
_cu_check_error(_cuda_lib.cuDeviceGetUuid(byref(uuid_t), device))
_cu_check_error(_cuda_lib.cuDeviceComputeCapability(
byref(cc_major), byref(cc_minor), device))
_cu_check_error(_cuda_lib.cuDeviceGetAttribute(
byref(cores), CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device))
_cu_check_error(_cuda_lib.cuDeviceGetAttribute(
byref(threads_per_core), CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR, device))
if 'CUDA_VISIBLE_DEVICES' in os.environ:
real_dev_index = [int(s) for s in os.environ['CUDA_VISIBLE_DEVICES'].split(',')][dev_index]
else:
real_dev_index = dev_index
info = _device_infos[dev_index] = _cu_device_info(
index=real_dev_index,
uuid=uuid.UUID(bytes=uuid_t.bytes),
name=name_buf.value.decode(),
multiprocessors=cores.value,
cuda_cores=cores.value * _cu_get_processor_cores(cc_major.value, cc_minor.value),
threads=cores.value * threads_per_core.value,
)
return info
def get_device_status(dev_index):
_init()
if _init_pid is None:
return None
device = _nvmlDevice_t()
utils = _nvmlUtilization_t()
temperature = c_uint()
memory_info = _nvmlBAR1Memory_t()
dev_uuid = get_device_info(dev_index).uuid
uuid_str = ('GPU-' + str(dev_uuid)).encode()
_nvml_check_error(_nvml_lib.nvmlDeviceGetHandleByUUID(uuid_str, byref(device)))
_nvml_check_error(_nvml_lib.nvmlDeviceGetUtilizationRates(device, byref(utils)))
_nvml_check_error(_nvml_lib.nvmlDeviceGetTemperature(
device, NVML_TEMPERATURE_GPU, byref(temperature)))
_nvml_check_error(_nvml_lib.nvmlDeviceGetBAR1MemoryInfo(device, byref(memory_info)))
return _nvml_device_status(
gpu_util=utils.gpu,
mem_util=utils.memory,
temperature=temperature.value,
fb_total_mem=memory_info.total,
fb_free_mem=memory_info.free,
fb_used_mem=memory_info.used,
)
| true | true |
f726de42bea9102ed23d3fe9ef9fa07cf1e1fe0c | 595 | py | Python | dbaas/account/admin/__init__.py | didindinn/database-as-a-service | 747de31ff8546f7874ddd654af860e130afd17a0 | [
"BSD-3-Clause"
] | null | null | null | dbaas/account/admin/__init__.py | didindinn/database-as-a-service | 747de31ff8546f7874ddd654af860e130afd17a0 | [
"BSD-3-Clause"
] | null | null | null | dbaas/account/admin/__init__.py | didindinn/database-as-a-service | 747de31ff8546f7874ddd654af860e130afd17a0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from django.contrib.auth.models import User, Group
from ..models import Team, Role, AccountUser, Organization
from .user import CustomUserAdmin
from .role import RoleAdmin
from .team import TeamAdmin
from .organization import OrganizationAdmin
# Replace Django's stock User/Group admin entries with the account app's
# own models and admin classes.
admin.site.unregister(User)
admin.site.unregister(Group)
admin.site.register(AccountUser, CustomUserAdmin)
admin.site.register(Role, RoleAdmin)
admin.site.register(Team, TeamAdmin)
admin.site.register(Organization, OrganizationAdmin)
| 31.315789 | 58 | 0.820168 |
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from django.contrib.auth.models import User, Group
from ..models import Team, Role, AccountUser, Organization
from .user import CustomUserAdmin
from .role import RoleAdmin
from .team import TeamAdmin
from .organization import OrganizationAdmin
admin.site.unregister(User)
admin.site.unregister(Group)
admin.site.register(AccountUser, CustomUserAdmin)
admin.site.register(Role, RoleAdmin)
admin.site.register(Team, TeamAdmin)
admin.site.register(Organization, OrganizationAdmin)
| true | true |
f726dea29d24103ee493a83474a24f027af1befb | 11,256 | py | Python | theano/gof/tests/test_destroyhandler.py | mdda/Theano | 6ca7b2b65000e371f009b617d41bc5a90f022d38 | [
"BSD-3-Clause"
] | null | null | null | theano/gof/tests/test_destroyhandler.py | mdda/Theano | 6ca7b2b65000e371f009b617d41bc5a90f022d38 | [
"BSD-3-Clause"
] | null | null | null | theano/gof/tests/test_destroyhandler.py | mdda/Theano | 6ca7b2b65000e371f009b617d41bc5a90f022d38 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
from six.moves import xrange
from theano.gof.type import Type
from theano.gof import graph
from theano.gof.graph import Variable, Apply
from theano.gof.op import Op
from theano.gof.opt import * # noqa
from theano.gof import destroyhandler
from theano.gof.fg import FunctionGraph, InconsistencyError
from theano.gof.toolbox import ReplaceValidate
from copy import copy
def PatternOptimizer(p1, p2, ign=True):
    """Build an OpKeyOptimizer that rewrites pattern *p1* into *p2*."""
    substitution = PatternSub(p1, p2)
    return OpKeyOptimizer(substitution, ignore_newtrees=ign)
def OpSubOptimizer(op1, op2, fail=NavigatorOptimizer.warn_ignore, ign=True):
    """Build a TopoOptimizer that replaces *op1* nodes with *op2*."""
    substitution = OpSub(op1, op2)
    return TopoOptimizer(substitution,
                         ignore_newtrees=ign, failure_callback=fail)
def as_variable(x):
    """Assert that *x* is a graph Variable and return it unchanged."""
    assert isinstance(x, Variable)
    return x
class MyType(Type):
    """Minimal graph Type for these tests; all MyType instances are equal."""
    def filter(self, data):
        # No validation or conversion is needed for these tests.
        return data
    def __eq__(self, other):
        return isinstance(other, MyType)
    def __hash__(self):
        # Defining __eq__ without __hash__ sets __hash__ to None in
        # Python 3, making MyType unhashable.  All instances compare
        # equal, so they must all share one hash value.
        return hash(MyType)
def MyVariable(name):
    """Create a fresh, unowned Variable of MyType with debug name *name*."""
    return Variable(MyType(), None, None, name=name)
def MyConstant(data):
    """Wrap *data* in a graph Constant of MyType."""
    return graph.Constant(MyType(), data=data)
class MyOp(Op):
    """Configurable toy Op used to exercise the destroy handler.

    nin/nout fix the arity; vmap/dmap become the Op's view_map/destroy_map;
    the destroyhandler_tolerate_* lists are read by the DestroyHandler
    feature under test.
    """
    def __init__(self, nin, name, vmap=None, dmap=None, nout=1,
                 destroyhandler_tolerate_same=None,
                 destroyhandler_tolerate_aliased=None):
        # None defaults avoid sharing mutable dict/list defaults between ops.
        if vmap is None:
            vmap = {}
        if dmap is None:
            dmap = {}
        if destroyhandler_tolerate_same is None:
            destroyhandler_tolerate_same = []
        if destroyhandler_tolerate_aliased is None:
            destroyhandler_tolerate_aliased = []
        self.nin = nin
        self.nout = nout
        self.name = name
        self.destroy_map = dmap
        self.view_map = vmap
        self.destroyhandler_tolerate_same = destroyhandler_tolerate_same
        self.destroyhandler_tolerate_aliased = destroyhandler_tolerate_aliased
    def make_node(self, *inputs):
        """Validate arity and input types, then build an Apply node."""
        assert len(inputs) == self.nin
        inputs = list(map(as_variable, inputs))
        for input in inputs:
            if not isinstance(input.type, MyType):
                # Was `raise Exception("Error 1")`: raise a specific,
                # informative error instead (TypeError is still an
                # Exception, so existing except clauses keep working).
                raise TypeError(
                    "%s expected inputs of MyType, got %s" %
                    (self.name, input.type))
        outputs = [MyVariable(self.name + "_R") for i in xrange(self.nout)]
        return Apply(self, inputs, outputs)
    def __str__(self):
        return self.name
# Toy op instances used throughout the tests:
# - sigmoid / add / dot are pure ops;
# - transpose_view declares output 0 as a view of input 0;
# - the add_in_place variants declare output 0 as destroying input 0,
#   with different destroy-handler tolerance settings.
sigmoid = MyOp(1, 'Sigmoid')
transpose_view = MyOp(1, 'TransposeView', vmap={0: [0]})
add = MyOp(2, 'Add')
add_in_place = MyOp(2, 'AddInPlace', dmap={0: [0]})
add_in_place_2 = MyOp(2, 'AddInPlace', dmap={0: [0]},
                      destroyhandler_tolerate_same=[(0, 1)])
add_in_place_3 = MyOp(2, 'AddInPlace', dmap={0: [0]},
                      destroyhandler_tolerate_aliased=[(0, 1)])
dot = MyOp(2, 'Dot')
def inputs():
    """Return three fresh MyType variables named x, y and z."""
    return MyVariable('x'), MyVariable('y'), MyVariable('z')
def Env(inputs, outputs, validate=True):
    """Build a FunctionGraph with destroy handling and replace validation."""
    fgraph = FunctionGraph(inputs, outputs, clone=False)
    fgraph.attach_feature(destroyhandler.DestroyHandler())
    fgraph.attach_feature(ReplaceValidate())
    if validate:
        fgraph.validate()
    return fgraph
class FailureWatch:
    """Failure callback for OpSubOptimizer / PatternOptimizer.

    Counts how many times an optimization attempt failed with an
    InconsistencyError.
    """

    def __init__(self):
        self.failures = 0

    def __call__(self, exc, nav, pairs, lopt, node):
        assert isinstance(exc, InconsistencyError)
        self.failures = self.failures + 1
def consistent(g):
    """Assert that graph `g` reports itself consistent, with a diagnostic print."""
    try:
        assert g.consistent()
    except AssertionError:
        # Print before re-raising so the failing graph check is visible in output.
        print("Test failed! The graph was marked as NOT consistent.")
        raise
def inconsistent(g):
    """Assert that graph `g` reports itself NOT consistent, with a diagnostic print."""
    try:
        assert not g.consistent()
    except AssertionError:
        # Print before re-raising so the failing graph check is visible in output.
        print("Test failed! The graph was marked as consistent.")
        raise
#################
# Test protocol #
#################
def test_misc():
    """End-to-end check: pattern rewrite, validated replace, revert to checkpoint."""
    x, y, z = inputs()
    e = transpose_view(transpose_view(transpose_view(transpose_view(x))))
    g = Env([x, y, z], [e])
    consistent(g)
    chk = g.checkpoint()
    # Collapse TransposeView(TransposeView(v)) -> v until only x remains.
    PatternOptimizer((transpose_view, (transpose_view, 'x')), 'x').optimize(g)
    assert str(g) == "[x]"
    new_e = add(x, y)
    g.replace_validate(x, new_e)
    assert str(g) == "[Add(x, y)]"
    g.replace(new_e, dot(add_in_place(x, y), transpose_view(x)))
    assert str(g) == "[Dot(AddInPlace(x, y), TransposeView(x))]"
    # AddInPlace destroys x while TransposeView still reads it -> invalid.
    inconsistent(g)
    g.revert(chk)
    consistent(g)
    assert str(g) == "[TransposeView(TransposeView(TransposeView(TransposeView(x))))]"
######################
# Test protocol skip #
######################
def test_aliased_inputs_replacement():
x, y, z = inputs()
tv = transpose_view(x)
tvv = transpose_view(tv)
sx = sigmoid(x)
e = add_in_place(x, tv)
g = Env([x, y], [e], False)
inconsistent(g)
g.replace(tv, sx)
consistent(g)
g.replace(sx, tv)
inconsistent(g)
g.replace(tv, tvv)
inconsistent(g)
g.replace(tv, sx)
consistent(g)
def test_indestructible():
    """An `indestructible` tag survives copy() and forbids in-place ops on x."""
    x, y, z = inputs()
    x.tag.indestructible = True
    x = copy(x)
    # checking if indestructible survives the copy!
    assert x.tag.indestructible
    e = add_in_place(x, y)
    g = Env([x, y, z], [e], False)
    inconsistent(g)
    # Replacing the destructive op with a non-destructive one fixes the graph.
    g.replace_validate(e, add(x, y))
    consistent(g)
def test_usage_loop_through_views_2():
x, y, z = inputs()
e0 = transpose_view(transpose_view(sigmoid(x)))
e = dot(add_in_place(x, y), transpose_view(e0))
g = Env([x, y, z], [e])
consistent(g) # because sigmoid can do the copy
g.replace(e0, x)
inconsistent(g) # we cut off the path to the sigmoid
def test_destroyers_loop():
    """AddInPlace(x, y) and AddInPlace(y, x) must not coexist in one graph.

    Either replacement alone validates; introducing the second is rejected,
    and the same holds with the roles reversed after reverting to the
    checkpoint.
    """
    x, y, z = inputs()
    e1 = add(x, y)
    e2 = add(y, x)
    g = Env([x, y, z], [e1, e2])
    chk = g.checkpoint()
    consistent(g)
    g.replace_validate(e1, add_in_place(x, y))
    consistent(g)
    try:
        g.replace_validate(e2, add_in_place(y, x))
        raise Exception("Shouldn't have reached this point.")
    except InconsistencyError:
        pass
    consistent(g)
    g.revert(chk)
    g.replace_validate(e2, add_in_place(y, x))
    consistent(g)
    try:
        g.replace_validate(e1, add_in_place(x, y))
        raise Exception("Shouldn't have reached this point.")
    except InconsistencyError:
        pass
    consistent(g)
########
# Misc #
########
def test_aliased_inputs():
    """Destroying an input that is passed twice to the same node is inconsistent."""
    x, y, z = inputs()
    e = add_in_place(x, x)
    g = Env([x], [e], False)
    inconsistent(g)
def test_aliased_inputs2():
x, y, z = inputs()
e = add_in_place(x, transpose_view(x))
g = Env([x], [e], False)
inconsistent(g)
def test_aliased_inputs_tolerate():
x, y, z = inputs()
e = add_in_place_2(x, x)
g = Env([x], [e], False)
consistent(g)
def test_aliased_inputs_tolerate2():
x, y, z = inputs()
e = add_in_place_2(x, transpose_view(x))
g = Env([x], [e], False)
inconsistent(g)
def test_same_aliased_inputs_ignored():
x, y, z = inputs()
e = add_in_place_3(x, x)
g = Env([x], [e], False)
consistent(g)
def test_different_aliased_inputs_ignored():
x, y, z = inputs()
e = add_in_place_3(x, transpose_view(x))
g = Env([x], [e], False)
consistent(g)
# warning - don't run this because it would produce the wrong answer
# add_in_place_3 is actually not correct when aliasing of inputs
# is ignored.
def test_indestructible_through_views():
x, y, z = inputs()
x.tag.indestructible = True
tv = transpose_view(x)
e = add_in_place(tv, y)
g = Env([x, y, z], [e], False)
inconsistent(g)
g.replace_validate(tv, sigmoid(x))
consistent(g)
def test_indirect():
x, y, z = inputs()
e0 = add_in_place(x, y)
e = dot(sigmoid(e0), transpose_view(x))
g = Env([x, y, z], [e], False)
inconsistent(g)
new_e0 = add(x, y)
g.replace(e0, new_e0)
consistent(g)
g.replace(new_e0, add_in_place(x, y))
inconsistent(g)
def test_indirect_2():
x, y, z = inputs()
e0 = transpose_view(x)
e = dot(sigmoid(add_in_place(x, y)), e0)
g = Env([x, y, z], [e], False)
inconsistent(g)
new_e0 = add(e0, y)
g.replace(e0, new_e0)
consistent(g)
def test_long_destroyers_loop():
x, y, z = inputs()
e = dot(dot(add_in_place(x, y),
add_in_place(y, z)),
add(z, x))
g = Env([x, y, z], [e])
consistent(g)
OpSubOptimizer(add, add_in_place).optimize(g)
consistent(g)
# we don't want to see that!
assert str(g) != "[Dot(Dot(AddInPlace(x, y), AddInPlace(y, z)), AddInPlace(z, x))]"
e2 = dot(dot(add_in_place(x, y),
add_in_place(y, z)),
add_in_place(z, x))
try:
Env(*graph.clone([x, y, z], [e2]))
raise Exception("Shouldn't have reached this point.")
except InconsistencyError:
pass
def test_misc_2():
x, y, z = inputs()
tv = transpose_view(x)
e = add_in_place(x, tv)
g = Env([x, y], [e], False)
inconsistent(g)
g.replace(tv, x)
inconsistent(g)
def test_multi_destroyers():
    """Two in-place destroyers of the same variable are rejected at graph build."""
    x, y, z = inputs()
    e = add(add_in_place(x, y), add_in_place(x, y))
    try:
        Env([x, y, z], [e])
        raise Exception("Shouldn't have reached this point.")
    except InconsistencyError:
        # Expected: both AddInPlace nodes destroy `x`.
        # (Dropped the unused `as e` binding, which shadowed the graph
        # expression above; sibling tests use a bare `except` here too.)
        pass
def test_multi_destroyers_through_views():
x, y, z = inputs()
e = dot(add(transpose_view(z), y), add(z, x))
g = Env([x, y, z], [e])
consistent(g)
fail = FailureWatch()
OpSubOptimizer(add, add_in_place, fail).optimize(g)
consistent(g)
assert fail.failures == 1 # should have succeeded once and failed once
def test_repair_destroy_path():
x, y, z = inputs()
e1 = transpose_view(transpose_view(x))
e2 = transpose_view(transpose_view(e1))
e3 = add_in_place(e2, y)
e4 = add_in_place(e1, z)
g = Env([x, y, z], [e3, e4], False)
inconsistent(g)
g.replace(e2, transpose_view(x))
inconsistent(g)
def test_usage_loop():
    """Dot(AddInPlace(x, z), x) both destroys and reads x -> inconsistent."""
    x, y, z = inputs()
    g = Env([x, y, z], [dot(add_in_place(x, z), x)], False)
    inconsistent(g)
    # replace add_in_place with add
    OpSubOptimizer(add_in_place, add).optimize(g)
    consistent(g)
def test_usage_loop_through_views():
x, y, z = inputs()
aip = add_in_place(x, y)
e = dot(aip, transpose_view(x))
g = Env([x, y, z], [e], False)
inconsistent(g)
g.replace_validate(aip, add(x, z))
consistent(g)
def test_usage_loop_insert_views():
x, y, z = inputs()
e = dot(add_in_place(x, add(y, z)),
sigmoid(sigmoid(sigmoid(sigmoid(sigmoid(x))))))
g = Env([x, y, z], [e])
consistent(g)
fail = FailureWatch()
OpSubOptimizer(sigmoid, transpose_view, fail).optimize(g)
consistent(g)
# it must keep one sigmoid in the long sigmoid chain
assert fail.failures == 1
def test_value_repl():
x, y, z = inputs()
sy = sigmoid(y)
e = add_in_place(x, sy)
g = Env([x, y], [e], False)
consistent(g)
g.replace(sy, MyConstant("abc"))
consistent(g)
def test_value_repl_2():
x, y, z = inputs()
sy = sigmoid(y)
e = add_in_place(x, sy)
g = Env([x, y], [e], False)
consistent(g)
g.replace(sy, transpose_view(MyConstant("abc")))
consistent(g)
| 25.875862 | 87 | 0.613184 | from __future__ import print_function
from six.moves import xrange
from theano.gof.type import Type
from theano.gof import graph
from theano.gof.graph import Variable, Apply
from theano.gof.op import Op
from theano.gof.opt import *
from theano.gof import destroyhandler
from theano.gof.fg import FunctionGraph, InconsistencyError
from theano.gof.toolbox import ReplaceValidate
from copy import copy
def PatternOptimizer(p1, p2, ign=True):
return OpKeyOptimizer(PatternSub(p1, p2), ignore_newtrees=ign)
def OpSubOptimizer(op1, op2, fail=NavigatorOptimizer.warn_ignore, ign=True):
return TopoOptimizer(OpSub(op1, op2),
ignore_newtrees=ign, failure_callback=fail)
def as_variable(x):
assert isinstance(x, Variable)
return x
class MyType(Type):
def filter(self, data):
return data
def __eq__(self, other):
return isinstance(other, MyType)
def MyVariable(name):
return Variable(MyType(), None, None, name=name)
def MyConstant(data):
return graph.Constant(MyType(), data=data)
class MyOp(Op):
def __init__(self, nin, name, vmap=None, dmap=None, nout=1,
destroyhandler_tolerate_same=None,
destroyhandler_tolerate_aliased=None):
if vmap is None:
vmap = {}
if dmap is None:
dmap = {}
if destroyhandler_tolerate_same is None:
destroyhandler_tolerate_same = []
if destroyhandler_tolerate_aliased is None:
destroyhandler_tolerate_aliased = []
self.nin = nin
self.nout = nout
self.name = name
self.destroy_map = dmap
self.view_map = vmap
self.destroyhandler_tolerate_same = destroyhandler_tolerate_same
self.destroyhandler_tolerate_aliased = destroyhandler_tolerate_aliased
def make_node(self, *inputs):
assert len(inputs) == self.nin
inputs = list(map(as_variable, inputs))
for input in inputs:
if not isinstance(input.type, MyType):
raise Exception("Error 1")
outputs = [MyVariable(self.name + "_R") for i in xrange(self.nout)]
return Apply(self, inputs, outputs)
def __str__(self):
return self.name
sigmoid = MyOp(1, 'Sigmoid')
transpose_view = MyOp(1, 'TransposeView', vmap={0: [0]})
add = MyOp(2, 'Add')
add_in_place = MyOp(2, 'AddInPlace', dmap={0: [0]})
add_in_place_2 = MyOp(2, 'AddInPlace', dmap={0: [0]},
destroyhandler_tolerate_same=[(0, 1)])
add_in_place_3 = MyOp(2, 'AddInPlace', dmap={0: [0]},
destroyhandler_tolerate_aliased=[(0, 1)])
dot = MyOp(2, 'Dot')
def inputs():
x = MyVariable('x')
y = MyVariable('y')
z = MyVariable('z')
return x, y, z
def Env(inputs, outputs, validate=True):
e = FunctionGraph(inputs, outputs, clone=False)
e.attach_feature(destroyhandler.DestroyHandler())
e.attach_feature(ReplaceValidate())
if validate:
e.validate()
return e
class FailureWatch:
def __init__(self):
self.failures = 0
def __call__(self, exc, nav, pairs, lopt, node):
assert isinstance(exc, InconsistencyError)
self.failures += 1
def consistent(g):
try:
assert g.consistent()
except AssertionError:
print("Test failed! The graph was marked as NOT consistent.")
raise
def inconsistent(g):
try:
assert not g.consistent()
except AssertionError:
print("Test failed! The graph was marked as consistent.")
raise
assert str(g) == "[x]"
new_e = add(x, y)
g.replace_validate(x, new_e)
assert str(g) == "[Add(x, y)]"
g.replace(new_e, dot(add_in_place(x, y), transpose_view(x)))
assert str(g) == "[Dot(AddInPlace(x, y), TransposeView(x))]"
inconsistent(g)
g.revert(chk)
consistent(g)
assert str(g) == "[TransposeView(TransposeView(TransposeView(TransposeView(x))))]"
e = True
x = copy(x)
assert x.tag.indestructible
e = add_in_place(x, y)
g = Env([x, y, z], [e], False)
inconsistent(g)
g.replace_validate(e, add(x, y))
consistent(g)
def test_usage_loop_through_views_2():
x, y, z = inputs()
e0 = transpose_view(transpose_view(sigmoid(x)))
e = dot(add_in_place(x, y), transpose_view(e0))
g = Env([x, y, z], [e])
consistent(g)
g.replace(e0, x)
inconsistent(g)
def test_destroyers_loop():
x, y, z = inputs()
e1 = add(x, y)
e2 = add(y, x)
g = Env([x, y, z], [e1, e2])
chk = g.checkpoint()
consistent(g)
g.replace_validate(e1, add_in_place(x, y))
consistent(g)
try:
g.replace_validate(e2, add_in_place(y, x))
raise Exception("Shouldn't have reached this point.")
except InconsistencyError:
pass
consistent(g)
g.revert(chk)
g.replace_validate(e2, add_in_place(y, x))
consistent(g)
try:
g.replace_validate(e1, add_in_place(x, y))
raise Exception("Shouldn't have reached this point.")
except InconsistencyError:
pass
consistent(g)
e = add_in_place(x, x)
g = Env([x], [e], False)
inconsistent(g)
def test_aliased_inputs2():
x, y, z = inputs()
e = add_in_place(x, transpose_view(x))
g = Env([x], [e], False)
inconsistent(g)
def test_aliased_inputs_tolerate():
x, y, z = inputs()
e = add_in_place_2(x, x)
g = Env([x], [e], False)
consistent(g)
def test_aliased_inputs_tolerate2():
x, y, z = inputs()
e = add_in_place_2(x, transpose_view(x))
g = Env([x], [e], False)
inconsistent(g)
def test_same_aliased_inputs_ignored():
x, y, z = inputs()
e = add_in_place_3(x, x)
g = Env([x], [e], False)
consistent(g)
def test_different_aliased_inputs_ignored():
x, y, z = inputs()
e = add_in_place_3(x, transpose_view(x))
g = Env([x], [e], False)
consistent(g)
# add_in_place_3 is actually not correct when aliasing of inputs
# is ignored.
def test_indestructible_through_views():
x, y, z = inputs()
x.tag.indestructible = True
tv = transpose_view(x)
e = add_in_place(tv, y)
g = Env([x, y, z], [e], False)
inconsistent(g)
g.replace_validate(tv, sigmoid(x))
consistent(g)
def test_indirect():
x, y, z = inputs()
e0 = add_in_place(x, y)
e = dot(sigmoid(e0), transpose_view(x))
g = Env([x, y, z], [e], False)
inconsistent(g)
new_e0 = add(x, y)
g.replace(e0, new_e0)
consistent(g)
g.replace(new_e0, add_in_place(x, y))
inconsistent(g)
def test_indirect_2():
x, y, z = inputs()
e0 = transpose_view(x)
e = dot(sigmoid(add_in_place(x, y)), e0)
g = Env([x, y, z], [e], False)
inconsistent(g)
new_e0 = add(e0, y)
g.replace(e0, new_e0)
consistent(g)
def test_long_destroyers_loop():
x, y, z = inputs()
e = dot(dot(add_in_place(x, y),
add_in_place(y, z)),
add(z, x))
g = Env([x, y, z], [e])
consistent(g)
OpSubOptimizer(add, add_in_place).optimize(g)
consistent(g)
# we don't want to see that!
assert str(g) != "[Dot(Dot(AddInPlace(x, y), AddInPlace(y, z)), AddInPlace(z, x))]"
e2 = dot(dot(add_in_place(x, y),
add_in_place(y, z)),
add_in_place(z, x))
try:
Env(*graph.clone([x, y, z], [e2]))
raise Exception("Shouldn't have reached this point.")
except InconsistencyError:
pass
def test_misc_2():
x, y, z = inputs()
tv = transpose_view(x)
e = add_in_place(x, tv)
g = Env([x, y], [e], False)
inconsistent(g)
g.replace(tv, x)
inconsistent(g)
def test_multi_destroyers():
x, y, z = inputs()
e = add(add_in_place(x, y), add_in_place(x, y))
try:
Env([x, y, z], [e])
raise Exception("Shouldn't have reached this point.")
except InconsistencyError as e:
pass
def test_multi_destroyers_through_views():
x, y, z = inputs()
e = dot(add(transpose_view(z), y), add(z, x))
g = Env([x, y, z], [e])
consistent(g)
fail = FailureWatch()
OpSubOptimizer(add, add_in_place, fail).optimize(g)
consistent(g)
assert fail.failures == 1
def test_repair_destroy_path():
x, y, z = inputs()
e1 = transpose_view(transpose_view(x))
e2 = transpose_view(transpose_view(e1))
e3 = add_in_place(e2, y)
e4 = add_in_place(e1, z)
g = Env([x, y, z], [e3, e4], False)
inconsistent(g)
g.replace(e2, transpose_view(x))
inconsistent(g)
def test_usage_loop():
x, y, z = inputs()
g = Env([x, y, z], [dot(add_in_place(x, z), x)], False)
inconsistent(g)
OpSubOptimizer(add_in_place, add).optimize(g)
consistent(g)
def test_usage_loop_through_views():
x, y, z = inputs()
aip = add_in_place(x, y)
e = dot(aip, transpose_view(x))
g = Env([x, y, z], [e], False)
inconsistent(g)
g.replace_validate(aip, add(x, z))
consistent(g)
def test_usage_loop_insert_views():
x, y, z = inputs()
e = dot(add_in_place(x, add(y, z)),
sigmoid(sigmoid(sigmoid(sigmoid(sigmoid(x))))))
g = Env([x, y, z], [e])
consistent(g)
fail = FailureWatch()
OpSubOptimizer(sigmoid, transpose_view, fail).optimize(g)
consistent(g)
assert fail.failures == 1
def test_value_repl():
x, y, z = inputs()
sy = sigmoid(y)
e = add_in_place(x, sy)
g = Env([x, y], [e], False)
consistent(g)
g.replace(sy, MyConstant("abc"))
consistent(g)
def test_value_repl_2():
x, y, z = inputs()
sy = sigmoid(y)
e = add_in_place(x, sy)
g = Env([x, y], [e], False)
consistent(g)
g.replace(sy, transpose_view(MyConstant("abc")))
consistent(g)
| true | true |
f726ded0f21d12ce2859ff426b0a1110e948ea9e | 3,510 | py | Python | inlp/tag/ltp.py | IgowWang/IgorNLP | 3d1bd119bed19f386f30ca1ad4bad98f4200661a | [
"Apache-2.0"
] | 2 | 2016-02-26T09:13:58.000Z | 2017-01-28T13:15:19.000Z | inlp/tag/ltp.py | IgowWang/IgorNLP | 3d1bd119bed19f386f30ca1ad4bad98f4200661a | [
"Apache-2.0"
] | null | null | null | inlp/tag/ltp.py | IgowWang/IgorNLP | 3d1bd119bed19f386f30ca1ad4bad98f4200661a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# IgorNLP:ltp 词性标注模块
#
# Author: Igor
import os
import tempfile
from subprocess import PIPE
from nltk.internals import overridden, compat
from inlp.tag.api import TaggerI
from inlp.utils import ltp_cmd
class LtpPosTagger(TaggerI):
    """Part-of-speech tagger backed by the LTP command-line tools.

    Example:
        sentences = [['这', '是', '哈工大', '分词器', '。'], ['哈工大', '的', '分词器', '测试']]
        path_ltp = '/home/igor/PycharmProjects/ltp'
        ltpTagger = LtpPosTagger(path_to_ltp=path_ltp)
        print(ltpTagger.tag_sents(sentences))
        print(ltpTagger.tag(['这', '是', '哈工大', '分词器', '。']))

    Output:
        [[('这', 'r'), ('是', 'v'), ('哈工大', 'j'), ('分词器', 'n'), ('。', 'wp')], [('哈工大', 'j'), ('的', 'u'), ('分词器', 'n'), ('测试', 'v')]]
        [('这', 'r'), ('是', 'v'), ('哈工大', 'j'), ('分词器', 'n'), ('。', 'wp')]
    """

    def __init__(self, path_to_ltp, path_to_model=None, path_to_lexicon=None, threads=1,
                 encoding='utf8'):
        """Initialize the tagger by locating the LTP installation.

        :param path_to_ltp: root directory of the LTP project
        :param path_to_model: POS tagging model (defaults to ltp_data/pos.model
            under `path_to_ltp` on first use)
        :param path_to_lexicon: optional user-supplied lexicon file
        :param threads: number of tagger threads to request
        :param encoding: encoding used for the temporary input file and output
        """
        self._path_to_ltp = path_to_ltp
        self._path_to_model = path_to_model
        self._path_to_lexicon = path_to_lexicon
        self._threads = threads
        self._encoding = encoding

    def tag_file(self, input_file_path):
        """POS-tag an already-segmented file via the pos_cmdline executable.

        Builds the command line and returns its raw standard output (one
        sentence per line; tokens formatted as word_tag, tab-separated),
        suitable for feeding the next pipeline component.

        :param input_file_path: path to the segmented input file
        :return: raw stdout from the tagger as a decoded string
        """
        if self._path_to_model is None:
            self._path_to_model = os.path.join(self._path_to_ltp, 'ltp_data/pos.model')
        cws_cmdline = os.path.join(self._path_to_ltp, 'bin/examples/pos_cmdline')
        cmd = [
            cws_cmdline,
            '--input', input_file_path,
            '--threads', repr(self._threads),
            '--postagger-model', self._path_to_model,
        ]
        if self._path_to_lexicon:
            cmd.extend(['--postagger-lexicon', self._path_to_lexicon])
        stdout = self._execute(cmd)
        return stdout

    def tag(self, tokens):
        """Tag a single tokenized sentence.

        :param tokens: list of token strings
        :return: list(tuple(str, str)) of (word, tag) pairs
        """
        if overridden(self.tag_sents):
            return self.tag_sents([tokens])[0]
        else:
            raise NotImplementedError()

    def tag_sents(self, sentences):
        """Tag several tokenized sentences at once via a temporary file."""
        encoding = self._encoding
        # create temporary input file
        _input_fh, self._input_file_path = tempfile.mkstemp(text=True)
        # Write the actual sentences to the temporary input file
        _input_fh = os.fdopen(_input_fh, 'wb')
        _input = '\n'.join('\t'.join(x) for x in sentences)
        if isinstance(_input, compat.text_type) and encoding:
            _input = _input.encode(encoding)
        _input_fh.write(_input)
        _input_fh.close()
        stdout = self.tag_file(self._input_file_path)
        # Each output line is a sentence; each tab-separated token is "word_tag".
        return [[tuple(token.split('_')) for token in sent.split('\t')]
                for sent in stdout.strip().split('\n')]

    def _execute(self, cmd):
        # Run the LTP command and decode its stdout with the configured encoding.
        encoding = self._encoding
        stdout, _stderr = ltp_cmd(cmd, stdout=PIPE, stderr=PIPE)
        stdout = stdout.decode(encoding)
        return stdout
if __name__ == '__main__':
    # Manual smoke test -- requires a local LTP installation at `path_ltp`.
    sentences = [['这', '是', '哈工大', '分词器', '。'], ['哈工大', '的', '分词器', '测试']]
    path_ltp = '/home/igor/PycharmProjects/ltp'
    ltpTagger = LtpPosTagger(path_to_ltp=path_ltp)
    print(ltpTagger.tag_sents(sentences))
    print(ltpTagger.tag(['这', '是', '哈工大', '分词器', '。']))
| 30.258621 | 126 | 0.582906 |
import os
import tempfile
from subprocess import PIPE
from nltk.internals import overridden, compat
from inlp.tag.api import TaggerI
from inlp.utils import ltp_cmd
class LtpPosTagger(TaggerI):
def __init__(self, path_to_ltp, path_to_model=None, path_to_lexicon=None, threads=1,
encoding='utf8'):
self._path_to_ltp = path_to_ltp
self._path_to_model = path_to_model
self._path_to_lexicon = path_to_lexicon
self._threads = threads
self._encoding = encoding
def tag_file(self, input_file_path):
if self._path_to_model is None:
self._path_to_model = os.path.join(self._path_to_ltp, 'ltp_data/pos.model')
cws_cmdline = os.path.join(self._path_to_ltp, 'bin/examples/pos_cmdline')
cmd = [
cws_cmdline,
'--input', input_file_path,
'--threads', repr(self._threads),
'--postagger-model', self._path_to_model,
]
if self._path_to_lexicon:
cmd.extend(['--postagger-lexicon', self._path_to_lexicon])
stdout = self._execute(cmd)
return stdout
def tag(self, tokens):
if overridden(self.tag_sents):
return self.tag_sents([tokens])[0]
else:
raise NotImplementedError()
def tag_sents(self, sentences):
encoding = self._encoding
_input_fh, self._input_file_path = tempfile.mkstemp(text=True)
_input_fh = os.fdopen(_input_fh, 'wb')
_input = '\n'.join('\t'.join(x) for x in sentences)
if isinstance(_input, compat.text_type) and encoding:
_input = _input.encode(encoding)
_input_fh.write(_input)
_input_fh.close()
stdout = self.tag_file(self._input_file_path)
return [[tuple(token.split('_')) for token in sent.split('\t')]
for sent in stdout.strip().split('\n')]
def _execute(self, cmd):
encoding = self._encoding
stdout, _stderr = ltp_cmd(cmd, stdout=PIPE, stderr=PIPE)
stdout = stdout.decode(encoding)
return stdout
if __name__ == '__main__':
sentences = [['这', '是', '哈工大', '分词器', '。'], ['哈工大', '的', '分词器', '测试']]
path_ltp = '/home/igor/PycharmProjects/ltp'
ltpTagger = LtpPosTagger(path_to_ltp=path_ltp)
print(ltpTagger.tag_sents(sentences))
print(ltpTagger.tag(['这', '是', '哈工大', '分词器', '。']))
| true | true |
f726df462e44abc76e9c11946685af130da6d59c | 96 | py | Python | boa3_test/test_sc/relational_test/BoolEquality.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 25 | 2020-07-22T19:37:43.000Z | 2022-03-08T03:23:55.000Z | boa3_test/test_sc/relational_test/BoolEquality.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 419 | 2020-04-23T17:48:14.000Z | 2022-03-31T13:17:45.000Z | boa3_test/test_sc/relational_test/BoolEquality.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 15 | 2020-05-21T21:54:24.000Z | 2021-11-18T06:17:24.000Z | from boa3.builtin import public
@public
def Main(a: bool, b: bool) -> bool:
    # Public smart-contract entry point: report whether the two booleans are equal.
    return a == b
| 13.714286 | 35 | 0.645833 | from boa3.builtin import public
@public
def Main(a: bool, b: bool) -> bool:
return a == b
| true | true |
f726e11a06f3a64832e31beeb29cda0f35f7559f | 12,310 | py | Python | tasks.py | nautobot/nautobot-plugin-chatops-aci | d5e92cbaa261e4fbcb175131d03fc6f4e63bc241 | [
"Apache-2.0"
] | null | null | null | tasks.py | nautobot/nautobot-plugin-chatops-aci | d5e92cbaa261e4fbcb175131d03fc6f4e63bc241 | [
"Apache-2.0"
] | 4 | 2021-12-01T19:20:21.000Z | 2022-02-24T22:05:18.000Z | tasks.py | nautobot/nautobot-plugin-chatops-aci | d5e92cbaa261e4fbcb175131d03fc6f4e63bc241 | [
"Apache-2.0"
] | 1 | 2022-01-06T16:37:34.000Z | 2022-01-06T16:37:34.000Z | """Tasks for use with Invoke.
(c) 2020-2021 Network To Code
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from distutils.util import strtobool
from invoke import Collection, task as invoke_task
import os
def is_truthy(arg):
    """Convert "truthy" strings into Booleans.

    Examples:
        >>> is_truthy('yes')
        True

    Args:
        arg (str or bool): Truthy string (True values are y, yes, t, true, on and 1; false values are n, no,
            f, false, off and 0). Non-string values are coerced with str() first.

    Returns:
        bool: True or False for a recognized value.

    Raises:
        ValueError: If `arg` is not a recognized truthy/falsy value.
    """
    if isinstance(arg, bool):
        return arg
    # Inline the mapping formerly delegated to distutils.util.strtobool:
    # distutils is deprecated (PEP 632) and removed in Python 3.12, so we
    # avoid depending on it here.
    val = str(arg).lower()
    if val in ("y", "yes", "t", "true", "on", "1"):
        return True
    if val in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError(f"invalid truth value {arg!r}")
# Use pyinvoke configuration for default values, see http://docs.pyinvoke.org/en/stable/concepts/configuration.html
# Variables may be overwritten in invoke.yml or by the environment variables INVOKE_NAUTOBOT_PLUGIN_CHATOPS_aci_xxx
namespace = Collection("nautobot_plugin_chatops_aci")
namespace.configure(
    {
        "nautobot_plugin_chatops_aci": {
            # Nautobot image tag and Python version used when building the dev containers.
            "nautobot_ver": "latest",
            "project_name": "nautobot-plugin-chatops-aci",
            "python_ver": "3.8",
            # When truthy, run commands on the host instead of inside the nautobot container.
            "local": False,
            # Directory holding the docker-compose files, relative to this tasks.py.
            "compose_dir": os.path.join(os.path.dirname(__file__), "development"),
            # Compose files are layered in this order (later files override earlier ones).
            "compose_files": [
                "docker-compose.requirements.yml",
                "docker-compose.base.yml",
                "docker-compose.dev.yml",
            ],
        }
    }
)
def task(function=None, *args, **kwargs):
    """Drop-in replacement for invoke's @task that also registers the task in `namespace`."""

    def task_wrapper(function=None):
        """Apply invoke.task and add the resulting task to the collection."""
        decorator = invoke_task(*args, **kwargs) if (args or kwargs) else invoke_task
        task_func = decorator(function)
        namespace.add_task(task_func)
        return task_func

    # Bare usage (@task): `function` is the decorated callable itself.
    if function:
        return task_wrapper(function)
    # Parameterized usage (@task(...)): hand back the wrapper to be applied next.
    return task_wrapper
def docker_compose(context, command, **kwargs):
    """Helper function for running a specific docker-compose command with all appropriate parameters and environment.

    Args:
        context (obj): Used to run specific commands
        command (str): Command string to append to the "docker-compose ..." command, such as "build", "up", etc.
        **kwargs: Passed through to the context.run() call.

    Returns:
        The result object returned by context.run().
    """
    # Versions are passed via the environment so compose files / the
    # Dockerfile can interpolate them as build arguments.
    build_env = {
        "NAUTOBOT_VER": context.nautobot_plugin_chatops_aci.nautobot_ver,
        "PYTHON_VER": context.nautobot_plugin_chatops_aci.python_ver,
    }
    compose_command = f'docker-compose --project-name {context.nautobot_plugin_chatops_aci.project_name} --project-directory "{context.nautobot_plugin_chatops_aci.compose_dir}"'
    # Each configured compose file is passed explicitly, in order, via repeated -f flags.
    for compose_file in context.nautobot_plugin_chatops_aci.compose_files:
        compose_file_path = os.path.join(context.nautobot_plugin_chatops_aci.compose_dir, compose_file)
        compose_command += f' -f "{compose_file_path}"'
    compose_command += f" {command}"
    print(f'Running docker-compose command "{command}"')
    return context.run(compose_command, env=build_env, **kwargs)
def run_command(context, command, **kwargs):
    """Wrapper to run a command locally or inside the nautobot container."""
    if is_truthy(context.nautobot_plugin_chatops_aci.local):
        context.run(command, **kwargs)
    else:
        # Check if nautobot is running; no need to start another nautobot container to run a command
        docker_compose_status = "ps --services --filter status=running"
        results = docker_compose(context, docker_compose_status, hide="out")
        if "nautobot" in results.stdout:
            # Service already up: execute inside the running container.
            compose_command = f"exec nautobot {command}"
        else:
            # Otherwise spin up a one-off container with the command as its entrypoint.
            compose_command = f"run --entrypoint '{command}' nautobot"
        docker_compose(context, compose_command, pty=True)
# ------------------------------------------------------------------------------
# BUILD
# ------------------------------------------------------------------------------
@task(
    help={
        "force_rm": "Always remove intermediate containers",
        "cache": "Whether to use Docker's cache when building the image (defaults to enabled)",
    }
)
def build(context, force_rm=False, cache=True):
    """Build Nautobot docker image."""
    flags = []
    if not cache:
        flags.append("--no-cache")
    if force_rm:
        flags.append("--force-rm")
    command = " ".join(["build"] + flags)
    print(f"Building Nautobot with Python {context.nautobot_plugin_chatops_aci.python_ver}...")
    docker_compose(context, command)
@task
def generate_packages(context):
"""Generate all Python packages inside docker and copy the file locally under dist/."""
command = "poetry build"
run_command(context, command)
# ------------------------------------------------------------------------------
# START / STOP / DEBUG
# ------------------------------------------------------------------------------
@task
def debug(context):
"""Start Nautobot and its dependencies in debug mode."""
print("Starting Nautobot in debug mode...")
docker_compose(context, "up")
@task
def start(context):
"""Start Nautobot and its dependencies in detached mode."""
print("Starting Nautobot in detached mode...")
docker_compose(context, "up --detach")
@task
def restart(context):
"""Gracefully restart all containers."""
print("Restarting Nautobot...")
docker_compose(context, "restart")
@task
def stop(context):
"""Stop Nautobot and its dependencies."""
print("Stopping Nautobot...")
docker_compose(context, "down")
@task
def destroy(context):
"""Destroy all containers and volumes."""
print("Destroying Nautobot...")
docker_compose(context, "down --volumes")
@task
def vscode(context):
"""Launch Visual Studio Code with the appropriate Environment variables to run in a container."""
command = "code nautobot.code-workspace"
context.run(command)
# ------------------------------------------------------------------------------
# ACTIONS
# ------------------------------------------------------------------------------
@task
def nbshell(context):
"""Launch an interactive nbshell session."""
command = "nautobot-server nbshell"
run_command(context, command)
@task
def cli(context):
"""Launch a bash shell inside the running Nautobot container."""
run_command(context, "bash")
@task(
help={
"user": "name of the superuser to create (default: admin)",
}
)
def createsuperuser(context, user="admin"):
"""Create a new Nautobot superuser account (default: "admin"), will prompt for password."""
command = f"nautobot-server createsuperuser --username {user}"
run_command(context, command)
@task(
help={
"name": "name of the migration to be created; if unspecified, will autogenerate a name",
}
)
def makemigrations(context, name=""):
"""Perform makemigrations operation in Django."""
command = "nautobot-server makemigrations nautobot_plugin_chatops_aci"
if name:
command += f" --name {name}"
run_command(context, command)
@task
def migrate(context):
"""Perform migrate operation in Django."""
command = "nautobot-server migrate"
run_command(context, command)
@task(help={})
def post_upgrade(context):
"""
Performs Nautobot common post-upgrade operations using a single entrypoint.
This will run the following management commands with default settings, in order:
- migrate
- trace_paths
- collectstatic
- remove_stale_contenttypes
- clearsessions
- invalidate all
"""
command = "nautobot-server post_upgrade"
run_command(context, command)
# ------------------------------------------------------------------------------
# TESTS
# ------------------------------------------------------------------------------
@task(
help={
"autoformat": "Apply formatting recommendations automatically, rather than failing if formatting is incorrect.",
}
)
def black(context, autoformat=False):
"""Check Python code style with Black."""
if autoformat:
black_command = "black"
else:
black_command = "black --check --diff"
command = f"{black_command} ."
run_command(context, command)
@task
def flake8(context):
"""Check for PEP8 compliance and other style issues."""
command = "flake8 ."
run_command(context, command)
@task
def hadolint(context):
"""Check Dockerfile for hadolint compliance and other style issues."""
command = "hadolint development/Dockerfile"
run_command(context, command)
@task
def pylint(context):
"""Run pylint code analysis."""
command = (
'pylint --init-hook "import nautobot; nautobot.setup()" --rcfile pyproject.toml nautobot_plugin_chatops_aci'
)
run_command(context, command)
@task
def pydocstyle(context):
"""Run pydocstyle to validate docstring formatting adheres to NTC defined standards."""
# We exclude the /migrations/ directory since it is autogenerated code
command = "pydocstyle ."
run_command(context, command)
@task
def yamllint(context):
"""Run yamllint to validate formating adheres to NTC defined YAML standards.
Args:
context (obj): Used to run specific commands
"""
command = "yamllint . --format standard"
run_command(context, command)
@task
def bandit(context):
"""Run bandit to validate basic static code security analysis."""
command = "bandit --recursive . --configfile .bandit.yml"
run_command(context, command)
@task
def check_migrations(context):
"""Check for missing migrations."""
command = "nautobot-server --config=nautobot/core/tests/nautobot_config.py makemigrations --dry-run --check"
run_command(context, command)
@task(
    help={
        "keepdb": "save and re-use test database between test runs for faster re-testing.",
        "label": "specify a directory or module to test instead of running all Nautobot tests",
        "failfast": "fail as soon as a single test fails don't run the entire test suite",
        "buffer": "Discard output from passing tests",
    }
)
def unittest(context, keepdb=False, label="nautobot_plugin_chatops_aci", failfast=False, buffer=True):
    """Run the Nautobot unit-test suite under coverage."""
    # Assemble the base command, then append whichever optional flags are enabled.
    optional_flags = ((keepdb, " --keepdb"), (failfast, " --failfast"), (buffer, " --buffer"))
    command = f"coverage run --module nautobot.core.cli test {label}"
    command += "".join(flag for enabled, flag in optional_flags if enabled)
    run_command(context, command)
@task
def unittest_coverage(context):
    """Report on code test coverage as measured by 'invoke unittest'."""
    run_command(
        context,
        "coverage report --skip-covered --include 'nautobot_plugin_chatops_aci/*' --omit *migrations*",
    )
@task(
    help={
        "failfast": "fail as soon as a single test fails don't run the entire test suite",
    }
)
def tests(context, failfast=False):
    """Run the full linting and unit-test suite for this plugin."""
    # Outside a local run, bring the docker containers up once so every
    # individual check does not have to start them itself.
    if not is_truthy(context.nautobot_plugin_chatops_aci.local):
        print("Starting Docker Containers...")
        start(context)
    # Checks ordered loosely from fastest to slowest.
    checks = (
        ("black", lambda: black(context)),
        ("flake8", lambda: flake8(context)),
        ("bandit", lambda: bandit(context)),
        ("pydocstyle", lambda: pydocstyle(context)),
        ("yamllint", lambda: yamllint(context)),
        ("pylint", lambda: pylint(context)),
        ("unit tests", lambda: unittest(context, failfast=failfast)),
    )
    for label, check in checks:
        print(f"Running {label}...")
        check()
    print("All tests have passed!")
    unittest_coverage(context)
| 31.564103 | 177 | 0.646304 |
from distutils.util import strtobool
from invoke import Collection, task as invoke_task
import os
def is_truthy(arg):
    """Coerce *arg* to a bool: booleans pass through, strings go via strtobool."""
    return arg if isinstance(arg, bool) else bool(strtobool(arg))
# Root invoke task collection; every @task-decorated function in this file is
# registered into this namespace by the custom `task` decorator below.
namespace = Collection("nautobot_plugin_chatops_aci")
# Default configuration; each key can be overridden via invoke's standard
# config mechanisms (invoke.yml, environment variables, etc.).
namespace.configure(
    {
        "nautobot_plugin_chatops_aci": {
            # Nautobot image version to build/run against.
            "nautobot_ver": "latest",
            "project_name": "nautobot-plugin-chatops-aci",
            "python_ver": "3.8",
            # When truthy, commands run on the host instead of inside docker.
            "local": False,
            # Directory holding the docker-compose files, relative to this file.
            "compose_dir": os.path.join(os.path.dirname(__file__), "development"),
            # Compose files are layered in this order.
            "compose_files": [
                "docker-compose.requirements.yml",
                "docker-compose.base.yml",
                "docker-compose.dev.yml",
            ],
        }
    }
)
def task(function=None, *args, **kwargs):
    """Wrap invoke's ``task`` decorator so every task is also added to ``namespace``.

    Supports both the bare ``@task`` form and the parameterized ``@task(...)``
    form (e.g. ``@task(help={...})``).
    """
    def task_wrapper(function=None):
        # Defer to invoke's own decorator, forwarding any decorator arguments.
        if args or kwargs:
            task_func = invoke_task(*args, **kwargs)(function)
        else:
            task_func = invoke_task(function)
        # Register the task on the shared collection so `invoke --list` sees it.
        namespace.add_task(task_func)
        return task_func
    # Bare form: called directly with the function.
    if function:
        return task_wrapper(function)
    # Parameterized form: return the wrapper to be applied to the function next.
    return task_wrapper
def docker_compose(context, command, **kwargs):
    """Run a docker-compose command with this project's compose files and build env.

    Args:
        context (obj): invoke context used to execute the command.
        command (str): docker-compose sub-command and arguments (e.g. "up --detach").
        **kwargs: forwarded to ``context.run`` (e.g. ``hide``, ``pty``).

    Returns:
        The invoke ``Result`` of the executed command.
    """
    # Environment consumed by the compose files / Dockerfile build args.
    build_env = {
        "NAUTOBOT_VER": context.nautobot_plugin_chatops_aci.nautobot_ver,
        "PYTHON_VER": context.nautobot_plugin_chatops_aci.python_ver,
    }
    compose_command = f'docker-compose --project-name {context.nautobot_plugin_chatops_aci.project_name} --project-directory "{context.nautobot_plugin_chatops_aci.compose_dir}"'
    # Layer each configured compose file, in order.
    for compose_file in context.nautobot_plugin_chatops_aci.compose_files:
        compose_file_path = os.path.join(context.nautobot_plugin_chatops_aci.compose_dir, compose_file)
        compose_command += f' -f "{compose_file_path}"'
    compose_command += f" {command}"
    print(f'Running docker-compose command "{command}"')
    return context.run(compose_command, env=build_env, **kwargs)
def run_command(context, command, **kwargs):
    """Run ``command`` either on the host or inside the nautobot container.

    When the ``local`` config flag is truthy the command executes directly on
    the host; otherwise it runs in the nautobot service container — exec'ing
    into it if already running, or spinning up a one-off container if not.
    """
    if is_truthy(context.nautobot_plugin_chatops_aci.local):
        context.run(command, **kwargs)
    else:
        # Ask docker-compose which services are currently running.
        docker_compose_status = "ps --services --filter status=running"
        results = docker_compose(context, docker_compose_status, hide="out")
        if "nautobot" in results.stdout:
            # Service is up: exec inside the running container.
            compose_command = f"exec nautobot {command}"
        else:
            # Service is down: run a throwaway container with the command as entrypoint.
            compose_command = f"run --entrypoint '{command}' nautobot"
        docker_compose(context, compose_command, pty=True)
@task(
    help={
        "force_rm": "Always remove intermediate containers",
        "cache": "Whether to use Docker's cache when building the image (defaults to enabled)",
    }
)
def build(context, force_rm=False, cache=True):
    """Build the Nautobot docker image for this plugin."""
    flags = ""
    if not cache:
        flags += " --no-cache"
    if force_rm:
        flags += " --force-rm"
    print(f"Building Nautobot with Python {context.nautobot_plugin_chatops_aci.python_ver}...")
    docker_compose(context, "build" + flags)
@task
def generate_packages(context):
    """Build the distributable python packages via poetry."""
    run_command(context, "poetry build")
# ------------------------------------------------------------------------------
# START / STOP / DEBUG
# ------------------------------------------------------------------------------
@task
def debug(context):
    """Start Nautobot and its dependencies in the foreground (debug mode)."""
    print("Starting Nautobot in debug mode...")
    docker_compose(context, "up")
@task
def start(context):
    """Start Nautobot and its dependencies in detached (background) mode."""
    print("Starting Nautobot in detached mode...")
    docker_compose(context, "up --detach")
@task
def restart(context):
    """Restart the Nautobot containers."""
    print("Restarting Nautobot...")
    docker_compose(context, "restart")
@task
def stop(context):
    """Stop the Nautobot containers (volumes are kept)."""
    print("Stopping Nautobot...")
    docker_compose(context, "down")
@task
def destroy(context):
    """Stop the Nautobot containers and delete their volumes (destructive)."""
    print("Destroying Nautobot...")
    docker_compose(context, "down --volumes")
@task
def vscode(context):
    """Open the project's VS Code workspace on the host."""
    command = "code nautobot.code-workspace"
    context.run(command)
# ------------------------------------------------------------------------------
# ACTIONS
# ------------------------------------------------------------------------------
@task
def nbshell(context):
    """Launch an interactive nbshell session inside the nautobot environment."""
    command = "nautobot-server nbshell"
    run_command(context, command)
@task
def cli(context):
    """Open a bash shell inside the nautobot environment."""
    run_command(context, "bash")
@task(
    help={
        "user": "name of the superuser to create (default: admin)",
    }
)
def createsuperuser(context, user="admin"):
    """Create a Nautobot superuser account."""
    run_command(context, f"nautobot-server createsuperuser --username {user}")
@task(
    help={
        "name": "name of the migration to be created; if unspecified, will autogenerate a name",
    }
)
def makemigrations(context, name=""):
    """Generate django migrations for this plugin's models."""
    command = "nautobot-server makemigrations nautobot_plugin_chatops_aci"
    if name:
        command = f"{command} --name {name}"
    run_command(context, command)
@task
def migrate(context):
    """Apply pending database migrations."""
    command = "nautobot-server migrate"
    run_command(context, command)
@task(help={})
def post_upgrade(context):
    """Run Nautobot's post-upgrade housekeeping command."""
    command = "nautobot-server post_upgrade"
    run_command(context, command)
# ------------------------------------------------------------------------------
# TESTS
# ------------------------------------------------------------------------------
@task(
    help={
        "autoformat": "Apply formatting recommendations automatically, rather than failing if formatting is incorrect.",
    }
)
def black(context, autoformat=False):
    """Check (or, with --autoformat, apply) Black code formatting."""
    if autoformat:
        black_command = "black"
    else:
        # Check-only mode: fail and show a diff instead of rewriting files.
        black_command = "black --check --diff"
    command = f"{black_command} ."
    run_command(context, command)
@task
def flake8(context):
    """Lint the code base for PEP8 compliance and other style issues."""
    command = "flake8 ."
    run_command(context, command)
@task
def hadolint(context):
    """Lint the development Dockerfile with hadolint."""
    command = "hadolint development/Dockerfile"
    run_command(context, command)
@task
def pylint(context):
    """Run the pylint static-analysis suite against the plugin package."""
    command = (
        'pylint --init-hook "import nautobot; nautobot.setup()" --rcfile pyproject.toml nautobot_plugin_chatops_aci'
    )
    run_command(context, command)
@task
def pydocstyle(context):
    """Validate that docstring formatting adheres to NTC-defined standards."""
    # We exclude the /migrations/ directory since it is autogenerated code
    command = "pydocstyle ."
    run_command(context, command)
@task
def yamllint(context):
    """Validate YAML formatting against NTC-defined standards."""
    command = "yamllint . --format standard"
    run_command(context, command)
@task
def bandit(context):
    """Run basic static code security analysis with bandit."""
    command = "bandit --recursive . --configfile .bandit.yml"
    run_command(context, command)
@task
def check_migrations(context):
    """Fail if any model change is missing a django migration."""
    command = "nautobot-server --config=nautobot/core/tests/nautobot_config.py makemigrations --dry-run --check"
    run_command(context, command)
@task(
    help={
        "keepdb": "save and re-use test database between test runs for faster re-testing.",
        "label": "specify a directory or module to test instead of running all Nautobot tests",
        "failfast": "fail as soon as a single test fails don't run the entire test suite",
        "buffer": "Discard output from passing tests",
    }
)
def unittest(context, keepdb=False, label="nautobot_plugin_chatops_aci", failfast=False, buffer=True):
    """Run the Nautobot unit-test suite under coverage."""
    command = f"coverage run --module nautobot.core.cli test {label}"
    if keepdb:
        command += " --keepdb"
    if failfast:
        command += " --failfast"
    if buffer:
        command += " --buffer"
    run_command(context, command)
@task
def unittest_coverage(context):
    """Report on code test coverage as measured by 'invoke unittest'."""
    command = "coverage report --skip-covered --include 'nautobot_plugin_chatops_aci/*' --omit *migrations*"
    run_command(context, command)
@task(
    help={
        "failfast": "fail as soon as a single test fails don't run the entire test suite",
    }
)
def tests(context, failfast=False):
    """Run the full linting and unit-test suite for this plugin."""
    # If we are not running locally, start the docker containers so we don't have to for each test
    if not is_truthy(context.nautobot_plugin_chatops_aci.local):
        print("Starting Docker Containers...")
        start(context)
    # Checks run loosely from fastest to slowest.
    print("Running black...")
    black(context)
    print("Running flake8...")
    flake8(context)
    print("Running bandit...")
    bandit(context)
    print("Running pydocstyle...")
    pydocstyle(context)
    print("Running yamllint...")
    yamllint(context)
    print("Running pylint...")
    pylint(context)
    print("Running unit tests...")
    unittest(context, failfast=failfast)
    print("All tests have passed!")
    unittest_coverage(context)
| true | true |
f726e32c037672a3a1015b66f43061d44ada00cc | 1,226 | py | Python | tryalgo/dist_grid.py | Shloub/tryalgo | ec01a16dd6a6053047f1948531bd5e9b2abf0fab | [
"MIT"
] | null | null | null | tryalgo/dist_grid.py | Shloub/tryalgo | ec01a16dd6a6053047f1948531bd5e9b2abf0fab | [
"MIT"
] | null | null | null | tryalgo/dist_grid.py | Shloub/tryalgo | ec01a16dd6a6053047f1948531bd5e9b2abf0fab | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Distances in a grid
# jill-jenn vie et christoph durr - 2014-2015
from collections import deque
# snip{
def dist_grid(grid, source, target=None):
    """Distances in a grid by BFS, marking the grid in place.

    :param grid: matrix with 4-neighborhood; free cells contain ' '
    :param (int,int) source: pair of row, column indices
    :param (int,int) target: exploration stops if target is reached
    :complexity: linear in grid size

    Cells are overwritten with 's' (source), 't' (target, if reached) or an
    arrow '<', '>', '^', 'v' indicating the direction the BFS entered them.
    """
    rows = len(grid)
    cols = len(grid[0])
    # Renamed from `dir`, which shadowed the builtin of the same name.
    directions = [(0, +1, '>'), (0, -1, '<'), (+1, 0, 'v'), (-1, 0, '^')]
    i, j = source
    grid[i][j] = 's'
    Q = deque([source])
    while Q:
        i1, j1 = Q.popleft()
        for di, dj, symbol in directions:  # explore all four directions
            i2 = i1 + di
            j2 = j1 + dj
            if not (0 <= i2 < rows and 0 <= j2 < cols):
                continue  # outside the grid
            if grid[i2][j2] != ' ':  # blocked, or already visited
                continue
            grid[i2][j2] = symbol  # mark as visited
            if (i2, j2) == target:
                grid[i2][j2] = 't'  # goal reached
                return
            Q.append((i2, j2))
# snip}
| 30.65 | 69 | 0.513866 |
from collections import deque
def dist_grid(grid, source, target=None):
    """BFS over a 4-neighborhood grid, marking visit directions in place."""
    nb_rows, nb_cols = len(grid), len(grid[0])
    # Each move: (row delta, column delta, symbol written into the cell).
    moves = ((0, +1, '>'), (0, -1, '<'), (+1, 0, 'v'), (-1, 0, '^'))
    src_i, src_j = source
    grid[src_i][src_j] = 's'
    queue = deque([source])
    while queue:
        cur_i, cur_j = queue.popleft()
        for delta_i, delta_j, mark in moves:
            nxt_i = cur_i + delta_i
            nxt_j = cur_j + delta_j
            # Skip neighbors outside the grid.
            if nxt_i < 0 or nxt_i >= nb_rows or nxt_j < 0 or nxt_j >= nb_cols:
                continue
            # Skip blocked or already-visited cells.
            if grid[nxt_i][nxt_j] != ' ':
                continue
            grid[nxt_i][nxt_j] = mark
            if (nxt_i, nxt_j) == target:
                grid[nxt_i][nxt_j] = 't'
                return
            queue.append((nxt_i, nxt_j))
| true | true |
f726e3fffed7bf64ee84e30593164304e7fa5261 | 83,112 | py | Python | genepattern/utils/clustering.py | genepattern/genepattern-utils | 950d748301b3c4d07ad8d24c9b037bbb9b4c80e2 | [
"BSD-3-Clause"
] | null | null | null | genepattern/utils/clustering.py | genepattern/genepattern-utils | 950d748301b3c4d07ad8d24c9b037bbb9b4c80e2 | [
"BSD-3-Clause"
] | null | null | null | genepattern/utils/clustering.py | genepattern/genepattern-utils | 950d748301b3c4d07ad8d24c9b037bbb9b4c80e2 | [
"BSD-3-Clause"
] | null | null | null | """
Copied and modified from the dev branch of:
https://github.com/genepattern/HierarchicalClustering
on 2018-01-31
"""
import sys
import numpy as np
from statistics import mode
from sklearn.metrics import pairwise
from sklearn import metrics
from scipy.cluster.hierarchy import dendrogram
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import itertools
from sklearn.cluster import AgglomerativeClustering
import scipy
import itertools
from collections import defaultdict
from .elemental import *
from .information import *
# check if these are repeated:
import os
import sys
tasklib_path = os.path.dirname(os.path.realpath(sys.argv[0]))
# sys.path.append(tasklib_path + "/ccalnoir")
# 2018-02-06 Maybe uncomment these next two
# import matplotlib as mpl
# mpl.use('Agg')
# This is forprinting the hyperlink
from IPython.core.display import display, HTML
# import pandas as pd
# import numpy as np
import scipy
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import gridspec
from sklearn.cluster import AgglomerativeClustering
# from time import time
# import cuzcatlan as cusca
sns.set_style("white")
import matplotlib as mpl
mpl.rcParams['ytick.labelsize'] = 16
mpl.rcParams['xtick.labelsize'] = 16
mpl.rcParams['axes.titlesize'] = 24
mpl.rcParams['axes.labelsize'] = 20
SIGNIFICANT_DIGITS = 7
input_col_distance_dict = {
# These are the values I expect
"No column clustering": "No_column_clustering",
"Uncentered correlation": "uncentered_pearson",
"Pearson correlation": "pearson",
"Uncentered correlation, absolute value": "absolute_uncentered_pearson",
"Pearson correlation, absolute value": "absolute_pearson",
"Spearman's rank correlation": "spearman",
"Kendall's tau": "kendall",
"Euclidean distance": "euclidean",
"City-block distance": "manhattan",
"No_column_clustering": "No_column_clustering",
# These are the values the GpUnit tests give
"0": "No_column_clustering",
"1": "uncentered_pearson",
"2": "pearson",
"3": "absolute_uncentered_pearson",
"4": "absolute_pearson",
"5": "spearman",
"6": "kendall",
"7": "euclidean",
"8": "manhattan",
"9": "information_coefficient",
# These are the values I expect from the comand line
"no_col": "No_column_clustering",
"uncentered_pearson": "uncentered_pearson",
"pearson": "pearson",
"absolute_uncentered_pearson": "absolute_uncentered_pearson",
"absolute_pearson": "absolute_pearson",
"spearman": "spearman",
"kendall": "kendall",
"euclidean": "euclidean",
"manhattan": "manhattan",
"Cosine": "cosine",
"cosine": "cosine",
"ic": "information_coefficient",
"information_coefficient": "information_coefficient",
"Information Coefficient": "information_coefficient",
}
input_row_distance_dict = {
# These are the values I expect
"No row clustering": "No_row_clustering",
"Uncentered correlation": "uncentered_pearson",
"Pearson correlation": "pearson",
"Uncentered correlation, absolute value": "absolute_uncentered_pearson",
"Pearson correlation, absolute value": "absolute_pearson",
"Spearman's rank correlation": "spearman",
"Kendall's tau": "kendall",
"Euclidean distance": "euclidean",
"City-block distance": "manhattan",
"No_row_clustering": "No_row_clustering",
# These are the values the GpUnit tests give
"0": "No_row_clustering",
"1": "uncentered_pearson",
"2": "pearson",
"3": "absolute_uncentered_pearson",
"4": "absolute_pearson",
"5": "spearman",
"6": "kendall",
"7": "euclidean",
"8": "manhattan",
"9": "information_coefficient",
# These are the values I expect from the comand line
"no_row": "No_row_clustering",
"uncentered_pearson": "uncentered_pearson",
"pearson": "pearson",
"absolute_uncentered_pearson": "absolute_uncentered_pearson",
"absolute_pearson": "absolute_pearson",
"spearman": "spearman",
"kendall": "kendall",
"euclidean": "euclidean",
"manhattan": "manhattan",
"Cosine": "cosine",
"cosine": "cosine",
"ic": "information_coefficient",
"information_coefficient": "information_coefficient",
"Information Coefficient": "information_coefficient",
}
input_clustering_method = {
# These are the values I expect
'Pairwise complete-linkage': 'complete',
'Pairwise average-linkage': 'average',
'Pairwise ward-linkage': 'ward',
# These are the values the GpUnit test give
'm': 'complete',
'a': 'average', # I think this is the default
}
input_row_centering = {
# These are the values I expect
'No': None,
'Subtract the mean from each row': 'Mean',
'Subtract the median from each row': 'Median',
# These are the values the GpUnit test give
'None': None,
'Median': 'Median',
'Mean': 'Mean',
}
input_row_normalize = {
# These are the values I expect
'No': False,
'Yes': True,
# These are the values the GpUnit test give
'False': False,
'True': True,
}
input_col_centering = {
# These are the values I expect
'No': None,
'Subtract the mean from each column': 'Mean',
'Subtract the median from each column': 'Median',
# These are the values the GpUnit test give
'None': None,
'Median': 'Median',
'Mean': 'Mean',
}
input_col_normalize = {
# These are the values I expect
'No': False,
'Yes': True,
# These are the values the GpUnit test give
'False': False,
'True': True,
}
def parse_inputs(args=sys.argv):
    """Parse and canonicalize the module's positional command-line arguments.

    Expected positions (everything after the GCT file is optional):
        args[1]  gct_name              path to the input GCT file (required)
        args[2]  col_distance_metric   default 'euclidean'
        args[3]  output_distances      default False
        args[4]  row_distance_metric   default 'No_row_clustering'
        args[5]  clustering_method     default 'average' (pairwise average-linkage)
        args[6]  output_base_name      default 'HC_out'
        args[7]  row_normalization     default False
        args[8]  col_normalization     default False
        args[9]  row_centering         default None
        args[10] col_centering         default None

    Metric/method/centering values are translated through the module-level
    ``input_*`` dictionaries, so GUI labels, GpUnit numeric codes and CLI
    shorthand all map onto the same canonical names.

    This replaces the previous one-branch-per-arity implementation.  Two
    behavioral fixes relative to it: (1) the clustering-method default is now
    always returned in canonical form ('average') instead of the raw label
    'Pairwise average-linkage' at low arities, and (2) the erroneous checks
    that compared ``row_centering`` against ``col_normalization`` and then
    clobbered ``col_normalization`` have been removed (the ``input_*`` dicts
    already map 'None' spellings to ``None``).

    Returns:
        tuple: (gct_name, col_distance_metric, output_distances,
                row_distance_metric, clustering_method, output_base_name,
                row_normalization, col_normalization, row_centering,
                col_centering)

    Exits:
        Via ``sys.exit`` when no GCT file is given, when too many arguments
        are passed, or when 'ward' linkage is combined with a column distance
        metric other than 'average'.
    """
    arg_n = len(args)
    if arg_n == 1:
        sys.exit("Not enough parameters files were provided. This module needs a GCT file to work.")
    if arg_n > 11:
        sys.exit("Too many inputs. This module needs only a GCT file to work, "
                 "plus an optional input choosing between Pearson Correlation or Information Coefficient.")

    # Pad the optional positional arguments with None so every setting can be
    # handled uniformly below (None == "use the default").
    opts = list(args[1:]) + [None] * (11 - arg_n)
    (gct_name, col_distance_metric, output_distances, row_distance_metric,
     clustering_method, output_base_name, row_normalization, col_normalization,
     row_centering, col_centering) = opts

    # Column distance metric: translate any accepted spelling to its canonical name.
    if col_distance_metric is None:
        col_distance_metric = 'euclidean'
    else:
        col_distance_metric = input_col_distance_dict[col_distance_metric]

    # Distance-matrix output flag: any string other than an explicit
    # 'False'/'F'/'false'/'f' counts as True (matches the original parsing).
    if output_distances is None:
        output_distances = False
    else:
        output_distances = output_distances not in ('False', 'F', 'false', 'f')

    # Row distance metric.
    if row_distance_metric is None:
        row_distance_metric = 'No_row_clustering'
    else:
        row_distance_metric = input_row_distance_dict[row_distance_metric]

    # Linkage method; 'ward' is only supported together with the 'average' metric.
    if clustering_method is None:
        clustering_method = 'average'
    else:
        clustering_method = input_clustering_method[clustering_method]
    if clustering_method == 'ward' and col_distance_metric != 'average':
        sys.exit("When choosing 'Pairwise ward-linkage' the distance metric *must* be 'average' ")

    if output_base_name is None:
        output_base_name = 'HC_out'

    # Normalization / centering options (the dicts handle all accepted spellings,
    # including the 'None' strings which map to the Python None object).
    row_normalization = False if row_normalization is None else input_row_normalize[row_normalization]
    col_normalization = False if col_normalization is None else input_col_normalize[col_normalization]
    row_centering = None if row_centering is None else input_row_centering[row_centering]
    col_centering = None if col_centering is None else input_col_centering[col_centering]

    # Echo the effective settings, as the original implementation did.
    print("Using:")
    print("\tgct_name =", gct_name)
    print("\tcol_distance_metric =", col_distance_metric)
    print("\toutput_distances =", output_distances)
    print("\trow_distance_metric =", row_distance_metric)
    print("\tclustering_method =", clustering_method)
    print("\toutput_base_name =", output_base_name)
    print("\trow_normalization =", row_normalization)
    print("\tcol_normalization =", col_normalization)
    print("\trow_centering =", row_centering)
    print("\tcol_centering =", col_centering)
    print(args)

    return gct_name, col_distance_metric, output_distances, row_distance_metric, clustering_method, \
        output_base_name, row_normalization, col_normalization, row_centering, col_centering
def plot_dendrogram(model, data, tree, axis, dist=mydist, clustering_method='average',
                    title='no_title.png', color_threshold=None, orientation='top', **kwargs):
    """Build a SciPy-compatible linkage matrix from a fitted AgglomerativeClustering
    model and draw its dendrogram.

    :param model: fitted sklearn AgglomerativeClustering model (only ``children_`` is read).
    :param data: the matrix that was clustered.
    :param tree: dict {node_id: (child_a, child_b)} as produced by make_tree().
    :param axis: 0 to measure distances between rows, 1 between columns.
    :param dist: pairwise similarity function used between cluster members.
    :param title: default filename for saving; the savefig call is currently commented out.
    :param color_threshold: desired number of clusters to highlight, or None.
    :param orientation: orientation string forwarded to scipy's dendrogram.
    :param kwargs: forwarded to scipy.cluster.hierarchy.dendrogram.
    :return: (leaf labels in plotted order, the linkage matrix that was plotted).
    """
    # plt.clf()
    # modified from https://github.com/scikit-learn/scikit-learn/pull/3464/files
    # Children of hierarchical clustering
    children = model.children_
    # Distances between each pair of children
    # TODO: Fix this mydist
    # distance = dendodist(children, euclidian_similarity)
    # distance = dendodist(children, dist)
    og_distances = better_dendodist(children, dist, tree, data, axis=axis, clustering_method=clustering_method)
    # print(og_distances)
    # og_distances = [abs(temp) for temp in og_distances]
    # Turn similarity into non-negative value Scipy's dendrogram needs this
    if dist in [custom_euclidean_sim, absolute_uncentered_pearson_corr, absolute_pearson_corr]:
        # These similarities are already nonnegative [0,inf) or [0,1]
        # og_distances = og_distances
        pass
    else:  # all the correlation similarities [-1,-1]
        # Shift correlation-like similarities from [-1, 1] to [0, 2].
        og_distances = [temp + 1 for temp in og_distances]
    # Now that all similarities are nonnegative, we turn them into a distance for plotting purposes
    og_distances = [1 / temp for temp in og_distances]
    # print(og_distances)
    # Cumulative sum makes merge heights monotonically increasing, as scipy expects.
    distance = np.cumsum(og_distances)
    # distance = og_distances
    # distance = better_dendodist(children, dist, tree, data, axis=axis)
    # norm_distances = []
    # for value in distance:
    #     norm_distances.append(1/value)
    # norm_distances = distance
    list_of_children = list(get_children(tree, leaves_are_self_children=False).values())
    no_of_observations = [len(i) for i in list_of_children if i]
    # The root node is not expanded by get_children(); append its leaf count here.
    no_of_observations.append(len(no_of_observations) + 1)
    # print(len(no_of_observations))
    # print(children)
    # print(list(tree.values()))
    # print(norm_distances)
    # print(distance)
    if all(value == 0 for value in distance):
        # If all distances are zero, then use uniform distance
        distance = np.arange(len(distance))
    # print(distance)
    # print(np.cumsum(distance))
    # The number of observations contained in each cluster level
    # no_of_observations = np.arange(2, children.shape[0]+2)
    # print(no_of_observations)
    # Create linkage matrix and then plot the dendrogram
    # linkage_matrix = np.column_stack([children, distance, no_of_observations]).astype(float)
    # linkage_matrix = np.column_stack([children, np.cumsum(distance), no_of_observations]).astype(float)
    linkage_matrix = np.column_stack([children, distance, no_of_observations]).astype(float)
    # linkage_matrix = np.column_stack([children, norm_distances, no_of_observations]).astype(float)
    # print(linkage_matrix)
    # Plot the corresponding dendrogram
    # print(scipy.cluster.hierarchy.cut_tree(linkage_matrix, n_clusters=5))
    # print(color_threshold)
    # find what the height at which to cut the dendrogram
    if color_threshold is not None:
        # Translate "number of clusters" into the scipy cut height just below
        # the (k-1)-th highest merge; clamp to a valid range first.
        if color_threshold == 1:
            color_threshold = 2
        if color_threshold > (len(linkage_matrix) + 1):
            color_threshold = (len(linkage_matrix) + 1)
        # print('Finding the right cut')
        color_threshold = linkage_matrix[-(color_threshold - 1)][2] - np.finfo(float).eps
        # color_threshold = linkage_matrix[-(color_threshold - 1)][2] + 10*np.finfo(float).eps  # Adding more wiggle room
        # print(color_threshold)
    R = dendrogram(linkage_matrix, color_threshold=color_threshold, orientation=orientation, **kwargs)
    # R = dendrogram(linkage_matrix, **kwargs)
    # [label.set_rotation(90) for label in plt.gca().get_xticklabels()]
    order_of_columns = R['ivl']
    # # print(order_of_columns)
    # plt.gca().get_yaxis().set_visible(False)
    # plt.savefig(title, dpi=300)
    # plt.show()
    # n = len(linkage_matrix) + 1
    # cache = dict()
    # for k in range(len(linkage_matrix)):
    #     c1, c2 = int(linkage_matrix[k][0]), int(linkage_matrix[k][1])
    #     c1 = [c1] if c1 < n else cache.pop(c1)
    #     c2 = [c2] if c2 < n else cache.pop(c2)
    #     cache[n + k] = c1 + c2
    # order_of_columns = cache[2 * len(linkage_matrix)]
    # print(order_of_columns)
    # print(linkage_matrix)
    # print("---")
    # print(no_of_observations)
    # print("---")
    # print(list_of_children)
    # print("---")
    #
    # print(len(order_of_columns))
    # print(color_threshold)
    # clusters2idxs, idxs2clusters = get_cluster_classes(R)
    #
    # print(clusters2idxs)
    # print(idxs2clusters)
    # print("---")
    # print(get_children(tree, leaves_are_self_children=False))
    # print("---")
    # print(get_children(tree, leaves_are_self_children=False, only_leaves_are_children=False))
    return order_of_columns, linkage_matrix
def get_clusters(tree):
    """Placeholder — flat-cluster extraction from *tree* is not implemented yet."""
    return None
def get_cluster_classes(den, label='ivl'):
    """Debug helper for inspecting a scipy dendrogram dict.

    NOTE(review): the color-to-leaf mapping logic is commented out below, so the
    two returned containers are currently always empty; only the print()
    diagnostics execute.

    :param den: dict returned by scipy.cluster.hierarchy.dendrogram.
    :param label: key of the leaf-label list inside *den* (unused while the
        mapping code stays commented out).
    :return: (clusters2idxs, idxs2clusters) — currently an empty defaultdict(list)
        and an empty dict.
    """
    # from http://www.nxn.se/valent/extract-cluster-elements-by-color-in-python
    clusters2idxs = defaultdict(list)
    idxs2clusters = {}
    # for c, pi in zip(den['color_list'], den['icoord']):
    #     for leg in pi[1:3]:
    #         i = (leg - 5.0) / 10.0
    #         if abs(i - int(i)) < 1e-5:
    #             clusters2idxs[c].append(int(i))
    #             idxs2clusters[int(i)] = c
    #             # print(c, i)
    # cluster_classes = Clusters()
    # for c, l in cluster_idxs.items():
    #     i_l = [den[label][i] for i in l]
    #     cluster_classes[c] = i_l
    # Trying something new:
    print(den.keys())
    print(len(den['icoord']))
    print(len(den['dcoord']))
    print(len(den['ivl']))
    print(len(den['leaves']))
    print(den['leaves'])
    print(len(den['color_list']))
    print(den['color_list'])
    return clusters2idxs, idxs2clusters
def order_leaves(model, data, tree, labels, axis=0, dist=mydist, reverse=False):
    """Return *labels* reordered to match the dendrogram leaf order.

    Walks the merge list bottom-up, concatenating the member lists of the two
    clusters joined at each step; the member list of the final merge is the
    left-to-right leaf order.
    Adapted from: https://stackoverflow.com/questions/12572436/calculate-ordering-of-dendrogram-leaves

    :param model: fitted AgglomerativeClustering model (only ``children_`` is used).
    :param data: unused; kept for interface compatibility.
    :param tree: unused; kept for interface compatibility.
    :param labels: sequence of leaf labels, indexed by leaf id.
    :param axis: unused; kept for interface compatibility.
    :param dist: unused; kept for interface compatibility.
    :param reverse: if True, return the order reversed.
    :return: list of labels in dendrogram order.
    """
    merges = np.column_stack([model.children_]).astype(float)
    leaf_count = len(merges) + 1
    members = dict()
    for step in range(len(merges)):
        left, right = int(merges[step][0]), int(merges[step][1])
        left_members = [left] if left < leaf_count else members.pop(left)
        right_members = [right] if right < leaf_count else members.pop(right)
        members[leaf_count + step] = left_members + right_members
    leaf_order = members[2 * len(merges)]
    if reverse:
        leaf_order = list(reversed(leaf_order))
    return [labels[i] for i in leaf_order]
def two_plot_two_dendrogram(model, dist=mydist, **kwargs):
    """Draw a left-oriented dendrogram (for rows/genes) and return the leaf order.

    Unlike plot_dendrogram(), merge heights come from dendodist() and clusters
    are not colored (color_threshold=0). The row order is reversed so it lines
    up with a heatmap drawn top-to-bottom.

    :param model: fitted AgglomerativeClustering model (only ``children_`` is read).
    :param dist: pairwise function forwarded to dendodist().
    :param kwargs: forwarded to scipy.cluster.hierarchy.dendrogram.
    :return: list of leaf labels, reversed from scipy's bottom-up order.
    """
    # modified from https://github.com/scikit-learn/scikit-learn/pull/3464/files
    # Children of hierarchical clustering
    children = model.children_
    # Distances between each pair of children
    distance = dendodist(children, dist)
    if all(value == 0 for value in distance):
        # If all distances are zero, then use uniform distance
        distance = np.arange(len(distance))
    # The number of observations contained in each cluster level
    no_of_observations = np.arange(2, children.shape[0] + 2)
    # Create linkage matrix and then plot the dendrogram
    linkage_matrix = np.column_stack([children, distance, no_of_observations]).astype(float)
    # Plot the corresponding dendrogram
    R = dendrogram(linkage_matrix, color_threshold=0, orientation='left', **kwargs)
    # [label.set_rotation(90) for label in plt.gca().get_xticklabels()]
    order_of_rows = R['ivl']
    # print(order_of_columns)
    plt.gca().get_xaxis().set_visible(False)
    return list(reversed(order_of_rows))
def my_affinity_generic(M, metric):
    """Return the full pairwise matrix of metric(a, b) over the rows of M.

    The row index corresponds to the second argument *b*, the column index to
    the first argument *a*, matching the original nesting order.
    """
    rows = []
    for b in M:
        rows.append(np.array([metric(a, b) for a in M]))
    return np.array(rows)
def my_affinity_i(M):
    """Pairwise information-coefficient distance matrix over the rows of M."""
    rows = []
    for b in M:
        rows.append([information_coefficient_dist(a, b) for a in M])
    return np.array(rows)
def my_affinity_ai(M):
    """Pairwise absolute information-coefficient distance matrix over the rows of M."""
    rows = []
    for b in M:
        rows.append([absolute_information_coefficient_dist(a, b) for a in M])
    return np.array(rows)
def my_affinity_p(M):
    """Pairwise Pearson distance matrix over the rows of M."""
    rows = []
    for b in M:
        rows.append([custom_pearson_dist(a, b) for a in M])
    return np.array(rows)
def my_affinity_s(M):
    """Pairwise Spearman distance matrix over the rows of M."""
    rows = []
    for b in M:
        rows.append([custom_spearman_dist(a, b) for a in M])
    return np.array(rows)
def my_affinity_k(M):
    """Pairwise Kendall-tau distance matrix over the rows of M."""
    rows = []
    for b in M:
        rows.append([custom_kendall_tau_dist(a, b) for a in M])
    return np.array(rows)
def my_affinity_ap(M):
    """Pairwise absolute-Pearson distance matrix over the rows of M."""
    rows = []
    for b in M:
        rows.append([absolute_pearson_dist(a, b) for a in M])
    return np.array(rows)
def my_affinity_u(M):
    """Pairwise uncentered-Pearson distance matrix over the rows of M."""
    rows = []
    for b in M:
        rows.append([uncentered_pearson_dist(a, b) for a in M])
    return np.array(rows)
def my_affinity_au(M):
    """Pairwise absolute uncentered-Pearson distance matrix over the rows of M."""
    rows = []
    for b in M:
        rows.append([absolute_uncentered_pearson_dist(a, b) for a in M])
    return np.array(rows)
def my_affinity_l1(M):
    """Pairwise L1 (Manhattan) distance matrix over the rows of M."""
    rows = []
    for b in M:
        rows.append([custom_manhattan_dist(a, b) for a in M])
    return np.array(rows)
def my_affinity_l2(M):
    """Pairwise L2 (Euclidean) distance matrix over the rows of M."""
    rows = []
    for b in M:
        rows.append([custom_euclidean_dist(a, b) for a in M])
    return np.array(rows)
def my_affinity_m(M):
    """Pairwise Manhattan distance matrix over the rows of M (alias of l1)."""
    rows = []
    for b in M:
        rows.append([custom_manhattan_dist(a, b) for a in M])
    return np.array(rows)
def my_affinity_c(M):
    """Pairwise cosine distance matrix over the rows of M."""
    rows = []
    for b in M:
        rows.append([custom_cosine_dist(a, b) for a in M])
    return np.array(rows)
def my_affinity_e(M):
    """Pairwise custom Euclidean distance matrix over the rows of M (alias of l2)."""
    rows = []
    for b in M:
        rows.append([custom_euclidean_dist(a, b) for a in M])
    return np.array(rows)
def count_diff(x):
    """Count how many elements of *x* differ from its first element.

    :param x: any sequence (list, tuple, string, numpy array row, ...).
    :return: number of elements not equal to ``x[0]``; 0 for an empty sequence
        (the original raised IndexError on empty input).
    """
    if len(x) == 0:
        return 0
    reference = x[0]
    # Generator form replaces the manual counter loop.
    return sum(1 for element in x if element != reference)
def count_mislabels(labels, true_labels):
    """Count mislabeled samples assuming exactly two clusters labeled 0 and 1.

    The predicted labels of the larger true cluster are compared to their mode;
    the smaller cluster is expected to carry the opposite label. Disagreements
    from both clusters are summed.

    :param labels: 1-D numpy array of predicted cluster labels (0/1).
    :param true_labels: 1-D numpy array of ground-truth labels (0/1).
    :return: total number of samples whose predicted label disagrees.
    """
    # 2017-08-17: I will make the assumption that clusters have only 2 values.
    # clusters = np.unique(true_labels)
    # mislabels = 0
    # for curr_clust in clusters:
    #     print("for label", curr_clust)
    #     print("\t", labels[(true_labels == curr_clust)])
    #     compare_to = mode(labels[(true_labels == curr_clust)])
    #     print("\tcompare to:", compare_to, "mislables: ", np.count_nonzero(labels[(true_labels == curr_clust)] != compare_to))
    #     mislabels += np.count_nonzero(labels[(true_labels == curr_clust)] != compare_to)
    set_a = labels[true_labels == 0]
    set_b = labels[true_labels == 1]
    if len(set_a) <= len(set_b):
        shorter = set_a
        longer = set_b
    else:
        shorter = set_b
        longer = set_a
    # NOTE(review): assumes `mode` returns a plain 0/1 scalar — confirm which
    # mode() is imported at the top of the file (statistics vs scipy.stats).
    long_mode = mode(longer)  # this what the label of the longer cluster should be.
    short_mode = 1 if long_mode == 0 else 0  # Choose the other value for the label of the shorter cluster
    # start with the longer vector:
    # print("The long set is", longer, "it has", np.count_nonzero(longer != long_mode), 'mislabels.')
    # print("The short set is", shorter, "it has", np.count_nonzero(shorter != short_mode), 'mislabels.')
    # np.count_nonzero(longer != long_mode) + np.count_nonzero(shorter != short_mode)
    return np.count_nonzero(longer != long_mode) + np.count_nonzero(shorter != short_mode)
def plot_heatmap(df, col_order, row_order, top=5, title_text='differentially expressed genes per phenotype'):
    """Plot a heatmap of the top/bottom *top* rows of *df* and save it to heatmap.png.

    :param df: pandas DataFrame (genes x samples).
    :param col_order: desired column order; must match df's columns in length.
    :param row_order: desired row order; must match df's rows in length.
    :param top: how many rows to show from each end after reordering.
    :param title_text: suffix for the plot title.
    """
    # Bug fix: the original guards were `if not (len(a), len(b)):` — a 2-tuple,
    # which is always truthy, so `not` made the condition always False and the
    # length-mismatch checks could never fire. Compare the lengths instead.
    if len(col_order) != len(list(df)):
        exit("Number of columns in dataframe do not match the columns provided for ordering.")
    if len(row_order) != len(df):
        exit("Number of rows in dataframe do not match the columns provided for ordering.")
    df = df[col_order]
    df = df.reindex(row_order)
    plt.clf()
    # Show only the first `top` and last `top` rows of the reordered frame.
    sns.heatmap(df.iloc[np.r_[0:top, -top:0], :], cmap='viridis')
    plt.yticks(rotation=0)
    plt.xticks(rotation=90)
    plt.title('Top {} {}'.format(top, title_text))
    plt.ylabel('Genes')
    plt.xlabel('Sample')
    plt.savefig('heatmap.png', dpi=300, bbox_inches="tight")
def parse_data(gct_name, row_normalization=False, col_normalization=False, row_centering=None, col_centering=None):
    """Load a GCT file (or accept a DataFrame directly) and return raw and normalized views.

    :param gct_name: path/URL of a .gct file, or an already-loaded pandas DataFrame
        (pd.read_csv raises ValueError on a DataFrame, which is caught below).
    :param row_normalization: whether to normalize each row (gene).
    :param col_normalization: whether to normalize each column (sample).
    :param row_centering: None, or the centering mode for rows (e.g. 'Mean'/'Median').
    :param col_centering: None, or the centering mode for columns.
    :return: tuple (og_data, data_df, data, new_data_df, plot_labels, row_labels,
        og_full_gct, new_full_gct) — raw matrix, raw numeric DataFrame, normalized
        matrix, normalized DataFrame, column labels, row labels, and the full GCT
        frames (with 'Description') before and after normalization.
    """
    try:
        data_df = pd.read_csv(gct_name, sep='\t', skiprows=2)
    except ValueError:
        # gct_name was already a DataFrame, not a path/URL.
        data_df = gct_name
    # Bug fix: the original used `data_df.index.name is 'Name'` — an identity
    # comparison against a string literal, whose result is interpreter-dependent.
    # Use equality instead.
    if data_df.index.name == 'Name':
        data_df['Name'] = data_df.index
    else:
        if 'Name' not in list(data_df):
            # First column doubles as the gene-name column.
            data_df['Name'] = data_df.iloc[:, 0]
            data_df.drop(data_df.columns[0], axis=1, inplace=True)
    if 'Description' not in list(data_df):
        data_df['Description'] = data_df['Name']
    data_df.set_index(data_df['Name'], inplace=True)
    og_full_gct = data_df.copy()
    og_full_gct.drop(['Name'], axis=1, inplace=True)
    data_df.drop(['Name', 'Description'], axis=1, inplace=True)
    plot_labels = list(og_full_gct.drop(['Description'], axis=1, inplace=False))
    # .values replaces the deprecated (and since-removed) DataFrame.as_matrix().
    data = data_df.values
    row_labels = data_df.index.values
    og_data = data.copy()
    # Centering/normalization is delegated entirely to normalize_dataframe().
    data = normalize_dataframe(data_df, log_normalize=None,
                               row_centering=row_centering, row_normalization=row_normalization,
                               col_centering=col_centering, col_normalization=col_normalization).values
    new_data_df = pd.DataFrame(data=data, index=data_df.index, columns=list(data_df))
    new_full_gct = new_data_df.copy()
    new_full_gct.insert(0, column='Description', value=og_full_gct['Description'])
    return og_data, data_df, data, new_data_df, plot_labels, row_labels, og_full_gct, new_full_gct
# Metric name -> value accepted by sklearn AgglomerativeClustering's `affinity`
# argument: a callable for the custom metrics, or a plain string for metrics
# sklearn implements natively.
str2func = {
    'custom_euclidean': my_affinity_e,
    'uncentered_pearson': my_affinity_u,
    'absolute_uncentered_pearson': my_affinity_au,
    'information_coefficient': my_affinity_i,
    'pearson': my_affinity_p,
    'spearman': my_affinity_s,
    'kendall': my_affinity_k,
    'absolute_pearson': my_affinity_ap,
    'l1': 'l1',
    'l2': 'l2',
    'manhattan': 'manhattan',
    'cosine': 'cosine',
    'euclidean': 'euclidean',
}
# Same keys, but every value is a callable that builds the full pairwise
# affinity matrix — used where a callable is required even for builtin metrics.
str2affinity_func = {
    'custom_euclidean': my_affinity_e,
    'uncentered_pearson': my_affinity_u,
    'absolute_uncentered_pearson': my_affinity_au,
    'information_coefficient': my_affinity_i,
    'pearson': my_affinity_p,
    'spearman': my_affinity_s,
    'kendall': my_affinity_k,
    'absolute_pearson': my_affinity_ap,
    'l1': my_affinity_l1,
    'l2': my_affinity_l2,
    'manhattan': my_affinity_m,
    'cosine': my_affinity_c,
    'euclidean': my_affinity_e,
}
# Metric name -> pairwise *distance* function (larger = more different).
str2dist = {
    'custom_euclidean': custom_euclidean_dist,
    'uncentered_pearson': uncentered_pearson_dist,
    'absolute_uncentered_pearson': absolute_uncentered_pearson_dist,
    'information_coefficient': information_coefficient_dist,
    'pearson': custom_pearson_dist,
    'spearman': custom_spearman_dist,
    'kendall': custom_kendall_tau_dist,
    'absolute_pearson': absolute_pearson_dist,
    'l1': custom_manhattan_dist,
    'l2': custom_euclidean_dist,
    'manhattan': custom_manhattan_dist,
    'cosine': custom_cosine_dist,
    'euclidean': custom_euclidean_dist,
}
# Metric name -> pairwise *similarity* function (larger = more alike).
str2similarity = {
    'custom_euclidean': custom_euclidean_sim,
    'uncentered_pearson': uncentered_pearson_corr,
    'absolute_uncentered_pearson': absolute_uncentered_pearson_corr,
    'information_coefficient': information_coefficient,
    'pearson': custom_pearson_corr,
    'spearman': custom_spearman_corr,
    'kendall': custom_kendall_tau_corr,
    'absolute_pearson': absolute_pearson_corr,
    'l1': custom_manhattan_sim,
    'l2': custom_euclidean_sim,
    'manhattan': custom_manhattan_sim,
    'cosine': custom_cosine_sim,
    # 'euclidean': pairwise.paired_euclidean_distances,
    'euclidean': custom_euclidean_sim,
    # 'euclidean': custom_euclidean_dist,
}
# Human-readable linkage names (as shown in the GenePattern UI) -> the
# linkage keyword scipy/sklearn expect.
linkage_dic = {
    'Pairwise average-linkage': 'average',
    'Pairwise complete-linkage': 'complete',
    'Pairwise ward-linkage': 'ward',
    'average': 'average',
    'complete': 'complete',
    'ward': 'ward',
}
def make_tree(model, data=None):
    """Map each internal node id of a fitted clustering model to the pair it merged.

    Follows scikit-learn's AgglomerativeClustering convention: leaves are
    numbered 0..n_leaves_-1, and each merge creates a new node numbered
    upward starting at n_leaves_.

    Modified from:
    https://stackoverflow.com/questions/27386641/how-to-traverse-a-tree-from-sklearn-agglomerativeclustering
    (equivalently: ``dict(enumerate(model.children_, model.n_leaves_))``).

    :param model: fitted sklearn AgglomerativeClustering model.
    :param data: unused; kept for interface compatibility.
    :return: dict {node_id: pair of child ids}.
    """
    first_internal_id = model.n_leaves_
    return dict(enumerate(model.children_, first_internal_id))
def make_cdt(data, order_of_columns, order_of_rows, name='test.cdt', atr_companion=True, gtr_companion=False):
    """Write *data* as a CDT file (Clustered Data Table, TreeView format).

    Mutates *data* in place (renames 'Description' to 'Name', inserts GWEIGHT /
    EWEIGHT / AID / GID bookkeeping rows and columns) and writes the result to
    *name*, with rows/columns reordered to match the dendrograms.

    :param data: pandas DataFrame containing the expression values plus a
        'Description' column; modified in place.
    :param order_of_columns: column labels in dendrogram order.
    :param order_of_rows: row labels in dendrogram order.
    :param name: output filename.
    :param atr_companion: whether an .atr file accompanies this CDT (adds AID row).
    :param gtr_companion: whether a .gtr file accompanies this CDT (adds GID column).
    """
    # TODO: if order_of_columns == None, then do arange(len(list(data)))
    # TODO: if order_of_rows == None, then do arange(len(list(data)))
    # exit(data.to_csv())
    data.index.name = "ID"
    data.rename(columns={'Description': 'Name'}, inplace=True)
    temp = np.ones(len(data))
    data.insert(loc=1, column='GWEIGHT', value=temp)  # adding an extra column
    # These three lines add a row
    data.loc['EWEIGHT'] = list(np.ones(len(list(data))))
    newIndex = ['EWEIGHT'] + [ind for ind in data.index if ind != 'EWEIGHT']
    data = data.reindex(index=newIndex)
    if atr_companion:
        # AID row links each column to an ARRY#X id in the .atr file; the two
        # leading blanks cover the 'Name' and 'GWEIGHT' columns.
        new_AID = ['', '']
        for element in range(len(order_of_columns)):
            temp = 'ARRY' + str(element) + 'X'
            new_AID.append(temp)
        data.loc['AID'] = new_AID
        newIndex = ['AID'] + [ind for ind in data.index if ind != 'AID']
        data = data.reindex(index=newIndex)
        data = data[['Name', 'GWEIGHT'] + order_of_columns]
    if gtr_companion:
        # GID column links each row to a GENE#X id in the .gtr file.
        new_GID = ['']
        if atr_companion:
            new_GID = ['AID', 'EWEIGHT']  # This is to make sure we fit the CDT format
        # for element in np.sort(np.unique(GID)):
        #     if 'NODE' in element:
        #         # print(element, 'GTR delete')
        #         pass
        #     else:
        #         new_GID.append(element)
        for element in range(len(order_of_rows)):
            temp = 'GENE' + str(element) + 'X'
            new_GID.append(temp)
        data.insert(loc=0, column='GID', value=new_GID)  # adding an extra column
        data.insert(loc=0, column=data.index.name, value=data.index)  # Making the index a column
        # reorder to match dendogram
        temp = ['AID', 'EWEIGHT'] + order_of_rows
        # data = data.loc[temp]
        # print(data['GID'])
        data = data.reindex(temp)
        # print(data['GID'])
        # print(list(data.index))
        # print(data['GID'])
        # print(data['Name'])
        # Making the 'GID' the index -- for printing purposes
        data.index = data['GID']
        data.index.name = 'GID'
        data.drop(['GID'], axis=1, inplace=True)
    # print(list(data.index))
    # The first three lines need to be written separately due to a quirk in the CDT file format:
    # print(data.to_csv(sep='\t', index=True, header=True))
    f = open(name, 'w')
    f.write(data.to_csv(sep='\t', index=True, header=True))
    # f.write(data.to_csv(sep='\t', index=True, header=True))
    f.close()
    # pd.options.display.float_format = '{:3.3f}'.format
    data = data.round(2)
    # print(data.to_csv())
    # exit()
    # exit(data.to_csv(sep=' ', index=True, header=True, float_format='2',))
    return
def make_atr(col_tree_dic, data, dist, clustering_method='average', file_name='test.atr'):
    """Write the .atr (array tree) companion file describing column clustering.

    Each output line is: node<TAB>child_1<TAB>child_2<TAB>linkage_value, with
    ids translated to the ARRY#X / NODE#X names the CDT/TreeView format expects.

    :param col_tree_dic: dict {node_id: (child_a, child_b)} for the columns.
    :param data: clustered matrix; columns are the arrays/samples (axis=1 below).
    :param dist: pairwise function used to score each merge.
    :param clustering_method: 'average' or 'complete' (see centroid_distances).
    :param file_name: output path for the .atr file.
    """
    max_val = len(col_tree_dic)
    # AID = []
    # compute distances
    distance_dic = {}
    for node, children in col_tree_dic.items():
        val = centroid_distances(children[0], children[1], tree=col_tree_dic, data=data, axis=1,
                                 distance=dist, clustering_method=clustering_method)
        # print(dist, children, val)
        # print("Value is", val)
        distance_dic[node] = val
    # if dist == custom_euclidean_sim:
    #     print("Euclidean distance is especial, normalizing using this scheme:")
    #     low_norm = min(distance_dic.values())
    #     high_norm = max(distance_dic.values())
    #     for key in distance_dic.keys():
    #         # distance -= norm
    #         # distance_dic[key] = distance_dic[key]/high_norm
    #         # distance_dic[key] = (distance_dic[key]-low_norm)/high_norm
    #         # distance_dic[key] = distance_dic[key]/high_norm
    #         # distance_dic[key] = ((1/distance_dic[key])-high_norm)/low_norm
    #         print(distance_dic[key])
    f = open(file_name, 'w')
    for node, children in col_tree_dic.items():
        elements = [translate_tree(node, max_val, 'atr'), translate_tree(children[0], max_val, 'atr'),
                    translate_tree(children[1], max_val, 'atr'),
                    "{num:.{width}f}".format(num=distance_dic[node], width=SIGNIFICANT_DIGITS)]
        # print('\t', '\t'.join(elements))
        # AID.append(translate_tree(children[0], max_val, 'atr'))
        # AID.append(translate_tree(children[1], max_val, 'atr'))
        f.write('\t'.join(elements) + '\n')
        # print('\t'.join(elements) + '\n')
    f.close()
    return
def make_gtr(row_tree_dic, data, dist, clustering_method='average', file_name='test.gtr'):
    """Write the .gtr (gene tree) companion file describing row clustering.

    Mirrors make_atr() but works on rows (axis=0) and uses GENE#X / NODE#X ids.

    :param row_tree_dic: dict {node_id: (child_a, child_b)} for the rows.
    :param data: clustered matrix; rows are the genes (axis=0 below).
    :param dist: pairwise function used to score each merge.
    :param clustering_method: 'average' or 'complete' (see centroid_distances).
    :param file_name: output path for the .gtr file.
    """
    max_val = len(row_tree_dic)
    # GID = []
    # compute distances
    distance_dic = {}
    for node, children in row_tree_dic.items():
        val = centroid_distances(children[0], children[1], tree=row_tree_dic, data=data, axis=0,
                                 distance=dist, clustering_method=clustering_method)
        distance_dic[node] = val
    f = open(file_name, 'w')
    for node, children in row_tree_dic.items():
        elements = [translate_tree(node, max_val, 'gtr'), translate_tree(children[0], max_val, 'gtr'),
                    translate_tree(children[1], max_val, 'gtr'),
                    "{num:.{width}f}".format(num=distance_dic[node], width=SIGNIFICANT_DIGITS)]
        # GID.append(translate_tree(children[0], max_val, 'gtr'))
        # GID.append(translate_tree(children[1], max_val, 'gtr'))
        f.write('\t'.join(elements) + '\n')
        # val -= 1
    f.close()
    return
def translate_tree(what, length, g_or_a):
    """Translate a numeric tree node id into its CDT/ATR/GTR name.

    Ids up to *length* are leaves ('ARRY#X' for arrays, 'GENE#X' for genes);
    larger ids are internal nodes ('NODE#X', numbered relative to *length*).

    :param what: numeric node id.
    :param length: number of internal nodes (ids <= length are leaves).
    :param g_or_a: any string containing 'a' for arrays, else 'g' for genes.
    :return: the translated name, or [] (with a warning) for unsupported modes.
    """
    if 'a' in g_or_a:
        leaf_prefix = 'ARRY'
    elif 'g' in g_or_a:
        leaf_prefix = 'GENE'
    else:
        print('This function does not support g_or_a=', g_or_a)
        return []
    if what <= length:
        return leaf_prefix + str(what) + 'X'
    return 'NODE' + str(what - length) + 'X'
# def get_children_recursively(k, model, node_dict, leaf_count, n_samples, data, verbose=False, left=None, right=None):
# # print(k)
# i, j = model.children_[k]
#
# if k in node_dict:
# return node_dict[k]['children']
#
# if i < leaf_count:
# # print("i if")
# left = [i]
# else:
# # print("i else")
# # read the AgglomerativeClustering doc. to see why I select i-n_samples
# left, node_dict = get_children_recursively(i - n_samples, model, node_dict,
# leaf_count, n_samples, data, verbose, left, right)
#
# if j < leaf_count:
# # print("j if")
# right = [j]
# else:
# # print("j else")
# right, node_dict = get_children_recursively(j - n_samples, model, node_dict,
# leaf_count, n_samples, data, verbose, left, right)
#
# if verbose:
# print(k, i, j, left, right)
# temp = map(lambda ii: data[ii], left)
# left_pos = np.mean(list(temp), axis=0)
# temp = map(lambda ii: data[ii], right)
# right_pos = np.mean(list(temp), axis=0)
#
# # this assumes that agg_cluster used euclidean distances
# dist = metrics.pairwise_distances([left_pos, right_pos], metric='euclidean')[0, 1]
#
# all_children = [x for y in [left, right] for x in y]
# pos = np.mean(list(map(lambda ii: data[ii], all_children)), axis=0)
#
# # store the results to speed up any additional or recursive evaluations
# node_dict[k] = {'top_child': [i, j], 'children': all_children, 'pos': pos, 'dist': dist,
# 'node_i': k + n_samples}
# return all_children, node_dict
# def recursive_atr
def get_children(tree, leaves_are_self_children=False):
    """Expand every node of *tree* to the list of leaves underneath it.

    :param tree: dict {node_id: (child_a, child_b)} from make_tree(); ids not
        present as keys (0..len(tree)) are leaves.
    :param leaves_are_self_children: if True, each leaf maps to [itself];
        otherwise leaves map to an empty list.
    :return: dict {node_id: list of descendant leaf ids}.
    """
    # this is a recursive function
    expanded_tree = {}
    # NOTE(review): range(max(tree.keys())) stops BEFORE the largest node id,
    # so the root node itself is never expanded here. Callers (e.g.
    # plot_dendrogram, which appends one extra observation count) appear to
    # rely on this — confirm before "fixing" the off-by-one.
    for node in range(max(tree.keys())):
        if node <= len(tree):
            if leaves_are_self_children:
                expanded_tree[node] = [node]
            else:
                expanded_tree[node] = []
        else:
            # expanded_tree[node] = list_children_single_node(node, tree)
            expanded_tree[node] = list_children_single_node(node, tree, leaves_are_self_children)
    return expanded_tree
def list_children_single_node(node, tree, leaves_are_self_children=False, only_leaves_are_children=True):
    """Recursively collect the descendants of *node* in *tree*.

    :param node: node id; ids <= len(tree) are treated as leaves.
    :param tree: dict {node_id: (child_a, child_b)} from make_tree().
    :param leaves_are_self_children: if True, a leaf returns [itself]; else [].
    :param only_leaves_are_children: if True, filter the result to leaf ids only;
        note the recursive calls below always pass True, so internal ids can
        only appear at the top level of the recursion.
    :return: sorted list of unique descendant ids.
    """
    # children = []
    if node <= len(tree):
        if leaves_are_self_children:
            children = [node]
        else:
            children = []
    else:
        children = list(tree[node])
    # Check each child, and add their children to the list
    # (the list grows while iterating, so grandchildren are visited too).
    for child in children:
        if child <= len(tree):
            pass
        else:
            children += list_children_single_node(child, tree, only_leaves_are_children=True)
    if only_leaves_are_children:
        # print(sorted(np.unique(i for i in children if i <= len(tree))))
        # print()
        return [i for i in sorted(np.unique(children)) if i <= len(tree)]
    else:
        return sorted(np.unique(children))
def centroid_distances(node_a, node_b, tree, data, axis=0, distance=mydist, clustering_method='average'):
    """Aggregate the pairwise values between all leaves of two tree nodes.

    :param node_a: first node id in *tree*.
    :param node_b: second node id in *tree*.
    :param tree: dict {node_id: (child_a, child_b)} from make_tree().
    :param data: 2-D array; observations are rows when axis=0, columns when axis=1.
    :param axis: 0 to compare rows, 1 to compare columns (data is transposed).
    :param distance: pairwise function applied to each leaf pair. NOTE(review):
        callers in this file sometimes pass *similarity* functions here — see
        the note on the 'complete' branch below.
    :param clustering_method: 'average' or 'complete'.
    :return: aggregated linkage value for the (node_a, node_b) merge.
    """
    if axis == 0:
        pass
    elif axis == 1:
        data = np.transpose(data)
    else:
        exit("Variable 'data' does not have that many axises (╯°□°)╯︵ ┻━┻")
    children_of_a = list_children_single_node(node_a, tree=tree, leaves_are_self_children=True)
    children_of_b = list_children_single_node(node_b, tree=tree, leaves_are_self_children=True)
    # if distance == custom_euclidean_sim:
    #     print("Euclidean distance is especial, normalizing using this scheme:")
    #     distance = custom_euclidean_dist
    distances_list = []
    if clustering_method == 'average':
        for pair in itertools.product(data[children_of_a], data[children_of_b]):
            distances_list.append(distance(pair[0], pair[1]))
        return np.average(distances_list)
    elif clustering_method == 'complete':
        for pair in itertools.product(data[children_of_a], data[children_of_b]):
            distances_list.append(distance(pair[0], pair[1]))
        # NOTE(review): np.min implements complete linkage only if `distance` is
        # actually a similarity (min similarity == max distance); for a true
        # distance this would be single linkage. Confirm against callers.
        return np.min(distances_list)
    else:
        exit("Ony 'average' and 'complete' clustering methods are accepted at the moment (>_<)")
def euclidian_similarity(x, y):
    """Map the distance between *x* and *y* to a similarity in (0, 1].

    Identical vectors give 1; the similarity decays exponentially with distance.
    (Name spelling kept as-is for interface compatibility.)
    """
    separation = mydist(x, y)
    # return 1/(1+dist)
    return 1 / (np.exp(separation))
def better_dendodist(children, distance, tree, data, axis, clustering_method='average'):
    """Score every merge in *children* with centroid_distances().

    :param children: iterable of (node_a, node_b) merge pairs (e.g. model.children_).
    :param distance: pairwise function forwarded to centroid_distances().
    :param tree: dict {node_id: (child_a, child_b)} from make_tree().
    :param data: the clustered matrix.
    :param axis: 0 for rows, 1 for columns.
    :param clustering_method: 'average' or 'complete'.
    :return: list of linkage values, one per merge, in input order.
    """
    return [centroid_distances(pair[0], pair[1], tree, data, axis, distance=distance,
                               clustering_method=clustering_method)
            for pair in children]
def HierarchicalClustering(pwd: "The current directory",
                           gct_name: "Gene expression data filename (.gct file) or Pandas DataFrame "
                                     "where rows are genes and columns are samples",
                           col_distance_metric: "The function to be used when comparing the distance/similarity of "
                                                "the columns in the gct_name dataset",
                           row_distance_metric: "The function to be used when comparing the distance/similarity of "
                                                "the rows in the gct_name dataset",
                           clustering_method: "Type of linkage to use" = 'average',
                           output_base_name: "Base name for output file" = 'HC_output',
                           row_normalization: "Whether to normalize each row (gene) in the data" = False,
                           col_normalization: "Whether to normalize each column (sample) in the data" = False,
                           row_centering: "How to center each row (gene) in the data" = 'Mean',
                           col_centering: "How to center each column (sample) in the data" = 'Mean',
                           output_distances: "Whether or not output the pair-wise distance matrix. "
                                             "If true, the distance between each column will be called, "
                                             "which can be very computationally intensive. "
                                             "If unsure, leave as False." = False,
                           custom_plot: "Plot the dendrograms by Genes, Samples, or Both" = 'Both',
                           clusters_to_highlight: "How many clusters to highlight in the dendrogram" = 2,
                           show: "Whether to show the plot at the end" = False):
    """
    This function performs hierarchical clustering to group samples (columns) with similar phenotypes
    and/or genes (rows) with similar expression profiles.
    :param pwd: The current directory
    :param gct_name: Gene expression data filename (.gct file) or Pandas DataFrame where rows are genes and
    columns are samples
    :param col_distance_metric: The function to be used when comparing the distance/similarity of
    the columns in the gct_name dataset
    :param row_distance_metric: The function to be used when comparing the distance/similarity of
    the rows in the gct_name dataset
    :param clustering_method: Type of linkage to use
    :param output_base_name: Base name for output file
    :param row_normalization: Whether to normalize each row (gene) in the data
    :param col_normalization: Whether to normalize each column (sample) in the data
    :param row_centering: How to center each row (gene) in the data
    :param col_centering: How to center each column (sample) in the data
    :param output_distances: Whether or not output the pair-wise distance matrix.
    If true, the distance between each column will be called,
    which can be very computationally intensive.
    If unsure, leave as False
    :param custom_plot: Plot the dendrograms by Genes, Samples, or Both
    :param clusters_to_highlight: How many clusters to highlight in the dendrogram
    :param show: Whether to show the plot at the end
    :return: (col_model, row_model) -- the fitted sklearn AgglomerativeClustering models for
    columns and rows; either is None when the corresponding metric disabled that axis.
    """
    # gct_name, col_distance_metric, output_distances, row_distance_metric, clustering_method, output_base_name, \
    # row_normalization, col_normalization, row_centering, col_centering = parse_inputs(sys.argv)
    # When one axis' clustering is disabled, force the plot mode that only draws the other axis.
    if col_distance_metric == "No_column_clustering":
        custom_plot = 'Genes'
    if row_distance_metric == "No_row_clustering":
        custom_plot = 'Samples'
    # Load + normalize the GCT data; returns both original and processed matrices/DataFrames.
    og_data, og_data_df, data, data_df, col_labels, row_labels, og_full_gct, new_full_gct = \
        parse_data(gct_name, row_normalization, col_normalization, row_centering, col_centering)
    # Default leaf orders (used when one of the two axes is not clustered).
    order_of_columns = list(data_df)
    order_of_rows = list(data_df.index)
    data_transpose = np.transpose(data)
    # print(data)
    # print(data_df)
    atr_companion = False
    col_model = None
    col_tree = None
    gtr_companion = False
    row_model = None
    row_tree = None
    AID = None
    GID = None
    # --- Column (sample) clustering: fit the model, order leaves, write the .atr file ---
    if col_distance_metric != 'No_column_clustering':
        atr_companion = True
        col_model = AgglomerativeClustering(linkage=linkage_dic[clustering_method], n_clusters=clusters_to_highlight,
                                            affinity=str2func[col_distance_metric])
        col_model.fit(data_transpose)
        col_tree = make_tree(col_model)
        order_of_columns = order_leaves(col_model, tree=col_tree, data=data_transpose,
                                        dist=str2similarity[col_distance_metric], labels=col_labels, reverse=True)
        path_to_atr = output_base_name + '.atr'
        make_atr(col_tree, file_name=path_to_atr, data=data,
                 dist=str2similarity[col_distance_metric], clustering_method=linkage_dic[clustering_method])
    # --- Row (gene) clustering: fit the model, order leaves, write the .gtr file ---
    if row_distance_metric != 'No_row_clustering':
        gtr_companion = True
        row_model = AgglomerativeClustering(linkage=linkage_dic[clustering_method], n_clusters=clusters_to_highlight,
                                            affinity=str2func[row_distance_metric])
        # y_col = row_model.fit_predict(np.transpose(data))
        # print(y_col)
        row_model.fit(data)
        row_tree = make_tree(row_model)
        order_of_rows = order_leaves(row_model, tree=row_tree, data=data,
                                     dist=str2similarity[row_distance_metric], labels=row_labels)
        path_to_gtr = output_base_name + '.gtr'
        make_gtr(row_tree, data=data, file_name=output_base_name + '.gtr', dist=str2similarity[row_distance_metric])
    # Optionally dump a pairwise distance matrix as CSV.
    # NOTE(review): only the ROW distance matrix is written here, and col_model.labels_
    # is read unconditionally -- this branch assumes both axes were clustered.
    if output_distances:
        # TODO: check which col or row was selected, or both
        row_distance_matrix = str2affinity_func[row_distance_metric](data)
        # col_distance_matrix = str2affinity_func[col_distance_metric](np.transpose(data))
        dist_file = open(output_base_name + '_pairwise_distances.csv', 'w')
        dist_file.write('labels,')
        dist_file.write(",".join(col_model.labels_.astype(str)) + "\n")
        dist_file.write('samples,')
        dist_file.write(",".join(list(data_df)) + "\n")
        i = 0
        for row in row_distance_matrix:
            dist_file.write('distances row=' + str(i) + "," + ",".join(row.astype(str)) + "\n")
            i += 1
    # Always write the .cdt file (data reordered to match the dendrogram leaves).
    path_to_cdt = output_base_name + '.cdt'
    make_cdt(data=new_full_gct, name=path_to_cdt, atr_companion=atr_companion,
             gtr_companion=gtr_companion,
             order_of_columns=order_of_columns, order_of_rows=order_of_rows)
    # --- Plot: column dendrogram on top of the heatmap ---
    if custom_plot == 'Samples':
        # Plotting the heatmap with dendrogram
        plt.clf()
        # fig = plt.figure(figsize=(16, 9), dpi=300)
        fig = plt.figure(figsize=(16, 9))
        gs = gridspec.GridSpec(2, 1, height_ratios=[1, 5])
        gs.update(wspace=0.0, hspace=0.0)
        ax0 = plt.subplot(gs[0])  # Doing dendrogram first
        ax0.axis('off')
        col_order, link = plot_dendrogram(col_model, data, col_tree, axis=1,
                                          dist=str2similarity[col_distance_metric],
                                          clustering_method=clustering_method,
                                          color_threshold=clusters_to_highlight,
                                          title='no_title.png', orientation='top')
        col_order = [int(i) for i in col_order]
        # print(col_order)
        named_col_order = [col_labels[i] for i in col_order]
        # print(named_col_order)
        # print(col_order)
        # print(col_model.labels_)
        ax1 = plt.subplot(gs[1])
        # Row-normalizing for display purposes only:
        data_df = data_df.subtract(data_df.min(axis=1), axis=0)
        data_df = data_df.div(data_df.max(axis=1), axis=0)
        sns.heatmap(data_df[named_col_order], ax=ax1, cbar=False, cmap='bwr')
        # ax1.xaxis.tick_top()
        [label.set_rotation(90) for label in ax1.get_xticklabels()]
        file_path_plot = output_base_name + '.pdf'
        plt.savefig(file_path_plot, bbox_inches='tight')
        print("----------------------------------------------------------------------")
        print("The PDF of this heatmap can be downloaded here:")
        display(HTML('<a href="' + file_path_plot + '" target="_blank">PDF of the heatmap</a>'))
        print("----------------------------------------------------------------------")
        print("The CDF which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_cdt + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        print("The ATR which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_atr + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        if show:
            # plt.show()
            pass
        # col_order = [int(i) for i in col_order]
        # print(col_order)
        # named_col_order = [col_labels[i] for i in col_order]
        # print(named_col_order)
        # print(col_order)
        # print(idxs2clusters)
        # Write the per-sample cluster assignments as a .cls file.
        cls_list = col_model.labels_
        # for i in range(len(col_order)):
        #     cls_list.append(idxs2clusters[i])
        # print(cls_list)
        # order_by = [col_order.index(i) for i in range(len(col_order))]
        # list2intlist(cls_list, custom_order=order_by)
        # in_list = np.array(cls_list)
        # print(cls_list)
        # print(np.array(list2intlist(cls_list, custom_order=order_by)))
        list2cls(np.array(list2intlist(cls_list)), name_of_out=output_base_name+'.cls', sep=' ')
    # --- Plot: row dendrogram to the right of the heatmap ---
    if custom_plot == 'Genes':
        # Plotting the heatmap with dendrogram
        plt.clf()
        # fig = plt.figure(figsize=(16, 9), dpi=300)
        fig = plt.figure(figsize=(16, 9))
        gs = gridspec.GridSpec(1, 2, width_ratios=[5, 1])
        gs.update(wspace=0.0, hspace=0.0)
        ax0 = plt.subplot(gs[1])  # Doing dendrogram first
        ax0.axis('off')
        row_order, link = plot_dendrogram(row_model, data_transpose, row_tree, axis=1,
                                          dist=str2similarity[row_distance_metric],
                                          clustering_method=clustering_method,
                                          color_threshold=clusters_to_highlight,
                                          orientation='right', title='no_title.png')
        # row_order = [int(i) for i in row_order]
        # named_row_order = [row_labels[i] for i in row_order]
        ax1 = plt.subplot(gs[0])
        # Row-normalizing for display purposes only:
        data_df = data_df.subtract(data_df.min(axis=1), axis=0)
        data_df = data_df.div(data_df.max(axis=1), axis=0)
        sns.heatmap(data_df.iloc[row_order], ax=ax1, cbar=False, cmap='bwr')
        # ax1.xaxis.tick_top()
        [label.set_rotation(90) for label in ax1.get_xticklabels()]
        file_path_plot = output_base_name + '.pdf'
        plt.savefig(file_path_plot, bbox_inches='tight')
        print("----------------------------------------------------------------------")
        print("The PDF of this heatmap can be downloaded here:")
        display(HTML('<a href="' + file_path_plot + '" target="_blank">PDF of the heatmap</a>'))
        print("----------------------------------------------------------------------")
        print("The CDF which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_cdt + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        print("The GTR which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_gtr + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        if show:
            plt.show()
    # --- Plot: both dendrograms around the heatmap ---
    # NOTE(review): this branch references path_to_atr and path_to_gtr, which only exist
    # when BOTH metrics are enabled; custom_plot is forced away from 'Both' otherwise.
    if custom_plot == 'Both':
        # Plotting the heatmap with dendrogram
        plt.clf()
        # fig = plt.figure(figsize=(16, 9), dpi=300)
        fig = plt.figure(figsize=(16, 9))
        gs = gridspec.GridSpec(2, 2, width_ratios=[5, 1], height_ratios=[1, 5])
        gs.update(wspace=0.0, hspace=0.0)
        # Doing TOP dendrogram first
        ax0 = plt.subplot(gs[0])
        ax0.axis('off')
        col_order, link = plot_dendrogram(col_model, data, col_tree, axis=1,
                                          dist=str2similarity[col_distance_metric],
                                          clustering_method=clustering_method,
                                          color_threshold=clusters_to_highlight,
                                          title='no_title.png', orientation='top')
        col_order = [int(i) for i in col_order]
        named_col_order = [col_labels[i] for i in col_order]
        # Doing RIGHT dendrogram
        ax3 = plt.subplot(gs[3])
        ax3.axis('off')
        row_order, link = plot_dendrogram(row_model, data_transpose, row_tree, axis=1,
                                          dist=str2similarity[row_distance_metric],
                                          clustering_method=clustering_method,
                                          color_threshold=clusters_to_highlight,
                                          orientation='right', title='no_title.png')
        # Plotting the heatmap now
        ax1 = plt.subplot(gs[2])
        # Row-normalizing for display purposes only:
        data_df = data_df.subtract(data_df.min(axis=1), axis=0)
        data_df = data_df.div(data_df.max(axis=1), axis=0)
        sns.heatmap(data_df[named_col_order].iloc[row_order], ax=ax1, cbar=False, cmap='bwr')
        # ax1.xaxis.tick_top()
        [label.set_rotation(90) for label in ax1.get_xticklabels()]
        file_path_plot = output_base_name + '.pdf'
        plt.savefig(file_path_plot, bbox_inches='tight')
        print("----------------------------------------------------------------------")
        print("The PDF of this heatmap can be downloaded here:")
        display(HTML('<a href="' + file_path_plot + '" target="_blank">PDF of the heatmap</a>'))
        print("----------------------------------------------------------------------")
        print("The CDF which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_cdt + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        print("The GTR which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_gtr + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        if show:
            plt.show()
    return col_model, row_model
def hc_samples(
        input_gene_expression: "gene expression data filename (.gct file) where rows are genes and columns are samples",
        clustering_type: "single or consensus -- Only single is suported at the moment",
        distance_metric: "the function to be used when comparing the distance/similarity of the columns in the "
                         "input_gene_expression dataset",
        file_basename: "the name to use when naming output files" = 'HC_out',
        clusters_to_highlight: "how many clusters to highlight in the dendrogram" = None):
    """
    Perform hierarchical clustering to group samples with similar phenotypes.
    :param input_gene_expression: str; gene expression data filename (.gct file)
                                  where rows are genes and columns are samples
    :param clustering_type: str; single or consensus (currently ignored; 'single' is always used)
    :param distance_metric: str; the function to be used when comparing the distance/similarity of the columns
                            in the input_gene_expression dataset
    :param file_basename: str; the name to use when naming output files
    :param clusters_to_highlight: int; how many clusters to highlight in the dendrogram
                                  (None falls back to the default of 2)
    :return: object; Sklearn's AgglomerativeClustering fitted model for the samples (columns)
    """
    print("Currently clustering_type is being ignored, only 'single' is supported.")
    # AgglomerativeClustering(n_clusters=None) raises unless a distance threshold is
    # also supplied, so map None to HierarchicalClustering's own default of 2.
    if clusters_to_highlight is None:
        clusters_to_highlight = 2
    # Fixed settings: cluster columns (samples) only, average linkage, mean-centering.
    pwd = '.'
    gct_name = input_gene_expression
    col_distance_metric = distance_metric
    output_distances = False
    row_distance_metric = 'No_row_clustering'
    clustering_method = 'average'
    output_base_name = file_basename
    row_normalization = False
    col_normalization = False
    row_centering = 'Mean'
    col_centering = 'Mean'
    custom_plot = 'Samples'
    show = True
    print("Now we will start performing hierarchical clustering, this may take a little while.")
    col_model, row_model = HierarchicalClustering(pwd,
                                                  gct_name,
                                                  col_distance_metric,
                                                  row_distance_metric,
                                                  clustering_method,
                                                  output_base_name,
                                                  row_normalization,
                                                  col_normalization,
                                                  row_centering,
                                                  col_centering,
                                                  output_distances,
                                                  custom_plot,
                                                  clusters_to_highlight,
                                                  show)
    print("Done with Hierarchical Clustering!")
    return col_model
def hc_genes(
        input_gene_expression: "gene expression data filename (.gct file) where rows are genes and columns are samples",
        clustering_type: "single or consensus -- Only single is suported at the moment",
        distance_metric: "the function to be used when comparing the distance/similarity of the rows in the "
                         "input_gene_expression dataset",
        file_basename: "the name to use when naming output files" = 'HC_out',
        clusters_to_highlight: "how many clusters to highlight in the dendrogram" = None):
    """
    Perform hierarchical clustering to group genes with similar expression profile.
    :param input_gene_expression: str; gene expression data filename (.gct file)
                                  where rows are genes and columns are samples
    :param clustering_type: str; single or consensus (currently ignored; 'single' is always used)
    :param distance_metric: str; the function to be used when comparing the distance/similarity of the rows
                            in the input_gene_expression dataset
    :param file_basename: str; the name to use when naming output files
    :param clusters_to_highlight: int; how many clusters to highlight in the dendrogram
                                  (None falls back to the default of 2)
    :return: object; Sklearn's AgglomerativeClustering fitted model for the genes (rows)
    """
    print("Currently clustering_type is being ignored, only 'single' is supported.")
    # AgglomerativeClustering(n_clusters=None) raises unless a distance threshold is
    # also supplied, so map None to HierarchicalClustering's own default of 2.
    if clusters_to_highlight is None:
        clusters_to_highlight = 2
    # Fixed settings: cluster rows (genes) only, average linkage, mean-centering.
    pwd = '.'
    gct_name = input_gene_expression
    col_distance_metric = 'No_column_clustering'
    output_distances = False
    row_distance_metric = distance_metric
    clustering_method = 'average'
    output_base_name = file_basename
    row_normalization = False
    col_normalization = False
    row_centering = 'Mean'
    col_centering = 'Mean'
    custom_plot = 'Genes'
    show = True
    print("Now we will start performing hierarchical clustering, this may take a little while.")
    col_model, row_model = HierarchicalClustering(pwd,
                                                  gct_name,
                                                  col_distance_metric,
                                                  row_distance_metric,
                                                  clustering_method,
                                                  output_base_name,
                                                  row_normalization,
                                                  col_normalization,
                                                  row_centering,
                                                  col_centering,
                                                  output_distances,
                                                  custom_plot,
                                                  clusters_to_highlight,
                                                  show)
    print("Done with Hierarchical Clustering!")
    return row_model
def normalize_dataframe(df, log_normalize=None,
                        row_centering='Mean', row_normalization=True,
                        col_centering='Mean', col_normalization=True):
    """
    This function takes in a DataFrame and some flags and normalizes the data it contains.
    Order of operations is:
    1- Log-normalize
    2- Row (gene) center
    3- Row (gene) normalize
    4- Column (sample) center
    5- Column (sample) normalize
    :param df: (Pandas DataFrame) A DataFrame to be normalized
    :param log_normalize: (float, None) base of the logarithm to use; None disables log-normalization
    :param row_centering: 'Mean', 'Median', or 'No'/None to subtract the row mean/median (or nothing)
    :param row_normalization: (bool) whether to scale each row to unit sum-of-squares
    :param col_centering: 'Mean', 'Median', or 'No'/None to subtract the column mean/median (or nothing)
    :param col_normalization: (bool) whether to scale each column to unit sum-of-squares
    :return: (Pandas DataFrame) a new DataFrame with the same index and columns
    """
    # The input_*_centering dictionaries map the user-facing 'No' onto None, so both
    # spellings mean "skip centering" here.
    if (log_normalize is None) \
            and (row_centering in (None, 'No')) and (col_centering in (None, 'No')) \
            and (row_normalization is False) and (col_normalization is False):
        print("No normalization has been requested ಠ_ಠ¯")
        return df
    # DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the replacement.
    data = df.to_numpy()
    # Log Normalizing
    if log_normalize is not None:
        print("I'm sorry, log-normalization is not supported at the moment (u_u)")
    # Row Centering
    if row_centering not in (None, 'No'):
        if row_centering == 'Mean':
            row_means = np.mean(data, axis=1)
            data = data - row_means.reshape((data.shape[0], 1))
        elif row_centering == 'Median':
            row_medians = np.median(data, axis=1)
            data = data - row_medians.reshape((data.shape[0], 1))
        else:
            print("row_centering has an unexpected value:", row_centering)
    # Row Normalizing (each row scaled to unit sum-of-squares)
    if row_normalization:
        row_norm = np.sum(data * data, axis=1)
        data = data / np.sqrt(row_norm.reshape((data.shape[0], 1)))
    # Column Centering
    if col_centering not in (None, 'No'):
        if col_centering == 'Mean':
            data = data - np.mean(data, axis=0)
        elif col_centering == 'Median':
            data = data - np.median(data, axis=0)
        else:
            print("col_centering has an unexpected value: ", col_centering)
    # Column Normalizing (each column scaled to unit sum-of-squares)
    if col_normalization:
        col_norm = np.sum(data * data, axis=0)
        data = data / np.sqrt(col_norm)
    return pd.DataFrame(data=data, index=df.index, columns=list(df))
def display_heatmap(data,
                    name='heatmap',
                    log_normalize=None,
                    row_centering: "How to center each row (gene) in the data" = 'No',
                    row_normalization: "Whether to normalize each row (gene) in the data" = True,
                    col_centering: "How to center each column (sample) in the data" = 'No',
                    col_normalization: "Whether to normalize each column (sample) in the data" = False,
                    mostrar=False):
    """
    Normalize a gene-expression dataset and save it as a row-labeled heatmap PDF.
    :param data: a pandas DataFrame, a path to a local GCT file, or a URL pointing to a GCT file
    :param name: base name of the output PDF ('.pdf' is appended when missing)
    :param log_normalize: (float, None) base of the logarithm to use; None disables log-normalization
    :param row_centering: How to center each row (gene) in the data
    :param row_normalization: Whether to normalize each row (gene) in the data
    :param col_centering: How to center each column (sample) in the data
    :param col_normalization: Whether to normalize each column (sample) in the data
    :param mostrar: whether to also show the plot interactively after saving
    :return: None
    """
    if isinstance(data, pd.DataFrame):
        data_to_plot = data.copy()
    elif os.path.isfile(data):
        # Local GCT file: skip the two GCT header lines and drop the metadata column.
        data_to_plot = pd.read_table(data, skiprows=2, sep='\t')
        data_to_plot.set_index('Name', inplace=True)
        data_to_plot.drop('Description', axis=1, inplace=True)
    else:
        # Last resort: assume 'data' is a URL pointing at a GCT file.
        try:
            data_to_plot = pd.read_table(data, skiprows=2, sep='\t')
        except urllib.error.HTTPError:
            print("I don't know what the variable 'data' contains.")
            print('data=')
            print(data)
            exit("If this is a url it may not be accessible.\n"
                 "(╯°□°)╯︵ ┻━┻")
        data_to_plot.set_index('Name', inplace=True)
        data_to_plot.drop('Description', axis=1, inplace=True)
    data_to_plot = normalize_dataframe(data_to_plot, log_normalize=log_normalize,
                                       row_centering=row_centering, row_normalization=row_normalization,
                                       col_centering=col_centering, col_normalization=col_normalization)
    plt.clf()
    # Scale the figure height with the number of rows so y tick labels do not overlap
    # (figure reshape per matplotlib customizing docs).
    fontsize_pt = plt.rcParams['ytick.labelsize']
    dpi = 72.27
    # compute the matrix height in points and inches
    # (DataFrame.as_matrix() was removed in pandas 1.0; shape is available directly)
    matrix_height_pt = fontsize_pt * data_to_plot.shape[0]
    matrix_height_in = (matrix_height_pt / dpi) * 1.2
    # compute the required figure height
    top_margin = 0.01  # in percentage of the figure height
    bottom_margin = 0.01  # in percentage of the figure height
    figure_height = matrix_height_in / (1 - top_margin - bottom_margin)
    # build the figure instance with the desired height
    fig, ax = plt.subplots(
        figsize=(6, figure_height),
        gridspec_kw=dict(top=1 - top_margin, bottom=bottom_margin))
    sns.heatmap(data_to_plot, cmap='bwr', yticklabels=True, square=True,
                cbar_kws={'use_gridspec': False,
                          'location': "right",
                          'shrink': 0.5,
                          'label': ''}
                )
    if not name.endswith('.pdf'):
        name = name + '.pdf'
    plt.savefig(name, dpi=dpi, bbox_inches='tight')
    print(name, "has been created!")
    if mostrar:
        plt.show()
    print("The PDF of this heatmap can be downloaded here:")
    display(HTML('<a href="' + name + '" target="_blank">PDF of the heatmap</a>'))
    return
| 41.66015 | 132 | 0.623869 | import sys
import numpy as np
from statistics import mode
from sklearn.metrics import pairwise
from sklearn import metrics
from scipy.cluster.hierarchy import dendrogram
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import itertools
from sklearn.cluster import AgglomerativeClustering
import scipy
import itertools
from collections import defaultdict
from .elemental import *
from .information import *
import os
import sys
tasklib_path = os.path.dirname(os.path.realpath(sys.argv[0]))
from IPython.core.display import display, HTML
import scipy
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import gridspec
from sklearn.cluster import AgglomerativeClustering
# Global plotting defaults: white seaborn background and larger tick/axis
# label fonts so dendrogram and heatmap labels stay legible in the saved PDFs.
sns.set_style("white")
import matplotlib as mpl
mpl.rcParams['ytick.labelsize'] = 16
mpl.rcParams['xtick.labelsize'] = 16
mpl.rcParams['axes.titlesize'] = 24
mpl.rcParams['axes.labelsize'] = 20
# Number of significant digits used when rounding values for output.
SIGNIFICANT_DIGITS = 7
# Maps every accepted spelling of a COLUMN distance option (GUI label, numeric
# menu code, or canonical short name) onto the canonical metric name used
# internally by the clustering code.
input_col_distance_dict = {
    # These are the options that ComparativeGeneMarkerSelection provides:
    "No column clustering": "No_column_clustering",
    "Uncentered correlation": "uncentered_pearson",
    "Pearson correlation": "pearson",
    "Uncentered correlation, absolute value": "absolute_uncentered_pearson",
    "Pearson correlation, absolute value": "absolute_pearson",
    "Spearman's rank correlation": "spearman",
    "Kendall's tau": "kendall",
    "Euclidean distance": "euclidean",
    "City-block distance": "manhattan",
    "No_column_clustering": "No_column_clustering",
    # Numeric menu codes:
    "0": "No_column_clustering",
    "1": "uncentered_pearson",
    "2": "pearson",
    "3": "absolute_uncentered_pearson",
    "4": "absolute_pearson",
    "5": "spearman",
    "6": "kendall",
    "7": "euclidean",
    "8": "manhattan",
    "9": "information_coefficient",
    # Canonical names map onto themselves so already-normalized input passes through:
    "no_col": "No_column_clustering",
    "uncentered_pearson": "uncentered_pearson",
    "pearson": "pearson",
    "absolute_uncentered_pearson": "absolute_uncentered_pearson",
    "absolute_pearson": "absolute_pearson",
    "spearman": "spearman",
    "kendall": "kendall",
    "euclidean": "euclidean",
    "manhattan": "manhattan",
    "Cosine": "cosine",
    "cosine": "cosine",
    "ic": "information_coefficient",
    "information_coefficient": "information_coefficient",
    "Information Coefficient": "information_coefficient",
}
# Same idea as input_col_distance_dict, but for ROW (gene) distance options.
input_row_distance_dict = {
    "No row clustering": "No_row_clustering",
    "Uncentered correlation": "uncentered_pearson",
    "Pearson correlation": "pearson",
    "Uncentered correlation, absolute value": "absolute_uncentered_pearson",
    "Pearson correlation, absolute value": "absolute_pearson",
    "Spearman's rank correlation": "spearman",
    "Kendall's tau": "kendall",
    "Euclidean distance": "euclidean",
    "City-block distance": "manhattan",
    "No_row_clustering": "No_row_clustering",
    # Numeric menu codes:
    "0": "No_row_clustering",
    "1": "uncentered_pearson",
    "2": "pearson",
    "3": "absolute_uncentered_pearson",
    "4": "absolute_pearson",
    "5": "spearman",
    "6": "kendall",
    "7": "euclidean",
    "8": "manhattan",
    "9": "information_coefficient",
    # Canonical names map onto themselves so already-normalized input passes through:
    "no_row": "No_row_clustering",
    "uncentered_pearson": "uncentered_pearson",
    "pearson": "pearson",
    "absolute_uncentered_pearson": "absolute_uncentered_pearson",
    "absolute_pearson": "absolute_pearson",
    "spearman": "spearman",
    "kendall": "kendall",
    "euclidean": "euclidean",
    "manhattan": "manhattan",
    "Cosine": "cosine",
    "cosine": "cosine",
    "ic": "information_coefficient",
    "information_coefficient": "information_coefficient",
    "Information Coefficient": "information_coefficient",
}
# Maps GUI labels and single-letter codes for the linkage method onto the
# linkage names used by sklearn.
input_clustering_method = {
    'Pairwise complete-linkage': 'complete',
    'Pairwise average-linkage': 'average',
    'Pairwise ward-linkage': 'ward',
    'm': 'complete',
    'a': 'average',
}
# Maps row-centering options onto the values understood downstream
# (None means "do not center").
input_row_centering = {
    'No': None,
    'Subtract the mean from each row': 'Mean',
    'Subtract the median from each row': 'Median',
    'None': None,
    'Median': 'Median',
    'Mean': 'Mean',
}
# Maps yes/no style answers onto booleans for row normalization.
input_row_normalize = {
    'No': False,
    'Yes': True,
    'False': False,
    'True': True,
}
# Same as input_row_centering, but for columns (samples).
input_col_centering = {
    'No': None,
    'Subtract the mean from each column': 'Mean',
    'Subtract the median from each column': 'Median',
    'None': None,
    'Median': 'Median',
    'Mean': 'Mean',
}
# Same as input_row_normalize, but for columns (samples).
input_col_normalize = {
    'No': False,
    'Yes': True,
    'False': False,
    'True': True,
}
def parse_inputs(args=sys.argv):
    """Parse positional CLI arguments for the hierarchical-clustering module.

    Expected order (after the program name):
        1  gct_name             input GCT file (required)
        2  col_distance_metric  (default 'euclidean')
        3  output_distances     False for 'False'/'F'/'false'/'f', else True
                                (default False)
        4  row_distance_metric  (default 'No_row_clustering')
        5  clustering_method    (default 'Pairwise average-linkage')
        6  output_base_name     (default 'HC_out')
        7  row_normalization    (default False)
        8  col_normalization    (default False)
        9  row_centering        (default None)
        10 col_centering        (default None)

    Values provided on the command line are translated to canonical internal
    names through the input_* lookup dictionaries; defaults are already in
    their original form and are returned untouched (matching the historical
    behavior of the per-arity branches this function replaces).

    Returns the ten values above as a tuple.  Calls sys.exit on too few or
    too many arguments, or on an unsupported ward-linkage combination.
    """
    arg_n = len(args)
    if arg_n < 2:
        sys.exit("Not enough parameters files were provided. This module needs a GCT file to work.")
    if arg_n > 11:
        sys.exit("Too many inputs. This module needs only a GCT file to work, "
                 "plus an optional input choosing between Pearson Correlation or Information Coefficient.")

    gct_name = args[1]
    # Defaults for every optional parameter.
    col_distance_metric = 'euclidean'
    output_distances = False
    row_distance_metric = 'No_row_clustering'
    clustering_method = 'Pairwise average-linkage'
    output_base_name = 'HC_out'
    row_normalization = False
    col_normalization = False
    row_centering = None
    col_centering = None

    provided = arg_n - 2  # number of optional arguments actually given
    if provided >= 1:
        # BUG FIX: the original 3-argument branch printed the translated
        # metric but returned the raw argument; translate and keep it.
        col_distance_metric = input_col_distance_dict[args[2]]
    if provided >= 2:
        # Any spelling of False/F disables the distance-matrix output file.
        output_distances = args[3] not in ('False', 'F', 'false', 'f')
    if provided >= 3:
        row_distance_metric = input_row_distance_dict[args[4]]
    if provided >= 4:
        clustering_method = input_clustering_method[args[5]]
        if clustering_method not in linkage_dic:
            sys.exit("Clustering method chosen not supported. This should not have happened.")
        # Originally this compatibility check ran only when exactly six
        # arguments were given; it now applies whenever a method is provided.
        # NOTE(review): the check mirrors the original verbatim -- it compares
        # the *distance metric* against the linkage name 'average'; confirm
        # this is the intended constraint.
        if (linkage_dic[clustering_method] == 'ward') and (col_distance_metric != 'average'):
            sys.exit("When choosing 'Pairwise ward-linkage' the distance metric *must* be 'average' ")
    if provided >= 5:
        output_base_name = args[6]
    if provided >= 6:
        row_normalization = input_row_normalize[args[7]]
    if provided >= 7:
        col_normalization = input_col_normalize[args[8]]
    if provided >= 8:
        # The lookup already maps 'None' -> None.  The original's follow-up
        # string comparison (which mistakenly overwrote col_normalization
        # based on row_centering) was dead code and has been removed.
        row_centering = input_row_centering[args[9]]
    if provided >= 9:
        col_centering = input_col_centering[args[10]]

    print("Using:")
    print("\tgct_name =", gct_name)
    print("\tcol_distance_metric =", col_distance_metric)
    print("\toutput_distances =", output_distances)
    print("\trow_distance_metric =", row_distance_metric)
    print("\tclustering_method =", clustering_method)
    print("\toutput_base_name =", output_base_name)
    print("\trow_normalization =", row_normalization)
    print("\tcol_normalization =", col_normalization)
    print("\trow_centering =", row_centering)
    print("\tcol_centering =", col_centering)
    print(args)
    return gct_name, col_distance_metric, output_distances, row_distance_metric, clustering_method, \
        output_base_name, row_normalization, col_normalization, row_centering, col_centering
def plot_dendrogram(model, data, tree, axis, dist=mydist, clustering_method='average',
                    title='no_title.png', color_threshold=None, orientation='top', **kwargs):
    """Build a scipy-style linkage matrix from a fitted agglomerative model
    and draw the corresponding dendrogram.

    Returns (order_of_columns, linkage_matrix): the leaf labels in plotting
    order (dendrogram's 'ivl') and the linkage matrix that produced them.
    `title` is currently unused; `color_threshold`, when given, is interpreted
    as a desired number of clusters and converted into a cut height.
    """
    children = model.children_
    merge_values = better_dendodist(children, dist, tree, data, axis=axis,
                                    clustering_method=clustering_method)
    # Similarities in [-1, 1] (correlation-like) are shifted to be
    # nonnegative; the listed metrics are already nonnegative and pass through.
    if dist not in (custom_euclidean_sim, absolute_uncentered_pearson_corr, absolute_pearson_corr):
        merge_values = [value + 1 for value in merge_values]
    # Invert similarity -> distance, then accumulate so merge heights increase.
    merge_values = [1 / value for value in merge_values]
    distance = np.cumsum(merge_values)

    # Cluster size at each merge step, derived from the tree's children map.
    children_lists = list(get_children(tree, leaves_are_self_children=False).values())
    no_of_observations = [len(kids) for kids in children_lists if kids]
    no_of_observations.append(len(no_of_observations) + 1)

    if all(value == 0 for value in distance):
        # Degenerate case: every merge at height zero -> space them uniformly.
        distance = np.arange(len(distance))

    linkage_matrix = np.column_stack([children, distance, no_of_observations]).astype(float)

    if color_threshold is not None:
        # Translate "number of clusters" into a cut height just below the
        # corresponding merge height in the linkage matrix.
        if color_threshold == 1:
            color_threshold = 2
        if color_threshold > (len(linkage_matrix) + 1):
            color_threshold = (len(linkage_matrix) + 1)
        color_threshold = linkage_matrix[-(color_threshold - 1)][2] - np.finfo(float).eps

    R = dendrogram(linkage_matrix, color_threshold=color_threshold, orientation=orientation, **kwargs)
    order_of_columns = R['ivl']
    return order_of_columns, linkage_matrix
def get_clusters(tree):
    """Placeholder: cluster extraction from `tree` is not implemented yet."""
    return None
def get_cluster_classes(den, label='ivl'):
    """Print diagnostics about a scipy dendrogram dict `den`.

    Adapted from http://www.nxn.se/valent/extract-cluster-elements-by-color-in-python
    The color-to-leaf extraction itself is currently disabled: the returned
    cluster mappings are always empty; only the field sizes are printed.
    `label` is kept for interface compatibility and is unused.
    """
    clusters2idxs = defaultdict(list)
    idxs2clusters = {}
    print(den.keys())
    print(len(den['icoord']))
    print(len(den['dcoord']))
    print(len(den['ivl']))
    print(len(den['leaves']))
    print(den['leaves'])
    print(len(den['color_list']))
    print(den['color_list'])
    return clusters2idxs, idxs2clusters
def order_leaves(model, data, tree, labels, axis=0, dist=mydist, reverse=False):
    """Return `labels` reordered to match the model's dendrogram leaf order.

    Adapted from https://stackoverflow.com/questions/12572436/calculate-ordering-of-dendrogram-leaves
    Walks model.children_ bottom-up, concatenating each merge's leaf lists;
    the final merged list is the left-to-right leaf order.  `data`, `tree`,
    `axis` and `dist` are unused here and kept for interface compatibility.
    """
    merge_table = np.column_stack([model.children_]).astype(float)
    n_leaves = len(merge_table) + 1
    # subtree_leaves[node_id] -> flat list of leaf indices under that node.
    subtree_leaves = dict()
    for step in range(len(merge_table)):
        left, right = int(merge_table[step][0]), int(merge_table[step][1])
        left_leaves = [left] if left < n_leaves else subtree_leaves.pop(left)
        right_leaves = [right] if right < n_leaves else subtree_leaves.pop(right)
        subtree_leaves[n_leaves + step] = left_leaves + right_leaves
    leaf_order = subtree_leaves[2 * len(merge_table)]
    if reverse:
        leaf_order = list(reversed(leaf_order))
    return [labels[i] for i in leaf_order]
def two_plot_two_dendrogram(model, dist=mydist, **kwargs):
    """Draw a left-oriented dendrogram for `model` and return the row order.

    modified from https://github.com/scikit-learn/scikit-learn/pull/3464/files
    Builds a scipy-style linkage matrix from the model's merge history and
    returns the leaf labels bottom-to-top (reversed 'ivl').
    """
    children = model.children_
    # Distance between each merged pair; uniform spacing when all are zero.
    merge_distances = dendodist(children, dist)
    if all(value == 0 for value in merge_distances):
        merge_distances = np.arange(len(merge_distances))
    # Cluster size grows by one observation at every merge step.
    cluster_sizes = np.arange(2, children.shape[0] + 2)
    linkage_matrix = np.column_stack([children, merge_distances, cluster_sizes]).astype(float)
    R = dendrogram(linkage_matrix, color_threshold=0, orientation='left', **kwargs)
    order_of_rows = R['ivl']
    plt.gca().get_xaxis().set_visible(False)
    return list(reversed(order_of_rows))
def my_affinity_generic(M, metric):
    """Full pairwise matrix over the rows of M: entry [i][j] = metric(M[j], M[i])."""
    matrix = [[metric(a, b) for a in M] for b in M]
    return np.array(matrix)
def my_affinity_i(M):
    """Pairwise information-coefficient distance matrix over the rows of M."""
    return my_affinity_generic(M, information_coefficient_dist)
def my_affinity_ai(M):
    """Pairwise absolute information-coefficient distance matrix over the rows of M."""
    return my_affinity_generic(M, absolute_information_coefficient_dist)
def my_affinity_p(M):
    """Pairwise Pearson distance matrix over the rows of M."""
    return my_affinity_generic(M, custom_pearson_dist)
def my_affinity_s(M):
    """Pairwise Spearman distance matrix over the rows of M."""
    return my_affinity_generic(M, custom_spearman_dist)
def my_affinity_k(M):
    """Pairwise Kendall-tau distance matrix over the rows of M."""
    return my_affinity_generic(M, custom_kendall_tau_dist)
def my_affinity_ap(M):
    """Pairwise absolute-Pearson distance matrix over the rows of M."""
    return my_affinity_generic(M, absolute_pearson_dist)
def my_affinity_u(M):
    """Pairwise uncentered-Pearson distance matrix over the rows of M."""
    return my_affinity_generic(M, uncentered_pearson_dist)
def my_affinity_au(M):
    """Pairwise absolute uncentered-Pearson distance matrix over the rows of M."""
    return my_affinity_generic(M, absolute_uncentered_pearson_dist)
def my_affinity_l1(M):
    """Pairwise Manhattan (L1) distance matrix over the rows of M."""
    return my_affinity_generic(M, custom_manhattan_dist)
def my_affinity_l2(M):
    """Pairwise Euclidean (L2) distance matrix over the rows of M."""
    return my_affinity_generic(M, custom_euclidean_dist)
def my_affinity_m(M):
    """Pairwise Manhattan distance matrix over the rows of M (alias of my_affinity_l1)."""
    return my_affinity_generic(M, custom_manhattan_dist)
def my_affinity_c(M):
    """Pairwise cosine distance matrix over the rows of M."""
    return my_affinity_generic(M, custom_cosine_dist)
def my_affinity_e(M):
    """Pairwise Euclidean distance matrix over the rows of M (same as my_affinity_l2)."""
    return my_affinity_generic(M, custom_euclidean_dist)
def count_diff(x):
    """Count how many entries of `x` differ from its first element.

    Raises IndexError on an empty sequence (same as the original behavior).
    """
    reference = x[0]
    return sum(1 for item in x if item != reference)
def count_mislabels(labels, true_labels):
    """Count disagreements between predicted and true binary cluster labels.

    Assumes exactly two true clusters, labeled 0 and 1 (2017-08-17 design
    decision).  The larger true cluster is assigned the modal predicted
    label; the smaller cluster gets the other binary label.  Entries not
    matching their cluster's assigned label are counted as mislabels.

    NOTE(review): `mode` is imported elsewhere in this module; this assumes
    it returns a plain scalar label -- confirm which `mode` is in scope.
    """
    cluster_zero = labels[true_labels == 0]
    cluster_one = labels[true_labels == 1]
    if len(cluster_zero) <= len(cluster_one):
        shorter, longer = cluster_zero, cluster_one
    else:
        shorter, longer = cluster_one, cluster_zero
    majority = mode(longer)  # expected label of the bigger cluster
    minority = 1 if majority == 0 else 0  # the other binary label
    return np.count_nonzero(longer != majority) + np.count_nonzero(shorter != minority)
def plot_heatmap(df, col_order, row_order, top=5, title_text='differentially expressed genes per phenotype'):
    """Save a heatmap of the top and bottom `top` rows of `df` to heatmap.png.

    Columns are reordered to `col_order` and rows reindexed to `row_order`
    before plotting the first and last `top` rows with a viridis colormap.
    Exits with an error message if the provided orderings do not match the
    DataFrame's dimensions.
    """
    # BUG FIX: the original guards tested `if not (len(a), len(b))` -- the
    # truthiness of a 2-tuple, which is always True, so `not (...)` was
    # always False and the sanity checks never fired.  Compare lengths.
    if len(col_order) != len(list(df)):
        exit("Number of columns in dataframe do not match the columns provided for ordering.")
    if len(row_order) != len(df):
        exit("Number of rows in dataframe do not match the columns provided for ordering.")
    df = df[col_order]
    df = df.reindex(row_order)
    plt.clf()
    # np.r_ selects the first `top` and last `top` rows in one slice.
    sns.heatmap(df.iloc[np.r_[0:top, -top:0], :], cmap='viridis')
    plt.yticks(rotation=0)
    plt.xticks(rotation=90)
    plt.title('Top {} {}'.format(top, title_text))
    plt.ylabel('Genes')
    plt.xlabel('Sample')
    plt.savefig('heatmap.png', dpi=300, bbox_inches="tight")
def parse_data(gct_name, row_normalization=False, col_normalization=False, row_centering=None, col_centering=None):
    """Load a GCT file (path/URL or pre-loaded DataFrame) and normalize it.

    Parameters
    ----------
    gct_name : str or pandas.DataFrame
        Path/URL of a GCT file, or a DataFrame already containing the data.
    row_normalization, col_normalization : bool
        Whether to normalize rows / columns (delegated to normalize_dataframe).
    row_centering, col_centering : None, 'Mean' or 'Median'
        Centering scheme per axis (delegated to normalize_dataframe).

    Returns
    -------
    (og_data, data_df, data, new_data_df, plot_labels, row_labels,
     og_full_gct, new_full_gct): the raw matrix, raw DataFrame, normalized
    matrix, normalized DataFrame, column labels for plotting, row labels,
    and the original/normalized GCT-style DataFrames with 'Description'.
    """
    try:
        data_df = pd.read_csv(gct_name, sep='\t', skiprows=2)
    except ValueError:
        # gct_name was already a DataFrame rather than a path/URL.
        data_df = gct_name

    # BUG FIX: the original compared with `is 'Name'` -- identity against a
    # string literal, which is interpreter-dependent (and a SyntaxWarning on
    # modern Python).  Use equality.
    if data_df.index.name == 'Name':
        data_df['Name'] = data_df.index
    else:
        if 'Name' not in list(data_df):
            # First column doubles as the gene name when none is labeled.
            data_df['Name'] = data_df.iloc[:, 0]
            data_df.drop(data_df.columns[0], axis=1, inplace=True)
    if 'Description' not in list(data_df):
        data_df['Description'] = data_df['Name']
    data_df.set_index(data_df['Name'], inplace=True)
    og_full_gct = data_df.copy()
    og_full_gct.drop(['Name'], axis=1, inplace=True)
    data_df.drop(['Name', 'Description'], axis=1, inplace=True)
    plot_labels = list(og_full_gct.drop(['Description'], axis=1, inplace=False))
    # `.values` instead of `.as_matrix()`, which was deprecated in pandas
    # 0.23 and removed in 1.0.
    data = data_df.values
    row_labels = data_df.index.values
    og_data = data.copy()

    # All centering/normalization logic lives in normalize_dataframe.
    data = normalize_dataframe(data_df, log_normalize=None,
                               row_centering=row_centering, row_normalization=row_normalization,
                               col_centering=col_centering, col_normalization=col_normalization).values

    new_data_df = pd.DataFrame(data=data, index=data_df.index, columns=list(data_df))
    new_full_gct = new_data_df.copy()
    new_full_gct.insert(0, column='Description', value=og_full_gct['Description'])
    return og_data, data_df, data, new_data_df, plot_labels, row_labels, og_full_gct, new_full_gct
# Affinity argument handed to sklearn's AgglomerativeClustering: custom
# metrics map to a callable that builds the affinity matrix, while metrics
# sklearn supports natively map to their sklearn string names.
str2func = {
    'custom_euclidean': my_affinity_e,
    'uncentered_pearson': my_affinity_u,
    'absolute_uncentered_pearson': my_affinity_au,
    'information_coefficient': my_affinity_i,
    'pearson': my_affinity_p,
    'spearman': my_affinity_s,
    'kendall': my_affinity_k,
    'absolute_pearson': my_affinity_ap,
    'l1': 'l1',
    'l2': 'l2',
    'manhattan': 'manhattan',
    'cosine': 'cosine',
    'euclidean': 'euclidean',
}
# Same keys as str2func, but every entry is a callable returning the full
# pairwise affinity matrix (for when a matrix is needed explicitly).
str2affinity_func = {
    'custom_euclidean': my_affinity_e,
    'uncentered_pearson': my_affinity_u,
    'absolute_uncentered_pearson': my_affinity_au,
    'information_coefficient': my_affinity_i,
    'pearson': my_affinity_p,
    'spearman': my_affinity_s,
    'kendall': my_affinity_k,
    'absolute_pearson': my_affinity_ap,
    'l1': my_affinity_l1,
    'l2': my_affinity_l2,
    'manhattan': my_affinity_m,
    'cosine': my_affinity_c,
    'euclidean': my_affinity_e,
}
# Canonical metric name -> pairwise *distance* function (element-wise).
str2dist = {
    'custom_euclidean': custom_euclidean_dist,
    'uncentered_pearson': uncentered_pearson_dist,
    'absolute_uncentered_pearson': absolute_uncentered_pearson_dist,
    'information_coefficient': information_coefficient_dist,
    'pearson': custom_pearson_dist,
    'spearman': custom_spearman_dist,
    'kendall': custom_kendall_tau_dist,
    'absolute_pearson': absolute_pearson_dist,
    'l1': custom_manhattan_dist,
    'l2': custom_euclidean_dist,
    'manhattan': custom_manhattan_dist,
    'cosine': custom_cosine_dist,
    'euclidean': custom_euclidean_dist,
}
# Canonical metric name -> pairwise *similarity* function (element-wise).
str2similarity = {
    'custom_euclidean': custom_euclidean_sim,
    'uncentered_pearson': uncentered_pearson_corr,
    'absolute_uncentered_pearson': absolute_uncentered_pearson_corr,
    'information_coefficient': information_coefficient,
    'pearson': custom_pearson_corr,
    'spearman': custom_spearman_corr,
    'kendall': custom_kendall_tau_corr,
    'absolute_pearson': absolute_pearson_corr,
    'l1': custom_manhattan_sim,
    'l2': custom_euclidean_sim,
    'manhattan': custom_manhattan_sim,
    'cosine': custom_cosine_sim,
    # 'euclidean': pairwise.paired_euclidean_distances,
    'euclidean': custom_euclidean_sim,
    # 'euclidean': custom_euclidean_dist,
}
# Accepted linkage spellings (GUI label or canonical) -> canonical name.
linkage_dic = {
    'Pairwise average-linkage': 'average',
    'Pairwise complete-linkage': 'complete',
    'Pairwise ward-linkage': 'ward',
    'average': 'average',
    'complete': 'complete',
    'ward': 'ward',
}
def make_tree(model, data=None):
    """Map each internal node id of a fitted agglomerative model to its children.

    Node ids start at model.n_leaves_ (indices 0..n_leaves_-1 are leaves),
    mirroring scipy's linkage-matrix numbering.  `data` is unused and kept
    only for interface compatibility.
    """
    first_internal_id = model.n_leaves_
    return {node_id: pair for node_id, pair in enumerate(model.children_, first_internal_id)}
def make_cdt(data, order_of_columns, order_of_rows, name='test.cdt', atr_companion=True, gtr_companion=False):
    """Write `data` out as a Clustered Data Table (.cdt) file at `name`.

    Builds the CDT layout expected by TreeView-style viewers: a GWEIGHT
    column, an EWEIGHT row, plus AID (array ids, when atr_companion) and
    GID (gene ids, when gtr_companion) rows/columns, with columns and rows
    reordered to match the dendrogram orders.

    WARNING: mutates `data` in place (renames, inserts, reindexes).
    `order_of_columns` / `order_of_rows` must match the data's dimensions.
    """
    # TODO: if order_of_columns == None, then do arange(len(list(data)))
    # TODO: if order_of_rows == None, then do arange(len(list(data)))
    data.index.name = "ID"
    data.rename(columns={'Description': 'Name'}, inplace=True)
    temp = np.ones(len(data))
    data.insert(loc=1, column='GWEIGHT', value=temp) # adding an extra column
    # These three lines add the EWEIGHT row and move it to the top.
    data.loc['EWEIGHT'] = list(np.ones(len(list(data))))
    newIndex = ['EWEIGHT'] + [ind for ind in data.index if ind != 'EWEIGHT']
    data = data.reindex(index=newIndex)
    if atr_companion:
        # AID row links columns to the .atr file; two leading blanks cover
        # the Name and GWEIGHT columns.
        new_AID = ['', '']
        for element in range(len(order_of_columns)):
            temp = 'ARRY' + str(element) + 'X'
            new_AID.append(temp)
        data.loc['AID'] = new_AID
        newIndex = ['AID'] + [ind for ind in data.index if ind != 'AID']
        data = data.reindex(index=newIndex)
        # Reorder the data columns to the dendrogram's column order.
        data = data[['Name', 'GWEIGHT'] + order_of_columns]
    if gtr_companion:
        # GID column links rows to the .gtr file.
        new_GID = ['']
        if atr_companion:
            new_GID = ['AID', 'EWEIGHT'] # This is to make sure we fit the CDT format
        for element in range(len(order_of_rows)):
            temp = 'GENE' + str(element) + 'X'
            new_GID.append(temp)
        data.insert(loc=0, column='GID', value=new_GID) # adding an extra column
        data.insert(loc=0, column=data.index.name, value=data.index) # Making the index a column
        # reorder rows to match dendogram (header rows stay on top)
        temp = ['AID', 'EWEIGHT'] + order_of_rows
        data = data.reindex(temp)
        # Making the 'GID' the index -- for printing purposes
        data.index = data['GID']
        data.index.name = 'GID'
        data.drop(['GID'], axis=1, inplace=True)
    # The first three lines need to be written separately due to a quirk in the CDT file format:
    f = open(name, 'w')
    f.write(data.to_csv(sep='\t', index=True, header=True))
    f.close()
    # Rounding happens after the write; the file keeps full precision.
    data = data.round(2)
    return
def make_atr(col_tree_dic, data, dist, clustering_method='average', file_name='test.atr'):
    """Write the ATR (array/column tree) file for the clustered columns.

    Each output line links a NODE identifier to its two children plus the
    node's linkage distance, tab-separated, as expected by CDT viewers.
    """
    total_nodes = len(col_tree_dic)
    # Pre-compute the linkage distance associated with every internal node.
    node_distances = {}
    for node, children in col_tree_dic.items():
        node_distances[node] = centroid_distances(
            children[0], children[1], tree=col_tree_dic, data=data, axis=1,
            distance=dist, clustering_method=clustering_method)
    with open(file_name, 'w') as out:
        for node, children in col_tree_dic.items():
            row = [translate_tree(node, total_nodes, 'atr'),
                   translate_tree(children[0], total_nodes, 'atr'),
                   translate_tree(children[1], total_nodes, 'atr'),
                   "{num:.{width}f}".format(num=node_distances[node],
                                            width=SIGNIFICANT_DIGITS)]
            out.write('\t'.join(row) + '\n')
    return
def make_gtr(row_tree_dic, data, dist, clustering_method='average', file_name='test.gtr'):
    """Write the GTR (gene/row tree) file for the clustered rows.

    Mirrors make_atr but operates on the row tree (axis=0) and emits
    GENE/NODE identifiers instead of ARRY/NODE ones.
    """
    total_nodes = len(row_tree_dic)
    # Pre-compute the linkage distance associated with every internal node.
    node_distances = {}
    for node, children in row_tree_dic.items():
        node_distances[node] = centroid_distances(
            children[0], children[1], tree=row_tree_dic, data=data, axis=0,
            distance=dist, clustering_method=clustering_method)
    with open(file_name, 'w') as out:
        for node, children in row_tree_dic.items():
            row = [translate_tree(node, total_nodes, 'gtr'),
                   translate_tree(children[0], total_nodes, 'gtr'),
                   translate_tree(children[1], total_nodes, 'gtr'),
                   "{num:.{width}f}".format(num=node_distances[node],
                                            width=SIGNIFICANT_DIGITS)]
            out.write('\t'.join(row) + '\n')
    return
def translate_tree(what, length, g_or_a):
    """Translate a numeric tree index into a CDT-style identifier.

    Indices <= length are leaves and become 'ARRY#X' (when g_or_a contains
    'a') or 'GENE#X' (when it contains 'g'); larger indices are internal
    nodes and become 'NODE#X', numbered relative to *length*.  Unsupported
    g_or_a values print a warning and yield an empty list.
    """
    if 'a' in g_or_a:
        leaf_prefix = 'ARRY'
    elif 'g' in g_or_a:
        leaf_prefix = 'GENE'
    else:
        print('This function does not support g_or_a=', g_or_a)
        return []
    if what <= length:
        return leaf_prefix + str(what) + 'X'
    return 'NODE' + str(what - length) + 'X'
# def get_children_recursively(k, model, node_dict, leaf_count, n_samples, data, verbose=False, left=None, right=None):
# # print(k)
# i, j = model.children_[k]
#
# if k in node_dict:
# return node_dict[k]['children']
#
# if i < leaf_count:
# # print("i if")
# left = [i]
# else:
# # print("i else")
# # read the AgglomerativeClustering doc. to see why I select i-n_samples
# left, node_dict = get_children_recursively(i - n_samples, model, node_dict,
# leaf_count, n_samples, data, verbose, left, right)
#
# if j < leaf_count:
# # print("j if")
# right = [j]
# else:
# # print("j else")
# right, node_dict = get_children_recursively(j - n_samples, model, node_dict,
# leaf_count, n_samples, data, verbose, left, right)
#
# if verbose:
# print(k, i, j, left, right)
# temp = map(lambda ii: data[ii], left)
# left_pos = np.mean(list(temp), axis=0)
# temp = map(lambda ii: data[ii], right)
# right_pos = np.mean(list(temp), axis=0)
#
# # this assumes that agg_cluster used euclidean distances
# dist = metrics.pairwise_distances([left_pos, right_pos], metric='euclidean')[0, 1]
#
# all_children = [x for y in [left, right] for x in y]
# pos = np.mean(list(map(lambda ii: data[ii], all_children)), axis=0)
#
# # store the results to speed up any additional or recursive evaluations
# node_dict[k] = {'top_child': [i, j], 'children': all_children, 'pos': pos, 'dist': dist,
# 'node_i': k + n_samples}
# return all_children, node_dict
# def recursive_atr
def get_children(tree, leaves_are_self_children=False):
    """Expand every node id of *tree* into its list of leaf children.

    *tree* maps an internal-node id to its two direct children; ids
    <= len(tree) are treated as leaves throughout this module.
    """
    # this is a recursive function (the recursion lives in
    # list_children_single_node)
    expanded_tree = {}
    # NOTE(review): range(max(tree.keys())) stops one short of the largest
    # node id, so the root node itself never gets an entry -- confirm
    # whether `max(tree.keys()) + 1` was intended.
    for node in range(max(tree.keys())):
        if node <= len(tree):
            if leaves_are_self_children:
                expanded_tree[node] = [node]
            else:
                expanded_tree[node] = []
        else:
            # expanded_tree[node] = list_children_single_node(node, tree)
            expanded_tree[node] = list_children_single_node(node, tree, leaves_are_self_children)
    return expanded_tree
def list_children_single_node(node, tree, leaves_are_self_children=False, only_leaves_are_children=True):
    """Recursively collect the children of *node* in *tree*.

    Leaves are the indices <= len(tree).  By default only leaf indices are
    returned (sorted and de-duplicated); pass only_leaves_are_children=False
    to keep internal node ids as well.  A leaf has no children unless
    leaves_are_self_children is True, in which case it is its own child.
    """
    leaf_limit = len(tree)
    if node <= leaf_limit:
        collected = [node] if leaves_are_self_children else []
    else:
        collected = list(tree[node])
        # Expand every internal child into its own (leaf) descendants;
        # appended leaves are skipped harmlessly as the loop reaches them.
        for child in collected:
            if child > leaf_limit:
                collected += list_children_single_node(child, tree, only_leaves_are_children=True)
    if only_leaves_are_children:
        return [i for i in sorted(np.unique(collected)) if i <= leaf_limit]
    return sorted(np.unique(collected))
def centroid_distances(node_a, node_b, tree, data, axis=0, distance=mydist, clustering_method='average'):
    """Linkage value between two tree nodes over all their leaf pairs.

    For 'average' linkage the mean of all pairwise distance/similarity
    values between the leaves of node_a and node_b is returned; for
    'complete' the minimum is returned.

    NOTE(review): np.min corresponds to complete linkage only if *distance*
    is a similarity (larger = closer); with a true distance it would be
    single linkage -- confirm against the str2similarity table used by
    callers.  *distance* defaults to the module-level helper `mydist`.
    """
    # axis=1 compares columns, so work on the transposed matrix.
    if axis == 0:
        pass
    elif axis == 1:
        data = np.transpose(data)
    else:
        exit("Variable 'data' does not have that many axises (╯°□°)╯︵ ┻━┻")
    children_of_a = list_children_single_node(node_a, tree=tree, leaves_are_self_children=True)
    children_of_b = list_children_single_node(node_b, tree=tree, leaves_are_self_children=True)
    # if distance == custom_euclidean_sim:
    #     print("Euclidean distance is especial, normalizing using this scheme:")
    #     distance = custom_euclidean_dist
    distances_list = []
    if clustering_method == 'average':
        for pair in itertools.product(data[children_of_a], data[children_of_b]):
            distances_list.append(distance(pair[0], pair[1]))
        return np.average(distances_list)
    elif clustering_method == 'complete':
        for pair in itertools.product(data[children_of_a], data[children_of_b]):
            distances_list.append(distance(pair[0], pair[1]))
        return np.min(distances_list)
    else:
        exit("Ony 'average' and 'complete' clustering methods are accepted at the moment (>_<)")
def euclidian_similarity(x, y):
    """Map the distance between x and y to a similarity in (0, 1].

    Uses 1/e^d so that identical vectors score 1 and the score decays
    exponentially with distance.  (A 1/(1+d) mapping was considered.)
    """
    distance = mydist(x, y)
    return 1 / (np.exp(distance))
def better_dendodist(children, distance, tree, data, axis, clustering_method='average'):
    """Return the linkage value for every (left, right) pair in *children*."""
    return [centroid_distances(pair[0], pair[1], tree, data, axis,
                               distance=distance, clustering_method=clustering_method)
            for pair in children]
def HierarchicalClustering(pwd: "The current directory",
                           gct_name: "Gene expression data filename (.gct file) or Pandas DataFrame "
                                     "where rows are genes and columns are samples",
                           col_distance_metric: "The function to be used when comparing the distance/similarity of "
                                                "the columns in the gct_name dataset",
                           row_distance_metric: "The function to be used when comparing the distance/similarity of "
                                                "the rows in the gct_name dataset",
                           clustering_method: "Type of linkage to use" = 'average',
                           output_base_name: "Base name for output file" = 'HC_output',
                           row_normalization: "Whether to normalize each row (gene) in the data" = False,
                           col_normalization: "Whether to normalize each column (sample) in the data" = False,
                           row_centering: "How to center each row (gene) in the data" = 'Mean',
                           col_centering: "How to center each column (sample) in the data" = 'Mean',
                           output_distances: "Whether or not output the pair-wise distance matrix. "
                                             "If true, the distance between each column will be called, "
                                             "which can be very computationally intensive. "
                                             "If unsure, leave as False." = False,
                           custom_plot: "Plot the dendrograms by Genes, Samples, or Both" = 'Both',
                           clusters_to_highlight: "How many clusters to highlight in the dendrogram" = 2,
                           show: "Whether to show the plot at the end" = False):
    """Cluster the columns and/or rows of a GCT dataset and emit viewer files.

    Writes .cdt/.atr/.gtr files compatible with HierarchicalClusteringViewer,
    saves a heatmap+dendrogram PDF, optionally dumps the pairwise distance
    matrix, and returns the fitted sklearn AgglomerativeClustering models as
    (col_model, row_model); either may be None if that axis was not clustered.
    """
    # gct_name, col_distance_metric, output_distances, row_distance_metric, clustering_method, output_base_name, \
    # row_normalization, col_normalization, row_centering, col_centering = parse_inputs(sys.argv)
    # When one axis is not clustered, force the plot to the other axis.
    if col_distance_metric == "No_column_clustering":
        custom_plot = 'Genes'
    if row_distance_metric == "No_row_clustering":
        custom_plot = 'Samples'
    og_data, og_data_df, data, data_df, col_labels, row_labels, og_full_gct, new_full_gct = \
        parse_data(gct_name, row_normalization, col_normalization, row_centering, col_centering)
    order_of_columns = list(data_df)
    order_of_rows = list(data_df.index)
    data_transpose = np.transpose(data)
    # print(data)
    # print(data_df)
    atr_companion = False
    col_model = None
    col_tree = None
    gtr_companion = False
    row_model = None
    row_tree = None
    AID = None
    GID = None
    # --- column (sample) clustering: fit model, order leaves, write .atr ---
    if col_distance_metric != 'No_column_clustering':
        atr_companion = True
        col_model = AgglomerativeClustering(linkage=linkage_dic[clustering_method], n_clusters=clusters_to_highlight,
                                            affinity=str2func[col_distance_metric])
        col_model.fit(data_transpose)
        col_tree = make_tree(col_model)
        order_of_columns = order_leaves(col_model, tree=col_tree, data=data_transpose,
                                        dist=str2similarity[col_distance_metric], labels=col_labels, reverse=True)
        path_to_atr = output_base_name + '.atr'
        make_atr(col_tree, file_name=path_to_atr, data=data,
                 dist=str2similarity[col_distance_metric], clustering_method=linkage_dic[clustering_method])
    # --- row (gene) clustering: fit model, order leaves, write .gtr ---
    if row_distance_metric != 'No_row_clustering':
        gtr_companion = True
        row_model = AgglomerativeClustering(linkage=linkage_dic[clustering_method], n_clusters=clusters_to_highlight,
                                            affinity=str2func[row_distance_metric])
        # y_col = row_model.fit_predict(np.transpose(data))
        # print(y_col)
        row_model.fit(data)
        row_tree = make_tree(row_model)
        order_of_rows = order_leaves(row_model, tree=row_tree, data=data,
                                     dist=str2similarity[row_distance_metric], labels=row_labels)
        path_to_gtr = output_base_name + '.gtr'
        make_gtr(row_tree, data=data, file_name=output_base_name + '.gtr', dist=str2similarity[row_distance_metric])
    # --- optional pairwise distance dump (can be very expensive) ---
    if output_distances:
        # TODO: check which col or row was selected, or both
        row_distance_matrix = str2affinity_func[row_distance_metric](data)
        # col_distance_matrix = str2affinity_func[col_distance_metric](np.transpose(data))
        dist_file = open(output_base_name + '_pairwise_distances.csv', 'w')
        dist_file.write('labels,')
        dist_file.write(",".join(col_model.labels_.astype(str)) + "\n")
        dist_file.write('samples,')
        dist_file.write(",".join(list(data_df)) + "\n")
        i = 0
        for row in row_distance_matrix:
            dist_file.write('distances row=' + str(i) + "," + ",".join(row.astype(str)) + "\n")
            i += 1
    path_to_cdt = output_base_name + '.cdt'
    make_cdt(data=new_full_gct, name=path_to_cdt, atr_companion=atr_companion,
             gtr_companion=gtr_companion,
             order_of_columns=order_of_columns, order_of_rows=order_of_rows)
    # --- plotting: one branch per requested layout ---
    if custom_plot == 'Samples':
        # Plotting the heatmap with dendrogram
        plt.clf()
        # fig = plt.figure(figsize=(16, 9), dpi=300)
        fig = plt.figure(figsize=(16, 9))
        gs = gridspec.GridSpec(2, 1, height_ratios=[1, 5])
        gs.update(wspace=0.0, hspace=0.0)
        ax0 = plt.subplot(gs[0])  # Doing dendrogram first
        ax0.axis('off')
        col_order, link = plot_dendrogram(col_model, data, col_tree, axis=1,
                                          dist=str2similarity[col_distance_metric],
                                          clustering_method=clustering_method,
                                          color_threshold=clusters_to_highlight,
                                          title='no_title.png', orientation='top')
        col_order = [int(i) for i in col_order]
        # print(col_order)
        named_col_order = [col_labels[i] for i in col_order]
        # print(named_col_order)
        # print(col_order)
        # print(col_model.labels_)
        ax1 = plt.subplot(gs[1])
        # Row-normalizing for display purposes only:
        data_df = data_df.subtract(data_df.min(axis=1), axis=0)
        data_df = data_df.div(data_df.max(axis=1), axis=0)
        sns.heatmap(data_df[named_col_order], ax=ax1, cbar=False, cmap='bwr')
        # ax1.xaxis.tick_top()
        [label.set_rotation(90) for label in ax1.get_xticklabels()]
        file_path_plot = output_base_name + '.pdf'
        plt.savefig(file_path_plot, bbox_inches='tight')
        print("----------------------------------------------------------------------")
        print("The PDF of this heatmap can be downloaded here:")
        display(HTML('<a href="' + file_path_plot + '" target="_blank">PDF of the heatmap</a>'))
        print("----------------------------------------------------------------------")
        print("The CDF which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_cdt + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        print("The ATR which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_atr + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        if show:
            # plt.show()
            pass
        # col_order = [int(i) for i in col_order]
        # print(col_order)
        # named_col_order = [col_labels[i] for i in col_order]
        # print(named_col_order)
        # print(col_order)
        # print(idxs2clusters)
        cls_list = col_model.labels_
        # for i in range(len(col_order)):
        #     cls_list.append(idxs2clusters[i])
        # print(cls_list)
        # order_by = [col_order.index(i) for i in range(len(col_order))]
        # list2intlist(cls_list, custom_order=order_by)
        # in_list = np.array(cls_list)
        # print(cls_list)
        # print(np.array(list2intlist(cls_list, custom_order=order_by)))
        # Also emit a .cls file with the column cluster assignments.
        list2cls(np.array(list2intlist(cls_list)), name_of_out=output_base_name+'.cls', sep=' ')
    if custom_plot == 'Genes':
        # Plotting the heatmap with dendrogram
        plt.clf()
        # fig = plt.figure(figsize=(16, 9), dpi=300)
        fig = plt.figure(figsize=(16, 9))
        gs = gridspec.GridSpec(1, 2, width_ratios=[5, 1])
        gs.update(wspace=0.0, hspace=0.0)
        ax0 = plt.subplot(gs[1])  # Doing dendrogram first
        ax0.axis('off')
        row_order, link = plot_dendrogram(row_model, data_transpose, row_tree, axis=1,
                                          dist=str2similarity[row_distance_metric],
                                          clustering_method=clustering_method,
                                          color_threshold=clusters_to_highlight,
                                          orientation='right', title='no_title.png')
        # row_order = [int(i) for i in row_order]
        # named_row_order = [row_labels[i] for i in row_order]
        ax1 = plt.subplot(gs[0])
        # Row-normalizing for display purposes only:
        data_df = data_df.subtract(data_df.min(axis=1), axis=0)
        data_df = data_df.div(data_df.max(axis=1), axis=0)
        sns.heatmap(data_df.iloc[row_order], ax=ax1, cbar=False, cmap='bwr')
        # ax1.xaxis.tick_top()
        [label.set_rotation(90) for label in ax1.get_xticklabels()]
        file_path_plot = output_base_name + '.pdf'
        plt.savefig(file_path_plot, bbox_inches='tight')
        print("----------------------------------------------------------------------")
        print("The PDF of this heatmap can be downloaded here:")
        display(HTML('<a href="' + file_path_plot + '" target="_blank">PDF of the heatmap</a>'))
        print("----------------------------------------------------------------------")
        print("The CDF which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_cdt + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        print("The GTR which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_gtr + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        if show:
            plt.show()
    if custom_plot == 'Both':
        # Plotting the heatmap with dendrogram
        plt.clf()
        # fig = plt.figure(figsize=(16, 9), dpi=300)
        fig = plt.figure(figsize=(16, 9))
        gs = gridspec.GridSpec(2, 2, width_ratios=[5, 1], height_ratios=[1, 5])
        gs.update(wspace=0.0, hspace=0.0)
        # Doing TOP dendrogram first
        ax0 = plt.subplot(gs[0])
        ax0.axis('off')
        col_order, link = plot_dendrogram(col_model, data, col_tree, axis=1,
                                          dist=str2similarity[col_distance_metric],
                                          clustering_method=clustering_method,
                                          color_threshold=clusters_to_highlight,
                                          title='no_title.png', orientation='top')
        col_order = [int(i) for i in col_order]
        named_col_order = [col_labels[i] for i in col_order]
        # Doing RIGHT dendrogram
        ax3 = plt.subplot(gs[3])
        ax3.axis('off')
        row_order, link = plot_dendrogram(row_model, data_transpose, row_tree, axis=1,
                                          dist=str2similarity[row_distance_metric],
                                          clustering_method=clustering_method,
                                          color_threshold=clusters_to_highlight,
                                          orientation='right', title='no_title.png')
        # Plotting the heatmap now
        ax1 = plt.subplot(gs[2])
        # Row-normalizing for display purposes only:
        data_df = data_df.subtract(data_df.min(axis=1), axis=0)
        data_df = data_df.div(data_df.max(axis=1), axis=0)
        sns.heatmap(data_df[named_col_order].iloc[row_order], ax=ax1, cbar=False, cmap='bwr')
        # ax1.xaxis.tick_top()
        [label.set_rotation(90) for label in ax1.get_xticklabels()]
        file_path_plot = output_base_name + '.pdf'
        plt.savefig(file_path_plot, bbox_inches='tight')
        print("----------------------------------------------------------------------")
        print("The PDF of this heatmap can be downloaded here:")
        display(HTML('<a href="' + file_path_plot + '" target="_blank">PDF of the heatmap</a>'))
        print("----------------------------------------------------------------------")
        print("The CDF which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_cdt + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        print("The GTR which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_gtr + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        if show:
            plt.show()
    return col_model, row_model
def hc_samples(
        input_gene_expression: "gene expression data filename (.gct file) where rows are genes and columns are samples",
        clustering_type: "single or consensus -- Only single is suported at the moment",
        distance_metric: "the function to be used when comparing the distance/similarity of the columns in the "
                         "input_gene_expression dataset",
        file_basename: "the name to use when naming output files" = 'HC_out',
        clusters_to_highlight: "how many clusters to highlight in the dendrogram" = None):
    """Hierarchically cluster the samples (columns) of a GCT dataset.

    Thin wrapper around HierarchicalClustering with column clustering
    enabled and row clustering disabled; returns the fitted column model.
    """
    print("Currenty clustering_type is being ignored, only 'single' is supported.")
    print("Now we will start performing hierarchical clustering, this may take a little while.")
    col_model, row_model = HierarchicalClustering(
        '.',                     # pwd
        input_gene_expression,   # gct_name
        distance_metric,         # col_distance_metric
        'No_row_clustering',     # row_distance_metric
        'average',               # clustering_method
        file_basename,           # output_base_name
        False,                   # row_normalization
        False,                   # col_normalization
        'Mean',                  # row_centering
        'Mean',                  # col_centering
        False,                   # output_distances
        'Samples',               # custom_plot
        clusters_to_highlight,
        True)                    # show
    print("Done with Hierarchical Clustering!")
    return col_model
def hc_genes(
        input_gene_expression: "gene expression data filename (.gct file) where rows are genes and columns are samples",
        clustering_type: "single or consensus -- Only single is suported at the moment",
        distance_metric: "the function to be used when comparing the distance/similarity of the rows in the "
                         "input_gene_expression dataset",
        file_basename: "the name to use when naming output files" = 'HC_out',
        clusters_to_highlight: "how many clusters to highlight in the dendrogram" = None):
    """Hierarchically cluster the genes (rows) of a GCT dataset.

    Thin wrapper around HierarchicalClustering with row clustering
    enabled and column clustering disabled; returns the fitted row model.
    """
    print("Currenty clustering_type is being ignored, only 'single' is supported.")
    print("Now we will start performing hierarchical clustering, this may take a little while.")
    col_model, row_model = HierarchicalClustering(
        '.',                      # pwd
        input_gene_expression,    # gct_name
        'No_column_clustering',   # col_distance_metric
        distance_metric,          # row_distance_metric
        'average',                # clustering_method
        file_basename,            # output_base_name
        False,                    # row_normalization
        False,                    # col_normalization
        'Mean',                   # row_centering
        'Mean',                   # col_centering
        False,                    # output_distances
        'Genes',                  # custom_plot
        clusters_to_highlight,
        True)                     # show
    print("Done with Hierarchical Clustering!")
    return row_model
def normalize_dataframe(df, log_normalize=None,
                        row_centering='Mean', row_normalization=True,
                        col_centering='Mean', col_normalization=True):
    """Center and/or normalize a DataFrame row-wise then column-wise.

    Parameters
    ----------
    df : pandas.DataFrame
        Input data (rows are genes, columns are samples).
    log_normalize : optional
        Not implemented; a message is printed if it is not None.
    row_centering, col_centering : str
        One of 'Mean', 'Median' or 'No'.
    row_normalization, col_normalization : bool
        Whether to scale rows/columns to unit Euclidean norm.

    Returns
    -------
    pandas.DataFrame
        A new normalized DataFrame, or *df* itself when no normalization
        was requested.
    """
    if (log_normalize is None) \
            and (row_centering == 'No') and (col_centering == 'No') \
            and (row_normalization is False) and (col_normalization is False):
        print("No normalization has been requested ಠ_ಠ¯")
        return df
    # BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0; .values is
    # the equivalent accessor and works on every pandas version.
    data = df.values
    # Log Normalizing
    if log_normalize is not None:
        print("I'm sorry, log-normalization is not supported at the moment (u_u)")
    if row_centering != 'No':
        if row_centering == 'Mean':
            row_means = np.mean(data, axis=1)
            row_means_col_vec = row_means.reshape((data.shape[0], 1))
            data = data - row_means_col_vec
        elif row_centering == 'Median':
            row_medians = np.median(data, axis=1)
            row_medians_col_vec = row_medians.reshape((data.shape[0], 1))
            data = data - row_medians_col_vec
        else:
            print("row_centering has an unexpected value:", row_centering)
    if row_normalization:
        # Scale each row by its Euclidean norm.
        row_norm = np.sum(data * data, axis=1)
        row_norm_col_vec = row_norm.reshape((data.shape[0], 1))
        data = data / np.sqrt(row_norm_col_vec)
    if col_centering != 'No':
        if col_centering == 'Mean':
            col_means = np.mean(data, axis=0)
            data = data - col_means
        elif col_centering == 'Median':
            col_medians = np.median(data, axis=0)
            data = data - col_medians
        else:
            print("col_centering has an unexpected value: ", col_centering)
    if col_normalization:
        # Scale each column by its Euclidean norm.
        col_norm = np.sum(data * data, axis=0)
        data = data / np.sqrt(col_norm)
    normalized_df = pd.DataFrame(data=data, index=df.index, columns=list(df))
    return normalized_df
def display_heatmap(data,
                    name='heatmap',
                    log_normalize=None,
                    row_centering: "How to center each row (gene) in the data" = 'No',
                    row_normalization: "Whether to normalize each row (gene) in the data" = True,
                    col_centering: "How to center each column (sample) in the data" = 'No',
                    col_normalization: "Whether to normalize each column (sample) in the data" = False,
                    mostrar=False):
    """Normalize a GCT dataset (DataFrame, file path, or URL) and save a heatmap PDF.

    The figure height is sized so each row gets one tick-label line; the PDF
    is written to *name* (a '.pdf' suffix is appended if missing) and a
    download link is displayed.  Set mostrar=True to also show the plot.
    """
    if isinstance(data, pd.DataFrame):
        data_to_plot = data.copy()
    elif os.path.isfile(data):
        data_to_plot = pd.read_table(data, skiprows=2, sep='\t')
        data_to_plot.set_index('Name', inplace=True)
        data_to_plot.drop('Description', axis=1, inplace=True)
    else:
        # Last resort: treat *data* as a URL pointing at a GCT file.
        try:
            data_to_plot = pd.read_table(data, skiprows=2, sep='\t')
        except urllib.error.HTTPError:
            print("I don't know what the variable 'data' contains.")
            print('data=')
            print(data)
            exit("If this is a url it may not be accessible.\n"
                 "(╯°□°)╯︵ ┻━┻")
        data_to_plot.set_index('Name', inplace=True)
        data_to_plot.drop('Description', axis=1, inplace=True)
    data_to_plot = normalize_dataframe(data_to_plot, log_normalize=log_normalize,
                                       row_centering=row_centering, row_normalization=row_normalization,
                                       col_centering=col_centering, col_normalization=col_normalization)
    plt.clf()
    # # figure reshape from:
    # # https://stackoverflow.com/questions/35127920/overlapping-yticklabels-is-it-possible-to-control-cell-size-of-heatmap-in-seabo
    # # and from:
    # # https://matplotlib.org/users/customizing.html
    # get the tick label font size
    fontsize_pt = plt.rcParams['ytick.labelsize']
    dpi = 72.27
    # compute the matrix height in points and inches
    # BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0; use .values.
    matrix_height_pt = fontsize_pt * data_to_plot.values.shape[0]
    matrix_height_in = (matrix_height_pt / dpi) * 1.2
    # compute the required figure height
    top_margin = 0.01  # in percentage of the figure height
    bottom_margin = 0.01  # in percentage of the figure height
    figure_height = matrix_height_in / (1 - top_margin - bottom_margin)
    # build the figure instance with the desired height
    fig, ax = plt.subplots(
        figsize=(6, figure_height),
        gridspec_kw=dict(top=1 - top_margin, bottom=bottom_margin))
    sns.heatmap(data_to_plot, cmap='bwr', yticklabels=True, square=True,
                cbar_kws={'use_gridspec': False,
                          'location': "right",
                          'shrink': 0.5,
                          'label': ''}
                )
    if not name.endswith('.pdf'):
        name = name + '.pdf'
    plt.savefig(name, dpi=dpi, bbox_inches='tight')
    # plt.savefig(name, dpi=dpi)
    print(name, "has been created!")
    if mostrar:
        # print(data_to_plot.head())
        plt.show()
    print("The PDF of this heatmap can be downloaded here:")
    display(HTML('<a href="' + name + '" target="_blank">PDF of the heatmap</a>'))
    return
| true | true |
f726e468fffed12d4ce9bb88c0a2c8505212f61d | 6,650 | py | Python | visualizer/visualizer/network.py | NikKaem/mapf-project | d99727d5f62380cf2a7d37dec70b5cdc71db3fb6 | [
"MIT"
] | null | null | null | visualizer/visualizer/network.py | NikKaem/mapf-project | d99727d5f62380cf2a7d37dec70b5cdc71db3fb6 | [
"MIT"
] | null | null | null | visualizer/visualizer/network.py | NikKaem/mapf-project | d99727d5f62380cf2a7d37dec70b5cdc71db3fb6 | [
"MIT"
] | null | null | null | from threading import Thread
import socket
import select
import time
import os
import clingo
import argparse
from PyQt5.QtCore import *
class VisualizerSocket(object):
    """Base TCP client used by the visualizer to talk to an external process.

    Wraps a socket plus a QTimer that polls for incoming data once a second;
    subclasses override receive()/run() to implement their protocol.  Can
    also launch the peer process itself via run_script().
    """
    def __init__(self, default_host = '127.0.0.1', default_port = 5000, socket_name = 'socket'):
        self._host = default_host
        self._port = default_port
        self._s = None              # socket.socket, None while disconnected
        self._timer = None          # QTimer driving the periodic receive()
        self._socket_name = socket_name  # human-readable name for log messages
        self._thread = None         # Thread running an external script, if any
        self._parser = None         # object that consumes received atoms
        self._waiting = False       # True while an answer from the peer is pending
    def __del__(self):
        self.close()
    def set_parser(self, parser):
        """Install the parser that will consume atoms received on this socket."""
        self._parser = parser
    def run_script(self, command, port = None):
        """Run *command* in a background thread; optionally connect to *port*."""
        self.close()
        self._thread = Thread(target = lambda: os.system(command))
        self._thread.start()
        if port is not None:
            self.connect('127.0.0.1', port)
    def join(self, wait_time):
        """Wait up to *wait_time* seconds for the script thread to finish."""
        if self._thread is not None:
            self._thread.join(wait_time)
            self._thread = None
    def run_connection(self):
        """Start (or restart) the 1-second polling timer that calls receive()."""
        if self._s is None:
            return
        if self._timer is not None:
            self._timer.stop()
        self._timer = QTimer()
        self._timer.timeout.connect(self.receive)
        self._timer.start(1000)
    def connect(self, host = None, port = None):
        """Connect to host:port, retrying up to 5 times (2 s apart).

        Returns 0 on success (or if already connected to the same address),
        -1 if all attempts fail.
        """
        if self.is_connected() and host == self._host and port == self._port:
            return 0
        if host is not None:
            self._host = host
        if port is not None:
            self._port = port
        self.close()
        print('Try connection with '+ self._socket_name)
        self._s = socket.socket()
        connected = False
        tryCount = 0
        while not connected:  # try to connect to the server
            try:
                self._s.connect((self._host, self._port))
                connected = True
            except(socket.error):
                if tryCount >= 5:
                    print('Failed to connect with ' + self._socket_name)
                    self.close()
                    return -1
                print('Failed to connect with ' + self._socket_name + ' \nRetrying in 2 sek')
                time.sleep(2)
                tryCount += 1
        print('Connect with '+ self._socket_name)
        return 0
    def send(self, msg):
        """Send *msg* (UTF-8 encoded); no-op when disconnected or empty."""
        if self._s is None or msg is None:
            return
        if msg == '':
            return
        self._s.send(msg.encode('utf-8'))
        pass
    def done_step(self, step):
        """Notify the peer that *step* finished by sending '%$done(step).'."""
        if self._s is None:
            return
        self._waiting = True
        self._s.send(('%$done(' + str(step) + ').\n').encode('utf-8'))
    def model_expanded(self, msg):
        # Hook for subclasses; the base class ignores model expansion.
        pass
    def _receive_data(self):
        """Read available data until a newline (or the peer closes).

        Returns the accumulated string, '' when nothing was ready, or None
        when the connection was closed by the peer.
        """
        breakLoop = False
        data = ''
        try:
            ready = select.select([self._s], [], [], 0.1)
            while (not breakLoop) and ready[0]:
                new_data = self._s.recv(2048).decode()
                if not new_data.find('\n') == -1 or new_data == '':
                    breakLoop = True
                data += new_data
                # recv returning '' while readable means the peer closed.
                if ready[0] and new_data == '':
                    self.close()
                    return None
        except socket.error as err:
            print(err)
        return data
    def receive(self):
        # Overridden by subclasses with protocol-specific parsing.
        return
    def run(self):
        # Overridden by subclasses to kick off their workflow.
        return
    def close(self):
        """Stop polling, shut the socket down, and join the script thread."""
        if self._timer is not None:
            self._timer.stop()
        if self._s is not None:
            print('Close connection to ' + self._socket_name)
            try:
                self._s.shutdown(socket.SHUT_RDWR)
            except socket.error:
                pass
            self._s.close()
            self._s = None
        self.join(10)
    def is_connected(self):
        return self._s is not None
    def script_is_running(self):
        return self._thread is not None
    def is_waiting(self):
        return self._waiting
    def get_host(self):
        return self._host
    def get_port(self):
        return self._port
class SolverSocket(VisualizerSocket):
    """Connection to the solver: ships the instance, parses returned plans."""
    def __init__(self):
        # NOTE(review): super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; plain super() would be safer.
        super(self.__class__, self).__init__('127.0.0.1', 5000, 'solver')
        self._model = None  # the visualizer model this socket feeds
    def set_model(self, model):
        """Attach the model and register this socket with it."""
        self._model = model
        if model is not None:
            self._model.add_socket(self)
    def model_expanded(self, msg):
        """Forward a model-expansion message to the solver and await a reply."""
        self.send(msg)
        self._waiting = True
    def receive(self):
        """Poll the socket; parse '.'-separated atoms into the model.

        '%$RESET' clears the model's actions; every other atom is handed
        to the parser as a clingo term.  Returns -1 when not fully wired up.
        """
        if self._s is None or self._parser is None or self._model is None:
            return -1
        data = self._receive_data()
        if data is None:
            return
        if data == '':
            return
        self._waiting = False
        for str_atom in data.split('.'):
            if len(str_atom) != 0 and not (len(str_atom) == 1 and str_atom[0] == '\n'):
                if str_atom == '%$RESET':
                    self._parser.clear_model_actions(True)
                else:
                    self._parser.on_atom(clingo.parse_term(str_atom))
        self._model.update_windows()
    def solve(self):
        """Reset the solver, send the current instance, and start polling."""
        if self._s == None or self._model == None: return -1
        self._s.send('%$RESET.'.encode('utf-8'))
        self._model.set_editable(False)
        self._model.restart()
        for atom in self._model.to_init_str():  # send instance
            atom = atom.replace('\n', '')
            self._s.send(str(atom).encode('utf-8'))
        self._s.send('\n'.encode('utf-8'))
        self.run_connection()
    def run(self):
        self.solve()
class SimulatorSocket(VisualizerSocket):
    """Connection to the simulator: receives instance atoms and resets."""
    def __init__(self):
        # NOTE(review): super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; plain super() would be safer.
        super(self.__class__, self).__init__('127.0.0.1', 5001, 'simulator')
    def receive(self):
        """Poll the socket; parse '.'-separated atoms into the parser.

        '%$RESET' clears the whole model; after any data was consumed the
        parser is told the instance is done (with the reset flag).
        """
        if self._s is None or self._parser is None:
            return -1
        data = self._receive_data()
        empty = True
        reset = False
        if data is None:
            return
        if data == '':
            return
        self._waiting = False
        for str_atom in data.split('.'):
            if len(str_atom) != 0 and not (len(str_atom) == 1 and str_atom[0] == '\n'):
                if str_atom == '%$RESET':
                    self._parser.clear_model()
                    reset = True
                    empty = False
                else:
                    self._parser.on_atom(clingo.parse_term(str_atom))
                    empty = False
        if not empty:
            self._parser.done_instance(reset)
    def connect(self, host = None, port = None):
        """Connect like the base class, then immediately start polling."""
        VisualizerSocket.connect(self, host, port)
        self.run()
    def run(self):
        self.run_connection()
| 29.424779 | 96 | 0.530226 | from threading import Thread
import socket
import select
import time
import os
import clingo
import argparse
from PyQt5.QtCore import *
class VisualizerSocket(object):
def __init__(self, default_host = '127.0.0.1', default_port = 5000, socket_name = 'socket'):
self._host = default_host
self._port = default_port
self._s = None
self._timer = None
self._socket_name = socket_name
self._thread = None
self._parser = None
self._waiting = False
def __del__(self):
self.close()
def set_parser(self, parser):
self._parser = parser
def run_script(self, command, port = None):
self.close()
self._thread = Thread(target = lambda: os.system(command))
self._thread.start()
if port is not None:
self.connect('127.0.0.1', port)
def join(self, wait_time):
if self._thread is not None:
self._thread.join(wait_time)
self._thread = None
def run_connection(self):
if self._s is None:
return
if self._timer is not None:
self._timer.stop()
self._timer = QTimer()
self._timer.timeout.connect(self.receive)
self._timer.start(1000)
def connect(self, host = None, port = None):
if self.is_connected() and host == self._host and port == self._port:
return 0
if host is not None:
self._host = host
if port is not None:
self._port = port
self.close()
print('Try connection with '+ self._socket_name)
self._s = socket.socket()
connected = False
tryCount = 0
while not connected:
try:
self._s.connect((self._host, self._port))
connected = True
except(socket.error):
if tryCount >= 5:
print('Failed to connect with ' + self._socket_name)
self.close()
return -1
print('Failed to connect with ' + self._socket_name + ' \nRetrying in 2 sek')
time.sleep(2)
tryCount += 1
print('Connect with '+ self._socket_name)
return 0
def send(self, msg):
if self._s is None or msg is None:
return
if msg == '':
return
self._s.send(msg.encode('utf-8'))
pass
def done_step(self, step):
if self._s is None:
return
self._waiting = True
self._s.send(('%$done(' + str(step) + ').\n').encode('utf-8'))
def model_expanded(self, msg):
pass
def _receive_data(self):
breakLoop = False
data = ''
try:
ready = select.select([self._s], [], [], 0.1)
while (not breakLoop) and ready[0]:
new_data = self._s.recv(2048).decode()
if not new_data.find('\n') == -1 or new_data == '':
breakLoop = True
data += new_data
if ready[0] and new_data == '':
self.close()
return None
except socket.error as err:
print(err)
return data
def receive(self):
return
def run(self):
return
def close(self):
if self._timer is not None:
self._timer.stop()
if self._s is not None:
print('Close connection to ' + self._socket_name)
try:
self._s.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
self._s.close()
self._s = None
self.join(10)
def is_connected(self):
return self._s is not None
def script_is_running(self):
return self._thread is not None
def is_waiting(self):
return self._waiting
def get_host(self):
return self._host
def get_port(self):
return self._port
class SolverSocket(VisualizerSocket):
def __init__(self):
super(self.__class__, self).__init__('127.0.0.1', 5000, 'solver')
self._model = None
def set_model(self, model):
self._model = model
if model is not None:
self._model.add_socket(self)
def model_expanded(self, msg):
self.send(msg)
self._waiting = True
def receive(self):
if self._s is None or self._parser is None or self._model is None:
return -1
data = self._receive_data()
if data is None:
return
if data == '':
return
self._waiting = False
for str_atom in data.split('.'):
if len(str_atom) != 0 and not (len(str_atom) == 1 and str_atom[0] == '\n'):
if str_atom == '%$RESET':
self._parser.clear_model_actions(True)
else:
self._parser.on_atom(clingo.parse_term(str_atom))
self._model.update_windows()
def solve(self):
if self._s == None or self._model == None: return -1
self._s.send('%$RESET.'.encode('utf-8'))
self._model.set_editable(False)
self._model.restart()
for atom in self._model.to_init_str():
atom = atom.replace('\n', '')
self._s.send(str(atom).encode('utf-8'))
self._s.send('\n'.encode('utf-8'))
self.run_connection()
def run(self):
self.solve()
class SimulatorSocket(VisualizerSocket):
def __init__(self):
super(self.__class__, self).__init__('127.0.0.1', 5001, 'simulator')
def receive(self):
if self._s is None or self._parser is None:
return -1
data = self._receive_data()
empty = True
reset = False
if data is None:
return
if data == '':
return
self._waiting = False
for str_atom in data.split('.'):
if len(str_atom) != 0 and not (len(str_atom) == 1 and str_atom[0] == '\n'):
if str_atom == '%$RESET':
self._parser.clear_model()
reset = True
empty = False
else:
self._parser.on_atom(clingo.parse_term(str_atom))
empty = False
if not empty:
self._parser.done_instance(reset)
def connect(self, host = None, port = None):
VisualizerSocket.connect(self, host, port)
self.run()
def run(self):
self.run_connection()
| true | true |
f726e4b41f15fdd676d9d580ff8e3144b72f2f13 | 4,712 | py | Python | taxumap-manuscript-notebooks/embeddings.py | jsevo/taxumap | 1a02518dca822a65847994910177c74607243dae | [
"MIT"
] | 5 | 2021-11-21T16:47:17.000Z | 2022-02-04T16:57:15.000Z | taxumap-manuscript-notebooks/embeddings.py | jsevo/taxumap | 1a02518dca822a65847994910177c74607243dae | [
"MIT"
] | 13 | 2021-03-31T19:08:10.000Z | 2022-02-15T19:57:18.000Z | taxumap-manuscript-notebooks/embeddings.py | jsevo/taxumap | 1a02518dca822a65847994910177c74607243dae | [
"MIT"
] | 3 | 2021-09-22T19:21:36.000Z | 2022-02-10T21:39:35.000Z | from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, KernelPCA
from umap import UMAP
from sklearn.preprocessing import MinMaxScaler
RUNEMBEDDINGS = False
if RUNEMBEDDINGS:
#simple PCA
pcaembedding = PCA(n_components=2).fit_transform(XASV.fillna(0))
#base embedding (kernel pca)
kernelpcaembedding = KernelPCA(n_components=2).fit_transform(XASV.fillna(0))
# non-phylo umap
embedding_non_phylo_unscaled = UMAP(n_neighbors=120,min_dist=0.2, metric="manhattan").fit_transform(XASV)
# embedding_non_phylo_scaled = UMAP(n_neighbors=120,min_dist=0.2, metric="manhattan").fit_transform(MinMaxScaler().fit_transform(XASV))
RUNTAXUMAPS = False
if RUNTAXUMAPS:
from taxumap.taxumap import taxumap
agg_levels = ["Phylum", "Family"]
withscaling = False # do not scale the columns of X
distanceperlevel = False # do not calculate a separate distance matrix at each phylogenetic level because we are using the manhattan distance
distancemetric = "manhattan"
printfigure=False
printwithdiversity=False #dont plot the average diversity in the background of the scatter plot
X_in = XASV
tax = taxonomy
withusercolors=taxonomy_meta[["HexColor"]]
# TAXUMAP, X_embedded, taxumap_Xscaled, taxumap_X = taxumap(agg_levels,
# withscaling,
# distanceperlevel,
# distancemetric,
# printfigure,
# printwithdiversity,
# X_in,
# tax,
# withusercolors,
# debug=True, #return tables
# save_embedding=False #save xy coordinates
# );
TAXUMAP_alllevels, X_embedded_alllevels, taxumap_Xscaled_alllevels, taxumap_X_alllevels = taxumap(["Phylum", "Class", "Order", "Family", "Genus"],
withscaling,
distanceperlevel,
distancemetric,
printfigure,
printwithdiversity,
X_in,
tax,
withusercolors,
debug=True, #return tables
save_embedding=False #save xy coordinates
);
# TAXUMAPSCALED, X_embedded_scaled, taxumap_Xscaled_scaled, taxumap_X_scaled = taxumap(
# agg_levels,
# True,
# False,
# "euclidean",
# printfigure,
# printwithdiversity,
# X_in,
# tax,
# withusercolors,
# debug=True, #return tables
# save_embedding=True#save xy coordinates
# );
# TAXUMAPSCALEDeuclidean, X_embedded_scaledeuclidean, taxumap_Xscaled_scaledeuclidean, taxumap_X_scaledeuclidean = taxumap(
# agg_levels,
# True,
# False,
# "euclidean",
# printfigure,
# printwithdiversity,
# X_in,
# tax,
# withusercolors,
# debug=True, #return tables
# save_embedding=True#save xy coordinates
# );
LOADPCoAS = False
if LOADPCoAS:
pcoa_embedding_unweighted_unifrac = PCA(n_components=2).fit_transform(unweighted_unifrac.set_index("SampleID"))
#Weighted Unifrac
pcoa_embedding_weighted_unifrac = PCA(n_components=2).fit_transform(weighted_unifrac.set_index("SampleID"))
del unweighted_unifrac
del weighted_unifrac
#del TAXUMAPSCALED, taxumap_Xscaled_scaled, taxumap_X_scaled
#del TAXUMAPSCALEDeuclidean, taxumap_Xscaled_scaledeuclidean, taxumap_X_scaledeuclidean
del TAXUMAP_alllevels, taxumap_Xscaled_alllevels, taxumap_X_alllevels
write_now=False
if write_now:
for (em,n) in zip(
[pcaembedding,
pcoa_embedding_unweighted_unifract[:,0:2],
pcoa_embedding_weighted_unifract,
embedding_non_phylo_unscaled,
X_embedded_alllevels.values,
X_embedded.values],
["pcaembedding",
"pcoa_unweighted_unifrac_embedding",
"pcoa_weighted_unifrac_embedding",
"embedding_nontax_umap_unscaled",
"taxumap_alllevels",
"current_taxumap_embedding"]):
pd.DataFrame(em, index=XASV.index).to_csv("results/%s.csv"%n) | 40.62069 | 150 | 0.574278 | from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, KernelPCA
from umap import UMAP
from sklearn.preprocessing import MinMaxScaler
RUNEMBEDDINGS = False
if RUNEMBEDDINGS:
pcaembedding = PCA(n_components=2).fit_transform(XASV.fillna(0))
kernelpcaembedding = KernelPCA(n_components=2).fit_transform(XASV.fillna(0))
embedding_non_phylo_unscaled = UMAP(n_neighbors=120,min_dist=0.2, metric="manhattan").fit_transform(XASV)
RUNTAXUMAPS = False
if RUNTAXUMAPS:
from taxumap.taxumap import taxumap
agg_levels = ["Phylum", "Family"]
withscaling = False
distanceperlevel = False
distancemetric = "manhattan"
printfigure=False
printwithdiversity=False
X_in = XASV
tax = taxonomy
withusercolors=taxonomy_meta[["HexColor"]]
mbedded_alllevels, taxumap_Xscaled_alllevels, taxumap_X_alllevels = taxumap(["Phylum", "Class", "Order", "Family", "Genus"],
withscaling,
distanceperlevel,
distancemetric,
printfigure,
printwithdiversity,
X_in,
tax,
withusercolors,
debug=True,
save_embedding=False
);
unweighted_unifrac = PCA(n_components=2).fit_transform(unweighted_unifrac.set_index("SampleID"))
pcoa_embedding_weighted_unifrac = PCA(n_components=2).fit_transform(weighted_unifrac.set_index("SampleID"))
del unweighted_unifrac
del weighted_unifrac
del TAXUMAP_alllevels, taxumap_Xscaled_alllevels, taxumap_X_alllevels
write_now=False
if write_now:
for (em,n) in zip(
[pcaembedding,
pcoa_embedding_unweighted_unifract[:,0:2],
pcoa_embedding_weighted_unifract,
embedding_non_phylo_unscaled,
X_embedded_alllevels.values,
X_embedded.values],
["pcaembedding",
"pcoa_unweighted_unifrac_embedding",
"pcoa_weighted_unifrac_embedding",
"embedding_nontax_umap_unscaled",
"taxumap_alllevels",
"current_taxumap_embedding"]):
pd.DataFrame(em, index=XASV.index).to_csv("results/%s.csv"%n) | true | true |
f726e62af700d6cd869103c9f957465198c2bb6d | 218 | py | Python | structurizr/model/enterprise.py | sixty-north/structurizr-python | 856d0476935952c256981f3628663915768ee85e | [
"Apache-2.0"
] | 15 | 2017-07-20T20:43:40.000Z | 2021-11-12T11:25:01.000Z | structurizr/model/enterprise.py | sixty-north/structurizr-python | 856d0476935952c256981f3628663915768ee85e | [
"Apache-2.0"
] | 2 | 2017-06-05T17:41:05.000Z | 2018-09-11T08:18:07.000Z | structurizr/model/enterprise.py | sixty-north/structurizr-python | 856d0476935952c256981f3628663915768ee85e | [
"Apache-2.0"
] | 7 | 2017-08-16T19:51:24.000Z | 2020-09-24T09:47:35.000Z | class Enterprise:
def __init__(self, name):
if len(name.strip()) == 0:
raise ValueError("Name must be specified.")
self._name = name
def get_name(self):
return self._name
| 19.818182 | 55 | 0.577982 | class Enterprise:
def __init__(self, name):
if len(name.strip()) == 0:
raise ValueError("Name must be specified.")
self._name = name
def get_name(self):
return self._name
| true | true |
f726e62b1de4faf4969737dc866dadf797d1e5a6 | 3,616 | py | Python | reminder/admin/forms.py | luk-kop/event-reminder-apscheduler | 405c9731d340d111aac83094a93b06ec60256754 | [
"MIT"
] | 1 | 2021-04-02T11:07:12.000Z | 2021-04-02T11:07:12.000Z | reminder/admin/forms.py | luk-kop/event-reminder-apscheduler | 405c9731d340d111aac83094a93b06ec60256754 | [
"MIT"
] | 2 | 2021-03-20T22:04:50.000Z | 2021-06-09T07:02:36.000Z | reminder/admin/forms.py | luk-kop/event-reminder | 405c9731d340d111aac83094a93b06ec60256754 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SelectField, IntegerField
from wtforms.validators import InputRequired, EqualTo, Regexp, Length, NumberRange, Optional, Email
from reminder.custom_wtforms import MxRecordValidator
class NewUserForm(FlaskForm):
"""
Validators for a new user account.
"""
username = StringField(validators=[InputRequired(),
Length(min=3, max=40),
Regexp(regex='^[a-zA-Z0-9][a-zA-Z0-9\._-]{1,39}[a-zA-Z0-9]$',
message='Username should contain chars (min 3): a-z, A-Z, 0-9, . _ -')])
email = StringField(validators=[InputRequired(),
Email(message='Please enter valid email address'),
Length(max=70),
MxRecordValidator()])
role = SelectField(choices=[('user', 'User'), ('admin', 'Admin')])
access = SelectField(label='Can log in?',
choices=[('False', 'No'), ('True', 'Yes')])
pass_reset = SelectField(label='Change password on next login?',
choices=[('False', 'No'), ('True', 'Yes')])
password = PasswordField(validators=[Regexp(regex='^(?=.*[A-Za-z])(?=.*\d)(?=.*[@$!%*#?&])[A-Za-z\d@$!%*#?&]'
'{8,40}$',
message='Password must contain minimum 8 characters, at least one '
'letter, one number and one special character')])
password2 = PasswordField(label='Confirm password',
validators=[EqualTo('password')])
class EditUserForm(NewUserForm):
"""
Validators for the user being edited
"""
# the password field can be blank (empty) or match the regex pattern
password = PasswordField(label='Password',
validators=[Regexp(regex='^(?=.*[A-Za-z])(?=.*\d)(?=.*[@$!%*#?&])[A-Za-z\d@$!%*#?&]'
'{8,40}$|^$',
message='Password must contain minimum 8 characters, at least one '
'letter, one number and one special character')])
password2 = PasswordField(label='Confirm password', validators=[EqualTo('password')])
class NotifyForm(FlaskForm):
"""
Validators for notification settings
"""
notify_status = StringField(label='Notification status',
validators=[Regexp(regex='^on$'), Optional()])
notify_unit = SelectField('Notification interval time units',
choices=[('hours', 'hours'), ('minutes', 'minutes'), ('seconds', 'seconds')])
notify_interval = IntegerField(label='Notification interval',
validators=[InputRequired(), NumberRange(min=1)])
mail_server = StringField(label='Mail server',
validators=[InputRequired(), Length(max=70)])
mail_port = IntegerField(label='Mail port',
validators=[InputRequired(), NumberRange(min=1)])
mail_security = SelectField(label='Mail security',
choices=[('tls', 'TLS'), ('ssl', 'SSL')])
mail_username = StringField(label='Mail username',
validators=[InputRequired(), Length(max=70)])
mail_password = PasswordField(label='Mail Password') | 56.5 | 118 | 0.518252 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SelectField, IntegerField
from wtforms.validators import InputRequired, EqualTo, Regexp, Length, NumberRange, Optional, Email
from reminder.custom_wtforms import MxRecordValidator
class NewUserForm(FlaskForm):
username = StringField(validators=[InputRequired(),
Length(min=3, max=40),
Regexp(regex='^[a-zA-Z0-9][a-zA-Z0-9\._-]{1,39}[a-zA-Z0-9]$',
message='Username should contain chars (min 3): a-z, A-Z, 0-9, . _ -')])
email = StringField(validators=[InputRequired(),
Email(message='Please enter valid email address'),
Length(max=70),
MxRecordValidator()])
role = SelectField(choices=[('user', 'User'), ('admin', 'Admin')])
access = SelectField(label='Can log in?',
choices=[('False', 'No'), ('True', 'Yes')])
pass_reset = SelectField(label='Change password on next login?',
choices=[('False', 'No'), ('True', 'Yes')])
password = PasswordField(validators=[Regexp(regex='^(?=.*[A-Za-z])(?=.*\d)(?=.*[@$!%*#?&])[A-Za-z\d@$!%*#?&]'
'{8,40}$',
message='Password must contain minimum 8 characters, at least one '
'letter, one number and one special character')])
password2 = PasswordField(label='Confirm password',
validators=[EqualTo('password')])
class EditUserForm(NewUserForm):
password = PasswordField(label='Password',
validators=[Regexp(regex='^(?=.*[A-Za-z])(?=.*\d)(?=.*[@$!%*#?&])[A-Za-z\d@$!%*#?&]'
'{8,40}$|^$',
message='Password must contain minimum 8 characters, at least one '
'letter, one number and one special character')])
password2 = PasswordField(label='Confirm password', validators=[EqualTo('password')])
class NotifyForm(FlaskForm):
notify_status = StringField(label='Notification status',
validators=[Regexp(regex='^on$'), Optional()])
notify_unit = SelectField('Notification interval time units',
choices=[('hours', 'hours'), ('minutes', 'minutes'), ('seconds', 'seconds')])
notify_interval = IntegerField(label='Notification interval',
validators=[InputRequired(), NumberRange(min=1)])
mail_server = StringField(label='Mail server',
validators=[InputRequired(), Length(max=70)])
mail_port = IntegerField(label='Mail port',
validators=[InputRequired(), NumberRange(min=1)])
mail_security = SelectField(label='Mail security',
choices=[('tls', 'TLS'), ('ssl', 'SSL')])
mail_username = StringField(label='Mail username',
validators=[InputRequired(), Length(max=70)])
mail_password = PasswordField(label='Mail Password') | true | true |
f726e725cce6a2546e0dca558dcc54f0ee808e67 | 954 | py | Python | apps/node/src/app/main/users/role.py | AmrMKayid/PyGrid | 695a041649f7cfab6acc7d1495e2a6132f65d529 | [
"Apache-2.0"
] | 7 | 2020-04-20T22:22:08.000Z | 2020-07-25T17:32:08.000Z | apps/node/src/app/main/users/role.py | AmrMKayid/PyGrid | 695a041649f7cfab6acc7d1495e2a6132f65d529 | [
"Apache-2.0"
] | 3 | 2020-04-24T21:20:57.000Z | 2020-05-28T09:17:02.000Z | apps/node/src/app/main/users/role.py | AmrMKayid/PyGrid | 695a041649f7cfab6acc7d1495e2a6132f65d529 | [
"Apache-2.0"
] | 4 | 2020-04-24T22:32:37.000Z | 2020-05-25T19:29:20.000Z | from ... import BaseModel, db
class Role(BaseModel):
__tablename__ = "role"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
name = db.Column(db.String())
can_triage_jobs = db.Column(db.Boolean())
can_edit_settings = db.Column(db.Boolean())
can_create_users = db.Column(db.Boolean())
can_create_groups = db.Column(db.Boolean())
can_edit_roles = db.Column(db.Boolean())
can_manage_infrastructure = db.Column(db.Boolean())
def __str__(self):
return (
f"<Role id: {self.id}, name: {self.name}, "
f"can_triage_jobs: {self.can_triage_jobs}, "
f"can_edit_settings: {self.can_edit_settings}, "
f"can_create_users: {self.can_create_users}, "
f"can_create_groups: {self.can_create_groups}, "
f"can_edit_roles: {self.can_edit_roles}, "
f"can_manage_infrastructure: {self.can_manage_infrastructure}>"
)
| 36.692308 | 75 | 0.645702 | from ... import BaseModel, db
class Role(BaseModel):
__tablename__ = "role"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
name = db.Column(db.String())
can_triage_jobs = db.Column(db.Boolean())
can_edit_settings = db.Column(db.Boolean())
can_create_users = db.Column(db.Boolean())
can_create_groups = db.Column(db.Boolean())
can_edit_roles = db.Column(db.Boolean())
can_manage_infrastructure = db.Column(db.Boolean())
def __str__(self):
return (
f"<Role id: {self.id}, name: {self.name}, "
f"can_triage_jobs: {self.can_triage_jobs}, "
f"can_edit_settings: {self.can_edit_settings}, "
f"can_create_users: {self.can_create_users}, "
f"can_create_groups: {self.can_create_groups}, "
f"can_edit_roles: {self.can_edit_roles}, "
f"can_manage_infrastructure: {self.can_manage_infrastructure}>"
)
| true | true |
f726e80aacceea27942a112dde7b312235a8f554 | 35,185 | py | Python | sdks/python/apache_beam/typehints/decorators.py | VrishaliShah/beam | c27f5f724e38fbec829d9cf8920fac2bdedb7ca4 | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/typehints/decorators.py | VrishaliShah/beam | c27f5f724e38fbec829d9cf8920fac2bdedb7ca4 | [
"Apache-2.0"
] | 2 | 2021-08-25T16:17:07.000Z | 2022-02-10T04:23:10.000Z | sdks/python/apache_beam/typehints/decorators.py | VrishaliShah/beam | c27f5f724e38fbec829d9cf8920fac2bdedb7ca4 | [
"Apache-2.0"
] | 1 | 2020-01-16T17:00:26.000Z | 2020-01-16T17:00:26.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Type hinting decorators allowing static or runtime type-checking for the SDK.
This module defines decorators which utilize the type-hints defined in
'type_hints.py' to allow annotation of the types of function arguments and
return values.
Type-hints for functions are annotated using two separate decorators. One is for
type-hinting the types of function arguments, the other for type-hinting the
function return value. Type-hints can either be specified in the form of
positional arguments::
@with_input_types(int, int)
def add(a, b):
return a + b
Keyword arguments::
@with_input_types(a=int, b=int)
def add(a, b):
return a + b
Or even a mix of both::
@with_input_types(int, b=int)
def add(a, b):
return a + b
Example usage for type-hinting arguments only::
@with_input_types(s=str)
def to_lower(a):
return a.lower()
Example usage for type-hinting return values only::
@with_output_types(Tuple[int, bool])
def compress_point(ec_point):
return ec_point.x, ec_point.y < 0
Example usage for type-hinting both arguments and return values::
@with_input_types(a=int)
@with_output_types(str)
def int_to_str(a):
return str(a)
Type-hinting a function with arguments that unpack tuples are also supported
(in Python 2 only). As an example, such a function would be defined as::
def foo((a, b)):
...
The valid type-hint for such as function looks like the following::
@with_input_types(a=int, b=int)
def foo((a, b)):
...
Notice that we hint the type of each unpacked argument independently, rather
than hinting the type of the tuple as a whole (Tuple[int, int]).
Optionally, type-hints can be type-checked at runtime. To toggle this behavior
this module defines two functions: 'enable_run_time_type_checking' and
'disable_run_time_type_checking'. NOTE: for this toggle behavior to work
properly it must appear at the top of the module where all functions are
defined, or before importing a module containing type-hinted functions.
"""
# pytype: skip-file
from __future__ import absolute_import
import inspect
import itertools
import logging
import sys
import traceback
import types
from builtins import next
from builtins import object
from builtins import zip
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Tuple
from typing import TypeVar
from apache_beam.typehints import native_type_compatibility
from apache_beam.typehints import typehints
from apache_beam.typehints.native_type_compatibility import convert_to_beam_type
from apache_beam.typehints.typehints import CompositeTypeHintError
from apache_beam.typehints.typehints import SimpleTypeHintError
from apache_beam.typehints.typehints import check_constraint
from apache_beam.typehints.typehints import validate_composite_type_param
try:
import funcsigs # Python 2 only.
except ImportError:
funcsigs = None
__all__ = [
'no_annotations',
'with_input_types',
'with_output_types',
'WithTypeHints',
'TypeCheckError',
]
T = TypeVar('T')
WithTypeHintsT = TypeVar('WithTypeHintsT', bound='WithTypeHints') # pylint: disable=invalid-name
# This is missing in the builtin types module. str.upper is arbitrary, any
# method on a C-implemented type will do.
# pylint: disable=invalid-name
_MethodDescriptorType = type(str.upper)
# pylint: enable=invalid-name
_ANY_VAR_POSITIONAL = typehints.Tuple[typehints.Any, ...]
_ANY_VAR_KEYWORD = typehints.Dict[typehints.Any, typehints.Any]
# TODO(BEAM-8280): Remove this when from_callable is ready to be enabled.
_enable_from_callable = False
try:
_original_getfullargspec = inspect.getfullargspec
_use_full_argspec = True
except AttributeError: # Python 2
_original_getfullargspec = inspect.getargspec # type: ignore
_use_full_argspec = False
def getfullargspec(func):
# Python 3: Use get_signature instead.
assert sys.version_info < (3, ), 'This method should not be used in Python 3'
try:
return _original_getfullargspec(func)
except TypeError:
if isinstance(func, type):
argspec = getfullargspec(func.__init__)
del argspec.args[0]
return argspec
elif callable(func):
try:
return _original_getfullargspec(func.__call__)
except TypeError:
# Return an ArgSpec with at least one positional argument,
# and any number of other (positional or keyword) arguments
# whose name won't match any real argument.
# Arguments with the %unknown% prefix will be ignored in the type
# checking code.
if _use_full_argspec:
return inspect.FullArgSpec(['_'],
'__unknown__varargs',
'__unknown__keywords', (), [], {}, {})
else: # Python 2
return inspect.ArgSpec(['_'],
'__unknown__varargs',
'__unknown__keywords', ())
else:
raise
def get_signature(func):
"""Like inspect.signature(), but supports Py2 as well.
This module uses inspect.signature instead of getfullargspec since in the
latter: 'the "self" parameter is always reported, even for bound methods'
https://github.com/python/cpython/blob/44f91c388a6f4da9ed3300df32ca290b8aa104ea/Lib/inspect.py#L1103
"""
# Fall back on funcsigs if inspect module doesn't have 'signature'; prefer
# inspect.signature over funcsigs.signature if both are available.
if hasattr(inspect, 'signature'):
inspect_ = inspect
else:
inspect_ = funcsigs
try:
signature = inspect_.signature(func)
except ValueError:
# Fall back on a catch-all signature.
params = [
inspect_.Parameter('_', inspect_.Parameter.POSITIONAL_OR_KEYWORD),
inspect_.Parameter(
'__unknown__varargs', inspect_.Parameter.VAR_POSITIONAL),
inspect_.Parameter(
'__unknown__keywords', inspect_.Parameter.VAR_KEYWORD)
]
signature = inspect_.Signature(params)
# This is a specialization to hint the first argument of certain builtins,
# such as str.strip.
if isinstance(func, _MethodDescriptorType):
params = list(signature.parameters.values())
if params[0].annotation == params[0].empty:
params[0] = params[0].replace(annotation=func.__objclass__)
signature = signature.replace(parameters=params)
# This is a specialization to hint the return value of type callables.
if (signature.return_annotation == signature.empty and
isinstance(func, type)):
signature = signature.replace(return_annotation=typehints.normalize(func))
return signature
def no_annotations(fn):
"""Decorator that prevents Beam from using type hint annotations on a
callable."""
setattr(fn, '_beam_no_annotations', True)
return fn
class IOTypeHints(NamedTuple(
'IOTypeHints',
[('input_types', Optional[Tuple[Tuple[Any, ...], Dict[str, Any]]]),
('output_types', Optional[Tuple[Tuple[Any, ...], Dict[str, Any]]]),
('origin', List[str])])):
"""Encapsulates all type hint information about a Dataflow construct.
This should primarily be used via the WithTypeHints mixin class, though
may also be attached to other objects (such as Python functions).
Attributes:
input_types: (tuple, dict) List of typing types, and an optional dictionary.
May be None. The list and dict correspond to args and kwargs.
output_types: (tuple, dict) List of typing types, and an optional dictionary
(unused). Only the first element of the list is used. May be None.
origin: (List[str]) Stack of tracebacks of method calls used to create this
instance.
"""
traceback_limit = 5
@classmethod
def _make_origin(cls, bases, tb=True, msg=()):
# type: (List[IOTypeHints], bool, List[str]) -> List[str]
if msg:
res = msg
else:
res = []
if tb:
# Omit this method and the IOTypeHints method that called it.
num_frames_skip = 2
tb = traceback.format_stack(limit=cls.traceback_limit +
num_frames_skip)[:-num_frames_skip]
# tb is a list of strings in the form of 'File ...\n[code]\n'. Split into
# single lines and flatten.
res += list(
itertools.chain.from_iterable(s.strip().split('\n') for s in tb))
bases = [base for base in bases if base.origin]
if bases:
res += ['', 'based on:']
for i, base in enumerate(bases):
if i > 0:
res += ['', 'and:']
res += [' ' + str(base)]
res += [' ' + s for s in base.origin]
return res
@classmethod
def empty(cls):
# type: () -> IOTypeHints
"""Construct a base IOTypeHints object with no hints."""
return IOTypeHints(None, None, [])
@classmethod
def from_callable(cls, fn):
# type: (Callable) -> Optional[IOTypeHints]
"""Construct an IOTypeHints object from a callable's signature.
Supports Python 3 annotations. For partial annotations, sets unknown types
to Any, _ANY_VAR_POSITIONAL, or _ANY_VAR_KEYWORD.
Returns:
A new IOTypeHints or None if no annotations found.
"""
if not _enable_from_callable or getattr(fn, '_beam_no_annotations', False):
return None
signature = get_signature(fn)
if (all(param.annotation == param.empty
for param in signature.parameters.values()) and
signature.return_annotation == signature.empty):
return None
input_args = []
input_kwargs = {}
for param in signature.parameters.values():
if param.annotation == param.empty:
if param.kind == param.VAR_POSITIONAL:
input_args.append(_ANY_VAR_POSITIONAL)
elif param.kind == param.VAR_KEYWORD:
input_kwargs[param.name] = _ANY_VAR_KEYWORD
elif param.kind == param.KEYWORD_ONLY:
input_kwargs[param.name] = typehints.Any
else:
input_args.append(typehints.Any)
else:
if param.kind in [param.KEYWORD_ONLY, param.VAR_KEYWORD]:
input_kwargs[param.name] = convert_to_beam_type(param.annotation)
else:
assert param.kind in [param.POSITIONAL_ONLY,
param.POSITIONAL_OR_KEYWORD,
param.VAR_POSITIONAL], \
'Unsupported Parameter kind: %s' % param.kind
input_args.append(convert_to_beam_type(param.annotation))
output_args = []
if signature.return_annotation != signature.empty:
output_args.append(convert_to_beam_type(signature.return_annotation))
else:
output_args.append(typehints.Any)
name = getattr(fn, '__name__', '<unknown>')
msg = ['from_callable(%s)' % name, ' signature: %s' % signature]
if hasattr(fn, '__code__'):
msg.append(
' File "%s", line %d' %
(fn.__code__.co_filename, fn.__code__.co_firstlineno))
return IOTypeHints(
input_types=(tuple(input_args), input_kwargs),
output_types=(tuple(output_args), {}),
origin=cls._make_origin([], tb=False, msg=msg))
def with_input_types(self, *args, **kwargs):
# type: (...) -> IOTypeHints
return self._replace(
input_types=(args, kwargs), origin=self._make_origin([self]))
def with_output_types(self, *args, **kwargs):
# type: (...) -> IOTypeHints
return self._replace(
output_types=(args, kwargs), origin=self._make_origin([self]))
def simple_output_type(self, context):
if self._has_output_types():
args, kwargs = self.output_types
if len(args) != 1 or kwargs:
raise TypeError(
'Expected single output type hint for %s but got: %s' %
(context, self.output_types))
return args[0]
def has_simple_output_type(self):
"""Whether there's a single positional output type."""
return (
self.output_types and len(self.output_types[0]) == 1 and
not self.output_types[1])
def strip_iterable(self):
# type: () -> IOTypeHints
"""Removes outer Iterable (or equivalent) from output type.
Only affects instances with simple output types, otherwise is a no-op.
Does not modify self.
Designed to be used with type hints from callables of ParDo, FlatMap, DoFn.
Output type may be Optional[T], in which case the result of stripping T is
used as the output type.
Output type may be None/NoneType, in which case nothing is done.
Example: Generator[Tuple(int, int)] becomes Tuple(int, int)
Returns:
A copy of this instance with a possibly different output type.
Raises:
ValueError if output type is simple and not iterable.
"""
if self.output_types is None or not self.has_simple_output_type():
return self
output_type = self.output_types[0][0]
if output_type is None or isinstance(output_type, type(None)):
return self
# If output_type == Optional[T]: output_type = T.
if isinstance(output_type, typehints.UnionConstraint):
types = list(output_type.union_types)
if len(types) == 2:
try:
types.remove(type(None))
output_type = types[0]
except ValueError:
pass
yielded_type = typehints.get_yielded_type(output_type)
return self._replace(
output_types=((yielded_type, ), {}),
origin=self._make_origin([self], tb=False, msg=['strip_iterable()']))
  def with_defaults(self, hints):
    # type: (Optional[IOTypeHints]) -> IOTypeHints
    """Returns a copy of self with any unset fields filled in from hints."""
    if not hints:
      return self
    if not self:
      return hints
    if self._has_input_types():
      input_types = self.input_types
    else:
      input_types = hints.input_types
    if self._has_output_types():
      output_types = self.output_types
    else:
      output_types = hints.output_types
    res = IOTypeHints(
        input_types,
        output_types,
        self._make_origin([self, hints], tb=False, msg=['with_defaults()']))
    if res == self:
      return self  # Don't needlessly increase origin traceback length.
    else:
      return res
  def _has_input_types(self):
    # True iff input_types is set and holds at least one arg or kwarg hint.
    return self.input_types is not None and any(self.input_types)
  def _has_output_types(self):
    # True iff output_types is set and holds at least one arg or kwarg hint.
    return self.output_types is not None and any(self.output_types)
  def __bool__(self):
    # Truthy iff any input or output type hints are set.
    return self._has_input_types() or self._has_output_types()
def __repr__(self):
return 'IOTypeHints[inputs=%s, outputs=%s]' % (
self.input_types, self.output_types)
  def debug_str(self):
    """Returns repr plus the origin trace, one entry per line."""
    return '\n'.join([self.__repr__()] + self.origin)
def __eq__(self, other):
def same(a, b):
if a is None or not any(a):
return b is None or not any(b)
else:
return a == b
return (
same(self.input_types, other.input_types) and
same(self.output_types, other.output_types))
  def __ne__(self, other):
    # Needed for Python 2 (which does not derive __ne__ from __eq__).
    return not self == other
  def __hash__(self):
    # NOTE(review): __eq__ treats unset (None) and empty hints as equal, but
    # their string forms differ, so "equal" instances may hash differently —
    # confirm these are never used as dict/set keys before relying on that.
    return hash(str(self))
  def __reduce__(self):
    # Don't include "origin" debug information in pickled form; it is
    # rebuilt as an empty list on unpickle.
    return (IOTypeHints, (self.input_types, self.output_types, []))
class WithTypeHints(object):
  """A mixin class that provides the ability to set and retrieve type hints.
  """
  def __init__(self, *unused_args, **unused_kwargs):
    self._type_hints = IOTypeHints.empty()
  def _get_or_create_type_hints(self):
    # type: () -> IOTypeHints
    # __init__ may have not been called
    try:
      # Only return an instance bound to self (see BEAM-8629).
      return self.__dict__['_type_hints']
    except KeyError:
      self._type_hints = IOTypeHints.empty()
      return self._type_hints
  def get_type_hints(self):
    """Gets and/or initializes type hints for this object.
    If type hints have not been set, attempts to initialize type hints in this
    order:
    - Using self.default_type_hints().
    - Using self.__class__ type hints.
    """
    return (
        self._get_or_create_type_hints().with_defaults(
            self.default_type_hints()).with_defaults(
                get_type_hints(self.__class__)))
  def default_type_hints(self):
    """Subclasses may override to supply default type hints."""
    return None
  def with_input_types(self, *arg_hints, **kwarg_hints):
    # type: (WithTypeHintsT, *Any, **Any) -> WithTypeHintsT
    """Sets input type hints (converted to Beam types); returns self."""
    arg_hints = native_type_compatibility.convert_to_beam_types(arg_hints)
    kwarg_hints = native_type_compatibility.convert_to_beam_types(kwarg_hints)
    self._type_hints = self._get_or_create_type_hints().with_input_types(
        *arg_hints, **kwarg_hints)
    return self
  def with_output_types(self, *arg_hints, **kwarg_hints):
    # type: (WithTypeHintsT, *Any, **Any) -> WithTypeHintsT
    """Sets output type hints (converted to Beam types); returns self."""
    arg_hints = native_type_compatibility.convert_to_beam_types(arg_hints)
    kwarg_hints = native_type_compatibility.convert_to_beam_types(kwarg_hints)
    self._type_hints = self._get_or_create_type_hints().with_output_types(
        *arg_hints, **kwarg_hints)
    return self
class TypeCheckError(Exception):
  """Raised when a type-hint constraint is violated."""
  pass
def _positional_arg_hints(arg, hints):
  """Returns the type of a (possibly tuple-packed) positional argument.
  E.g. for lambda ((a, b), c): None the single positional argument is (as
  returned by inspect) [[a, b], c] which should have type
  Tuple[Tuple[Int, Any], float] when applied to the type hints
  {a: int, b: Any, c: float}.
  """
  if isinstance(arg, list):
    # Recursively resolve nested tuple-packed (Python 2) parameters.
    return typehints.Tuple[[_positional_arg_hints(a, hints) for a in arg]]
  return hints.get(arg, typehints.Any)
def _unpack_positional_arg_hints(arg, hint):
  """Unpacks the given hint according to the nested structure of arg.
  For example, if arg is [[a, b], c] and hint is Tuple[Any, int], then
  this function would return ((Any, Any), int) so it can be used in conjunction
  with inspect.getcallargs.
  """
  if isinstance(arg, list):
    tuple_constraint = typehints.Tuple[[typehints.Any] * len(arg)]
    if not typehints.is_consistent_with(hint, tuple_constraint):
      raise TypeCheckError(
          'Bad tuple arguments for %s: expected %s, got %s' %
          (arg, tuple_constraint, hint))
    if isinstance(hint, typehints.TupleConstraint):
      return tuple(
          _unpack_positional_arg_hints(a, t) for a,
          t in zip(arg, hint.tuple_types))
    # Non-tuple hint for a packed arg: treat every element as Any.
    return (typehints.Any, ) * len(arg)
  return hint
def getcallargs_forhints(func, *typeargs, **typekwargs):
  """Like inspect.getcallargs, with support for declaring default args as Any.
  In Python 2, understands that Tuple[] and an Any unpack.
  Returns:
    (Dict[str, Any]) A dictionary from argument names to values.
  """
  # Dispatch on interpreter version; the implementations differ substantially.
  if sys.version_info < (3, ):
    return getcallargs_forhints_impl_py2(func, typeargs, typekwargs)
  else:
    return getcallargs_forhints_impl_py3(func, typeargs, typekwargs)
def getcallargs_forhints_impl_py2(func, typeargs, typekwargs):
  """Python 2 implementation of getcallargs_forhints."""
  argspec = getfullargspec(func)
  # Turn Tuple[x, y] into (x, y) so getcallargs can do the proper unpacking.
  packed_typeargs = [
      _unpack_positional_arg_hints(arg, hint)
      for (arg, hint) in zip(argspec.args, typeargs)
  ]
  packed_typeargs += list(typeargs[len(packed_typeargs):])
  # Monkeypatch inspect.getfullargspec to allow passing non-function objects.
  # getfullargspec (getargspec on Python 2) are used by inspect.getcallargs.
  # TODO(BEAM-5490): Reimplement getcallargs and stop relying on monkeypatch.
  inspect.getargspec = getfullargspec
  try:
    callargs = inspect.getcallargs(func, *packed_typeargs, **typekwargs)  # pylint: disable=deprecated-method
  except TypeError as e:
    raise TypeCheckError(e)
  finally:
    # Revert monkey-patch.
    inspect.getargspec = _original_getfullargspec
  if argspec.defaults:
    # Declare any default arguments to be Any.
    for k, var in enumerate(reversed(argspec.args)):
      if k >= len(argspec.defaults):
        break
      if callargs.get(var, None) is argspec.defaults[-k - 1]:
        callargs[var] = typehints.Any
  # Patch up varargs and keywords
  if argspec.varargs:
    # TODO(BEAM-8122): This will always assign _ANY_VAR_POSITIONAL. Should be
    # "callargs.get(...) or _ANY_VAR_POSITIONAL".
    callargs[argspec.varargs] = typekwargs.get(
        argspec.varargs, _ANY_VAR_POSITIONAL)
  varkw = argspec.keywords
  if varkw:
    # TODO(robertwb): Consider taking the union of key and value types.
    callargs[varkw] = typekwargs.get(varkw, _ANY_VAR_KEYWORD)
  # TODO(BEAM-5878) Support kwonlyargs.
  return callargs
def _normalize_var_positional_hint(hint):
  """Converts a var_positional hint into Tuple[Union[<types>], ...] form.
  Args:
    hint: (tuple) Should be either a tuple of one or more types, or a single
      Tuple[<type>, ...].
  Returns:
    A Tuple[...] constraint covering all of the given element types.
  Raises:
    TypeCheckError if hint does not have the right form.
  """
  if not hint or type(hint) != tuple:
    raise TypeCheckError('Unexpected VAR_POSITIONAL value: %s' % hint)
  if len(hint) == 1 and isinstance(hint[0], typehints.TupleSequenceConstraint):
    # Example: tuple(Tuple[Any, ...]) -> Tuple[Any, ...]
    return hint[0]
  else:
    # Example: tuple(int, str) -> Tuple[Union[int, str], ...]
    return typehints.Tuple[typehints.Union[hint], ...]
def _normalize_var_keyword_hint(hint, arg_name):
  """Converts a var_keyword hint into Dict[<key type>, <value type>] form.
  Args:
    hint: (dict) Should either contain a pair (arg_name,
      Dict[<key type>, <value type>]), or one or more possible types for the
      value.
    arg_name: (str) The keyword receiving this hint.
  Returns:
    A Dict[...] constraint for the keyword arguments.
  Raises:
    TypeCheckError if hint does not have the right form.
  """
  if not hint or type(hint) != dict:
    raise TypeCheckError('Unexpected VAR_KEYWORD value: %s' % hint)
  keys = list(hint.keys())
  values = list(hint.values())
  if (len(values) == 1 and keys[0] == arg_name and
      isinstance(values[0], typehints.DictConstraint)):
    # Example: dict(kwargs=Dict[str, Any]) -> Dict[str, Any]
    return values[0]
  else:
    # Example: dict(k1=str, k2=int) -> Dict[str, Union[str,int]]
    return typehints.Dict[str, typehints.Union[values]]
def getcallargs_forhints_impl_py3(func, type_args, type_kwargs):
  """Bind type_args and type_kwargs to func.
  Works like inspect.getcallargs, with some modifications to support type hint
  checks.
  For unbound args, will use annotations and fall back to Any (or variants of
  Any).
  Returns:
    A mapping from parameter name to argument.
  Raises:
    TypeCheckError: if the hints cannot be bound to func's signature.
  """
  try:
    signature = get_signature(func)
  except ValueError as e:
    logging.warning('Could not get signature for function: %s: %s', func, e)
    return {}
  try:
    bindings = signature.bind(*type_args, **type_kwargs)
  except TypeError as e:
    # Might be raised due to too few or too many arguments.
    raise TypeCheckError(e)
  bound_args = bindings.arguments
  for param in signature.parameters.values():
    if param.name in bound_args:
      # Bound: unpack/convert variadic arguments.
      if param.kind == param.VAR_POSITIONAL:
        bound_args[param.name] = _normalize_var_positional_hint(
            bound_args[param.name])
      elif param.kind == param.VAR_KEYWORD:
        bound_args[param.name] = _normalize_var_keyword_hint(
            bound_args[param.name], param.name)
    else:
      # Unbound: must have a default or be variadic.
      if param.annotation != param.empty:
        bound_args[param.name] = param.annotation
      elif param.kind == param.VAR_POSITIONAL:
        bound_args[param.name] = _ANY_VAR_POSITIONAL
      elif param.kind == param.VAR_KEYWORD:
        bound_args[param.name] = _ANY_VAR_KEYWORD
      elif param.default is not param.empty:
        # Declare unbound parameters with defaults to be Any.
        bound_args[param.name] = typehints.Any
      else:
        # This case should be caught by signature.bind() above.
        raise ValueError('Unexpected unbound parameter: %s' % param.name)
  return dict(bound_args)
def get_type_hints(fn):
  # type: (Any) -> IOTypeHints
  """Gets the type hint associated with an arbitrary object fn.
  Always returns a valid IOTypeHints object, creating one if necessary.
  """
  # pylint: disable=protected-access
  if not hasattr(fn, '_type_hints'):
    try:
      fn._type_hints = IOTypeHints.empty()
    except (AttributeError, TypeError):
      # Can't add arbitrary attributes to this object (e.g. a builtin),
      # but might have some restrictions anyways...
      hints = IOTypeHints.empty()
      # Python 3.7 introduces annotations for _MethodDescriptorTypes.
      if isinstance(fn, _MethodDescriptorType) and sys.version_info < (3, 7):
        hints = hints.with_input_types(fn.__objclass__)  # type: ignore
      return hints
  return fn._type_hints
  # pylint: enable=protected-access
def with_input_types(*positional_hints, **keyword_hints):
  # type: (*Any, **Any) -> Callable[[T], T]
  """A decorator that type-checks defined type-hints with passed func arguments.
  All type-hinted arguments can be specified using positional arguments,
  keyword arguments, or a mix of both. Additionally, all function arguments
  must be type-hinted in totality if even one parameter is type-hinted.
  Once fully decorated, if the arguments passed to the resulting function
  violate the type-hint constraints defined, a :class:`TypeCheckError`
  detailing the error will be raised.
  To be used as:
  .. testcode::
    from apache_beam.typehints import with_input_types
    @with_input_types(str)
    def upper(s):
      return s.upper()
  Or:
  .. testcode::
    from apache_beam.typehints import with_input_types
    from apache_beam.typehints import List
    from apache_beam.typehints import Tuple
    @with_input_types(ls=List[Tuple[int, int]])
    def increment(ls):
      [(i + 1, j + 1) for (i,j) in ls]
  Args:
    *positional_hints: Positional type-hints having identical order as the
      function's formal arguments. Values for this argument must either be a
      built-in Python type or an instance of a
      :class:`~apache_beam.typehints.typehints.TypeConstraint` created by
      'indexing' a
      :class:`~apache_beam.typehints.typehints.CompositeTypeHint` instance
      with a type parameter.
    **keyword_hints: Keyword arguments mirroring the names of the parameters to
      the decorated functions. The value of each keyword argument must either
      be one of the allowed built-in Python types, a custom class, or an
      instance of a :class:`~apache_beam.typehints.typehints.TypeConstraint`
      created by 'indexing' a
      :class:`~apache_beam.typehints.typehints.CompositeTypeHint` instance
      with a type parameter.
  Raises:
    :class:`ValueError`: If not all function arguments have
      corresponding type-hints specified. Or if the inner wrapper function isn't
      passed a function object.
    :class:`TypeCheckError`: If any of the passed type-hint
      constraints are not a type or
      :class:`~apache_beam.typehints.typehints.TypeConstraint` instance.
  Returns:
    The original function decorated such that it enforces type-hint constraints
    for all received function arguments.
  """
  converted_positional_hints = (
      native_type_compatibility.convert_to_beam_types(positional_hints))
  converted_keyword_hints = (
      native_type_compatibility.convert_to_beam_types(keyword_hints))
  del positional_hints
  del keyword_hints

  def annotate_input_types(f):
    if isinstance(f, types.FunctionType):
      for t in (list(converted_positional_hints) +
                list(converted_keyword_hints.values())):
        validate_composite_type_param(
            t, error_msg_prefix='All type hint arguments')
    th = getattr(f, '_type_hints', IOTypeHints.empty()).with_input_types(
        *converted_positional_hints, **converted_keyword_hints)
    f._type_hints = th  # pylint: disable=protected-access
    return f

  return annotate_input_types
def with_output_types(*return_type_hint, **kwargs):
  # type: (*Any, **Any) -> Callable[[T], T]
  """A decorator that type-checks defined type-hints for return value(s).
  This decorator will type-check the return value(s) of the decorated function.
  Only a single type-hint is accepted to specify the return type of the return
  value. If the function to be decorated has multiple return values, then one
  should use: ``Tuple[type_1, type_2]`` to annotate the types of the return
  values.
  If the ultimate return value for the function violates the specified type-hint
  a :class:`TypeCheckError` will be raised detailing the type-constraint
  violation.
  This decorator is intended to be used like:
  .. testcode::
    from apache_beam.typehints import with_output_types
    from apache_beam.typehints import Set
    class Coordinate(object):
      def __init__(self, x, y):
        self.x = x
        self.y = y
    @with_output_types(Set[Coordinate])
    def parse_ints(ints):
      return {Coordinate(i, i) for i in ints}
  Or with a simple type-hint:
  .. testcode::
    from apache_beam.typehints import with_output_types
    @with_output_types(bool)
    def negate(p):
      return not p if p else p
  Args:
    *return_type_hint: A type-hint specifying the proper return type of the
      function. This argument should either be a built-in Python type or an
      instance of a :class:`~apache_beam.typehints.typehints.TypeConstraint`
      created by 'indexing' a
      :class:`~apache_beam.typehints.typehints.CompositeTypeHint`.
    **kwargs: Not used.
  Raises:
    :class:`ValueError`: If any kwarg parameters are passed in,
      or the length of **return_type_hint** is greater than ``1``. Or if the
      inner wrapper function isn't passed a function object.
    :class:`TypeCheckError`: If the **return_type_hint** object is
      an invalid type-hint.
  Returns:
    The original function decorated such that it enforces type-hint constraints
    for all return values.
  """
  if kwargs:
    raise ValueError(
        "All arguments for the 'returns' decorator must be "
        "positional arguments.")
  if len(return_type_hint) != 1:
    raise ValueError(
        "'returns' accepts only a single positional argument. In "
        "order to specify multiple return types, use the 'Tuple' "
        "type-hint.")
  return_type_hint = native_type_compatibility.convert_to_beam_type(
      return_type_hint[0])
  validate_composite_type_param(
      return_type_hint, error_msg_prefix='All type hint arguments')

  def annotate_output_types(f):
    th = getattr(f, '_type_hints', IOTypeHints.empty())
    f._type_hints = th.with_output_types(return_type_hint)  # pylint: disable=protected-access
    return f

  return annotate_output_types
def _check_instance_type(
    type_constraint, instance, var_name=None, verbose=False):
  """A helper function to report type-hint constraint violations.
  Args:
    type_constraint: An instance of a 'TypeConstraint' or a built-in Python
      type.
    instance: The candidate object which will be checked to satisfy
      'type_constraint'.
    var_name: If 'instance' is an argument, then the actual name for the
      parameter in the original function definition.
    verbose: If True, the error message includes the offending instance's
      value in addition to its type.
  Raises:
    TypeCheckError: If 'instance' fails to meet the type-constraint of
      'type_constraint'.
  """
  hint_type = (
      "argument: '%s'" % var_name if var_name is not None else 'return type')
  try:
    check_constraint(type_constraint, instance)
  except SimpleTypeHintError:
    if verbose:
      verbose_instance = '%s, ' % instance
    else:
      verbose_instance = ''
    raise TypeCheckError(
        'Type-hint for %s violated. Expected an '
        'instance of %s, instead found %san instance of %s.' %
        (hint_type, type_constraint, verbose_instance, type(instance)))
  except CompositeTypeHintError as e:
    raise TypeCheckError('Type-hint for %s violated: %s' % (hint_type, e))
def _interleave_type_check(type_constraint, var_name=None):
  """Lazily type-check the type-hint for a lazily generated sequence type.
  This function can be applied as a decorator or called manually in a curried
  manner:
  * @_interleave_type_check(List[int])
  def gen():
    yield 5
  or
  * gen = _interleave_type_check(Tuple[int, int], 'coord_gen')(gen)
  As a result, all type-checking for the passed generator will occur at 'yield'
  time. This way, we avoid having to deplete the generator in order to
  type-check it.
  Args:
    type_constraint: An instance of a TypeConstraint. The output yielded of
      'gen' will be type-checked according to this type constraint.
    var_name: The variable name bound to 'gen' if type-checking a function
      argument. Used solely for templating in error message generation.
  Returns:
    A function which takes a generator as an argument and returns a wrapped
    version of the generator that interleaves type-checking at 'yield'
    iteration. If the generator received is already wrapped, then it is simply
    returned to avoid nested wrapping.
  """
  def wrapper(gen):
    if isinstance(gen, GeneratorWrapper):
      # Already wrapped: avoid nesting.
      return gen
    return GeneratorWrapper(
        gen, lambda x: _check_instance_type(type_constraint, x, var_name))

  return wrapper
class GeneratorWrapper(object):
  """A wrapper around a generator, allows execution of a callback per yield.

  Additionally, wrapping a generator with this class allows one to assign
  arbitrary attributes to a generator object just as with a function object.

  Attributes:
    internal_gen: An instance of a generator object. As part of 'step' of the
      generator, the yielded object will be passed to 'interleave_func'.
    interleave_func: A callback accepting a single argument. This function will
      be called with the result of each yielded 'step' in the internal
      generator.
  """
  def __init__(self, gen, interleave_func):
    self.internal_gen = gen
    self.interleave_func = interleave_func

  def __getattr__(self, attr):
    # TODO(laolu): May also want to intercept 'send' in the future if we move to
    # a GeneratorHint with 3 type-params:
    # * Generator[send_type, return_type, yield_type]
    #
    # Return the bound methods themselves; the previous code *called* them,
    # which advanced (or exhausted) the generator on a mere attribute lookup.
    if attr == '__next__':
      return self.__next__
    elif attr == '__iter__':
      return self.__iter__
    return getattr(self.internal_gen, attr)

  def __next__(self):
    # Run the callback on each value as it is produced.
    next_val = next(self.internal_gen)
    self.interleave_func(next_val)
    return next_val

  next = __next__  # Python 2 iterator protocol.

  def __iter__(self):
    for x in self.internal_gen:
      self.interleave_func(x)
      yield x
| 34.562868 | 109 | 0.699986 |
from __future__ import absolute_import
import inspect
import itertools
import logging
import sys
import traceback
import types
from builtins import next
from builtins import object
from builtins import zip
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Tuple
from typing import TypeVar
from apache_beam.typehints import native_type_compatibility
from apache_beam.typehints import typehints
from apache_beam.typehints.native_type_compatibility import convert_to_beam_type
from apache_beam.typehints.typehints import CompositeTypeHintError
from apache_beam.typehints.typehints import SimpleTypeHintError
from apache_beam.typehints.typehints import check_constraint
from apache_beam.typehints.typehints import validate_composite_type_param
# Optional backport of inspect.signature for Python 2.
try:
  import funcsigs
except ImportError:
  funcsigs = None
__all__ = [
    'no_annotations',
    'with_input_types',
    'with_output_types',
    'WithTypeHints',
    'TypeCheckError',
]
T = TypeVar('T')
WithTypeHintsT = TypeVar('WithTypeHintsT', bound='WithTypeHints')
# Type of built-in method descriptors such as str.upper.
_MethodDescriptorType = type(str.upper)
# Fallback hints used for un-annotated *args / **kwargs parameters.
_ANY_VAR_POSITIONAL = typehints.Tuple[typehints.Any, ...]
_ANY_VAR_KEYWORD = typehints.Dict[typehints.Any, typehints.Any]
# Feature flag: when False, IOTypeHints.from_callable always returns None.
_enable_from_callable = False
# Python 3 has getfullargspec; fall back to getargspec on Python 2.
try:
  _original_getfullargspec = inspect.getfullargspec
  _use_full_argspec = True
except AttributeError:
  _original_getfullargspec = inspect.getargspec
  _use_full_argspec = False
def getfullargspec(func):
  """Python 2 only: like inspect.getfullargspec, with fallbacks for classes
  and arbitrary callables."""
  assert sys.version_info < (3, ), 'This method should not be used in Python 3'
  try:
    return _original_getfullargspec(func)
  except TypeError:
    if isinstance(func, type):
      # For classes, use the spec of __init__ minus the 'self' argument.
      argspec = getfullargspec(func.__init__)
      del argspec.args[0]
      return argspec
    elif callable(func):
      try:
        return _original_getfullargspec(func.__call__)
      except TypeError:
        # Arguments with the __unknown__ prefix will be ignored in the type
        # checking code.
        if _use_full_argspec:
          return inspect.FullArgSpec(['_'],
                                     '__unknown__varargs',
                                     '__unknown__keywords', (), [], {}, {})
        else:  # Python 2
          return inspect.ArgSpec(['_'],
                                 '__unknown__varargs',
                                 '__unknown__keywords', ())
    else:
      raise
def get_signature(func):
  """Like inspect.signature, with fallbacks for objects inspect rejects.
  Falls back to funcsigs when inspect lacks 'signature' (Python 2), and
  synthesizes a catch-all signature for uninspectable callables.
  """
  # Fall back on funcsigs if the inspect module doesn't have 'signature'.
  if hasattr(inspect, 'signature'):
    inspect_ = inspect
  else:
    inspect_ = funcsigs
  try:
    signature = inspect_.signature(func)
  except ValueError:
    # Uninspectable callable: use a permissive (_, *args, **kwargs) signature.
    params = [
        inspect_.Parameter('_', inspect_.Parameter.POSITIONAL_OR_KEYWORD),
        inspect_.Parameter(
            '__unknown__varargs', inspect_.Parameter.VAR_POSITIONAL),
        inspect_.Parameter(
            '__unknown__keywords', inspect_.Parameter.VAR_KEYWORD)
    ]
    signature = inspect_.Signature(params)
  # For method descriptors (e.g. str.upper), annotate the first parameter
  # with the defining class.
  if isinstance(func, _MethodDescriptorType):
    params = list(signature.parameters.values())
    if params[0].annotation == params[0].empty:
      params[0] = params[0].replace(annotation=func.__objclass__)
    signature = signature.replace(parameters=params)
  if (signature.return_annotation == signature.empty and
      isinstance(func, type)):
    signature = signature.replace(return_annotation=typehints.normalize(func))
  return signature
def no_annotations(fn):
  """Marks fn so that IOTypeHints.from_callable ignores its annotations."""
  fn._beam_no_annotations = True
  return fn
class IOTypeHints(NamedTuple(
    'IOTypeHints',
    [('input_types', Optional[Tuple[Tuple[Any, ...], Dict[str, Any]]]),
     ('output_types', Optional[Tuple[Tuple[Any, ...], Dict[str, Any]]]),
     ('origin', List[str])])):
  """Immutable container for input/output type hints.
  input_types and output_types are each None or a pair of
  (positional_types_tuple, keyword_types_dict); origin is a list of strings
  recording, for debugging, how the hints were derived.
  """
  # Maximum number of stack frames recorded in an origin trace.
  traceback_limit = 5
  @classmethod
  def _make_origin(cls, bases, tb=True, msg=()):
    """Builds an origin trace from msg (or the current stack) plus bases."""
    if msg:
      res = msg
    else:
      res = []
      if tb:
        num_frames_skip = 2
        tb = traceback.format_stack(limit=cls.traceback_limit +
                                    num_frames_skip)[:-num_frames_skip]
        res += list(
            itertools.chain.from_iterable(s.strip().split('\n') for s in tb))
    bases = [base for base in bases if base.origin]
    if bases:
      res += ['', 'based on:']
      for i, base in enumerate(bases):
        if i > 0:
          res += ['', 'and:']
        res += ['  ' + str(base)]
        res += ['  ' + s for s in base.origin]
    return res
  @classmethod
  def empty(cls):
    """Returns an instance with no type hints set."""
    return IOTypeHints(None, None, [])
  @classmethod
  def from_callable(cls, fn):
    """Builds hints from fn's signature annotations; returns None when the
    feature is disabled, fn is marked with no_annotations, or fn has no
    annotations at all."""
    if not _enable_from_callable or getattr(fn, '_beam_no_annotations', False):
      return None
    signature = get_signature(fn)
    if (all(param.annotation == param.empty
            for param in signature.parameters.values()) and
        signature.return_annotation == signature.empty):
      return None
    input_args = []
    input_kwargs = {}
    for param in signature.parameters.values():
      if param.annotation == param.empty:
        # Un-annotated parameters fall back to Any (or variadic Any forms).
        if param.kind == param.VAR_POSITIONAL:
          input_args.append(_ANY_VAR_POSITIONAL)
        elif param.kind == param.VAR_KEYWORD:
          input_kwargs[param.name] = _ANY_VAR_KEYWORD
        elif param.kind == param.KEYWORD_ONLY:
          input_kwargs[param.name] = typehints.Any
        else:
          input_args.append(typehints.Any)
      else:
        if param.kind in [param.KEYWORD_ONLY, param.VAR_KEYWORD]:
          input_kwargs[param.name] = convert_to_beam_type(param.annotation)
        else:
          assert param.kind in [param.POSITIONAL_ONLY,
                                param.POSITIONAL_OR_KEYWORD,
                                param.VAR_POSITIONAL], \
            'Unsupported Parameter kind: %s' % param.kind
          input_args.append(convert_to_beam_type(param.annotation))
    output_args = []
    if signature.return_annotation != signature.empty:
      output_args.append(convert_to_beam_type(signature.return_annotation))
    else:
      output_args.append(typehints.Any)
    name = getattr(fn, '__name__', '<unknown>')
    msg = ['from_callable(%s)' % name, '  signature: %s' % signature]
    if hasattr(fn, '__code__'):
      msg.append(
          '  File "%s", line %d' %
          (fn.__code__.co_filename, fn.__code__.co_firstlineno))
    return IOTypeHints(
        input_types=(tuple(input_args), input_kwargs),
        output_types=(tuple(output_args), {}),
        origin=cls._make_origin([], tb=False, msg=msg))
  def with_input_types(self, *args, **kwargs):
    """Returns a copy with input_types replaced by (args, kwargs)."""
    return self._replace(
        input_types=(args, kwargs), origin=self._make_origin([self]))
  def with_output_types(self, *args, **kwargs):
    """Returns a copy with output_types replaced by (args, kwargs)."""
    return self._replace(
        output_types=(args, kwargs), origin=self._make_origin([self]))
  def simple_output_type(self, context):
    """Returns the single positional output type; None if hints are unset.
    Raises TypeError if there are multiple or keyword output types."""
    if self._has_output_types():
      args, kwargs = self.output_types
      if len(args) != 1 or kwargs:
        raise TypeError(
            'Expected single output type hint for %s but got: %s' %
            (context, self.output_types))
      return args[0]
  def has_simple_output_type(self):
    """Whether there's a single positional output type."""
    return (
        self.output_types and len(self.output_types[0]) == 1 and
        not self.output_types[1])
  def strip_iterable(self):
    """Returns a copy whose simple output type has its outer iterable
    (yielded-type wrapper) removed; no-op for non-simple or None outputs."""
    if self.output_types is None or not self.has_simple_output_type():
      return self
    output_type = self.output_types[0][0]
    if output_type is None or isinstance(output_type, type(None)):
      return self
    # If output_type == Optional[T], strip T instead.
    if isinstance(output_type, typehints.UnionConstraint):
      types = list(output_type.union_types)
      if len(types) == 2:
        try:
          types.remove(type(None))
          output_type = types[0]
        except ValueError:
          pass
    yielded_type = typehints.get_yielded_type(output_type)
    return self._replace(
        output_types=((yielded_type, ), {}),
        origin=self._make_origin([self], tb=False, msg=['strip_iterable()']))
  def with_defaults(self, hints):
    """Returns self with any unset fields filled in from hints."""
    if not hints:
      return self
    if not self:
      return hints
    if self._has_input_types():
      input_types = self.input_types
    else:
      input_types = hints.input_types
    if self._has_output_types():
      output_types = self.output_types
    else:
      output_types = hints.output_types
    res = IOTypeHints(
        input_types,
        output_types,
        self._make_origin([self, hints], tb=False, msg=['with_defaults()']))
    if res == self:
      # Avoid needlessly growing the origin traceback.
      return self
    else:
      return res
  def _has_input_types(self):
    # True iff input_types is set and holds at least one hint.
    return self.input_types is not None and any(self.input_types)
  def _has_output_types(self):
    # True iff output_types is set and holds at least one hint.
    return self.output_types is not None and any(self.output_types)
  def __bool__(self):
    # Truthy iff any input or output hints are set.
    return self._has_input_types() or self._has_output_types()
  def __repr__(self):
    return 'IOTypeHints[inputs=%s, outputs=%s]' % (
        self.input_types, self.output_types)
  def debug_str(self):
    """Returns repr plus the origin trace, one entry per line."""
    return '\n'.join([self.__repr__()] + self.origin)
  def __eq__(self, other):
    def same(a, b):
      # Unset (None) and empty hint pairs compare equal.
      if a is None or not any(a):
        return b is None or not any(b)
      else:
        return a == b
    return (
        same(self.input_types, other.input_types) and
        same(self.output_types, other.output_types))
  def __ne__(self, other):
    # Needed for Python 2; delegates to __eq__.
    return not self == other
  def __hash__(self):
    return hash(str(self))
  def __reduce__(self):
    # Don't include "origin" debug information in pickled form.
    return (IOTypeHints, (self.input_types, self.output_types, []))
class WithTypeHints(object):
  """A mixin providing the ability to set and retrieve type hints."""
  def __init__(self, *unused_args, **unused_kwargs):
    self._type_hints = IOTypeHints.empty()
  def _get_or_create_type_hints(self):
    # __init__ may not have been called; create the attribute lazily.
    try:
      # Only return an instance bound to self.
      return self.__dict__['_type_hints']
    except KeyError:
      self._type_hints = IOTypeHints.empty()
      return self._type_hints
  def get_type_hints(self):
    """Gets type hints, falling back first to default_type_hints() and then
    to hints declared on the class itself."""
    return (
        self._get_or_create_type_hints().with_defaults(
            self.default_type_hints()).with_defaults(
                get_type_hints(self.__class__)))
  def default_type_hints(self):
    """Subclasses may override to supply default type hints."""
    return None
  def with_input_types(self, *arg_hints, **kwarg_hints):
    """Sets input type hints (converted to Beam types); returns self."""
    arg_hints = native_type_compatibility.convert_to_beam_types(arg_hints)
    kwarg_hints = native_type_compatibility.convert_to_beam_types(kwarg_hints)
    self._type_hints = self._get_or_create_type_hints().with_input_types(
        *arg_hints, **kwarg_hints)
    return self
  def with_output_types(self, *arg_hints, **kwarg_hints):
    """Sets output type hints (converted to Beam types); returns self."""
    arg_hints = native_type_compatibility.convert_to_beam_types(arg_hints)
    kwarg_hints = native_type_compatibility.convert_to_beam_types(kwarg_hints)
    self._type_hints = self._get_or_create_type_hints().with_output_types(
        *arg_hints, **kwarg_hints)
    return self
class TypeCheckError(Exception):
  """Raised when a type-hint constraint is violated."""
  pass
def _positional_arg_hints(arg, hints):
  """Returns the type of a (possibly tuple-packed) positional argument.
  arg is either an argument name or, for Python 2 tuple-packed parameters, a
  nested list of names; hints maps names to types, defaulting to Any.
  """
  if isinstance(arg, list):
    return typehints.Tuple[[_positional_arg_hints(a, hints) for a in arg]]
  return hints.get(arg, typehints.Any)
def _unpack_positional_arg_hints(arg, hint):
  """Unpacks hint according to the nested structure of arg so that the
  result can be used with inspect.getcallargs (Python 2 tuple-packed
  parameters)."""
  if isinstance(arg, list):
    tuple_constraint = typehints.Tuple[[typehints.Any] * len(arg)]
    if not typehints.is_consistent_with(hint, tuple_constraint):
      raise TypeCheckError(
          'Bad tuple arguments for %s: expected %s, got %s' %
          (arg, tuple_constraint, hint))
    if isinstance(hint, typehints.TupleConstraint):
      return tuple(
          _unpack_positional_arg_hints(a, t) for a,
          t in zip(arg, hint.tuple_types))
    # Non-tuple hint for a packed arg: treat every element as Any.
    return (typehints.Any, ) * len(arg)
  return hint
def getcallargs_forhints(func, *typeargs, **typekwargs):
  """Like inspect.getcallargs, with support for declaring default args as Any.
  Returns:
    (Dict[str, Any]) A dictionary from argument names to type hints.
  """
  # Dispatch on interpreter version; the implementations differ substantially.
  if sys.version_info < (3, ):
    return getcallargs_forhints_impl_py2(func, typeargs, typekwargs)
  else:
    return getcallargs_forhints_impl_py3(func, typeargs, typekwargs)
def getcallargs_forhints_impl_py2(func, typeargs, typekwargs):
  """Python 2 implementation of getcallargs_forhints."""
  argspec = getfullargspec(func)
  # Turn Tuple[x, y] into (x, y) so getcallargs can do the proper unpacking.
  packed_typeargs = [
      _unpack_positional_arg_hints(arg, hint)
      for (arg, hint) in zip(argspec.args, typeargs)
  ]
  packed_typeargs += list(typeargs[len(packed_typeargs):])
  # Monkeypatch inspect.getargspec to allow passing non-function objects.
  inspect.getargspec = getfullargspec
  try:
    callargs = inspect.getcallargs(func, *packed_typeargs, **typekwargs)
  except TypeError as e:
    raise TypeCheckError(e)
  finally:
    # Revert the monkey-patch.
    inspect.getargspec = _original_getfullargspec
  if argspec.defaults:
    # Declare any default arguments to be Any.
    for k, var in enumerate(reversed(argspec.args)):
      if k >= len(argspec.defaults):
        break
      if callargs.get(var, None) is argspec.defaults[-k - 1]:
        callargs[var] = typehints.Any
  # Patch up varargs and keywords.
  if argspec.varargs:
    callargs[argspec.varargs] = typekwargs.get(
        argspec.varargs, _ANY_VAR_POSITIONAL)
  varkw = argspec.keywords
  if varkw:
    callargs[varkw] = typekwargs.get(varkw, _ANY_VAR_KEYWORD)
  return callargs
def _normalize_var_positional_hint(hint):
  """Converts a var_positional hint into Tuple[Union[<types>], ...] form.
  Raises:
    TypeCheckError: if hint is not a non-empty tuple.
  """
  if not hint or type(hint) != tuple:
    raise TypeCheckError('Unexpected VAR_POSITIONAL value: %s' % hint)
  if len(hint) == 1 and isinstance(hint[0], typehints.TupleSequenceConstraint):
    # Example: tuple(Tuple[Any, ...]) -> Tuple[Any, ...]
    return hint[0]
  else:
    # Example: tuple(int, str) -> Tuple[Union[int, str], ...]
    return typehints.Tuple[typehints.Union[hint], ...]
def _normalize_var_keyword_hint(hint, arg_name):
  """Converts a var_keyword hint into Dict[<key type>, <value type>] form.
  Raises:
    TypeCheckError: if hint is not a non-empty dict.
  """
  if not hint or type(hint) != dict:
    raise TypeCheckError('Unexpected VAR_KEYWORD value: %s' % hint)
  keys = list(hint.keys())
  values = list(hint.values())
  if (len(values) == 1 and keys[0] == arg_name and
      isinstance(values[0], typehints.DictConstraint)):
    # Example: dict(kwargs=Dict[str, Any]) -> Dict[str, Any]
    return values[0]
  else:
    # Example: dict(k1=str, k2=int) -> Dict[str, Union[str, int]]
    return typehints.Dict[str, typehints.Union[values]]
def getcallargs_forhints_impl_py3(func, type_args, type_kwargs):
  """Binds type hints to func's parameters, like inspect.getcallargs.
  Un-bound parameters fall back to annotations or variants of Any.
  """
  try:
    signature = get_signature(func)
  except ValueError as e:
    logging.warning('Could not get signature for function: %s: %s', func, e)
    return {}
  try:
    bindings = signature.bind(*type_args, **type_kwargs)
  except TypeError as e:
    # Might be raised due to too few or too many arguments.
    raise TypeCheckError(e)
  bound_args = bindings.arguments
  for param in signature.parameters.values():
    if param.name in bound_args:
      # Bound: normalize variadic argument hints.
      if param.kind == param.VAR_POSITIONAL:
        bound_args[param.name] = _normalize_var_positional_hint(
            bound_args[param.name])
      elif param.kind == param.VAR_KEYWORD:
        bound_args[param.name] = _normalize_var_keyword_hint(
            bound_args[param.name], param.name)
    else:
      # Unbound: must have a default or be variadic.
      if param.annotation != param.empty:
        bound_args[param.name] = param.annotation
      elif param.kind == param.VAR_POSITIONAL:
        bound_args[param.name] = _ANY_VAR_POSITIONAL
      elif param.kind == param.VAR_KEYWORD:
        bound_args[param.name] = _ANY_VAR_KEYWORD
      elif param.default is not param.empty:
        # Declare unbound parameters with defaults to be Any.
        bound_args[param.name] = typehints.Any
      else:
        # This case should be caught by signature.bind() above.
        raise ValueError('Unexpected unbound parameter: %s' % param.name)
  return dict(bound_args)
def get_type_hints(fn):
  """Gets the IOTypeHints associated with fn, creating them if necessary."""
  # pylint: disable=protected-access
  if not hasattr(fn, '_type_hints'):
    try:
      fn._type_hints = IOTypeHints.empty()
    except (AttributeError, TypeError):
      # Can't add arbitrary attributes to this object (e.g. a builtin),
      # but might have some restrictions anyways...
      hints = IOTypeHints.empty()
      # Python 3.7 introduces annotations for _MethodDescriptorTypes.
      if isinstance(fn, _MethodDescriptorType) and sys.version_info < (3, 7):
        hints = hints.with_input_types(fn.__objclass__) # type: ignore
      return hints
  return fn._type_hints
  # pylint: enable=protected-access
def with_input_types(*positional_hints, **keyword_hints):
  # type: (*Any, **Any) -> Callable[[T], T]
  """Decorator attaching input type hints to a function.

  The hints are converted to Beam types once, at decoration time, and
  stored on the decorated object's ``_type_hints`` attribute.
  """
  converted_positional_hints = (
      native_type_compatibility.convert_to_beam_types(positional_hints))
  converted_keyword_hints = (
      native_type_compatibility.convert_to_beam_types(keyword_hints))
  # Only the converted forms are used from here on.
  del positional_hints
  del keyword_hints
  def annotate_input_types(f):
    if isinstance(f, types.FunctionType):
      # Plain functions get eager validation of every hint argument.
      for t in (list(converted_positional_hints) +
                list(converted_keyword_hints.values())):
        validate_composite_type_param(
            t, error_msg_prefix='All type hint arguments')
    th = getattr(f, '_type_hints', IOTypeHints.empty()).with_input_types(
        *converted_positional_hints, **converted_keyword_hints)
    f._type_hints = th  # pylint: disable=protected-access
    return f
  return annotate_input_types
def with_output_types(*return_type_hint, **kwargs):
  # type: (*Any, **Any) -> Callable[[T], T]
  """Decorator attaching a single output type hint to a function.

  Raises:
    ValueError: if keyword arguments are given, or if more than one
      positional hint is supplied (use a Tuple hint for multiple
      return values).
  """
  if kwargs:
    raise ValueError(
        "All arguments for the 'returns' decorator must be "
        "positional arguments.")
  if len(return_type_hint) != 1:
    raise ValueError(
        "'returns' accepts only a single positional argument. In "
        "order to specify multiple return types, use the 'Tuple' "
        "type-hint.")
  return_type_hint = native_type_compatibility.convert_to_beam_type(
      return_type_hint[0])
  validate_composite_type_param(
      return_type_hint, error_msg_prefix='All type hint arguments')
  def annotate_output_types(f):
    th = getattr(f, '_type_hints', IOTypeHints.empty())
    f._type_hints = th.with_output_types(return_type_hint)  # pylint: disable=protected-access
    return f
  return annotate_output_types
def _check_instance_type(
    type_constraint, instance, var_name=None, verbose=False):
  """Checks ``instance`` against ``type_constraint``.

  Raises:
    TypeCheckError: naming the offending argument (or the return type
      when ``var_name`` is None); the value itself is included in the
      message when ``verbose`` is True.
  """
  hint_type = (
      "argument: '%s'" % var_name if var_name is not None else 'return type')
  try:
    check_constraint(type_constraint, instance)
  except SimpleTypeHintError:
    if verbose:
      verbose_instance = '%s, ' % instance
    else:
      verbose_instance = ''
    raise TypeCheckError(
        'Type-hint for %s violated. Expected an '
        'instance of %s, instead found %san instance of %s.' %
        (hint_type, type_constraint, verbose_instance, type(instance)))
  except CompositeTypeHintError as e:
    raise TypeCheckError('Type-hint for %s violated: %s' % (hint_type, e))
def _interleave_type_check(type_constraint, var_name=None):
  """Returns a wrapper that type-checks every element a generator yields."""
  def wrapper(gen):
    # Already wrapped: do not stack a second layer of checks.
    if isinstance(gen, GeneratorWrapper):
      return gen
    def check_element(elem):
      _check_instance_type(type_constraint, elem, var_name)
    return GeneratorWrapper(gen, check_element)
  return wrapper
class GeneratorWrapper(object):
  """Wraps a generator, applying a callback to every value it yields.

  All other attribute access is delegated to the wrapped generator.
  """
  def __init__(self, gen, interleave_func):
    self.internal_gen = gen
    self.interleave_func = interleave_func

  def __getattr__(self, attr):
    # TODO(laolu): May also want to intercept 'send' in the future if we move
    # to a GeneratorHint with 3 type-params:
    #   Generator[send_type, return_type, yield_type]
    if attr == '__next__':
      return self.__next__()
    if attr == '__iter__':
      return self.__iter__()
    return getattr(self.internal_gen, attr)

  def __next__(self):
    value = next(self.internal_gen)
    self.interleave_func(value)
    return value

  next = __next__

  def __iter__(self):
    for value in self.internal_gen:
      self.interleave_func(value)
      yield value
| true | true |
f726e812891f79ad91797716634694dc86a45c44 | 4,022 | py | Python | h/services/group.py | julien-cheng/h | 36c8ec044725720cf36f0986cdf025395aca8929 | [
"BSD-2-Clause"
] | 2 | 2019-08-04T07:22:11.000Z | 2020-07-17T05:01:41.000Z | h/services/group.py | 11-eleven-11/h | 91c7a4504ad7471ed3e30246763a03e6c1cc531b | [
"BSD-2-Clause"
] | null | null | null | h/services/group.py | 11-eleven-11/h | 91c7a4504ad7471ed3e30246763a03e6c1cc531b | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sqlalchemy as sa
from h.models import Group, User
from h.models.group import ReadableBy
from h.util import group as group_util
class GroupService(object):
    """Read-oriented lookup operations over :class:`h.models.Group`."""

    def __init__(self, session, user_fetcher):
        """
        Create a new groups service.

        :param session: the SQLAlchemy session object
        :param user_fetcher: a callable for fetching users by userid
        """
        self.session = session
        self.user_fetcher = user_fetcher

    def fetch(self, pubid_or_groupid):
        """
        Fetch a group using either a groupid or a pubid.

        The argument format is sniffed: strings in
        :attr:`h.models.Group.groupid` form go to :meth:`fetch_by_groupid`,
        everything else to :meth:`fetch_by_pubid`.

        :rtype: :class:`~h.models.Group` or ``None``
        """
        if group_util.is_groupid(pubid_or_groupid):
            return self.fetch_by_groupid(pubid_or_groupid)
        return self.fetch_by_pubid(pubid_or_groupid)

    def fetch_by_pubid(self, pubid):
        """Return a group with the given ``pubid`` or ``None``."""
        query = self.session.query(Group).filter_by(pubid=pubid)
        return query.one_or_none()

    def fetch_by_groupid(self, groupid):
        """
        Return a group with the given ``groupid`` or ``None``.

        :arg groupid: String in groupid format, e.g. ``group:foo@bar.com``

        :raises ValueError: if ``groupid`` is not a valid groupid
            (see :func:`h.util.group.split_groupid`)
        :rtype: :class:`~h.models.Group` or ``None``
        """
        parts = group_util.split_groupid(groupid)
        query = (
            self.session.query(Group)
            .filter_by(authority=parts["authority"])
            .filter_by(authority_provided_id=parts["authority_provided_id"])
        )
        return query.one_or_none()

    def filter_by_name(self, name=None):
        """
        Return a Query of all Groups, optionally filtered by name.

        Name matching is case-insensitive and wildcarded.  With no ``name``
        every group is returned, newest first.

        :rtype: sqlalchemy.orm.query.Query
        """
        query = self.session.query(Group)
        if name:
            pattern = "%{}%".format(name.lower())
            query = query.filter(sa.func.lower(Group.name).like(pattern))
        return query.order_by(Group.created.desc())

    def groupids_readable_by(self, user):
        """
        Return a list of pubids for which the user has read access.

        If the passed-in user is ``None``, this returns the list of
        world-readable groups.

        :type user: `h.models.user.User`
        """
        is_world_readable = Group.readable_by == ReadableBy.world
        if user is None:
            criterion = is_world_readable
        else:
            is_readable_member = sa.and_(
                Group.readable_by == ReadableBy.members,
                Group.members.any(User.id == user.id),
            )
            criterion = sa.or_(is_world_readable, is_readable_member)
        rows = self.session.query(Group.pubid).filter(criterion)
        return [row.pubid for row in rows]

    def groupids_created_by(self, user):
        """
        Return a list of pubids which the user created.

        If the passed-in user is ``None``, this returns an empty list.

        :type user: `h.models.user.User` or None
        """
        if user is None:
            return []
        rows = self.session.query(Group.pubid).filter_by(creator=user)
        return [row.pubid for row in rows]
def groups_factory(context, request):
    """Pyramid service factory producing a :class:`GroupService`."""
    fetch_user = request.find_service(name="user").fetch
    return GroupService(session=request.db, user_fetcher=fetch_user)
| 31.920635 | 87 | 0.60915 |
from __future__ import unicode_literals
import sqlalchemy as sa
from h.models import Group, User
from h.models.group import ReadableBy
from h.util import group as group_util
class GroupService(object):
    """Service for fetching and querying groups."""
    def __init__(self, session, user_fetcher):
        """
        Create a new groups service.

        :param session: the SQLAlchemy session object
        :param user_fetcher: a callable for fetching users by userid
        """
        self.session = session
        self.user_fetcher = user_fetcher
    def fetch(self, pubid_or_groupid):
        """
        Fetch a group using either a groupid or a pubid.

        :rtype: Group or ``None``
        """
        if group_util.is_groupid(pubid_or_groupid):
            return self.fetch_by_groupid(pubid_or_groupid)
        return self.fetch_by_pubid(pubid_or_groupid)
    def fetch_by_pubid(self, pubid):
        """Return a group with the given ``pubid`` or ``None``."""
        return self.session.query(Group).filter_by(pubid=pubid).one_or_none()
    def fetch_by_groupid(self, groupid):
        """
        Return a group with the given ``groupid`` or ``None``.

        :raises ValueError: if ``groupid`` is not a valid groupid
        """
        parts = group_util.split_groupid(groupid)
        authority = parts["authority"]
        authority_provided_id = parts["authority_provided_id"]
        return (
            self.session.query(Group)
            .filter_by(authority=authority)
            .filter_by(authority_provided_id=authority_provided_id)
            .one_or_none()
        )
    def filter_by_name(self, name=None):
        """
        Return a Query of all Groups, optionally filtered by name.

        Matching is case-insensitive and wildcarded.

        :rtype: sqlalchemy.orm.query.Query
        """
        filter_terms = []
        if name:
            filter_terms.append(
                sa.func.lower(Group.name).like("%{}%".format(name.lower()))
            )
        return (
            self.session.query(Group)
            .filter(*filter_terms)
            .order_by(Group.created.desc())
        )
    def groupids_readable_by(self, user):
        """
        Return a list of pubids for which ``user`` has read access.

        When ``user`` is ``None`` only world-readable groups are returned.
        """
        readable = Group.readable_by == ReadableBy.world
        if user is not None:
            readable_member = sa.and_(
                Group.readable_by == ReadableBy.members,
                Group.members.any(User.id == user.id),
            )
            readable = sa.or_(readable, readable_member)
        return [
            record.pubid for record in self.session.query(Group.pubid).filter(readable)
        ]
    def groupids_created_by(self, user):
        """
        Return a list of pubids for groups created by ``user``.

        When ``user`` is ``None``, return an empty list.
        """
        if user is None:
            return []
        return [
            g.pubid for g in self.session.query(Group.pubid).filter_by(creator=user)
        ]
def groups_factory(context, request):
    """Return a GroupService instance for the passed context and request."""
    user_service = request.find_service(name="user")
    return GroupService(session=request.db, user_fetcher=user_service.fetch)
| true | true |
f726e91e889b74acf6f116c6d95887b343147e4d | 73,451 | py | Python | tools/sourcecode/Python-3.10.0/Lib/asyncio/base_events.py | gagominecraft12/Blueity-Client-Retrace | d42a927a85226d73da66123922d9ea11cc20ac3d | [
"MIT"
] | 33 | 2021-07-25T14:23:35.000Z | 2022-03-31T00:17:30.000Z | tools/sourcecode/Python-3.10.0/Lib/asyncio/base_events.py | gagominecraft12/Blueity-Client-Retrace | d42a927a85226d73da66123922d9ea11cc20ac3d | [
"MIT"
] | 32 | 2019-04-26T12:29:36.000Z | 2022-03-08T14:24:30.000Z | Lib/asyncio/base_events.py | val-verde/cpython | 17aa701d799d5e071d83205d877f722f1498a09f | [
"0BSD"
] | 3 | 2019-11-12T15:21:58.000Z | 2020-09-04T14:27:55.000Z | """Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import collections.abc
import concurrent.futures
import functools
import heapq
import itertools
import os
import socket
import stat
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import constants
from . import coroutines
from . import events
from . import exceptions
from . import futures
from . import protocols
from . import sslproto
from . import staggered
from . import tasks
from . import transports
from . import trsock
from .log import logger
__all__ = 'BaseEventLoop',
# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100
# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
# Maximum timeout passed to select to avoid OS limitations
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s
# *reuse_address* parameter
_unset = object()
def _format_handle(handle):
cb = handle._callback
if isinstance(getattr(cb, '__self__', None), tasks.Task):
# format the task
return repr(cb.__self__)
else:
return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
def _set_reuseport(sock):
if not hasattr(socket, 'SO_REUSEPORT'):
raise ValueError('reuse_port not supported by socket module')
else:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except OSError:
raise ValueError('reuse_port not supported by socket module, '
'SO_REUSEPORT defined but not implemented.')
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
    """Resolve *host*/*port* without getaddrinfo() when host is a literal IP.

    Returns a single addrinfo-style 5-tuple, or None when the fast path
    does not apply and a real getaddrinfo() call is required.
    """
    # Try to skip getaddrinfo if "host" is already an IP. Users might have
    # handled name resolution in their own code and pass in resolved IPs.
    if not hasattr(socket, 'inet_pton'):
        return

    if host is None or \
            proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP}:
        return None

    if type == socket.SOCK_STREAM:
        proto = socket.IPPROTO_TCP
    elif type == socket.SOCK_DGRAM:
        proto = socket.IPPROTO_UDP
    else:
        return None

    if port is None or port in (b'', ''):
        port = 0
    else:
        # If port's a service name like "http", don't skip getaddrinfo.
        try:
            port = int(port)
        except (TypeError, ValueError):
            return None

    if family == socket.AF_UNSPEC:
        afs = [socket.AF_INET]
        if _HAS_IPv6:
            afs.append(socket.AF_INET6)
    else:
        afs = [family]

    if isinstance(host, bytes):
        host = host.decode('idna')
    if '%' in host:
        # Linux's inet_pton doesn't accept an IPv6 zone index after host,
        # like '::1%lo0'.
        return None

    for af in afs:
        try:
            socket.inet_pton(af, host)
        except OSError:
            continue
        # The host has already been resolved.
        if _HAS_IPv6 and af == socket.AF_INET6:
            return af, type, proto, '', (host, port, flowinfo, scopeid)
        return af, type, proto, '', (host, port)

    # "host" is not an IP address.
    return None
def _interleave_addrinfos(addrinfos, first_address_family_count=1):
"""Interleave list of addrinfo tuples by family."""
# Group addresses by family
addrinfos_by_family = collections.OrderedDict()
for addr in addrinfos:
family = addr[0]
if family not in addrinfos_by_family:
addrinfos_by_family[family] = []
addrinfos_by_family[family].append(addr)
addrinfos_lists = list(addrinfos_by_family.values())
reordered = []
if first_address_family_count > 1:
reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
del addrinfos_lists[0][:first_address_family_count - 1]
reordered.extend(
a for a in itertools.chain.from_iterable(
itertools.zip_longest(*addrinfos_lists)
) if a is not None)
return reordered
def _run_until_complete_cb(fut):
if not fut.cancelled():
exc = fut.exception()
if isinstance(exc, (SystemExit, KeyboardInterrupt)):
# Issue #22429: run_forever() already finished, no need to
# stop it.
return
futures._get_loop(fut).stop()
if hasattr(socket, 'TCP_NODELAY'):
def _set_nodelay(sock):
if (sock.family in {socket.AF_INET, socket.AF_INET6} and
sock.type == socket.SOCK_STREAM and
sock.proto == socket.IPPROTO_TCP):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
def _set_nodelay(sock):
pass
class _SendfileFallbackProtocol(protocols.Protocol):
    """Temporary protocol installed during a fallback sendfile() send.

    Pauses the transport's real protocol while the file contents are being
    written, forwards flow-control signals, and puts everything back via
    restore().
    """
    def __init__(self, transp):
        if not isinstance(transp, transports._FlowControlMixin):
            raise TypeError("transport should be _FlowControlMixin instance")
        self._transport = transp
        self._proto = transp.get_protocol()
        # Remember reading/writing state so restore() can reinstate it.
        self._should_resume_reading = transp.is_reading()
        self._should_resume_writing = transp._protocol_paused
        transp.pause_reading()
        transp.set_protocol(self)
        if self._should_resume_writing:
            self._write_ready_fut = self._transport._loop.create_future()
        else:
            self._write_ready_fut = None
    async def drain(self):
        # Wait until the transport's write buffer drains (flow control).
        if self._transport.is_closing():
            raise ConnectionError("Connection closed by peer")
        fut = self._write_ready_fut
        if fut is None:
            return
        await fut
    def connection_made(self, transport):
        raise RuntimeError("Invalid state: "
                           "connection should have been established already.")
    def connection_lost(self, exc):
        if self._write_ready_fut is not None:
            # Never happens if peer disconnects after sending the whole content
            # Thus disconnection is always an exception from user perspective
            if exc is None:
                self._write_ready_fut.set_exception(
                    ConnectionError("Connection is closed by peer"))
            else:
                self._write_ready_fut.set_exception(exc)
        self._proto.connection_lost(exc)
    def pause_writing(self):
        if self._write_ready_fut is not None:
            return
        self._write_ready_fut = self._transport._loop.create_future()
    def resume_writing(self):
        if self._write_ready_fut is None:
            return
        self._write_ready_fut.set_result(False)
        self._write_ready_fut = None
    def data_received(self, data):
        raise RuntimeError("Invalid state: reading should be paused")
    def eof_received(self):
        raise RuntimeError("Invalid state: reading should be paused")
    async def restore(self):
        # Reinstall the original protocol and its reading/writing state.
        self._transport.set_protocol(self._proto)
        if self._should_resume_reading:
            self._transport.resume_reading()
        if self._write_ready_fut is not None:
            # Cancel the future.
            # Basically it has no effect because protocol is switched back,
            # no code should wait for it anymore.
            self._write_ready_fut.cancel()
        if self._should_resume_writing:
            self._proto.resume_writing()
class Server(events.AbstractServer):
    """Concrete AbstractServer returned by loop.create_server()."""
    def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
                 ssl_handshake_timeout):
        self._loop = loop
        self._sockets = sockets
        # Number of attached transports; wait_closed() completes at zero.
        self._active_count = 0
        self._waiters = []
        self._protocol_factory = protocol_factory
        self._backlog = backlog
        self._ssl_context = ssl_context
        self._ssl_handshake_timeout = ssl_handshake_timeout
        self._serving = False
        self._serving_forever_fut = None
    def __repr__(self):
        return f'<{self.__class__.__name__} sockets={self.sockets!r}>'
    def _attach(self):
        # Called per accepted transport to keep the server "busy".
        assert self._sockets is not None
        self._active_count += 1
    def _detach(self):
        assert self._active_count > 0
        self._active_count -= 1
        if self._active_count == 0 and self._sockets is None:
            # Last connection done after close(): release wait_closed().
            self._wakeup()
    def _wakeup(self):
        # Resolve every pending wait_closed() future exactly once.
        waiters = self._waiters
        self._waiters = None
        for waiter in waiters:
            if not waiter.done():
                waiter.set_result(waiter)
    def _start_serving(self):
        # Idempotent: put each listening socket into accept mode.
        if self._serving:
            return
        self._serving = True
        for sock in self._sockets:
            sock.listen(self._backlog)
            self._loop._start_serving(
                self._protocol_factory, sock, self._ssl_context,
                self, self._backlog, self._ssl_handshake_timeout)
    def get_loop(self):
        """Return the event loop this server is attached to."""
        return self._loop
    def is_serving(self):
        """Return True when the server is accepting new connections."""
        return self._serving
    @property
    def sockets(self):
        # Read-only socket wrappers; empty tuple once the server is closed.
        if self._sockets is None:
            return ()
        return tuple(trsock.TransportSocket(s) for s in self._sockets)
    def close(self):
        """Stop listening; existing connections remain open."""
        sockets = self._sockets
        if sockets is None:
            return
        self._sockets = None
        for sock in sockets:
            self._loop._stop_serving(sock)
        self._serving = False
        if (self._serving_forever_fut is not None and
                not self._serving_forever_fut.done()):
            self._serving_forever_fut.cancel()
            self._serving_forever_fut = None
        if self._active_count == 0:
            self._wakeup()
    async def start_serving(self):
        """Begin accepting connections (idempotent)."""
        self._start_serving()
        # Skip one loop iteration so that all 'loop.add_reader'
        # go through.
        await tasks.sleep(0)
    async def serve_forever(self):
        """Serve until cancelled; closes the server on cancellation."""
        if self._serving_forever_fut is not None:
            raise RuntimeError(
                f'server {self!r} is already being awaited on serve_forever()')
        if self._sockets is None:
            raise RuntimeError(f'server {self!r} is closed')
        self._start_serving()
        self._serving_forever_fut = self._loop.create_future()
        try:
            await self._serving_forever_fut
        except exceptions.CancelledError:
            try:
                self.close()
                await self.wait_closed()
            finally:
                raise
        finally:
            self._serving_forever_fut = None
    async def wait_closed(self):
        """Block until the server is closed and all connections finished."""
        if self._sockets is None or self._waiters is None:
            return
        waiter = self._loop.create_future()
        self._waiters.append(waiter)
        await waiter
class BaseEventLoop(events.AbstractEventLoop):
    def __init__(self):
        # Count of cancelled handles still on self._scheduled; used to
        # decide when to compact the timer heap.
        self._timer_cancelled_count = 0
        self._closed = False
        self._stopping = False
        # FIFO of callbacks ready to run on the next loop iteration.
        self._ready = collections.deque()
        # Heap of TimerHandles ordered by deadline.
        self._scheduled = []
        self._default_executor = None
        self._internal_fds = 0
        # Identifier of the thread running the event loop, or None if the
        # event loop is not running
        self._thread_id = None
        self._clock_resolution = time.get_clock_info('monotonic').resolution
        self._exception_handler = None
        self.set_debug(coroutines._is_debug_mode())
        # In debug mode, if the execution of a callback or a step of a task
        # exceed this duration in seconds, the slow callback/task is logged.
        self.slow_callback_duration = 0.1
        self._current_handle = None
        self._task_factory = None
        self._coroutine_origin_tracking_enabled = False
        self._coroutine_origin_tracking_saved_depth = None
        # A weak set of all asynchronous generators that are
        # being iterated by the loop.
        self._asyncgens = weakref.WeakSet()
        # Set to True when `loop.shutdown_asyncgens` is called.
        self._asyncgens_shutdown_called = False
        # Set to True when `loop.shutdown_default_executor` is called.
        self._executor_shutdown_called = False
    def __repr__(self):
        # Summarise run state for debugging; safe to call at any time.
        return (
            f'<{self.__class__.__name__} running={self.is_running()} '
            f'closed={self.is_closed()} debug={self.get_debug()}>'
        )
    def create_future(self):
        """Create a Future object attached to the loop."""
        return futures.Future(loop=self)
def create_task(self, coro, *, name=None):
"""Schedule a coroutine object.
Return a task object.
"""
self._check_closed()
if self._task_factory is None:
task = tasks.Task(coro, loop=self, name=name)
if task._source_traceback:
del task._source_traceback[-1]
else:
task = self._task_factory(self, coro)
tasks._set_task_name(task, name)
return task
    def set_task_factory(self, factory):
        """Set a task factory that will be used by loop.create_task().

        If factory is None the default task factory will be set.

        If factory is a callable, it should have a signature matching
        '(loop, coro)', where 'loop' will be a reference to the active
        event loop, 'coro' will be a coroutine object.  The callable
        must return a Future.
        """
        if factory is not None and not callable(factory):
            raise TypeError('task factory must be a callable or None')
        self._task_factory = factory
    def get_task_factory(self):
        """Return a task factory, or None if the default one is in use."""
        return self._task_factory
    # Transport/event factory stubs: BaseEventLoop itself cannot create
    # transports or poll for events; concrete subclasses override these.
    def _make_socket_transport(self, sock, protocol, waiter=None, *,
                               extra=None, server=None):
        """Create socket transport."""
        raise NotImplementedError
    def _make_ssl_transport(
            self, rawsock, protocol, sslcontext, waiter=None,
            *, server_side=False, server_hostname=None,
            extra=None, server=None,
            ssl_handshake_timeout=None,
            call_connection_made=True):
        """Create SSL transport."""
        raise NotImplementedError
    def _make_datagram_transport(self, sock, protocol,
                                 address=None, waiter=None, extra=None):
        """Create datagram transport."""
        raise NotImplementedError
    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                                  extra=None):
        """Create read pipe transport."""
        raise NotImplementedError
    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                                   extra=None):
        """Create write pipe transport."""
        raise NotImplementedError
    async def _make_subprocess_transport(self, protocol, args, shell,
                                         stdin, stdout, stderr, bufsize,
                                         extra=None, **kwargs):
        """Create subprocess transport."""
        raise NotImplementedError
    def _write_to_self(self):
        """Write a byte to self-pipe, to wake up the event loop.

        This may be called from a different thread.

        The subclass is responsible for implementing the self-pipe.
        """
        raise NotImplementedError
    def _process_events(self, event_list):
        """Process selector events."""
        raise NotImplementedError
    def _check_closed(self):
        # Guard used by most public methods: a closed loop accepts no work.
        if self._closed:
            raise RuntimeError('Event loop is closed')
    def _check_default_executor(self):
        # The default executor cannot be reused once shut down.
        if self._executor_shutdown_called:
            raise RuntimeError('Executor shutdown has been called')
    def _asyncgen_finalizer_hook(self, agen):
        # Interpreter hook: an async generator is being finalized, so
        # schedule its aclose() on this loop (thread-safely).
        self._asyncgens.discard(agen)
        if not self.is_closed():
            self.call_soon_threadsafe(self.create_task, agen.aclose())
    def _asyncgen_firstiter_hook(self, agen):
        # Interpreter hook: track every async generator started while this
        # loop runs so shutdown_asyncgens() can close them later.
        if self._asyncgens_shutdown_called:
            warnings.warn(
                f"asynchronous generator {agen!r} was scheduled after "
                f"loop.shutdown_asyncgens() call",
                ResourceWarning, source=self)
        self._asyncgens.add(agen)
    async def shutdown_asyncgens(self):
        """Shutdown all active asynchronous generators."""
        self._asyncgens_shutdown_called = True
        if not len(self._asyncgens):
            # If Python version is <3.6 or we don't have any asynchronous
            # generators alive.
            return
        closing_agens = list(self._asyncgens)
        self._asyncgens.clear()
        # Close all generators concurrently, collecting (not raising)
        # errors so one failing aclose() cannot block the others.
        results = await tasks.gather(
            *[ag.aclose() for ag in closing_agens],
            return_exceptions=True)
        for result, agen in zip(results, closing_agens):
            if isinstance(result, Exception):
                self.call_exception_handler({
                    'message': f'an error occurred during closing of '
                               f'asynchronous generator {agen!r}',
                    'exception': result,
                    'asyncgen': agen
                })
    async def shutdown_default_executor(self):
        """Schedule the shutdown of the default executor."""
        self._executor_shutdown_called = True
        if self._default_executor is None:
            return
        future = self.create_future()
        # shutdown(wait=True) blocks, so run it in a helper thread and let
        # this loop keep running until the thread reports back.
        thread = threading.Thread(target=self._do_shutdown, args=(future,))
        thread.start()
        try:
            await future
        finally:
            thread.join()
    def _do_shutdown(self, future):
        # Runs in the helper thread: results must be posted back to the
        # loop thread-safely.
        try:
            self._default_executor.shutdown(wait=True)
            self.call_soon_threadsafe(future.set_result, None)
        except Exception as ex:
            self.call_soon_threadsafe(future.set_exception, ex)
    def _check_running(self):
        # Only one event loop may run per thread at any given time.
        if self.is_running():
            raise RuntimeError('This event loop is already running')
        if events._get_running_loop() is not None:
            raise RuntimeError(
                'Cannot run the event loop while another loop is running')
    def run_forever(self):
        """Run until stop() is called."""
        self._check_closed()
        self._check_running()
        self._set_coroutine_origin_tracking(self._debug)
        self._thread_id = threading.get_ident()
        # Install this loop's async-generator hooks, remembering any
        # previous ones so they can be restored on exit.
        old_agen_hooks = sys.get_asyncgen_hooks()
        sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
                               finalizer=self._asyncgen_finalizer_hook)
        try:
            events._set_running_loop(self)
            while True:
                self._run_once()
                if self._stopping:
                    break
        finally:
            # Always restore global state, even if _run_once() raised.
            self._stopping = False
            self._thread_id = None
            events._set_running_loop(None)
            self._set_coroutine_origin_tracking(False)
            sys.set_asyncgen_hooks(*old_agen_hooks)
    def run_until_complete(self, future):
        """Run until the Future is done.

        If the argument is a coroutine, it is wrapped in a Task.

        WARNING: It would be disastrous to call run_until_complete()
        with the same coroutine twice -- it would wrap it in two
        different Tasks and that can't be good.

        Return the Future's result, or raise its exception.
        """
        self._check_closed()
        self._check_running()
        new_task = not futures.isfuture(future)
        future = tasks.ensure_future(future, loop=self)
        if new_task:
            # An exception is raised if the future didn't complete, so there
            # is no need to log the "destroy pending task" message
            future._log_destroy_pending = False
        future.add_done_callback(_run_until_complete_cb)
        try:
            self.run_forever()
        except:
            if new_task and future.done() and not future.cancelled():
                # The coroutine raised a BaseException. Consume the exception
                # to not log a warning, the caller doesn't have access to the
                # local task.
                future.exception()
            raise
        finally:
            future.remove_done_callback(_run_until_complete_cb)
        if not future.done():
            raise RuntimeError('Event loop stopped before Future completed.')
        return future.result()
    def stop(self):
        """Stop running the event loop.

        Every callback already scheduled will still run.  This simply informs
        run_forever to stop looping after a complete iteration.
        """
        self._stopping = True
    def close(self):
        """Close the event loop.

        This clears the queues and shuts down the executor,
        but does not wait for the executor to finish.

        The event loop must not be running.
        """
        if self.is_running():
            raise RuntimeError("Cannot close a running event loop")
        if self._closed:
            return
        if self._debug:
            logger.debug("Close %r", self)
        self._closed = True
        self._ready.clear()
        self._scheduled.clear()
        self._executor_shutdown_called = True
        executor = self._default_executor
        if executor is not None:
            self._default_executor = None
            # wait=False: do not block close() on outstanding executor jobs.
            executor.shutdown(wait=False)
    def is_closed(self):
        """Returns True if the event loop was closed."""
        return self._closed
    def __del__(self, _warn=warnings.warn):
        # _warn is bound at definition time so it stays usable even during
        # interpreter shutdown, when module globals may already be torn down.
        if not self.is_closed():
            _warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
            if not self.is_running():
                self.close()
    def is_running(self):
        """Returns True if the event loop is running."""
        return (self._thread_id is not None)
    def time(self):
        """Return the time according to the event loop's clock.

        This is a float expressed in seconds since an epoch, but the
        epoch, precision, accuracy and drift are unspecified and may
        differ per event loop.
        """
        return time.monotonic()
    def call_later(self, delay, callback, *args, context=None):
        """Arrange for a callback to be called at a given time.

        Return a Handle: an opaque object with a cancel() method that
        can be used to cancel the call.

        The delay can be an int or float, expressed in seconds.  It is
        always relative to the current time.

        Each callback will be called exactly once.  If two callbacks
        are scheduled for exactly the same time, it is undefined which
        will be called first.

        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        timer = self.call_at(self.time() + delay, callback, *args,
                             context=context)
        if timer._source_traceback:
            # Hide call_later() itself from the creation traceback.
            del timer._source_traceback[-1]
        return timer
    def call_at(self, when, callback, *args, context=None):
        """Like call_later(), but uses an absolute time.

        Absolute time corresponds to the event loop's time() method.
        """
        self._check_closed()
        if self._debug:
            self._check_thread()
            self._check_callback(callback, 'call_at')
        timer = events.TimerHandle(when, callback, args, self, context)
        if timer._source_traceback:
            del timer._source_traceback[-1]
        # Timers live on a heap ordered by deadline.
        heapq.heappush(self._scheduled, timer)
        timer._scheduled = True
        return timer
    def call_soon(self, callback, *args, context=None):
        """Arrange for a callback to be called as soon as possible.

        This operates as a FIFO queue: callbacks are called in the
        order in which they are registered.  Each callback will be
        called exactly once.

        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        self._check_closed()
        if self._debug:
            self._check_thread()
            self._check_callback(callback, 'call_soon')
        handle = self._call_soon(callback, args, context)
        if handle._source_traceback:
            # Hide call_soon() itself from the creation traceback.
            del handle._source_traceback[-1]
        return handle
def _check_callback(self, callback, method):
if (coroutines.iscoroutine(callback) or
coroutines.iscoroutinefunction(callback)):
raise TypeError(
f"coroutines cannot be used with {method}()")
if not callable(callback):
raise TypeError(
f'a callable object was expected by {method}(), '
f'got {callback!r}')
    def _call_soon(self, callback, args, context):
        # Shared helper for call_soon()/call_soon_threadsafe(): wrap the
        # callback in a Handle and append it to the ready FIFO.
        handle = events.Handle(callback, args, self, context)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        self._ready.append(handle)
        return handle
    def _check_thread(self):
        """Check that the current thread is the thread running the event loop.

        Non-thread-safe methods of this class make this assumption and
        will likely behave incorrectly when the assumption is violated.

        Should only be called when (self._debug == True).  The caller is
        responsible for checking this condition for performance reasons.
        """
        if self._thread_id is None:
            # Loop is not running: any thread may schedule work.
            return
        thread_id = threading.get_ident()
        if thread_id != self._thread_id:
            raise RuntimeError(
                "Non-thread-safe operation invoked on an event loop other "
                "than the current one")
    def call_soon_threadsafe(self, callback, *args, context=None):
        """Like call_soon(), but thread-safe."""
        self._check_closed()
        if self._debug:
            self._check_callback(callback, 'call_soon_threadsafe')
        handle = self._call_soon(callback, args, context)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        # Wake the loop so the callback runs promptly even when the loop
        # is currently blocked polling for I/O in another thread.
        self._write_to_self()
        return handle
    def run_in_executor(self, executor, func, *args):
        """Run *func* in *executor*, returning an asyncio Future for it.

        When *executor* is None, a default ThreadPoolExecutor is created
        lazily and reused for subsequent calls.
        """
        self._check_closed()
        if self._debug:
            self._check_callback(func, 'run_in_executor')
        if executor is None:
            executor = self._default_executor
            # Only check when the default executor is being used
            self._check_default_executor()
            if executor is None:
                executor = concurrent.futures.ThreadPoolExecutor(
                    thread_name_prefix='asyncio'
                )
                self._default_executor = executor
        return futures.wrap_future(
            executor.submit(func, *args), loop=self)
    def set_default_executor(self, executor):
        """Set the default executor used by run_in_executor()."""
        if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
            warnings.warn(
                'Using the default executor that is not an instance of '
                'ThreadPoolExecutor is deprecated and will be prohibited '
                'in Python 3.9',
                DeprecationWarning, 2)
        self._default_executor = executor
    def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
        """Debug-mode getaddrinfo(): logs the call and how long it took."""
        msg = [f"{host}:{port!r}"]
        if family:
            msg.append(f'family={family!r}')
        if type:
            msg.append(f'type={type!r}')
        if proto:
            msg.append(f'proto={proto!r}')
        if flags:
            msg.append(f'flags={flags!r}')
        msg = ', '.join(msg)
        logger.debug('Get address info %s', msg)
        t0 = self.time()
        addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
        dt = self.time() - t0
        msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
        # Slow resolutions are promoted to INFO so they stand out in logs.
        if dt >= self.slow_callback_duration:
            logger.info(msg)
        else:
            logger.debug(msg)
        return addrinfo
async def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
if self._debug:
getaddr_func = self._getaddrinfo_debug
else:
getaddr_func = socket.getaddrinfo
return await self.run_in_executor(
None, getaddr_func, host, port, family, type, proto, flags)
    async def getnameinfo(self, sockaddr, flags=0):
        """Reverse-resolve *sockaddr* in the default executor (non-blocking)."""
        return await self.run_in_executor(
            None, socket.getnameinfo, sockaddr, flags)
    async def sock_sendfile(self, sock, file, offset=0, count=None,
                            *, fallback=True):
        """Send *file* over *sock*, preferring the native sendfile path.

        When the native path is unavailable and *fallback* is true, a
        portable read/sendall loop is used; otherwise
        SendfileNotAvailableError propagates.  Returns bytes sent.
        """
        if self._debug and sock.gettimeout() != 0:
            raise ValueError("the socket must be non-blocking")
        self._check_sendfile_params(sock, file, offset, count)
        try:
            return await self._sock_sendfile_native(sock, file,
                                                    offset, count)
        except exceptions.SendfileNotAvailableError as exc:
            if not fallback:
                raise
        return await self._sock_sendfile_fallback(sock, file,
                                                  offset, count)
async def _sock_sendfile_native(self, sock, file, offset, count):
# NB: sendfile syscall is not supported for SSL sockets and
# non-mmap files even if sendfile is supported by OS
raise exceptions.SendfileNotAvailableError(
f"syscall sendfile is not available for socket {sock!r} "
"and file {file!r} combination")
    async def _sock_sendfile_fallback(self, sock, file, offset, count):
        """Portable sock_sendfile(): read into a buffer, sock_sendall() it."""
        if offset:
            file.seek(offset)
        blocksize = (
            min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
            if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
        )
        buf = bytearray(blocksize)
        total_sent = 0
        try:
            while True:
                if count:
                    # Never send more than the remaining requested bytes.
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        break
                # memoryview slices reuse the single bytearray, no copies.
                view = memoryview(buf)[:blocksize]
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    break  # EOF
                await self.sock_sendall(sock, view[:read])
                total_sent += read
            return total_sent
        finally:
            if total_sent > 0 and hasattr(file, 'seek'):
                # Leave the file position where the transfer stopped.
                file.seek(offset + total_sent)
def _check_sendfile_params(self, sock, file, offset, count):
if 'b' not in getattr(file, 'mode', 'b'):
raise ValueError("file should be opened in binary mode")
if not sock.type == socket.SOCK_STREAM:
raise ValueError("only SOCK_STREAM type sockets are supported")
if count is not None:
if not isinstance(count, int):
raise TypeError(
"count must be a positive integer (got {!r})".format(count))
if count <= 0:
raise ValueError(
"count must be a positive integer (got {!r})".format(count))
if not isinstance(offset, int):
raise TypeError(
"offset must be a non-negative integer (got {!r})".format(
offset))
if offset < 0:
raise ValueError(
"offset must be a non-negative integer (got {!r})".format(
offset))
    async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
        """Create, bind and connect one socket.

        Failures are appended to *exceptions* (a list of lists shared
        with the caller) so create_connection() can report every
        attempted address.
        """
        my_exceptions = []
        exceptions.append(my_exceptions)
        family, type_, proto, _, address = addr_info
        sock = None
        try:
            sock = socket.socket(family=family, type=type_, proto=proto)
            sock.setblocking(False)
            if local_addr_infos is not None:
                # Try each resolved local address until one binds.
                for _, _, _, _, laddr in local_addr_infos:
                    try:
                        sock.bind(laddr)
                        break
                    except OSError as exc:
                        msg = (
                            f'error while attempting to bind on '
                            f'address {laddr!r}: '
                            f'{exc.strerror.lower()}'
                        )
                        exc = OSError(exc.errno, msg)
                        my_exceptions.append(exc)
                else:  # all bind attempts failed
                    raise my_exceptions.pop()
            await self.sock_connect(sock, address)
            return sock
        except OSError as exc:
            my_exceptions.append(exc)
            if sock is not None:
                sock.close()
            raise
        except:
            # Non-OSError (e.g. CancelledError): close the socket but
            # don't record it as a connection failure.
            if sock is not None:
                sock.close()
            raise
    async def create_connection(
            self, protocol_factory, host=None, port=None,
            *, ssl=None, family=0,
            proto=0, flags=0, sock=None,
            local_addr=None, server_hostname=None,
            ssl_handshake_timeout=None,
            happy_eyeballs_delay=None, interleave=None):
        """Connect to a TCP server.

        Create a streaming transport connection to a given internet host and
        port: socket family AF_INET or socket.AF_INET6 depending on host (or
        family if specified), socket type SOCK_STREAM. protocol_factory must be
        a callable returning a protocol instance.

        This method is a coroutine which will try to establish the connection
        in the background. When successful, the coroutine returns a
        (transport, protocol) pair.
        """
        if server_hostname is not None and not ssl:
            raise ValueError('server_hostname is only meaningful with ssl')
        if server_hostname is None and ssl:
            # Use host as default for server_hostname. It is an error
            # if host is empty or not set, e.g. when an
            # already-connected socket was passed or when only a port
            # is given. To avoid this error, you can pass
            # server_hostname='' -- this will bypass the hostname
            # check. (This also means that if host is a numeric
            # IP/IPv6 address, we will attempt to verify that exact
            # address; this will probably fail, but it is possible to
            # create a certificate for a specific IP address, so we
            # don't judge it here.)
            if not host:
                raise ValueError('You must set server_hostname '
                                 'when using ssl without a host')
            server_hostname = host
        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        if happy_eyeballs_delay is not None and interleave is None:
            # If using happy eyeballs, default to interleave addresses by family
            interleave = 1
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')
            # Resolve remote (and optionally local) addresses first.
            infos = await self._ensure_resolved(
                (host, port), family=family,
                type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
            if not infos:
                raise OSError('getaddrinfo() returned empty list')
            if local_addr is not None:
                laddr_infos = await self._ensure_resolved(
                    local_addr, family=family,
                    type=socket.SOCK_STREAM, proto=proto,
                    flags=flags, loop=self)
                if not laddr_infos:
                    raise OSError('getaddrinfo() returned empty list')
            else:
                laddr_infos = None
            if interleave:
                infos = _interleave_addrinfos(infos, interleave)
            exceptions = []
            if happy_eyeballs_delay is None:
                # not using happy eyeballs
                for addrinfo in infos:
                    try:
                        sock = await self._connect_sock(
                            exceptions, addrinfo, laddr_infos)
                        break
                    except OSError:
                        continue
            else:  # using happy eyeballs
                sock, _, _ = await staggered.staggered_race(
                    (functools.partial(self._connect_sock,
                                       exceptions, addrinfo, laddr_infos)
                     for addrinfo in infos),
                    happy_eyeballs_delay, loop=self)
            if sock is None:
                # Every attempt failed: flatten the per-attempt lists.
                exceptions = [exc for sub in exceptions for exc in sub]
                if len(exceptions) == 1:
                    raise exceptions[0]
                else:
                    # If they all have the same str(), raise one.
                    model = str(exceptions[0])
                    if all(str(exc) == model for exc in exceptions):
                        raise exceptions[0]
                    # Raise a combined exception so the user can see all
                    # the various error messages.
                    raise OSError('Multiple exceptions: {}'.format(
                        ', '.join(str(exc) for exc in exceptions)))
        else:
            if sock is None:
                raise ValueError(
                    'host and port was not specified and no sock specified')
            if sock.type != socket.SOCK_STREAM:
                # We allow AF_INET, AF_INET6, AF_UNIX as long as they
                # are SOCK_STREAM.
                # We support passing AF_UNIX sockets even though we have
                # a dedicated API for that: create_unix_connection.
                # Disallowing AF_UNIX in this method, breaks backwards
                # compatibility.
                raise ValueError(
                    f'A Stream Socket was expected, got {sock!r}')
        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r connected to %s:%r: (%r, %r)",
                         sock, host, port, transport, protocol)
        return transport, protocol
    async def _create_connection_transport(
            self, sock, protocol_factory, ssl,
            server_hostname, server_side=False,
            ssl_handshake_timeout=None):
        """Wrap a connected socket in a (transport, protocol) pair.

        When *ssl* is truthy an SSL transport is built; passing a bool
        selects the default SSL context.
        """
        sock.setblocking(False)
        protocol = protocol_factory()
        waiter = self.create_future()
        if ssl:
            # ssl=True means "use a default context" (None here).
            sslcontext = None if isinstance(ssl, bool) else ssl
            transport = self._make_ssl_transport(
                sock, protocol, sslcontext, waiter,
                server_side=server_side, server_hostname=server_hostname,
                ssl_handshake_timeout=ssl_handshake_timeout)
        else:
            transport = self._make_socket_transport(sock, protocol, waiter)
        try:
            # Wait until the transport signals it is ready (or failed).
            await waiter
        except:
            transport.close()
            raise
        return transport, protocol
    async def sendfile(self, transport, file, offset=0, count=None,
                       *, fallback=True):
        """Send a file to transport.

        Return the total number of bytes which were sent.

        The method uses high-performance os.sendfile if available.

        file must be a regular file object opened in binary mode.

        offset tells from where to start reading the file. If specified,
        count is the total number of bytes to transmit as opposed to
        sending the file until EOF is reached. File position is updated on
        return or also in case of error in which case file.tell()
        can be used to figure out the number of bytes
        which were sent.

        fallback set to True makes asyncio to manually read and send
        the file when the platform does not support the sendfile syscall
        (e.g. Windows or SSL socket on Unix).

        Raise SendfileNotAvailableError if the system does not support
        sendfile syscall and fallback is False.
        """
        if transport.is_closing():
            raise RuntimeError("Transport is closing")
        # The transport advertises which sendfile strategy it supports.
        mode = getattr(transport, '_sendfile_compatible',
                       constants._SendfileMode.UNSUPPORTED)
        if mode is constants._SendfileMode.UNSUPPORTED:
            raise RuntimeError(
                f"sendfile is not supported for transport {transport!r}")
        if mode is constants._SendfileMode.TRY_NATIVE:
            try:
                return await self._sendfile_native(transport, file,
                                                   offset, count)
            except exceptions.SendfileNotAvailableError as exc:
                if not fallback:
                    raise
        if not fallback:
            raise RuntimeError(
                f"fallback is disabled and native sendfile is not "
                f"supported for transport {transport!r}")
        return await self._sendfile_fallback(transport, file,
                                             offset, count)
async def _sendfile_native(self, transp, file, offset, count):
raise exceptions.SendfileNotAvailableError(
"sendfile syscall is not supported")
    async def _sendfile_fallback(self, transp, file, offset, count):
        """Portable sendfile(): read chunks and write them to the transport.

        Flow control is honoured through _SendfileFallbackProtocol's
        drain()/restore().
        """
        if offset:
            file.seek(offset)
        blocksize = min(count, 16384) if count else 16384
        buf = bytearray(blocksize)
        total_sent = 0
        proto = _SendfileFallbackProtocol(transp)
        try:
            while True:
                if count:
                    # Clamp to the remaining requested bytes.
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        return total_sent
                view = memoryview(buf)[:blocksize]
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    return total_sent  # EOF
                # Respect the transport's write buffer limits.
                await proto.drain()
                transp.write(view[:read])
                total_sent += read
        finally:
            if total_sent > 0 and hasattr(file, 'seek'):
                file.seek(offset + total_sent)
            # Reinstall the transport's original protocol, even on error.
            await proto.restore()
    async def start_tls(self, transport, protocol, sslcontext, *,
                        server_side=False,
                        server_hostname=None,
                        ssl_handshake_timeout=None):
        """Upgrade transport to TLS.

        Return a new transport that *protocol* should start using
        immediately.
        """
        if ssl is None:
            raise RuntimeError('Python ssl module is not available')
        if not isinstance(sslcontext, ssl.SSLContext):
            raise TypeError(
                f'sslcontext is expected to be an instance of ssl.SSLContext, '
                f'got {sslcontext!r}')
        if not getattr(transport, '_start_tls_compatible', False):
            raise TypeError(
                f'transport {transport!r} is not supported by start_tls()')
        waiter = self.create_future()
        ssl_protocol = sslproto.SSLProtocol(
            self, protocol, sslcontext, waiter,
            server_side, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout,
            call_connection_made=False)
        # Pause early so that "ssl_protocol.data_received()" doesn't
        # have a chance to get called before "ssl_protocol.connection_made()".
        transport.pause_reading()
        transport.set_protocol(ssl_protocol)
        conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
        resume_cb = self.call_soon(transport.resume_reading)
        try:
            # Wait for the TLS handshake to complete.
            await waiter
        except BaseException:
            # Handshake failed or was cancelled: tear everything down.
            transport.close()
            conmade_cb.cancel()
            resume_cb.cancel()
            raise
        return ssl_protocol._app_transport
    async def create_datagram_endpoint(self, protocol_factory,
                                       local_addr=None, remote_addr=None, *,
                                       family=0, proto=0, flags=0,
                                       reuse_address=_unset, reuse_port=None,
                                       allow_broadcast=None, sock=None):
        """Create datagram connection.

        Either an existing SOCK_DGRAM *sock* is wrapped as-is, or a new
        socket is created/bound/connected from the resolved
        local_addr/remote_addr.  Returns a (transport, protocol) pair.
        """
        if sock is not None:
            if sock.type != socket.SOCK_DGRAM:
                raise ValueError(
                    f'A UDP Socket was expected, got {sock!r}')
            if (local_addr or remote_addr or
                    family or proto or flags or
                    reuse_port or allow_broadcast):
                # show the problematic kwargs in exception msg
                opts = dict(local_addr=local_addr, remote_addr=remote_addr,
                            family=family, proto=proto, flags=flags,
                            reuse_address=reuse_address, reuse_port=reuse_port,
                            allow_broadcast=allow_broadcast)
                problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
                raise ValueError(
                    f'socket modifier keyword arguments can not be used '
                    f'when sock is specified. ({problems})')
            sock.setblocking(False)
            r_addr = None
        else:
            if not (local_addr or remote_addr):
                if family == 0:
                    raise ValueError('unexpected address family')
                addr_pairs_info = (((family, proto), (None, None)),)
            elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
                # UNIX-domain datagram sockets take string paths.
                for addr in (local_addr, remote_addr):
                    if addr is not None and not isinstance(addr, str):
                        raise TypeError('string is expected')
                if local_addr and local_addr[0] not in (0, '\x00'):
                    try:
                        if stat.S_ISSOCK(os.stat(local_addr).st_mode):
                            os.remove(local_addr)
                    except FileNotFoundError:
                        pass
                    except OSError as err:
                        # Directory may have permissions only to create socket.
                        logger.error('Unable to check or remove stale UNIX '
                                     'socket %r: %r',
                                     local_addr, err)
                addr_pairs_info = (((family, proto),
                                    (local_addr, remote_addr)), )
            else:
                # join address by (family, protocol)
                addr_infos = {}  # Using order preserving dict
                for idx, addr in ((0, local_addr), (1, remote_addr)):
                    if addr is not None:
                        assert isinstance(addr, tuple) and len(addr) == 2, (
                            '2-tuple is expected')
                        infos = await self._ensure_resolved(
                            addr, family=family, type=socket.SOCK_DGRAM,
                            proto=proto, flags=flags, loop=self)
                        if not infos:
                            raise OSError('getaddrinfo() returned empty list')
                        for fam, _, pro, _, address in infos:
                            key = (fam, pro)
                            if key not in addr_infos:
                                addr_infos[key] = [None, None]
                            addr_infos[key][idx] = address
                # each addr has to have info for each (family, proto) pair
                addr_pairs_info = [
                    (key, addr_pair) for key, addr_pair in addr_infos.items()
                    if not ((local_addr and addr_pair[0] is None) or
                            (remote_addr and addr_pair[1] is None))]
                if not addr_pairs_info:
                    raise ValueError('can not get address information')
            exceptions = []
            # bpo-37228: reuse_address is deprecated for UDP endpoints.
            if reuse_address is not _unset:
                if reuse_address:
                    raise ValueError("Passing `reuse_address=True` is no "
                                     "longer supported, as the usage of "
                                     "SO_REUSEPORT in UDP poses a significant "
                                     "security concern.")
                else:
                    warnings.warn("The *reuse_address* parameter has been "
                                  "deprecated as of 3.5.10 and is scheduled "
                                  "for removal in 3.11.", DeprecationWarning,
                                  stacklevel=2)
            # Try each candidate (family, proto) pair until one works.
            for ((family, proto),
                 (local_address, remote_address)) in addr_pairs_info:
                sock = None
                r_addr = None
                try:
                    sock = socket.socket(
                        family=family, type=socket.SOCK_DGRAM, proto=proto)
                    if reuse_port:
                        _set_reuseport(sock)
                    if allow_broadcast:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                    sock.setblocking(False)
                    if local_addr:
                        sock.bind(local_address)
                    if remote_addr:
                        if not allow_broadcast:
                            await self.sock_connect(sock, remote_address)
                        r_addr = remote_address
                except OSError as exc:
                    if sock is not None:
                        sock.close()
                    exceptions.append(exc)
                except:
                    if sock is not None:
                        sock.close()
                    raise
                else:
                    break
            else:
                # No pair succeeded: report the first failure.
                raise exceptions[0]
        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_datagram_transport(
            sock, protocol, r_addr, waiter)
        if self._debug:
            if local_addr:
                logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
                            "created: (%r, %r)",
                            local_addr, remote_addr, transport, protocol)
            else:
                logger.debug("Datagram endpoint remote_addr=%r created: "
                             "(%r, %r)",
                             remote_addr, transport, protocol)
        try:
            await waiter
        except:
            transport.close()
            raise
        return transport, protocol
async def _ensure_resolved(self, address, *,
family=0, type=socket.SOCK_STREAM,
proto=0, flags=0, loop):
host, port = address[:2]
info = _ipaddr_info(host, port, family, type, proto, *address[2:])
if info is not None:
# "host" is already a resolved IP.
return [info]
else:
return await loop.getaddrinfo(host, port, family=family, type=type,
proto=proto, flags=flags)
    async def _create_server_getaddrinfo(self, host, port, family, flags):
        """Resolve one host/port for create_server(), rejecting empty results."""
        infos = await self._ensure_resolved((host, port), family=family,
                                            type=socket.SOCK_STREAM,
                                            flags=flags, loop=self)
        if not infos:
            raise OSError(f'getaddrinfo({host!r}) returned empty list')
        return infos
    async def create_server(
            self, protocol_factory, host=None, port=None,
            *,
            family=socket.AF_UNSPEC,
            flags=socket.AI_PASSIVE,
            sock=None,
            backlog=100,
            ssl=None,
            reuse_address=None,
            reuse_port=None,
            ssl_handshake_timeout=None,
            start_serving=True):
        """Create a TCP server.

        The host parameter can be a string, in that case the TCP server is
        bound to host and port.

        The host parameter can also be a sequence of strings and in that case
        the TCP server is bound to all hosts of the sequence. If a host
        appears multiple times (possibly indirectly e.g. when hostnames
        resolve to the same IP address), the server is only bound once to that
        host.

        Return a Server object which can be used to stop the service.

        This method is a coroutine.
        """
        if isinstance(ssl, bool):
            raise TypeError('ssl argument must be an SSLContext or None')
        if ssl_handshake_timeout is not None and ssl is None:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')
            if reuse_address is None:
                # Default SO_REUSEADDR on POSIX (but not Cygwin).
                reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
            sockets = []
            if host == '':
                hosts = [None]
            elif (isinstance(host, str) or
                  not isinstance(host, collections.abc.Iterable)):
                hosts = [host]
            else:
                hosts = host
            # Resolve every host concurrently, then dedupe the results.
            fs = [self._create_server_getaddrinfo(host, port, family=family,
                                                  flags=flags)
                  for host in hosts]
            infos = await tasks.gather(*fs)
            infos = set(itertools.chain.from_iterable(infos))
            completed = False
            try:
                for res in infos:
                    af, socktype, proto, canonname, sa = res
                    try:
                        sock = socket.socket(af, socktype, proto)
                    except socket.error:
                        # Assume it's a bad family/type/protocol combination.
                        if self._debug:
                            logger.warning('create_server() failed to create '
                                           'socket.socket(%r, %r, %r)',
                                           af, socktype, proto, exc_info=True)
                        continue
                    sockets.append(sock)
                    if reuse_address:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
                    if reuse_port:
                        _set_reuseport(sock)
                    # Disable IPv4/IPv6 dual stack support (enabled by
                    # default on Linux) which makes a single socket
                    # listen on both address families.
                    if (_HAS_IPv6 and
                            af == socket.AF_INET6 and
                            hasattr(socket, 'IPPROTO_IPV6')):
                        sock.setsockopt(socket.IPPROTO_IPV6,
                                        socket.IPV6_V6ONLY,
                                        True)
                    try:
                        sock.bind(sa)
                    except OSError as err:
                        raise OSError(err.errno, 'error while attempting '
                                      'to bind on address %r: %s'
                                      % (sa, err.strerror.lower())) from None
                completed = True
            finally:
                # If any bind failed, close every socket opened so far.
                if not completed:
                    for sock in sockets:
                        sock.close()
        else:
            if sock is None:
                raise ValueError('Neither host/port nor sock were specified')
            if sock.type != socket.SOCK_STREAM:
                raise ValueError(f'A Stream Socket was expected, got {sock!r}')
            sockets = [sock]
        for sock in sockets:
            sock.setblocking(False)
        server = Server(self, sockets, protocol_factory,
                        ssl, backlog, ssl_handshake_timeout)
        if start_serving:
            server._start_serving()
            # Skip one loop iteration so that all 'loop.add_reader'
            # go through.
            await tasks.sleep(0)
        if self._debug:
            logger.info("%r is serving", server)
        return server
    async def connect_accepted_socket(
            self, protocol_factory, sock,
            *, ssl=None,
            ssl_handshake_timeout=None):
        """Wrap an already-accepted connection in a (transport, protocol) pair.

        The socket must be SOCK_STREAM; when *ssl* is given, the
        server-side TLS handshake is performed first.
        """
        if sock.type != socket.SOCK_STREAM:
            raise ValueError(f'A Stream Socket was expected, got {sock!r}')
        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, '', server_side=True,
            ssl_handshake_timeout=ssl_handshake_timeout)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
        return transport, protocol
    async def connect_read_pipe(self, protocol_factory, pipe):
        """Wrap the read end of *pipe* in a (transport, protocol) pair."""
        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_read_pipe_transport(pipe, protocol, waiter)
        try:
            # Wait until the transport signals it is ready (or failed).
            await waiter
        except:
            transport.close()
            raise
        if self._debug:
            logger.debug('Read pipe %r connected: (%r, %r)',
                         pipe.fileno(), transport, protocol)
        return transport, protocol
async def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Write pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
if stdin is not None:
info.append(f'stdin={_format_pipe(stdin)}')
if stdout is not None and stderr == subprocess.STDOUT:
info.append(f'stdout=stderr={_format_pipe(stdout)}')
else:
if stdout is not None:
info.append(f'stdout={_format_pipe(stdout)}')
if stderr is not None:
info.append(f'stderr={_format_pipe(stderr)}')
logger.debug(' '.join(info))
    async def subprocess_shell(self, protocol_factory, cmd, *,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               universal_newlines=False,
                               shell=True, bufsize=0,
                               encoding=None, errors=None, text=None,
                               **kwargs):
        """Run *cmd* through the shell, returning (transport, protocol).

        Text-mode options are rejected because asyncio subprocess pipes
        are always raw bytes streams.
        """
        if not isinstance(cmd, (bytes, str)):
            raise ValueError("cmd must be a string")
        if universal_newlines:
            raise ValueError("universal_newlines must be False")
        if not shell:
            raise ValueError("shell must be True")
        if bufsize != 0:
            raise ValueError("bufsize must be 0")
        if text:
            raise ValueError("text must be False")
        if encoding is not None:
            raise ValueError("encoding must be None")
        if errors is not None:
            raise ValueError("errors must be None")
        protocol = protocol_factory()
        debug_log = None
        if self._debug:
            # don't log parameters: they may contain sensitive information
            # (password) and may be too long
            debug_log = 'run shell command %r' % cmd
            self._log_subprocess(debug_log, stdin, stdout, stderr)
        transport = await self._make_subprocess_transport(
            protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
        if self._debug and debug_log is not None:
            logger.info('%s: %r', debug_log, transport)
        return transport, protocol
    async def subprocess_exec(self, protocol_factory, program, *args,
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, universal_newlines=False,
                              shell=False, bufsize=0,
                              encoding=None, errors=None, text=None,
                              **kwargs):
        """Execute *program* directly (no shell), returning (transport, protocol).

        Text-mode options are rejected because asyncio subprocess pipes
        are always raw bytes streams.
        """
        if universal_newlines:
            raise ValueError("universal_newlines must be False")
        if shell:
            raise ValueError("shell must be False")
        if bufsize != 0:
            raise ValueError("bufsize must be 0")
        if text:
            raise ValueError("text must be False")
        if encoding is not None:
            raise ValueError("encoding must be None")
        if errors is not None:
            raise ValueError("errors must be None")
        popen_args = (program,) + args
        protocol = protocol_factory()
        debug_log = None
        if self._debug:
            # don't log parameters: they may contain sensitive information
            # (password) and may be too long
            debug_log = f'execute program {program!r}'
            self._log_subprocess(debug_log, stdin, stdout, stderr)
        transport = await self._make_subprocess_transport(
            protocol, popen_args, False, stdin, stdout, stderr,
            bufsize, **kwargs)
        if self._debug and debug_log is not None:
            logger.info('%s: %r', debug_log, transport)
        return transport, protocol
    def get_exception_handler(self):
        """Return an exception handler, or None if the default one is in use.
        """
        return self._exception_handler
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
If handler is None, the default exception handler will
be set.
If handler is a callable object, it should have a
signature matching '(loop, context)', where 'loop'
will be a reference to the active event loop, 'context'
will be a dict object (see `call_exception_handler()`
documentation for details about context).
"""
if handler is not None and not callable(handler):
raise TypeError(f'A callable object or None is expected, '
f'got {handler!r}')
self._exception_handler = handler
    def default_exception_handler(self, context):
        """Default exception handler.

        This is called when an exception occurs and no exception
        handler is set, and can be called by a custom exception
        handler that wants to defer to the default behavior.

        This default handler logs the error message and other
        context-dependent information. In debug mode, a truncated
        stack trace is also appended showing where the given object
        (e.g. a handle or future or task) was created, if any.

        The context parameter has the same meaning as in
        `call_exception_handler()`.
        """
        message = context.get('message')
        if not message:
            message = 'Unhandled exception in event loop'
        exception = context.get('exception')
        if exception is not None:
            exc_info = (type(exception), exception, exception.__traceback__)
        else:
            exc_info = False
        if ('source_traceback' not in context and
                self._current_handle is not None and
                self._current_handle._source_traceback):
            # Fall back to the traceback of the handle currently running.
            context['handle_traceback'] = \
                self._current_handle._source_traceback
        log_lines = [message]
        # Render the remaining context entries deterministically.
        for key in sorted(context):
            if key in {'message', 'exception'}:
                continue
            value = context[key]
            if key == 'source_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Object created at (most recent call last):\n'
                value += tb.rstrip()
            elif key == 'handle_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Handle created at (most recent call last):\n'
                value += tb.rstrip()
            else:
                value = repr(value)
            log_lines.append(f'{key}: {value}')
        logger.error('\n'.join(log_lines), exc_info=exc_info)
    def call_exception_handler(self, context):
        """Call the current event loop's exception handler.

        The context argument is a dict containing the following keys:

        - 'message': Error message;
        - 'exception' (optional): Exception object;
        - 'future' (optional): Future instance;
        - 'task' (optional): Task instance;
        - 'handle' (optional): Handle instance;
        - 'protocol' (optional): Protocol instance;
        - 'transport' (optional): Transport instance;
        - 'socket' (optional): Socket instance;
        - 'asyncgen' (optional): Asynchronous generator that caused
          the exception.

        New keys maybe introduced in the future.

        Note: do not overload this method in an event loop subclass.
        For custom exception handling, use the
        `set_exception_handler()` method.
        """
        if self._exception_handler is None:
            try:
                self.default_exception_handler(context)
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException:
                # Second protection layer for unexpected errors
                # in the default implementation, as well as for subclassed
                # event loops with overloaded "default_exception_handler".
                logger.error('Exception in default exception handler',
                             exc_info=True)
        else:
            try:
                self._exception_handler(self, context)
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException as exc:
                # Exception in the user set custom exception handler.
                try:
                    # Let's try default handler.
                    self.default_exception_handler({
                        'message': 'Unhandled error in exception handler',
                        'exception': exc,
                        'context': context,
                    })
                except (SystemExit, KeyboardInterrupt):
                    raise
                except BaseException:
                    # Guard 'default_exception_handler' in case it is
                    # overloaded.
                    logger.error('Exception in default exception handler '
                                 'while handling an unexpected error '
                                 'in custom exception handler',
                                 exc_info=True)
    def _add_callback(self, handle):
        """Add a Handle to _scheduled (TimerHandle) or _ready."""
        assert isinstance(handle, events.Handle), 'A Handle is required here'
        if handle._cancelled:
            return
        # Timer handles are scheduled via the heap in call_at(); only
        # plain handles may be appended to the ready queue here.
        assert not isinstance(handle, events.TimerHandle)
        self._ready.append(handle)
    def _add_callback_signalsafe(self, handle):
        """Like _add_callback() but called from a signal handler."""
        self._add_callback(handle)
        # Wake the selector: the signal may arrive while the loop is
        # blocked in select().
        self._write_to_self()
def _timer_handle_cancelled(self, handle):
"""Notification that a TimerHandle has been cancelled."""
if handle._scheduled:
self._timer_cancelled_count += 1
    def _run_once(self):
        """Run one full iteration of the event loop.

        This calls all currently ready callbacks, polls for I/O,
        schedules the resulting callbacks, and finally schedules
        'call_later' callbacks.
        """
        sched_count = len(self._scheduled)
        if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
            self._timer_cancelled_count / sched_count >
                _MIN_CANCELLED_TIMER_HANDLES_FRACTION):
            # Remove delayed calls that were cancelled if their number
            # is too high
            new_scheduled = []
            for handle in self._scheduled:
                if handle._cancelled:
                    handle._scheduled = False
                else:
                    new_scheduled.append(handle)
            heapq.heapify(new_scheduled)
            self._scheduled = new_scheduled
            self._timer_cancelled_count = 0
        else:
            # Remove delayed calls that were cancelled from head of queue.
            while self._scheduled and self._scheduled[0]._cancelled:
                self._timer_cancelled_count -= 1
                handle = heapq.heappop(self._scheduled)
                handle._scheduled = False
        timeout = None
        if self._ready or self._stopping:
            timeout = 0
        elif self._scheduled:
            # Compute the desired timeout.
            when = self._scheduled[0]._when
            timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
        event_list = self._selector.select(timeout)
        self._process_events(event_list)
        # Handle 'later' callbacks that are ready.
        end_time = self.time() + self._clock_resolution
        while self._scheduled:
            handle = self._scheduled[0]
            if handle._when >= end_time:
                break
            handle = heapq.heappop(self._scheduled)
            handle._scheduled = False
            self._ready.append(handle)
        # This is the only place where callbacks are actually *called*.
        # All other places just add them to ready.
        # Note: We run all currently scheduled callbacks, but not any
        # callbacks scheduled by callbacks run this time around --
        # they will be run the next time (after another I/O poll).
        # Use an idiom that is thread-safe without using locks.
        ntodo = len(self._ready)
        for i in range(ntodo):
            handle = self._ready.popleft()
            if handle._cancelled:
                continue
            if self._debug:
                try:
                    self._current_handle = handle
                    t0 = self.time()
                    handle._run()
                    dt = self.time() - t0
                    if dt >= self.slow_callback_duration:
                        logger.warning('Executing %s took %.3f seconds',
                                       _format_handle(handle), dt)
                finally:
                    self._current_handle = None
            else:
                handle._run()
        handle = None  # Needed to break cycles when an exception occurs.
    def _set_coroutine_origin_tracking(self, enabled):
        """Toggle sys coroutine-origin tracking to match debug mode."""
        if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
            return
        if enabled:
            # Remember the previous depth so disabling restores it.
            self._coroutine_origin_tracking_saved_depth = (
                sys.get_coroutine_origin_tracking_depth())
            sys.set_coroutine_origin_tracking_depth(
                constants.DEBUG_STACK_DEPTH)
        else:
            sys.set_coroutine_origin_tracking_depth(
                self._coroutine_origin_tracking_saved_depth)
        self._coroutine_origin_tracking_enabled = enabled
    def get_debug(self):
        """Return True if the event loop runs in debug mode."""
        return self._debug
    def set_debug(self, enabled):
        """Enable or disable debug mode for this event loop."""
        self._debug = enabled
        if self.is_running():
            # Hop through call_soon_threadsafe: set_debug() may be
            # invoked from a thread other than the loop's.
            self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
| 38.516518 | 83 | 0.573689 |
import collections
import collections.abc
import concurrent.futures
import functools
import heapq
import itertools
import os
import socket
import stat
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref
try:
import ssl
except ImportError:
ssl = None
from . import constants
from . import coroutines
from . import events
from . import exceptions
from . import futures
from . import protocols
from . import sslproto
from . import staggered
from . import tasks
from . import transports
from . import trsock
from .log import logger
__all__ = 'BaseEventLoop',
# Minimum number of scheduled timer handles before _run_once() bothers
# compacting cancelled entries out of the heap.
_MIN_SCHEDULED_TIMER_HANDLES = 100
# Minimum fraction of cancelled scheduled timer handles that triggers
# that compaction.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
# Maximum timeout (seconds) passed to select() to sidestep OS limits.
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
# Sentinel default for the deprecated create_datagram_endpoint()
# *reuse_address* parameter: distinguishes "not passed" from None.
_unset = object()
def _format_handle(handle):
cb = handle._callback
if isinstance(getattr(cb, '__self__', None), tasks.Task):
# format the task
return repr(cb.__self__)
else:
return str(handle)
def _format_pipe(fd):
    """Return a human-readable label for a subprocess pipe descriptor."""
    if fd == subprocess.PIPE:
        return '<pipe>'
    if fd == subprocess.STDOUT:
        return '<stdout>'
    return repr(fd)
def _set_reuseport(sock):
    """Enable SO_REUSEPORT on *sock*; raise ValueError if unsupported."""
    if not hasattr(socket, 'SO_REUSEPORT'):
        raise ValueError('reuse_port not supported by socket module')
    try:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    except OSError:
        # The constant exists but the kernel rejects it (e.g. old kernels).
        raise ValueError('reuse_port not supported by socket module, '
                         'SO_REUSEPORT defined but not implemented.')
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
    """Return a getaddrinfo()-style 5-tuple when *host* is already a
    numeric IP literal, or None to signal that getaddrinfo() is needed.

    This lets callers skip a (potentially blocking) name resolution when
    the user passed a pre-resolved address.
    """
    # Without inet_pton() we cannot validate address literals at all.
    if not hasattr(socket, 'inet_pton'):
        return
    if host is None or \
            proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP}:
        return None
    # Only plain TCP / UDP sockets are recognised here.
    if type == socket.SOCK_STREAM:
        proto = socket.IPPROTO_TCP
    elif type == socket.SOCK_DGRAM:
        proto = socket.IPPROTO_UDP
    else:
        return None
    if port is None:
        port = 0
    elif isinstance(port, (bytes, str)) and not port:
        port = 0
    else:
        # A service name like "http" still requires getaddrinfo().
        try:
            port = int(port)
        except (TypeError, ValueError):
            return None
    if family == socket.AF_UNSPEC:
        candidates = [socket.AF_INET]
        if _HAS_IPv6:
            candidates.append(socket.AF_INET6)
    else:
        candidates = [family]
    if isinstance(host, bytes):
        host = host.decode('idna')
    if '%' in host:
        # Linux's inet_pton rejects IPv6 zone indices such as '::1%lo0'.
        return None
    for candidate_af in candidates:
        try:
            socket.inet_pton(candidate_af, host)
        except OSError:
            continue
        # The host parsed as a literal for this family: already resolved.
        if _HAS_IPv6 and candidate_af == socket.AF_INET6:
            return candidate_af, type, proto, '', (host, port, flowinfo, scopeid)
        return candidate_af, type, proto, '', (host, port)
    # "host" is not an IP address literal.
    return None
def _interleave_addrinfos(addrinfos, first_address_family_count=1):
    """Interleave a list of addrinfo tuples by address family.

    Used for Happy Eyeballs so that connection attempts alternate
    between families.  The first family may contribute
    *first_address_family_count* leading addresses before the
    round-robin interleaving begins.
    """
    # Bucket the tuples by family, preserving first-seen family order.
    by_family = collections.OrderedDict()
    for info in addrinfos:
        by_family.setdefault(info[0], []).append(info)
    buckets = list(by_family.values())
    interleaved = []
    if first_address_family_count > 1:
        lead = first_address_family_count - 1
        interleaved.extend(buckets[0][:lead])
        del buckets[0][:lead]
    # Round-robin across families; shorter buckets yield None padding.
    for batch in itertools.zip_longest(*buckets):
        interleaved.extend(info for info in batch if info is not None)
    return interleaved
def _run_until_complete_cb(fut):
    # Done-callback installed by run_until_complete(): stop the loop as
    # soon as the future completes.
    if not fut.cancelled():
        exc = fut.exception()
        if isinstance(exc, (SystemExit, KeyboardInterrupt)):
            # Issue #22429: run_forever() already finished, no need to
            # stop it.
            return
    futures._get_loop(fut).stop()
if hasattr(socket, 'TCP_NODELAY'):
    def _set_nodelay(sock):
        # Disable Nagle's algorithm on plain TCP stream sockets only.
        if (sock.family in {socket.AF_INET, socket.AF_INET6} and
                sock.type == socket.SOCK_STREAM and
                sock.proto == socket.IPPROTO_TCP):
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
    def _set_nodelay(sock):
        # Platform does not expose TCP_NODELAY; nothing to do.
        pass
class _SendfileFallbackProtocol(protocols.Protocol):
    """Temporary protocol used by the portable sendfile() fallback.

    It takes over a transport while a file is being copied through it:
    reading is paused, write flow-control is exposed via drain(), and
    restore() puts the original protocol back afterwards.
    """

    def __init__(self, transp):
        if not isinstance(transp, transports._FlowControlMixin):
            raise TypeError("transport should be _FlowControlMixin instance")
        self._transport = transp
        self._proto = transp.get_protocol()
        # Remember the transport state so restore() can reinstate it.
        self._should_resume_reading = transp.is_reading()
        self._should_resume_writing = transp._protocol_paused
        transp.pause_reading()
        transp.set_protocol(self)
        if self._should_resume_writing:
            self._write_ready_fut = self._transport._loop.create_future()
        else:
            self._write_ready_fut = None

    async def drain(self):
        """Wait until the transport is ready to accept more data."""
        if self._transport.is_closing():
            raise ConnectionError("Connection closed by peer")
        fut = self._write_ready_fut
        if fut is None:
            return
        await fut

    def connection_made(self, transport):
        raise RuntimeError("Invalid state: "
                           "connection should have been established already.")

    def connection_lost(self, exc):
        if self._write_ready_fut is not None:
            # Never happens if peer disconnects after sending the whole content
            # Thus disconnection is always an exception from user perspective
            if exc is None:
                self._write_ready_fut.set_exception(
                    ConnectionError("Connection is closed by peer"))
            else:
                self._write_ready_fut.set_exception(exc)
        self._proto.connection_lost(exc)

    def pause_writing(self):
        # Create the future that drain() will await on.
        if self._write_ready_fut is not None:
            return
        self._write_ready_fut = self._transport._loop.create_future()

    def resume_writing(self):
        # Wake up any drain() waiter.
        if self._write_ready_fut is None:
            return
        self._write_ready_fut.set_result(False)
        self._write_ready_fut = None

    def data_received(self, data):
        raise RuntimeError("Invalid state: reading should be paused")

    def eof_received(self):
        raise RuntimeError("Invalid state: reading should be paused")

    async def restore(self):
        """Reinstate the original protocol and transport state."""
        self._transport.set_protocol(self._proto)
        if self._should_resume_reading:
            self._transport.resume_reading()
        if self._write_ready_fut is not None:
            # Cancel the future.
            # Basically it has no effect because protocol is switched back,
            # no code should wait for it anymore.
            self._write_ready_fut.cancel()
        if self._should_resume_writing:
            self._proto.resume_writing()
class Server(events.AbstractServer):
    """Server returned by loop.create_server()/create_unix_server().

    Tracks its listening sockets and the number of active client
    handlers (_active_count); wait_closed() completes once the server is
    closed and the last handler detaches.
    """

    def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
                 ssl_handshake_timeout):
        self._loop = loop
        self._sockets = sockets
        # Number of in-flight client connections being served.
        self._active_count = 0
        self._waiters = []
        self._protocol_factory = protocol_factory
        self._backlog = backlog
        self._ssl_context = ssl_context
        self._ssl_handshake_timeout = ssl_handshake_timeout
        self._serving = False
        self._serving_forever_fut = None

    def __repr__(self):
        return f'<{self.__class__.__name__} sockets={self.sockets!r}>'

    def _attach(self):
        # Called when a new client connection handler starts.
        assert self._sockets is not None
        self._active_count += 1

    def _detach(self):
        # Called when a client connection handler finishes; wakes
        # wait_closed() waiters once the server is closed and idle.
        assert self._active_count > 0
        self._active_count -= 1
        if self._active_count == 0 and self._sockets is None:
            self._wakeup()

    def _wakeup(self):
        # Resolve all wait_closed() futures.
        waiters = self._waiters
        self._waiters = None
        for waiter in waiters:
            if not waiter.done():
                waiter.set_result(waiter)

    def _start_serving(self):
        if self._serving:
            return
        self._serving = True
        for sock in self._sockets:
            sock.listen(self._backlog)
            self._loop._start_serving(
                self._protocol_factory, sock, self._ssl_context,
                self, self._backlog, self._ssl_handshake_timeout)

    def get_loop(self):
        return self._loop

    def is_serving(self):
        return self._serving

    @property
    def sockets(self):
        if self._sockets is None:
            return ()
        return tuple(trsock.TransportSocket(s) for s in self._sockets)

    def close(self):
        """Stop listening; idempotent.  Does not wait for handlers."""
        sockets = self._sockets
        if sockets is None:
            return
        self._sockets = None
        for sock in sockets:
            self._loop._stop_serving(sock)
        self._serving = False
        if (self._serving_forever_fut is not None and
                not self._serving_forever_fut.done()):
            self._serving_forever_fut.cancel()
            self._serving_forever_fut = None
        if self._active_count == 0:
            self._wakeup()

    async def start_serving(self):
        self._start_serving()
        # Skip one loop iteration so that all 'loop.add_reader'
        # go through.
        await tasks.sleep(0)

    async def serve_forever(self):
        """Start serving and block until the server is closed."""
        if self._serving_forever_fut is not None:
            raise RuntimeError(
                f'server {self!r} is already being awaited on serve_forever()')
        if self._sockets is None:
            raise RuntimeError(f'server {self!r} is closed')
        self._start_serving()
        self._serving_forever_fut = self._loop.create_future()
        try:
            await self._serving_forever_fut
        except exceptions.CancelledError:
            try:
                self.close()
                await self.wait_closed()
            finally:
                raise
        finally:
            self._serving_forever_fut = None

    async def wait_closed(self):
        """Wait until the server is fully closed (no active handlers)."""
        if self._sockets is None or self._waiters is None:
            return
        waiter = self._loop.create_future()
        self._waiters.append(waiter)
        await waiter
class BaseEventLoop(events.AbstractEventLoop):
    def __init__(self):
        """Initialize the event loop's bookkeeping state (not running)."""
        # Count of cancelled entries still sitting in self._scheduled.
        self._timer_cancelled_count = 0
        self._closed = False
        self._stopping = False
        # FIFO of ready-to-run Handles for the next loop iteration.
        self._ready = collections.deque()
        # Heap of TimerHandles ordered by their scheduled time.
        self._scheduled = []
        self._default_executor = None
        self._internal_fds = 0
        # Identifier of the thread running the event loop, or None if the
        # event loop is not running
        self._thread_id = None
        self._clock_resolution = time.get_clock_info('monotonic').resolution
        self._exception_handler = None
        self.set_debug(coroutines._is_debug_mode())
        # In debug mode, if the execution of a callback or a step of a task
        # exceed this duration in seconds, the slow callback/task is logged.
        self.slow_callback_duration = 0.1
        self._current_handle = None
        self._task_factory = None
        self._coroutine_origin_tracking_enabled = False
        self._coroutine_origin_tracking_saved_depth = None

        # A weak set of all asynchronous generators that are
        # being iterated by the loop.
        self._asyncgens = weakref.WeakSet()
        # Set to True when `loop.shutdown_asyncgens` is called.
        self._asyncgens_shutdown_called = False
        # Set to True when `loop.shutdown_default_executor` is called.
        self._executor_shutdown_called = False
    def __repr__(self):
        # e.g. "<BaseEventLoop running=False closed=False debug=False>"
        return (
            f'<{self.__class__.__name__} running={self.is_running()} '
            f'closed={self.is_closed()} debug={self.get_debug()}>'
        )
    def create_future(self):
        """Create a Future object attached to the loop."""
        return futures.Future(loop=self)
    def create_task(self, coro, *, name=None):
        """Schedule a coroutine object as a Task.

        Uses the task factory if one is set via set_task_factory().
        """
        self._check_closed()
        if self._task_factory is None:
            task = tasks.Task(coro, loop=self, name=name)
            if task._source_traceback:
                # Drop the frame for create_task() itself from the traceback.
                del task._source_traceback[-1]
        else:
            task = self._task_factory(self, coro)
            tasks._set_task_name(task, name)
        return task
    def set_task_factory(self, factory):
        """Set a task factory used by create_task(), or None for default.

        The factory is called with (loop, coro) and must return a
        Future-compatible object.
        """
        if factory is not None and not callable(factory):
            raise TypeError('task factory must be a callable or None')
        self._task_factory = factory
    def get_task_factory(self):
        """Return the task factory, or None if the default one is in use."""
        return self._task_factory
    def _make_socket_transport(self, sock, protocol, waiter=None, *,
                               extra=None, server=None):
        """Create a socket transport (implemented by subclasses)."""
        raise NotImplementedError
    def _make_ssl_transport(
            self, rawsock, protocol, sslcontext, waiter=None,
            *, server_side=False, server_hostname=None,
            extra=None, server=None,
            ssl_handshake_timeout=None,
            call_connection_made=True):
        """Create an SSL transport (implemented by subclasses)."""
        raise NotImplementedError
    def _make_datagram_transport(self, sock, protocol,
                                 address=None, waiter=None, extra=None):
        """Create a datagram transport (implemented by subclasses)."""
        raise NotImplementedError
    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                                  extra=None):
        """Create a read pipe transport (implemented by subclasses)."""
        raise NotImplementedError
    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                                   extra=None):
        """Create a write pipe transport (implemented by subclasses)."""
        raise NotImplementedError
    async def _make_subprocess_transport(self, protocol, args, shell,
                                         stdin, stdout, stderr, bufsize,
                                         extra=None, **kwargs):
        """Create a subprocess transport (implemented by subclasses)."""
        raise NotImplementedError
    def _write_to_self(self):
        """Write a byte to the self-pipe to wake up the event loop from
        another thread (implemented by subclasses)."""
        raise NotImplementedError
    def _process_events(self, event_list):
        """Process selector events (implemented by subclasses)."""
        raise NotImplementedError
    def _check_closed(self):
        """Raise RuntimeError if the loop has been closed."""
        if self._closed:
            raise RuntimeError('Event loop is closed')
    def _check_default_executor(self):
        """Raise RuntimeError if the default executor was shut down."""
        if self._executor_shutdown_called:
            raise RuntimeError('Executor shutdown has been called')
    def _asyncgen_finalizer_hook(self, agen):
        # sys.set_asyncgen_hooks finalizer: close the async generator
        # from inside the loop when it is garbage collected.
        self._asyncgens.discard(agen)
        if not self.is_closed():
            self.call_soon_threadsafe(self.create_task, agen.aclose())
    def _asyncgen_firstiter_hook(self, agen):
        # sys.set_asyncgen_hooks firstiter: track the generator so it can
        # be closed by shutdown_asyncgens().
        if self._asyncgens_shutdown_called:
            warnings.warn(
                f"asynchronous generator {agen!r} was scheduled after "
                f"loop.shutdown_asyncgens() call",
                ResourceWarning, source=self)

        self._asyncgens.add(agen)
    async def shutdown_asyncgens(self):
        """Shutdown all active asynchronous generators."""
        self._asyncgens_shutdown_called = True

        if not len(self._asyncgens):
            # If Python version is <3.6 or we don't have any asynchronous
            # generators alive, there is nothing to close.
            return

        closing_agens = list(self._asyncgens)
        self._asyncgens.clear()

        results = await tasks.gather(
            *[ag.aclose() for ag in closing_agens],
            return_exceptions=True)

        for result, agen in zip(results, closing_agens):
            if isinstance(result, Exception):
                self.call_exception_handler({
                    'message': f'an error occurred during closing of '
                               f'asynchronous generator {agen!r}',
                    'exception': result,
                    'asyncgen': agen
                })
    async def shutdown_default_executor(self):
        """Schedule the shutdown of the default executor.

        The blocking executor.shutdown(wait=True) runs in a helper
        thread so the event loop is not blocked while waiting.
        """
        self._executor_shutdown_called = True
        if self._default_executor is None:
            return
        future = self.create_future()
        thread = threading.Thread(target=self._do_shutdown, args=(future,))
        thread.start()
        try:
            await future
        finally:
            thread.join()
    def _do_shutdown(self, future):
        # Runs in a helper thread; reports completion/failure back to the
        # loop thread via *future*.
        try:
            self._default_executor.shutdown(wait=True)
            self.call_soon_threadsafe(future.set_result, None)
        except Exception as ex:
            self.call_soon_threadsafe(future.set_exception, ex)
    def _check_running(self):
        """Raise RuntimeError if this (or any) loop is already running
        in the current thread."""
        if self.is_running():
            raise RuntimeError('This event loop is already running')
        if events._get_running_loop() is not None:
            raise RuntimeError(
                'Cannot run the event loop while another loop is running')
    def run_forever(self):
        """Run the event loop until stop() is called."""
        self._check_closed()
        self._check_running()
        self._set_coroutine_origin_tracking(self._debug)
        self._thread_id = threading.get_ident()

        # Install async-generator hooks, remembering the previous ones so
        # they can be restored on exit.
        old_agen_hooks = sys.get_asyncgen_hooks()
        sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
                               finalizer=self._asyncgen_finalizer_hook)
        try:
            events._set_running_loop(self)
            while True:
                self._run_once()
                if self._stopping:
                    break
        finally:
            self._stopping = False
            self._thread_id = None
            events._set_running_loop(None)
            self._set_coroutine_origin_tracking(False)
            sys.set_asyncgen_hooks(*old_agen_hooks)
    def run_until_complete(self, future):
        """Run until the Future is done.

        If the argument is a coroutine, it is wrapped in a Task.

        Return the Future's result, or raise its exception.
        """
        self._check_closed()
        self._check_running()

        new_task = not futures.isfuture(future)
        future = tasks.ensure_future(future, loop=self)
        if new_task:
            # An exception is raised if the future didn't complete, so there
            # is no need to log the "destroy pending task" message
            future._log_destroy_pending = False

        future.add_done_callback(_run_until_complete_cb)
        try:
            self.run_forever()
        except:
            if new_task and future.done() and not future.cancelled():
                # The coroutine raised a BaseException. Consume the exception
                # to not log a warning, the caller doesn't have access to the
                # local task.
                future.exception()
            raise
        finally:
            future.remove_done_callback(_run_until_complete_cb)
        if not future.done():
            raise RuntimeError('Event loop stopped before Future completed.')

        return future.result()
    def stop(self):
        """Stop running the event loop.

        Every callback already scheduled will still run.  This simply
        informs run_forever() to stop looping after a complete iteration.
        """
        self._stopping = True
    def close(self):
        """Close the event loop.

        This clears the queues and shuts down the default executor, but
        does not wait for the executor to finish.

        The event loop must not be running.
        """
        if self.is_running():
            raise RuntimeError("Cannot close a running event loop")
        if self._closed:
            return
        if self._debug:
            logger.debug("Close %r", self)
        self._closed = True
        self._ready.clear()
        self._scheduled.clear()
        self._executor_shutdown_called = True
        executor = self._default_executor
        if executor is not None:
            self._default_executor = None
            executor.shutdown(wait=False)
    def is_closed(self):
        """Return True if the event loop was closed."""
        return self._closed
    def __del__(self, _warn=warnings.warn):
        # _warn is bound at definition time so it is still available
        # during interpreter shutdown.
        if not self.is_closed():
            _warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
            if not self.is_running():
                self.close()
    def is_running(self):
        """Return whether the event loop is currently running."""
        return (self._thread_id is not None)
    def time(self):
        """Return the time according to the event loop's clock.

        This is a float expressed in seconds since an arbitrary epoch
        (monotonic clock).
        """
        return time.monotonic()
    def call_later(self, delay, callback, *args, context=None):
        """Arrange for a callback to be called at a given time.

        Return a TimerHandle which can be used to cancel the call.
        The delay is in seconds, relative to self.time().
        """
        timer = self.call_at(self.time() + delay, callback, *args,
                             context=context)
        if timer._source_traceback:
            # Drop the frame for call_later() itself from the traceback.
            del timer._source_traceback[-1]
        return timer
    def call_at(self, when, callback, *args, context=None):
        """Like call_later(), but uses an absolute time on the loop's clock.

        The absolute last time to call the callback is given by *when*
        (self.time() units).  Returns a cancellable TimerHandle.
        """
        self._check_closed()
        if self._debug:
            self._check_thread()
            self._check_callback(callback, 'call_at')
        timer = events.TimerHandle(when, callback, args, self, context)
        if timer._source_traceback:
            del timer._source_traceback[-1]
        # Keep the timer heap ordered by deadline.
        heapq.heappush(self._scheduled, timer)
        timer._scheduled = True
        return timer
    def call_soon(self, callback, *args, context=None):
        """Arrange for a callback to be called as soon as possible.

        Callbacks are called in the order in which they are registered.
        Each callback will be called exactly once.  Returns a Handle
        which can be used to cancel the call.
        """
        self._check_closed()
        if self._debug:
            self._check_thread()
            self._check_callback(callback, 'call_soon')
        handle = self._call_soon(callback, args, context)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        return handle
    def _check_callback(self, callback, method):
        """Reject coroutines/coroutine functions and non-callables passed
        to *method* (debug-mode sanity check)."""
        if (coroutines.iscoroutine(callback) or
                coroutines.iscoroutinefunction(callback)):
            raise TypeError(
                f"coroutines cannot be used with {method}()")
        if not callable(callback):
            raise TypeError(
                f'a callable object was expected by {method}(), '
                f'got {callback!r}')
    def _call_soon(self, callback, args, context):
        # Shared implementation for call_soon()/call_soon_threadsafe():
        # wrap the callback in a Handle and enqueue it on the ready queue.
        handle = events.Handle(callback, args, self, context)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        self._ready.append(handle)
        return handle
    def _check_thread(self):
        """Check that the current thread is the thread running the loop.

        Non-thread-safe methods of this class make this assumption and
        will likely behave incorrectly when called from another thread.
        Only checked when the loop is running (debug mode).
        """
        if self._thread_id is None:
            return
        thread_id = threading.get_ident()
        if thread_id != self._thread_id:
            raise RuntimeError(
                "Non-thread-safe operation invoked on an event loop other "
                "than the current one")
    def call_soon_threadsafe(self, callback, *args, context=None):
        """Like call_soon(), but thread-safe.

        Also wakes the loop via the self-pipe so the callback is picked
        up promptly when scheduled from another thread.
        """
        self._check_closed()
        if self._debug:
            self._check_callback(callback, 'call_soon_threadsafe')
        handle = self._call_soon(callback, args, context)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        self._write_to_self()
        return handle
    def run_in_executor(self, executor, func, *args):
        """Run *func* in *executor* (default thread pool when None) and
        return a Future wrapping the result."""
        self._check_closed()
        if self._debug:
            self._check_callback(func, 'run_in_executor')
        if executor is None:
            executor = self._default_executor
            # Only check when the default executor is being used
            self._check_default_executor()
            if executor is None:
                # Lazily create the shared default ThreadPoolExecutor.
                executor = concurrent.futures.ThreadPoolExecutor(
                    thread_name_prefix='asyncio'
                )
                self._default_executor = executor
        return futures.wrap_future(
            executor.submit(func, *args), loop=self)
    def set_default_executor(self, executor):
        """Set the default executor used by run_in_executor().

        Non-ThreadPoolExecutor values are deprecated.
        """
        if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
            warnings.warn(
                'Using the default executor that is not an instance of '
                'ThreadPoolExecutor is deprecated and will be prohibited '
                'in Python 3.9',
                DeprecationWarning, 2)
        self._default_executor = executor
    def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
        """Wrapper around socket.getaddrinfo() that logs the call and its
        duration (used instead of the raw call in debug mode)."""
        msg = [f"{host}:{port!r}"]
        if family:
            msg.append(f'family={family!r}')
        if type:
            msg.append(f'type={type!r}')
        if proto:
            msg.append(f'proto={proto!r}')
        if flags:
            msg.append(f'flags={flags!r}')
        msg = ', '.join(msg)
        logger.debug('Get address info %s', msg)

        t0 = self.time()
        addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
        dt = self.time() - t0

        msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
        # Promote slow resolutions to INFO level.
        if dt >= self.slow_callback_duration:
            logger.info(msg)
        else:
            logger.debug(msg)
        return addrinfo
    async def getaddrinfo(self, host, port, *,
                          family=0, type=0, proto=0, flags=0):
        """Asynchronous socket.getaddrinfo(), run in the default executor."""
        if self._debug:
            getaddr_func = self._getaddrinfo_debug
        else:
            getaddr_func = socket.getaddrinfo

        return await self.run_in_executor(
            None, getaddr_func, host, port, family, type, proto, flags)
    async def getnameinfo(self, sockaddr, flags=0):
        """Asynchronous socket.getnameinfo(), run in the default executor."""
        return await self.run_in_executor(
            None, socket.getnameinfo, sockaddr, flags)
    async def sock_sendfile(self, sock, file, offset=0, count=None,
                            *, fallback=True):
        """Send a file over *sock*, preferring the native sendfile syscall
        and falling back to a portable read/send loop when *fallback* is
        true.  Returns the total number of bytes sent."""
        if self._debug and sock.gettimeout() != 0:
            raise ValueError("the socket must be non-blocking")
        self._check_sendfile_params(sock, file, offset, count)
        try:
            return await self._sock_sendfile_native(sock, file,
                                                    offset, count)
        except exceptions.SendfileNotAvailableError as exc:
            if not fallback:
                raise
        return await self._sock_sendfile_fallback(sock, file,
                                                  offset, count)
async def _sock_sendfile_native(self, sock, file, offset, count):
raise exceptions.SendfileNotAvailableError(
f"syscall sendfile is not available for socket {sock!r} "
"and file {file!r} combination")
    async def _sock_sendfile_fallback(self, sock, file, offset, count):
        """Portable sock_sendfile(): read chunks in the executor and send
        them with sock_sendall() until EOF or *count* bytes are sent."""
        if offset:
            file.seek(offset)
        blocksize = (
            min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
            if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
        )
        buf = bytearray(blocksize)
        total_sent = 0
        try:
            while True:
                if count:
                    # Never read past the requested byte count.
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        break
                view = memoryview(buf)[:blocksize]
                # file.readinto may block; run it in the executor.
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    break  # EOF
                await self.sock_sendall(sock, view[:read])
                total_sent += read
            return total_sent
        finally:
            if total_sent > 0 and hasattr(file, 'seek'):
                # Leave the file position just after the last sent byte.
                file.seek(offset + total_sent)
def _check_sendfile_params(self, sock, file, offset, count):
if 'b' not in getattr(file, 'mode', 'b'):
raise ValueError("file should be opened in binary mode")
if not sock.type == socket.SOCK_STREAM:
raise ValueError("only SOCK_STREAM type sockets are supported")
if count is not None:
if not isinstance(count, int):
raise TypeError(
"count must be a positive integer (got {!r})".format(count))
if count <= 0:
raise ValueError(
"count must be a positive integer (got {!r})".format(count))
if not isinstance(offset, int):
raise TypeError(
"offset must be a non-negative integer (got {!r})".format(
offset))
if offset < 0:
raise ValueError(
"offset must be a non-negative integer (got {!r})".format(
offset))
    async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
        """Create, bind and connect one socket.

        Any OSError raised is also recorded in *exceptions* (a list of
        per-attempt exception lists) so create_connection() can report
        all failures.
        """
        my_exceptions = []
        exceptions.append(my_exceptions)
        family, type_, proto, _, address = addr_info
        sock = None
        try:
            sock = socket.socket(family=family, type=type_, proto=proto)
            sock.setblocking(False)
            if local_addr_infos is not None:
                # Bind to the first local address that works.
                for _, _, _, _, laddr in local_addr_infos:
                    try:
                        sock.bind(laddr)
                        break
                    except OSError as exc:
                        msg = (
                            f'error while attempting to bind on '
                            f'address {laddr!r}: '
                            f'{exc.strerror.lower()}'
                        )
                        exc = OSError(exc.errno, msg)
                        my_exceptions.append(exc)
                else:  # all bind attempts failed
                    raise my_exceptions.pop()
            await self.sock_connect(sock, address)
            return sock
        except OSError as exc:
            my_exceptions.append(exc)
            if sock is not None:
                sock.close()
            raise
        except:
            # Don't leak the socket on cancellation or other errors.
            if sock is not None:
                sock.close()
            raise
    async def create_connection(
            self, protocol_factory, host=None, port=None,
            *, ssl=None, family=0,
            proto=0, flags=0, sock=None,
            local_addr=None, server_hostname=None,
            ssl_handshake_timeout=None,
            happy_eyeballs_delay=None, interleave=None):
        """Connect to a TCP server.

        Create a streaming transport connection to a given Internet host
        and port: socket family AF_INET or AF_INET6 depending on host
        (or family if specified), socket type SOCK_STREAM.
        protocol_factory must be a callable returning a protocol
        instance.  Returns a (transport, protocol) pair.

        Either host/port or an already-connected *sock* must be given,
        not both.  When *happy_eyeballs_delay* is set, connection
        attempts are staggered across interleaved address families.
        """
        if server_hostname is not None and not ssl:
            raise ValueError('server_hostname is only meaningful with ssl')

        if server_hostname is None and ssl:
            # Use host as default for server_hostname.  It is an error
            # if host is empty or not set, e.g. when an
            # already-connected socket was passed or when only a port
            # is given.
            if not host:
                raise ValueError('You must set server_hostname '
                                 'when using ssl without a host')
            server_hostname = host

        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')

        if happy_eyeballs_delay is not None and interleave is None:
            # If using happy eyeballs, default to interleave addresses by family
            interleave = 1

        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')

            infos = await self._ensure_resolved(
                (host, port), family=family,
                type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
            if not infos:
                raise OSError('getaddrinfo() returned empty list')

            if local_addr is not None:
                laddr_infos = await self._ensure_resolved(
                    local_addr, family=family,
                    type=socket.SOCK_STREAM, proto=proto,
                    flags=flags, loop=self)
                if not laddr_infos:
                    raise OSError('getaddrinfo() returned empty list')
            else:
                laddr_infos = None

            if interleave:
                infos = _interleave_addrinfos(infos, interleave)

            exceptions = []
            if happy_eyeballs_delay is None:
                # not using happy eyeballs
                for addrinfo in infos:
                    try:
                        sock = await self._connect_sock(
                            exceptions, addrinfo, laddr_infos)
                        break
                    except OSError:
                        continue
            else:  # using happy eyeballs
                sock, _, _ = await staggered.staggered_race(
                    (functools.partial(self._connect_sock,
                                       exceptions, addrinfo, laddr_infos)
                     for addrinfo in infos),
                    happy_eyeballs_delay, loop=self)

            if sock is None:
                exceptions = [exc for sub in exceptions for exc in sub]
                if len(exceptions) == 1:
                    raise exceptions[0]
                else:
                    # If they all have the same str(), raise one.
                    model = str(exceptions[0])
                    if all(str(exc) == model for exc in exceptions):
                        raise exceptions[0]
                    # Raise a combined exception so the user can see all
                    # the various error messages.
                    raise OSError('Multiple exceptions: {}'.format(
                        ', '.join(str(exc) for exc in exceptions)))

        else:
            if sock is None:
                raise ValueError(
                    'host and port was not specified and no sock specified')
            if sock.type != socket.SOCK_STREAM:
                # We allow AF_INET, AF_INET6, AF_UNIX as long as they
                # are SOCK_STREAM.
                # We support passing AF_UNIX sockets even though we have
                # a dedicated API for that: create_unix_connection.
                # Disallowing AF_UNIX in this method, breaks backwards
                # compatibility.
                raise ValueError(
                    f'A Stream Socket was expected, got {sock!r}')

        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r connected to %s:%r: (%r, %r)",
                         sock, host, port, transport, protocol)
        return transport, protocol
    async def _create_connection_transport(
            self, sock, protocol_factory, ssl,
            server_hostname, server_side=False,
            ssl_handshake_timeout=None):
        """Wrap a connected socket into a (transport, protocol) pair,
        optionally upgrading the connection to SSL/TLS."""
        sock.setblocking(False)

        protocol = protocol_factory()
        waiter = self.create_future()
        if ssl:
            # ssl=True means "use the default context".
            sslcontext = None if isinstance(ssl, bool) else ssl
            transport = self._make_ssl_transport(
                sock, protocol, sslcontext, waiter,
                server_side=server_side, server_hostname=server_hostname,
                ssl_handshake_timeout=ssl_handshake_timeout)
        else:
            transport = self._make_socket_transport(sock, protocol, waiter)

        try:
            # Wait until connection_made() (and the SSL handshake, if any)
            # has completed.
            await waiter
        except:
            transport.close()
            raise

        return transport, protocol
    async def sendfile(self, transport, file, offset=0, count=None,
                       *, fallback=True):
        """Send a file to *transport*.

        Return the total number of bytes sent.  Uses the transport's
        native sendfile support when available; otherwise, when
        *fallback* is true, copies the file through the transport's
        write path.
        """
        if transport.is_closing():
            raise RuntimeError("Transport is closing")
        mode = getattr(transport, '_sendfile_compatible',
                       constants._SendfileMode.UNSUPPORTED)
        if mode is constants._SendfileMode.UNSUPPORTED:
            raise RuntimeError(
                f"sendfile is not supported for transport {transport!r}")
        if mode is constants._SendfileMode.TRY_NATIVE:
            try:
                return await self._sendfile_native(transport, file,
                                                   offset, count)
            except exceptions.SendfileNotAvailableError as exc:
                if not fallback:
                    raise
        # mode is FALLBACK or native sendfile was unavailable.
        if not fallback:
            raise RuntimeError(
                f"fallback is disabled and native sendfile is not "
                f"supported for transport {transport!r}")

        return await self._sendfile_fallback(transport, file,
                                             offset, count)
    async def _sendfile_native(self, transp, file, offset, count):
        """Base implementation: native transport sendfile is unavailable;
        subclasses that can use the syscall override this."""
        raise exceptions.SendfileNotAvailableError(
            "sendfile syscall is not supported")
    async def _sendfile_fallback(self, transp, file, offset, count):
        """Portable sendfile(): temporarily swap in
        _SendfileFallbackProtocol and pump the file through
        transport.write() with flow control via drain()."""
        if offset:
            file.seek(offset)
        blocksize = min(count, 16384) if count else 16384
        buf = bytearray(blocksize)
        total_sent = 0
        proto = _SendfileFallbackProtocol(transp)
        try:
            while True:
                if count:
                    # Never read past the requested byte count.
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        return total_sent
                view = memoryview(buf)[:blocksize]
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    return total_sent  # EOF
                await proto.drain()
                transp.write(view[:read])
                total_sent += read
        finally:
            if total_sent > 0 and hasattr(file, 'seek'):
                file.seek(offset + total_sent)
            # Put the original protocol back on the transport.
            await proto.restore()
    async def start_tls(self, transport, protocol, sslcontext, *,
                        server_side=False,
                        server_hostname=None,
                        ssl_handshake_timeout=None):
        """Upgrade an existing transport to TLS.

        Return a new transport that *protocol* should start using
        immediately.
        """
        if ssl is None:
            raise RuntimeError('Python ssl module is not available')

        if not isinstance(sslcontext, ssl.SSLContext):
            raise TypeError(
                f'sslcontext is expected to be an instance of ssl.SSLContext, '
                f'got {sslcontext!r}')

        if not getattr(transport, '_start_tls_compatible', False):
            raise TypeError(
                f'transport {transport!r} is not supported by start_tls()')

        waiter = self.create_future()
        ssl_protocol = sslproto.SSLProtocol(
            self, protocol, sslcontext, waiter,
            server_side, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout,
            call_connection_made=False)

        # Pause early so that "ssl_protocol.data_received()" doesn't
        # have a chance to get called before "ssl_protocol.connection_made()".
        transport.pause_reading()

        transport.set_protocol(ssl_protocol)
        conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
        resume_cb = self.call_soon(transport.resume_reading)

        try:
            # Wait for the TLS handshake to complete.
            await waiter
        except BaseException:
            transport.close()
            conmade_cb.cancel()
            resume_cb.cancel()
            raise

        return ssl_protocol._app_transport
    async def create_datagram_endpoint(self, protocol_factory,
                                       local_addr=None, remote_addr=None, *,
                                       family=0, proto=0, flags=0,
                                       reuse_address=_unset, reuse_port=None,
                                       allow_broadcast=None, sock=None):
        """Create a datagram (UDP) connection.

        Returns a (transport, protocol) pair.  Either a pre-made *sock*
        may be passed (with no other socket-configuring arguments), or
        local/remote addresses are resolved and a socket is created per
        matching (family, proto) pair until one succeeds.
        """
        if sock is not None:
            if sock.type != socket.SOCK_DGRAM:
                raise ValueError(
                    f'A UDP Socket was expected, got {sock!r}')
            # A pre-made socket excludes every socket-configuring argument.
            if (local_addr or remote_addr or
                    family or proto or flags or
                    reuse_port or allow_broadcast):
                # show the problematic kwargs in exception msg
                opts = dict(local_addr=local_addr, remote_addr=remote_addr,
                            family=family, proto=proto, flags=flags,
                            reuse_address=reuse_address, reuse_port=reuse_port,
                            allow_broadcast=allow_broadcast)
                problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
                raise ValueError(
                    f'socket modifier keyword arguments can not be used '
                    f'when sock is specified. ({problems})')
            sock.setblocking(False)
            r_addr = None
        else:
            if not (local_addr or remote_addr):
                if family == 0:
                    raise ValueError('unexpected address family')
                addr_pairs_info = (((family, proto), (None, None)),)
            elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
                for addr in (local_addr, remote_addr):
                    if addr is not None and not isinstance(addr, str):
                        raise TypeError('string is expected')

                if local_addr and local_addr[0] not in (0, '\x00'):
                    # Remove a stale socket file left over from a previous
                    # run (abstract-namespace addresses are skipped).
                    try:
                        if stat.S_ISSOCK(os.stat(local_addr).st_mode):
                            os.remove(local_addr)
                    except FileNotFoundError:
                        pass
                    except OSError as err:
                        # Directory may have permissions only to create socket.
                        logger.error('Unable to check or remove stale UNIX '
                                     'socket %r: %r',
                                     local_addr, err)

                addr_pairs_info = (((family, proto),
                                    (local_addr, remote_addr)), )
            else:
                # join address by (family, protocol)
                addr_infos = {}  # Using order preserving dict
                for idx, addr in ((0, local_addr), (1, remote_addr)):
                    if addr is not None:
                        assert isinstance(addr, tuple) and len(addr) == 2, (
                            '2-tuple is expected')

                        infos = await self._ensure_resolved(
                            addr, family=family, type=socket.SOCK_DGRAM,
                            proto=proto, flags=flags, loop=self)
                        if not infos:
                            raise OSError('getaddrinfo() returned empty list')

                        for fam, _, pro, _, address in infos:
                            key = (fam, pro)
                            if key not in addr_infos:
                                addr_infos[key] = [None, None]
                            addr_infos[key][idx] = address

                # each addr has to have info for each (family, proto) pair
                addr_pairs_info = [
                    (key, addr_pair) for key, addr_pair in addr_infos.items()
                    if not ((local_addr and addr_pair[0] is None) or
                            (remote_addr and addr_pair[1] is None))]

                if not addr_pairs_info:
                    raise ValueError('can not get address information')

            exceptions = []

            # bpo-37228: reuse_address is insecure for UDP and rejected.
            if reuse_address is not _unset:
                if reuse_address:
                    raise ValueError("Passing `reuse_address=True` is no "
                                     "longer supported, as the usage of "
                                     "SO_REUSEPORT in UDP poses a significant "
                                     "security concern.")
                else:
                    warnings.warn("The *reuse_address* parameter has been "
                                  "deprecated as of 3.5.10 and is scheduled "
                                  "for removal in 3.11.", DeprecationWarning,
                                  stacklevel=2)

            # Try each candidate (family, proto) until a socket can be
            # created, bound, and (optionally) connected.
            for ((family, proto),
                 (local_address, remote_address)) in addr_pairs_info:
                sock = None
                r_addr = None
                try:
                    sock = socket.socket(
                        family=family, type=socket.SOCK_DGRAM, proto=proto)
                    if reuse_port:
                        _set_reuseport(sock)
                    if allow_broadcast:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                    sock.setblocking(False)

                    if local_addr:
                        sock.bind(local_address)
                    if remote_addr:
                        if not allow_broadcast:
                            await self.sock_connect(sock, remote_address)
                        r_addr = remote_address
                except OSError as exc:
                    if sock is not None:
                        sock.close()
                    exceptions.append(exc)
                except:
                    if sock is not None:
                        sock.close()
                    raise
                else:
                    break
            else:
                raise exceptions[0]

        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_datagram_transport(
            sock, protocol, r_addr, waiter)
        if self._debug:
            if local_addr:
                logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
                            "created: (%r, %r)",
                            local_addr, remote_addr, transport, protocol)
            else:
                logger.debug("Datagram endpoint remote_addr=%r created: "
                             "(%r, %r)",
                             remote_addr, transport, protocol)

        try:
            await waiter
        except:
            transport.close()
            raise

        return transport, protocol
    async def _ensure_resolved(self, address, *,
                               family=0, type=socket.SOCK_STREAM,
                               proto=0, flags=0, loop):
        """Return a getaddrinfo()-style list for *address*, skipping the
        resolver when the host is already a numeric IP literal."""
        host, port = address[:2]
        # Any extra tuple items (flowinfo, scopeid for IPv6) pass through.
        info = _ipaddr_info(host, port, family, type, proto, *address[2:])
        if info is not None:
            # "host" is already a resolved IP.
            return [info]
        else:
            return await loop.getaddrinfo(host, port, family=family, type=type,
                                          proto=proto, flags=flags)
    async def _create_server_getaddrinfo(self, host, port, family, flags):
        """Resolve one (host, port) for create_server(); raise OSError on
        an empty result."""
        infos = await self._ensure_resolved((host, port), family=family,
                                            type=socket.SOCK_STREAM,
                                            flags=flags, loop=self)
        if not infos:
            raise OSError(f'getaddrinfo({host!r}) returned empty list')
        return infos
    async def create_server(
            self, protocol_factory, host=None, port=None,
            *,
            family=socket.AF_UNSPEC,
            flags=socket.AI_PASSIVE,
            sock=None,
            backlog=100,
            ssl=None,
            reuse_address=None,
            reuse_port=None,
            ssl_handshake_timeout=None,
            start_serving=True):
        """Create a SOCK_STREAM listening server.

        Either host/port are given (every resolved address gets its own
        bound socket) or an already-bound `sock` is supplied -- never both.
        Returns a Server object; when start_serving is true the server
        begins accepting connections before this coroutine returns.
        """
        if isinstance(ssl, bool):
            raise TypeError('ssl argument must be an SSLContext or None')
        if ssl_handshake_timeout is not None and ssl is None:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')
            # Default: reuse the address on POSIX (except cygwin).
            if reuse_address is None:
                reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
            sockets = []
            # host='' means "all interfaces"; a single string or a
            # non-iterable becomes a one-element list, otherwise the
            # caller supplied an iterable of hosts.
            if host == '':
                hosts = [None]
            elif (isinstance(host, str) or
                  not isinstance(host, collections.abc.Iterable)):
                hosts = [host]
            else:
                hosts = host
            # Resolve all hosts concurrently, then dedupe the results.
            fs = [self._create_server_getaddrinfo(host, port, family=family,
                                                  flags=flags)
                  for host in hosts]
            infos = await tasks.gather(*fs)
            infos = set(itertools.chain.from_iterable(infos))
            completed = False
            try:
                for res in infos:
                    af, socktype, proto, canonname, sa = res
                    try:
                        sock = socket.socket(af, socktype, proto)
                    except socket.error:
                        # Assume it's a bad family/type/protocol combination.
                        if self._debug:
                            logger.warning('create_server() failed to create '
                                           'socket.socket(%r, %r, %r)',
                                           af, socktype, proto, exc_info=True)
                        continue
                    sockets.append(sock)
                    if reuse_address:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
                    if reuse_port:
                        _set_reuseport(sock)
                    # Disable IPv4/IPv6 dual stack support (enabled by
                    # default on Linux) which makes a single socket
                    # listen on both address families.
                    if (_HAS_IPv6 and
                            af == socket.AF_INET6 and
                            hasattr(socket, 'IPPROTO_IPV6')):
                        sock.setsockopt(socket.IPPROTO_IPV6,
                                        socket.IPV6_V6ONLY,
                                        True)
                    try:
                        sock.bind(sa)
                    except OSError as err:
                        raise OSError(err.errno, 'error while attempting '
                                      'to bind on address %r: %s'
                                      % (sa, err.strerror.lower())) from None
                completed = True
            finally:
                # On any failure, close every socket created so far.
                if not completed:
                    for sock in sockets:
                        sock.close()
        else:
            if sock is None:
                raise ValueError('Neither host/port nor sock were specified')
            if sock.type != socket.SOCK_STREAM:
                raise ValueError(f'A Stream Socket was expected, got {sock!r}')
            sockets = [sock]
        for sock in sockets:
            sock.setblocking(False)
        server = Server(self, sockets, protocol_factory,
                        ssl, backlog, ssl_handshake_timeout)
        if start_serving:
            server._start_serving()
            # Skip one loop iteration so that all 'loop.add_reader'
            # go through.
            await tasks.sleep(0)
        if self._debug:
            logger.info("%r is serving", server)
        return server
    async def connect_accepted_socket(
            self, protocol_factory, sock,
            *, ssl=None,
            ssl_handshake_timeout=None):
        """Wrap an already-accepted connection into (transport, protocol).

        With `ssl` set, the server-side TLS handshake is performed while
        building the transport.
        """
        if sock.type != socket.SOCK_STREAM:
            raise ValueError(f'A Stream Socket was expected, got {sock!r}')
        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, '', server_side=True,
            ssl_handshake_timeout=ssl_handshake_timeout)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
        return transport, protocol
    async def connect_read_pipe(self, protocol_factory, pipe):
        """Register a read pipe with the loop; returns (transport, protocol)."""
        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_read_pipe_transport(pipe, protocol, waiter)
        try:
            # The waiter completes once the transport finished its setup.
            await waiter
        except:
            transport.close()
            raise
        if self._debug:
            logger.debug('Read pipe %r connected: (%r, %r)',
                         pipe.fileno(), transport, protocol)
        return transport, protocol
    async def connect_write_pipe(self, protocol_factory, pipe):
        """Register a write pipe with the loop; returns (transport, protocol)."""
        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_write_pipe_transport(pipe, protocol, waiter)
        try:
            # The waiter completes once the transport finished its setup.
            await waiter
        except:
            transport.close()
            raise
        if self._debug:
            logger.debug('Write pipe %r connected: (%r, %r)',
                         pipe.fileno(), transport, protocol)
        return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
if stdin is not None:
info.append(f'stdin={_format_pipe(stdin)}')
if stdout is not None and stderr == subprocess.STDOUT:
info.append(f'stdout=stderr={_format_pipe(stdout)}')
else:
if stdout is not None:
info.append(f'stdout={_format_pipe(stdout)}')
if stderr is not None:
info.append(f'stderr={_format_pipe(stderr)}')
logger.debug(' '.join(info))
    async def subprocess_shell(self, protocol_factory, cmd, *,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               universal_newlines=False,
                               shell=True, bufsize=0,
                               encoding=None, errors=None, text=None,
                               **kwargs):
        """Run `cmd` through the shell; returns (transport, protocol).

        Only byte streams are supported: the text-mode parameters
        (universal_newlines, text, encoding, errors) and bufsize exist for
        signature compatibility with subprocess and must keep their
        defaults.
        """
        if not isinstance(cmd, (bytes, str)):
            raise ValueError("cmd must be a string")
        if universal_newlines:
            raise ValueError("universal_newlines must be False")
        if not shell:
            raise ValueError("shell must be True")
        if bufsize != 0:
            raise ValueError("bufsize must be 0")
        if text:
            raise ValueError("text must be False")
        if encoding is not None:
            raise ValueError("encoding must be None")
        if errors is not None:
            raise ValueError("errors must be None")
        protocol = protocol_factory()
        debug_log = None
        if self._debug:
            # don't log parameters: they may contain sensitive information
            debug_log = 'run shell command %r' % cmd
            self._log_subprocess(debug_log, stdin, stdout, stderr)
        transport = await self._make_subprocess_transport(
            protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
        if self._debug and debug_log is not None:
            logger.info('%s: %r', debug_log, transport)
        return transport, protocol
    async def subprocess_exec(self, protocol_factory, program, *args,
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, universal_newlines=False,
                              shell=False, bufsize=0,
                              encoding=None, errors=None, text=None,
                              **kwargs):
        """Execute `program` with `args` (no shell); returns (transport, protocol).

        Only byte streams are supported: the text-mode parameters
        (universal_newlines, text, encoding, errors) and bufsize exist for
        signature compatibility with subprocess and must keep their
        defaults.
        """
        if universal_newlines:
            raise ValueError("universal_newlines must be False")
        if shell:
            raise ValueError("shell must be False")
        if bufsize != 0:
            raise ValueError("bufsize must be 0")
        if text:
            raise ValueError("text must be False")
        if encoding is not None:
            raise ValueError("encoding must be None")
        if errors is not None:
            raise ValueError("errors must be None")
        popen_args = (program,) + args
        protocol = protocol_factory()
        debug_log = None
        if self._debug:
            # don't log parameters: they may contain sensitive information
            # (password) and may be too long
            debug_log = f'execute program {program!r}'
            self._log_subprocess(debug_log, stdin, stdout, stderr)
        transport = await self._make_subprocess_transport(
            protocol, popen_args, False, stdin, stdout, stderr,
            bufsize, **kwargs)
        if self._debug and debug_log is not None:
            logger.info('%s: %r', debug_log, transport)
        return transport, protocol
    def get_exception_handler(self):
        """Return the current exception handler, or None if the default is used."""
        return self._exception_handler
def set_exception_handler(self, handler):
if handler is not None and not callable(handler):
raise TypeError(f'A callable object or None is expected, '
f'got {handler!r}')
self._exception_handler = handler
    def default_exception_handler(self, context):
        """Default handler: log the context dict via the module logger.

        Each context key is rendered on its own line; `exception`, if
        present, is logged with its traceback through exc_info, and
        traceback lists are formatted readably.
        """
        message = context.get('message')
        if not message:
            message = 'Unhandled exception in event loop'
        exception = context.get('exception')
        if exception is not None:
            exc_info = (type(exception), exception, exception.__traceback__)
        else:
            exc_info = False
        # If the offending callback was scheduled with a known traceback,
        # attach it so the log shows where the handle was created.
        if ('source_traceback' not in context and
                self._current_handle is not None and
                self._current_handle._source_traceback):
            context['handle_traceback'] = \
                self._current_handle._source_traceback
        log_lines = [message]
        for key in sorted(context):
            if key in {'message', 'exception'}:
                continue
            value = context[key]
            if key == 'source_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Object created at (most recent call last):\n'
                value += tb.rstrip()
            elif key == 'handle_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Handle created at (most recent call last):\n'
                value += tb.rstrip()
            else:
                value = repr(value)
            log_lines.append(f'{key}: {value}')
        logger.error('\n'.join(log_lines), exc_info=exc_info)
    def call_exception_handler(self, context):
        """Dispatch `context` to the custom handler, or the default one.

        Errors raised by the custom handler are themselves routed through
        the default handler; SystemExit/KeyboardInterrupt always propagate.
        """
        if self._exception_handler is None:
            try:
                self.default_exception_handler(context)
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException:
                # Second protection layer for unexpected errors
                # in the default implementation, as well as for subclassed
                # event loops with overloaded "default_exception_handler".
                logger.error('Exception in default exception handler',
                             exc_info=True)
        else:
            try:
                self._exception_handler(self, context)
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException as exc:
                # Exception in the user set custom exception handler.
                try:
                    # Let's try default handler.
                    self.default_exception_handler({
                        'message': 'Unhandled error in exception handler',
                        'exception': exc,
                        'context': context,
                    })
                except (SystemExit, KeyboardInterrupt):
                    raise
                except BaseException:
                    # Guard against errors in the fallback as well.
                    logger.error('Exception in default exception handler '
                                 'while handling an unexpected error '
                                 'in custom exception handler',
                                 exc_info=True)
    def _add_callback(self, handle):
        """Append a (non-timer, non-cancelled) Handle to the ready queue."""
        assert isinstance(handle, events.Handle), 'A Handle is required here'
        if handle._cancelled:
            return
        assert not isinstance(handle, events.TimerHandle)
        self._ready.append(handle)
    def _add_callback_signalsafe(self, handle):
        """Like _add_callback(), but also wakes the selector (signal-safe)."""
        self._add_callback(handle)
        self._write_to_self()
    def _timer_handle_cancelled(self, handle):
        """Notification that a scheduled TimerHandle has been cancelled."""
        if handle._scheduled:
            self._timer_cancelled_count += 1
    def _run_once(self):
        """Run one full iteration of the event loop.

        Prunes cancelled timers, polls the selector with an appropriate
        timeout, promotes expired timers to the ready queue, then runs
        every callback that is currently ready (once each).
        """
        sched_count = len(self._scheduled)
        # If too large a fraction of the timer heap is cancelled handles,
        # rebuild the heap without them; otherwise just pop cancelled
        # handles off the head.
        if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
            self._timer_cancelled_count / sched_count >
                _MIN_CANCELLED_TIMER_HANDLES_FRACTION):
            new_scheduled = []
            for handle in self._scheduled:
                if handle._cancelled:
                    handle._scheduled = False
                else:
                    new_scheduled.append(handle)
            heapq.heapify(new_scheduled)
            self._scheduled = new_scheduled
            self._timer_cancelled_count = 0
        else:
            while self._scheduled and self._scheduled[0]._cancelled:
                self._timer_cancelled_count -= 1
                handle = heapq.heappop(self._scheduled)
                handle._scheduled = False
        # Poll immediately if work is ready or we are stopping; otherwise
        # sleep until the earliest scheduled timer (capped).
        timeout = None
        if self._ready or self._stopping:
            timeout = 0
        elif self._scheduled:
            when = self._scheduled[0]._when
            timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
        event_list = self._selector.select(timeout)
        self._process_events(event_list)
        # Move timers that have expired (within clock resolution) to _ready.
        end_time = self.time() + self._clock_resolution
        while self._scheduled:
            handle = self._scheduled[0]
            if handle._when >= end_time:
                break
            handle = heapq.heappop(self._scheduled)
            handle._scheduled = False
            self._ready.append(handle)
        # Run only the callbacks that are ready *now*; callbacks scheduled
        # while running will wait for the next iteration.
        ntodo = len(self._ready)
        for i in range(ntodo):
            handle = self._ready.popleft()
            if handle._cancelled:
                continue
            if self._debug:
                try:
                    self._current_handle = handle
                    t0 = self.time()
                    handle._run()
                    dt = self.time() - t0
                    if dt >= self.slow_callback_duration:
                        logger.warning('Executing %s took %.3f seconds',
                                       _format_handle(handle), dt)
                finally:
                    self._current_handle = None
            else:
                handle._run()
        handle = None  # Needed to break cycles when an exception occurs.
    def _set_coroutine_origin_tracking(self, enabled):
        """Toggle sys coroutine-origin tracking, saving/restoring the
        interpreter's previous depth setting."""
        if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
            return
        if enabled:
            self._coroutine_origin_tracking_saved_depth = (
                sys.get_coroutine_origin_tracking_depth())
            sys.set_coroutine_origin_tracking_depth(
                constants.DEBUG_STACK_DEPTH)
        else:
            sys.set_coroutine_origin_tracking_depth(
                self._coroutine_origin_tracking_saved_depth)
        self._coroutine_origin_tracking_enabled = enabled
    def get_debug(self):
        """Return True if the loop is in debug mode."""
        return self._debug
    def set_debug(self, enabled):
        """Set the debug flag; when the loop is running, synchronize
        coroutine origin tracking on the loop's own thread."""
        self._debug = enabled
        if self.is_running():
            self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
| true | true |
f726e9f4ca4961a8ea29f9196c6fa380bedb6b8e | 1,865 | py | Python | test/unittest/committee_test.py | Cocos-BCX/Python-Middleware | 9e8db14cdbf12131964d48d1189e0686b69369a8 | [
"MIT"
] | 101 | 2019-07-24T08:30:30.000Z | 2021-02-18T15:11:59.000Z | test/unittest/committee_test.py | marcomgsilva/Python-Middleware | 9e8db14cdbf12131964d48d1189e0686b69369a8 | [
"MIT"
] | 4 | 2019-08-01T10:06:29.000Z | 2019-11-29T08:32:34.000Z | test/unittest/committee_test.py | marcomgsilva/Python-Middleware | 9e8db14cdbf12131964d48d1189e0686b69369a8 | [
"MIT"
] | 7 | 2019-08-11T16:02:41.000Z | 2021-02-11T04:23:51.000Z |
import unittest
from config import Config
class CommitteeTestCase(unittest.TestCase):
    """Smoke tests for committee-member operations on a configured node.

    Each test builds a parameter dict, invokes the corresponding method on
    the Config-provided client, and prints the result (or the exception),
    matching the original manual-test behaviour.  The four tests differed
    only in the method called and the parameters passed, so the shared
    driver is factored into _invoke().
    """

    def _invoke(self, label, method_name, params):
        # Shared driver: look up the API method on the configured client,
        # call it with params, and print the result or the error repr.
        gph = Config().gph
        try:
            print(label + ":", getattr(gph, method_name)(**params))
        except Exception as e:
            print(repr(e))

    def testCreateCommittee(self):
        self._invoke("CreateCommittee", "committee_member_create", {
            "url": " ",
            "account": "1.2.25"
        })

    def testUpdateCommittee(self):
        self._invoke("UpdateCommittee", "committee_member_update", {
            "work_status": True,
            "new_url": "www.1234.com",
            "account": "1.2.25"
        })

    def testApproveCommittee(self):
        self._invoke("ApproveCommittee", "approve_committee", {
            "committees": ["testaccount7"],
            "vote_type": 0,
            "vote_amount": 10,
            "vote_asset": "1.3.0",
            "account": "1.2.16"
        })

    def testDisApproveCommittee(self):
        self._invoke("DisApproveCommittee", "disapprove_committee", {
            "committees": ["testaccount7"],
            "vote_type": 0,
            "vote_amount": 1,
            "vote_asset": "1.3.0",
            "account": "1.2.14"
        })
if __name__ == "__main__":
    # Run one named case directly (bypassing the unittest runner); the
    # other cases are kept commented out for manual use.
    # case1 = CommitteeTestCase("testCreateCommittee")
    # case1()
    # case2 = CommitteeTestCase("testUpdateCommittee")
    # case2()
    case3 = CommitteeTestCase("testApproveCommittee")
    case3()
    # case4 = CommitteeTestCase("testDisApproveCommittee")
    # case4()
| 27.835821 | 77 | 0.527614 |
import unittest
from config import Config
class CommitteeTestCase(unittest.TestCase):
    """Smoke tests for committee-member operations on a configured node.

    Each test builds a parameter dict, invokes the corresponding method on
    the Config-provided client, and prints the result (or the exception).
    The four tests differed only in the method called and the parameters
    passed, so the shared driver is factored into _invoke().
    """

    def _invoke(self, label, method_name, params):
        # Shared driver: look up the API method on the configured client,
        # call it with params, and print the result or the error repr.
        gph = Config().gph
        try:
            print(label + ":", getattr(gph, method_name)(**params))
        except Exception as e:
            print(repr(e))

    def testCreateCommittee(self):
        self._invoke("CreateCommittee", "committee_member_create", {
            "url": " ",
            "account": "1.2.25"
        })

    def testUpdateCommittee(self):
        self._invoke("UpdateCommittee", "committee_member_update", {
            "work_status": True,
            "new_url": "www.1234.com",
            "account": "1.2.25"
        })

    def testApproveCommittee(self):
        self._invoke("ApproveCommittee", "approve_committee", {
            "committees": ["testaccount7"],
            "vote_type": 0,
            "vote_amount": 10,
            "vote_asset": "1.3.0",
            "account": "1.2.16"
        })

    def testDisApproveCommittee(self):
        self._invoke("DisApproveCommittee", "disapprove_committee", {
            "committees": ["testaccount7"],
            "vote_type": 0,
            "vote_amount": 1,
            "vote_asset": "1.3.0",
            "account": "1.2.14"
        })
if __name__ == "__main__":
    # Instantiate one named test case and invoke it directly.
    case3 = CommitteeTestCase("testApproveCommittee")
    case3()
| true | true |
f726eaa4291a25a6faf61571bc3ad1b43a3541f2 | 4,011 | py | Python | PhysicsTools/Heppy/python/physicsutils/genutils.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | PhysicsTools/Heppy/python/physicsutils/genutils.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | PhysicsTools/Heppy/python/physicsutils/genutils.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null |
from PhysicsTools.Heppy.physicsobjects.PhysicsObjects import printOut
from PhysicsTools.Heppy.physicsobjects.PhysicsObjects import GenParticle
def findStatus1Leptons(particle):
    """Return the status-1 electrons and muons found in the decay tree
    of `particle`.

    Status-1 daughters with |pdgId| 11 or 13 are collected; daughters
    with any other status are descended into recursively.
    """
    leptons = []
    for i in range(particle.numberOfDaughters()):
        dau = particle.daughter(i)
        if dau.status() == 1:
            if abs(dau.pdgId()) in (11, 13):
                leptons.append(dau)
        else:
            # Bug fix: the recursive call used to pass `leptons` as a
            # second positional argument, but the function only accepts
            # one, so any recursion raised TypeError.  Extend the
            # accumulator with the recursive result instead.
            leptons.extend(findStatus1Leptons(dau))
    return leptons
def allDaughters(particle, daughters, rank ):
    '''Fills daughters with all the daughters of particle.
    Recursive function.

    Each appended particle is wrapped in GenParticle and tagged with a
    `rank` attribute giving its depth below the original particle
    (direct daughters get rank 1).  Returns the mutated daughters list.
    '''
    rank += 1
    for i in range( particle.numberOfDaughters() ):
        dau = GenParticle(particle.daughter(i))
        dau.rank = rank
        daughters.append( dau )
        daughters = allDaughters( dau, daughters, rank )
    return daughters
def bosonToX(particles, bosonType, xType):
    """Find the first status-3 boson with pdgId == bosonType among
    `particles` and return (its status-3 daughters with |pdgId| == xType,
    True), or ([], False) when no such boson exists.
    """
    # List comprehensions instead of filter(): under Python 3 filter()
    # returns a lazy iterator, which breaks len() and indexing below.
    bosons = [p for p in particles
              if p.status() == 3 and p.pdgId() == bosonType]
    if not bosons:
        return [], False
    boson = bosons[0]
    daus = []
    allDaughters(boson, daus, 0)
    xDaus = [d for d in daus
             if d.status() == 3 and abs(d.pdgId()) == xType]
    return xDaus, True
def isNotHadronicId(pdgId, includeSMLeptons=True):
    """Return True when pdgId does not denote a hadron.

    SM leptons and neutrinos (|pdgId| 11-16) count as non-hadronic only
    when includeSMLeptons is True.  For everything else the id is
    non-hadronic when its last three digits lie strictly between 10 and
    100 and are not the gluon code (21).
    """
    code = abs(pdgId)
    if code in (11, 12, 13, 14, 15, 16):
        return includeSMLeptons
    tail = code % 1000
    return 10 < tail < 100 and tail != 21
def isPromptLepton(lepton, beforeFSR, includeMotherless=True, includeTauDecays=False):
    """True when `lepton` (e, mu or tau) originates from a non-hadronic mother.

    Climbs same-id mother chains (FSR copies) unless beforeFSR is true,
    optionally follows tau decays, and treats motherless leptons per
    includeMotherless.
    """
    if abs(lepton.pdgId()) not in [11,13,15]:
        return False
    if lepton.numberOfMothers() == 0:
        return includeMotherless;
    mom = lepton.mother()
    if mom.pdgId() == lepton.pdgId():
        # Same-id mother: this is a post-FSR copy of the same lepton.
        if beforeFSR: return False
        return isPromptLepton(mom, beforeFSR, includeMotherless, includeTauDecays)
    elif abs(mom.pdgId()) == 15:
        # Lepton from a tau decay.
        if not includeTauDecays: return False
        return isPromptLepton(mom, beforeFSR, includeMotherless, includeTauDecays)
    else:
        return isNotHadronicId(mom.pdgId(), includeSMLeptons=False)
def isNotFromHadronicShower(l):
    """Heuristic: True when particle `l` does not come from a hadronic shower,
    judged from the pdgId/status of its mothers (recursing through lepton
    mothers).  NOTE(review): the exact status-code conventions are
    generator-dependent -- confirm against the producing generator.
    """
    for x in xrange(l.numberOfMothers()):
        mom = l.mother(x)
        if mom.status() > 2: return True
        id = abs(mom.pdgId())
        if id > 1000000: return True
        if id > 100: return False
        if id < 6: return False
        if id == 21: return False
        if id in [11,12,13,14,15,16]:
            if l.status() > 2: return True
            return isNotFromHadronicShower(mom)
        if id >= 22 and id <= 39: return True
    return True
def realGenDaughters(gp, excludeRadiation=True):
    """Get the daughters of a particle, going through radiative X -> X' + a
    decays, either including or excluding the radiation among the daughters
    e.g. for
          X -> X' + a, X' -> b c
       realGenDaughters(X, excludeRadiation=True)  = { b, c }
       realGenDaughters(X, excludeRadiation=False) = { a, b, c }"""
    ret = []
    # range() instead of xrange(): identical iteration, and also valid
    # under Python 3.
    for i in range(gp.numberOfDaughters()):
        dau = gp.daughter(i)
        if dau.pdgId() == gp.pdgId():
            # Bug fix: the recursive calls used to drop excludeRadiation,
            # silently reverting to True below the first radiative split.
            if excludeRadiation:
                return realGenDaughters(dau, excludeRadiation)
            else:
                ret += realGenDaughters(dau, excludeRadiation)
        else:
            ret.append(dau)
    return ret
def realGenMothers(gp):
    """Get the mothers of a particle X going through intermediate X -> X' chains.
       e.g. if Y -> X, X -> X' realGenMothers(X') = Y"""
    ret = []
    for i in xrange(gp.numberOfMothers()):
        mom = gp.mother(i)
        if mom.pdgId() == gp.pdgId():
            # Same-id mother is just an intermediate copy: climb past it.
            ret += realGenMothers(mom)
        else:
            ret.append(mom)
    return ret
def lastGenCopy(gp):
    """True when no daughter of gp carries the same pdgId, i.e. gp is the
    last copy of this particle in the generator history."""
    me = gp.pdgId();
    for i in xrange(gp.numberOfDaughters()):
        if gp.daughter(i).pdgId() == me:
            return False
    return True
| 33.705882 | 86 | 0.605834 |
from PhysicsTools.Heppy.physicsobjects.PhysicsObjects import printOut
from PhysicsTools.Heppy.physicsobjects.PhysicsObjects import GenParticle
def findStatus1Leptons(particle):
    """Return the status-1 electrons and muons found in the decay tree
    of `particle`.

    Status-1 daughters with |pdgId| 11 or 13 are collected; daughters
    with any other status are descended into recursively.
    """
    leptons = []
    for i in range(particle.numberOfDaughters()):
        dau = particle.daughter(i)
        if dau.status() == 1:
            if abs(dau.pdgId()) in (11, 13):
                leptons.append(dau)
        else:
            # Bug fix: the recursive call used to pass `leptons` as a
            # second positional argument to a one-argument function,
            # raising TypeError.  Extend the accumulator instead.
            leptons.extend(findStatus1Leptons(dau))
    return leptons
def allDaughters(particle, daughters, rank ):
    """Recursively append every descendant of `particle` to `daughters`,
    wrapped in GenParticle and tagged with a `rank` depth attribute
    (direct daughters get rank 1).  Returns the mutated list."""
    rank += 1
    for i in range( particle.numberOfDaughters() ):
        dau = GenParticle(particle.daughter(i))
        dau.rank = rank
        daughters.append( dau )
        daughters = allDaughters( dau, daughters, rank )
    return daughters
def bosonToX(particles, bosonType, xType):
    """Find the first status-3 boson with pdgId == bosonType among
    `particles` and return (its status-3 daughters with |pdgId| == xType,
    True), or ([], False) when no such boson exists.
    """
    # List comprehensions instead of filter(): under Python 3 filter()
    # returns a lazy iterator, which breaks len() and indexing below.
    bosons = [p for p in particles
              if p.status() == 3 and p.pdgId() == bosonType]
    if not bosons:
        return [], False
    boson = bosons[0]
    daus = []
    allDaughters(boson, daus, 0)
    xDaus = [d for d in daus
             if d.status() == 3 and abs(d.pdgId()) == xType]
    return xDaus, True
def isNotHadronicId(pdgId, includeSMLeptons=True):
    """Return True when pdgId does not denote a hadron.

    SM leptons and neutrinos (|pdgId| 11-16) count as non-hadronic only
    when includeSMLeptons is True; otherwise the last three digits must
    lie strictly between 10 and 100 and not be the gluon code (21).
    """
    code = abs(pdgId)
    if code in (11, 12, 13, 14, 15, 16):
        return includeSMLeptons
    tail = code % 1000
    return 10 < tail < 100 and tail != 21
def isPromptLepton(lepton, beforeFSR, includeMotherless=True, includeTauDecays=False):
    """True when `lepton` (e, mu or tau) originates from a non-hadronic mother.

    Climbs same-id mother chains (FSR copies) unless beforeFSR is true,
    optionally follows tau decays, and treats motherless leptons per
    includeMotherless.
    """
    if abs(lepton.pdgId()) not in [11,13,15]:
        return False
    if lepton.numberOfMothers() == 0:
        return includeMotherless;
    mom = lepton.mother()
    if mom.pdgId() == lepton.pdgId():
        # Same-id mother: this is a post-FSR copy of the same lepton.
        if beforeFSR: return False
        return isPromptLepton(mom, beforeFSR, includeMotherless, includeTauDecays)
    elif abs(mom.pdgId()) == 15:
        # Lepton from a tau decay.
        if not includeTauDecays: return False
        return isPromptLepton(mom, beforeFSR, includeMotherless, includeTauDecays)
    else:
        return isNotHadronicId(mom.pdgId(), includeSMLeptons=False)
def isNotFromHadronicShower(l):
    """Heuristic: True when particle `l` does not come from a hadronic shower,
    judged from the pdgId/status of its mothers (recursing through lepton
    mothers).  NOTE(review): the status-code conventions here are
    generator-dependent -- confirm against the producing generator.
    """
    for x in xrange(l.numberOfMothers()):
        mom = l.mother(x)
        if mom.status() > 2: return True
        id = abs(mom.pdgId())
        if id > 1000000: return True
        if id > 100: return False
        if id < 6: return False
        if id == 21: return False
        if id in [11,12,13,14,15,16]:
            if l.status() > 2: return True
            return isNotFromHadronicShower(mom)
        if id >= 22 and id <= 39: return True
    return True
def realGenDaughters(gp, excludeRadiation=True):
    """Get the daughters of a particle, going through radiative X -> X' + a
    decays, either including (excludeRadiation=False) or excluding the
    radiation among the daughters.
    """
    ret = []
    # range() instead of xrange(): identical iteration, also valid on py3.
    for i in range(gp.numberOfDaughters()):
        dau = gp.daughter(i)
        if dau.pdgId() == gp.pdgId():
            # Bug fix: the recursive calls used to drop excludeRadiation,
            # silently reverting to True below the first radiative split.
            if excludeRadiation:
                return realGenDaughters(dau, excludeRadiation)
            else:
                ret += realGenDaughters(dau, excludeRadiation)
        else:
            ret.append(dau)
    return ret
def realGenMothers(gp):
    """Get the mothers of a particle X, climbing past intermediate
    same-id copies (X -> X' chains)."""
    ret = []
    for i in xrange(gp.numberOfMothers()):
        mom = gp.mother(i)
        if mom.pdgId() == gp.pdgId():
            # Same-id mother is just an intermediate copy: climb past it.
            ret += realGenMothers(mom)
        else:
            ret.append(mom)
    return ret
def lastGenCopy(gp):
    """True when no daughter of gp carries the same pdgId, i.e. gp is the
    last copy of this particle in the generator history."""
    me = gp.pdgId();
    for i in xrange(gp.numberOfDaughters()):
        if gp.daughter(i).pdgId() == me:
            return False
    return True
| true | true |
f726ebf3b8c2775c6822150273cdcd7cd4ffc96d | 2,878 | py | Python | factor_tools.py | ericgreveson/projecteuler | 1844bf383fca871b82d88ef1eb3a9b1a0e363054 | [
"Apache-2.0"
] | null | null | null | factor_tools.py | ericgreveson/projecteuler | 1844bf383fca871b82d88ef1eb3a9b1a0e363054 | [
"Apache-2.0"
] | null | null | null | factor_tools.py | ericgreveson/projecteuler | 1844bf383fca871b82d88ef1eb3a9b1a0e363054 | [
"Apache-2.0"
] | null | null | null |
from fractions import Fraction
import math
def compute_factors(n):
    """
    Return a list of all factors (proper divisors) of a number n,
    including the factor 1 but not n itself.  The result is unsorted.
    """
    factors = [1]
    for i in range(2, int(math.sqrt(n)) + 1):
        if n % i == 0:
            factors.append(i)
            # Bug fix: for perfect squares i == n // i and the square
            # root used to be appended twice; only add a distinct cofactor.
            if i != n // i:
                factors.append(n // i)
    return factors
def is_prime(n, prime_cache=None, prime_cache_max=None):
    """
    Return true if n is prime (n>1)
    If prime_cache is given, it should be a set of consecutive primes from 2 to prime_cache_max
    (and prime_cache_max must also be given).
    Then if n <= prime_cache_max, this test will use set lookup rather than factorization
    """
    # Optimizations to quickly reject known non-primes
    if n in [2, 3, 5, 7]:
        return True
    if (n % 10) not in [1, 3, 7, 9] or n == 1:
        return False
    if prime_cache and n <= prime_cache_max:
        return n in prime_cache
    # Fall back to trial division: a prime's only proper divisor is 1.
    return len(compute_factors(n)) == 1
def next_prime(previous):
    """
    Get the next prime after previous
    """
    candidate = previous + 1
    while not is_prime(candidate):
        candidate += 1
    return candidate
def prime_factors(n, primes=None):
    """
    Compute all prime factors of a number n, with multiplicity
    (e.g. 12 -> [2, 2, 3]).
    primes: if supplied, must contain all primes up to and including sqrt(n)
    """
    if not primes:
        # +1 so the bound is inclusive: get_primes() excludes its limit,
        # which used to miss sqrt(n) itself for perfect squares.
        primes = get_primes(int(math.sqrt(n)) + 1)
    factors = []
    remainder = n
    for prime in primes:
        # Divide by the current prime as many times as we can.
        # (Bug fix: this loop used to reference an undefined name
        # `current_prime`, raising NameError on first use.)
        while remainder % prime == 0:
            factors.append(prime)
            remainder //= prime
        # We can bail out once we've finished factorizing
        if remainder == 1:
            break
    # Bug fix: whatever survives division by all primes <= sqrt(n) is
    # itself a prime factor (e.g. 10 with primes [2, 3] leaves 5).
    if remainder > 1:
        factors.append(remainder)
    return factors
def get_primes(up_to):
    """
    Get all primes up to (but not including) up_to
    """
    primes = [2]
    # Grow until the last prime reaches the limit; it is then >= up_to
    # and dropped by the final slice.
    while primes[-1] < up_to:
        primes.append(next_prime(primes[-1]))
    return primes[:-1]
def totient(n, primes):
    """
    Euler's totient of n, as n * prod(1 - 1/p) over the prime divisors p.
    primes must contain the consecutive primes from 2 up to at least n,
    in increasing order.
    """
    acc = n
    for p in primes:
        if p > n:
            break
        if n % p:
            continue
        acc *= 1 - Fraction(1, p)
    return acc
def get_coprimes(n, primes):
    """
    Return the integers in [1, n) that are coprime to n.
    primes: list of prime numbers up to at least sqrt(n)
    """
    if n <= 0:
        return []
    factors = set(prime_factors(n, primes))
    # Sieve: cross off every multiple of each prime factor of n.
    is_coprime = [True] * n
    is_coprime[0] = False
    for factor in factors:
        for multiple in range(factor, n, factor):
            is_coprime[multiple] = False
    # Bug fix: return the numbers themselves; the old code returned the
    # surviving True flags (a list of booleans).  1 is coprime to every
    # n and is kept, matching the totient count.
    return [i for i, flag in enumerate(is_coprime) if flag]
| 25.927928 | 95 | 0.59729 |
from fractions import Fraction
import math
def compute_factors(n):
    """Return the unsorted proper divisors of n, including 1 but not n."""
    factors = [1]
    for i in range(2, int(math.sqrt(n)) + 1):
        if n % i == 0:
            factors.append(i)
            # Bug fix: for perfect squares i == n // i and the square
            # root used to be appended twice; only add a distinct cofactor.
            if i != n // i:
                factors.append(n // i)
    return factors
def is_prime(n, prime_cache=None, prime_cache_max=None):
    """Return True when n (> 1) is prime; uses prime_cache as a set lookup
    when given (with prime_cache_max), else trial division."""
    # Quick accepts/rejects before doing any real work.
    if n in [2, 3, 5, 7]:
        return True
    if (n % 10) not in [1, 3, 7, 9] or n == 1:
        return False
    if prime_cache and n <= prime_cache_max:
        return n in prime_cache
    # A prime's only proper divisor is 1.
    return len(compute_factors(n)) == 1
def next_prime(previous):
    """Return the smallest prime strictly greater than `previous`."""
    i = previous + 1
    while True:
        if is_prime(i):
            return i
        i += 1
def prime_factors(n, primes=None):
    """Compute the prime factors of n with multiplicity (12 -> [2, 2, 3]).

    primes, if supplied, must contain all primes up to and including sqrt(n).
    """
    if not primes:
        # +1 so the bound is inclusive (get_primes excludes its limit).
        primes = get_primes(int(math.sqrt(n)) + 1)
    factors = []
    remainder = n
    for prime in primes:
        # Bug fix: this loop used to reference an undefined name
        # `current_prime`, raising NameError on first use.
        while remainder % prime == 0:
            factors.append(prime)
            remainder //= prime
        if remainder == 1:
            break
    # Bug fix: a surviving remainder is a prime factor larger than sqrt(n).
    if remainder > 1:
        factors.append(remainder)
    return factors
def get_primes(up_to):
    """Return all primes up to (but not including) up_to."""
    primes = [2]
    # Grow until the last prime reaches the limit; the final slice drops it.
    while primes[-1] < up_to:
        primes.append(next_prime(primes[-1]))
    return primes[:-1]
def totient(n, primes):
    """Euler's totient of n, as n * prod(1 - 1/p) over prime divisors p.
    `primes` must hold the consecutive primes from 2 up to at least n."""
    product = n
    for p in primes:
        if p > n:
            break
        if n % p == 0:
            product *= (1 - Fraction(1, p))
    return product
def get_coprimes(n, primes):
    """Return the integers in [1, n) that are coprime to n.

    primes: list of prime numbers up to at least sqrt(n)
    """
    if n <= 0:
        return []
    factors = set(prime_factors(n, primes))
    # Now sieve out the factors
    is_coprime = [True] * n
    is_coprime[0] = False
    for factor in factors:
        for multiple in range(factor, n, factor):
            is_coprime[multiple] = False
    # Bug fix: return the numbers themselves; the old code returned the
    # surviving True flags (a list of booleans).  1 is kept as coprime.
    return [i for i, flag in enumerate(is_coprime) if flag]
| true | true |
f726ebfcc0be524ce8e65eb0ea66ac8411693e2e | 1,175 | py | Python | course_grader/dao/__init__.py | uw-it-aca/gradepage | 7059d715cc112ad0ecb0e5012f716e525ee7b3bc | [
"Apache-2.0"
] | 1 | 2017-01-29T09:52:06.000Z | 2017-01-29T09:52:06.000Z | course_grader/dao/__init__.py | uw-it-aca/gradepage | 7059d715cc112ad0ecb0e5012f716e525ee7b3bc | [
"Apache-2.0"
] | 287 | 2017-03-09T00:17:20.000Z | 2022-01-08T00:36:34.000Z | course_grader/dao/__init__.py | uw-it-aca/gradepage | 7059d715cc112ad0ecb0e5012f716e525ee7b3bc | [
"Apache-2.0"
] | null | null | null |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.conf import settings
from django.utils.timezone import (
get_default_timezone, localtime, is_naive, make_aware)
from datetime import datetime
from uw_sws import SWS_DAO, sws_now
from abc import ABC, abstractmethod
def __update_get(self, url, response):
    """No-op stand-in for SWS_DAO._update_get; see the patch below."""
    pass
# Replace the SWS _update_get method to prevent tampering with mocked resources
SWS_DAO._update_get = __update_get
def current_datetime():
    """Return "now" for the app: the CURRENT_DATETIME_OVERRIDE setting
    (parsed as "%Y-%m-%d %H:%M:%S") when present, else sws_now()."""
    override_dt = getattr(settings, "CURRENT_DATETIME_OVERRIDE", None)
    if override_dt is not None:
        return datetime.strptime(override_dt, "%Y-%m-%d %H:%M:%S")
    else:
        return sws_now()
def display_datetime(dt):
    """Format dt for display ("%B %d at %l:%M %p %Z") in the default
    timezone; naive datetimes are made aware first."""
    if is_naive(dt):
        dt = make_aware(dt, get_default_timezone())
    else:
        dt = localtime(dt)
    return dt.strftime("%B %d at %l:%M %p %Z")
class GradeImportSource(ABC):
    """Abstract base for sources that supply grades for a section."""

    # Lower-cased tokens this source treats as boolean true.
    true_values = ["1", "y", "yes", "true"]

    @abstractmethod
    def grades_for_section(self, section, instructor, **kwargs):
        """Return the grades for the given section (subclass-specific)."""
        pass

    def is_true(self, val):
        """True when val is a non-None string whose lower-cased form is a
        recognized truthy token."""
        if val is None:
            return False
        return val.lower() in self.true_values
| 25.543478 | 79 | 0.700426 |
from django.conf import settings
from django.utils.timezone import (
get_default_timezone, localtime, is_naive, make_aware)
from datetime import datetime
from uw_sws import SWS_DAO, sws_now
from abc import ABC, abstractmethod
def __update_get(self, url, response):
    """No-op stand-in for SWS_DAO._update_get (patched in below)."""
    pass
# Disable SWS response post-processing by replacing the method.
SWS_DAO._update_get = __update_get
def current_datetime():
    """Return "now": the CURRENT_DATETIME_OVERRIDE setting (parsed as
    "%Y-%m-%d %H:%M:%S") when present, else sws_now()."""
    override_dt = getattr(settings, "CURRENT_DATETIME_OVERRIDE", None)
    if override_dt is not None:
        return datetime.strptime(override_dt, "%Y-%m-%d %H:%M:%S")
    else:
        return sws_now()
def display_datetime(dt):
    """Format dt for display ("%B %d at %l:%M %p %Z") in the default
    timezone; naive datetimes are made aware first."""
    if is_naive(dt):
        dt = make_aware(dt, get_default_timezone())
    else:
        dt = localtime(dt)
    return dt.strftime("%B %d at %l:%M %p %Z")
class GradeImportSource(ABC):
    """Abstract base for sources that supply grades for a section."""

    # Lower-cased tokens this source treats as boolean true.
    true_values = ["1", "y", "yes", "true"]

    @abstractmethod
    def grades_for_section(self, section, instructor, **kwargs):
        """Return the grades for the given section (subclass-specific)."""
        pass

    def is_true(self, val):
        """True when val is a non-None string whose lower-cased form is a
        recognized truthy token."""
        if val is None:
            return False
        return val.lower() in self.true_values
| true | true |
f726ec3d0e2d020ea619b787e5eec91931023911 | 5,455 | py | Python | bin/make_changelog.py | nyuszika7h/rclone | 7bf056316fe82aa9566f6e482da5cd9b184ac3f7 | [
"MIT"
] | 3 | 2018-08-25T01:14:39.000Z | 2022-03-22T00:36:27.000Z | bin/make_changelog.py | nyuszika7h/rclone | 7bf056316fe82aa9566f6e482da5cd9b184ac3f7 | [
"MIT"
] | 1 | 2020-12-01T17:00:00.000Z | 2020-12-01T17:00:00.000Z | bin/make_changelog.py | nyuszika7h/rclone | 7bf056316fe82aa9566f6e482da5cd9b184ac3f7 | [
"MIT"
] | 2 | 2021-01-16T07:35:41.000Z | 2021-02-11T04:23:51.000Z |
#!/usr/bin/python3
"""
Generate a markdown changelog for the rclone project
"""
import os
import sys
import re
import datetime
import subprocess
from collections import defaultdict
# Commit subjects matching any of these are dropped from the changelog.
IGNORE_RES = [
    r"^Add .* to contributors$",
    r"^Start v\d+\.\d+(\.\d+)?-DEV development$",
    r"^Version v\d+\.\d+(\.\d+)?$",
]
IGNORE_RE = re.compile("(?:" + "|".join(IGNORE_RES) + ")")
# Leading "category[, category...]: message" prefix of a commit subject.
CATEGORY = re.compile(r"(^[\w/ ]+(?:, *[\w/ ]+)*):\s*(.*)$")
# Backend names discovered from the source tree (run from the repo root).
backends = [ x for x in os.listdir("backend") if x != "all"]
# Alternative names used in commit messages, mapped to canonical backends.
backend_aliases = {
    "amazon cloud drive" : "amazonclouddrive",
    "acd" : "amazonclouddrive",
    "google cloud storage" : "googlecloudstorage",
    "gcs" : "googlecloudstorage",
    "azblob" : "azureblob",
    "mountlib": "mount",
    "cmount": "mount",
    "mount/cmount": "mount",
}
# Human-readable titles for backends whose str.title() form is wrong.
backend_titles = {
    "amazonclouddrive": "Amazon Cloud Drive",
    "googlecloudstorage": "Google Cloud Storage",
    "azureblob": "Azure Blob",
    "ftp": "FTP",
    "sftp": "SFTP",
    "http": "HTTP",
    "webdav": "WebDAV",
}
# Strips trailing "fixes #123" / "- #123" issue references from subjects.
STRIP_FIX_RE = re.compile(r"(\s+-)?\s+((fixes|addresses)\s+)?#\d+", flags=re.I)
# Strips a leading backend/ or fs/ path component from a category name.
STRIP_PATH_RE = re.compile(r"^(backend|fs)/")
# Heuristic: a message containing "fix"/"fixes" is classified as a bug fix.
IS_FIX_RE = re.compile(r"\b(fix|fixes)\b", flags=re.I)
def make_out(data, indent=""):
    """Return (out, out_lines): out(category, title=None) renders the
    entries of data[category] as markdown bullets into out_lines.

    Emitting a category removes it from `data`, so whatever remains
    afterwards can be handled separately.  With a non-empty indent, a
    single-entry category is collapsed onto one line.
    """
    out_lines = []
    def out(category, title=None):
        if title is None:  # idiomatic identity test (was: title == None)
            title = category
        lines = data.get(category)
        if not lines:
            # Unknown or empty category: emit nothing.
            return
        del data[category]
        if indent and len(lines) == 1:
            # Compact single-entry form: "* Title: message".
            out_lines.append(indent + "* " + title + ": " + lines[0])
            return
        out_lines.append(indent + "* " + title)
        for line in lines:
            out_lines.append(indent + "  * " + line)
    return out, out_lines
def process_log(log):
    """Process the incoming log into a category dict of lists.

    `log` is git output, one commit per line formatted as
    "hash|author|timestamp|subject".  Lines are processed oldest-first,
    ignorable subjects are skipped, issue references stripped, the
    author appended, and the message filed under each (aliased,
    path-stripped) category exactly once.
    """
    by_category = defaultdict(list)
    # reversed(): git log is newest-first; the changelog wants oldest-first.
    for log_line in reversed(log.split("\n")):
        log_line = log_line.strip()
        hash, author, timestamp, message = log_line.split("|", 3)
        message = message.strip()
        if IGNORE_RE.search(message):
            continue
        match = CATEGORY.search(message)
        categories = "UNKNOWN"
        if match:
            categories = match.group(1).lower()
            message = match.group(2)
        message = STRIP_FIX_RE.sub("", message)
        message = message +" ("+author+")"
        # Capitalize the first character of the message.
        message = message[0].upper()+message[1:]
        # A commit may list several categories; file under each only once.
        seen = set()
        for category in categories.split(","):
            category = category.strip()
            category = STRIP_PATH_RE.sub("", category)
            category = backend_aliases.get(category, category)
            if category in seen:
                continue
            by_category[category].append(message)
            seen.add(category)
        #print category, hash, author, timestamp, message
    return by_category
def main():
    """Rewrite docs/content/changelog.md to stdout with a new release section.

    Usage: make_changelog vX.XX vX.XY -- collects the git log between the
    two tags, categorises the commits, and emits the old changelog with a
    new section for ``next_version`` inserted after the "# Changelog"
    heading.
    """
    if len(sys.argv) != 3:
        print("Syntax: %s vX.XX vX.XY" % sys.argv[0], file=sys.stderr)
        sys.exit(1)
    version, next_version = sys.argv[1], sys.argv[2]
    # One line per commit: hash|author|ISO-8601 date|subject
    log = subprocess.check_output(["git", "log", '''--pretty=format:%H|%an|%aI|%s'''] + [version+".."+next_version])
    log = log.decode("utf-8")
    by_category = process_log(log)
    # Output backends first so remaining in by_category are core items
    out, backend_lines = make_out(by_category)
    out("mount", title="Mount")
    out("vfs", title="VFS")
    out("local", title="Local")
    out("cache", title="Cache")
    out("crypt", title="Crypt")
    # Remaining categories that match a backend directory, alphabetically.
    backend_names = sorted(x for x in list(by_category.keys()) if x in backends)
    for backend_name in backend_names:
        if backend_name in backend_titles:
            backend_title = backend_titles[backend_name]
        else:
            backend_title = backend_name.title()
        out(backend_name, title=backend_title)
    # Split remaining in by_category into new features and fixes
    new_features = defaultdict(list)
    bugfixes = defaultdict(list)
    for name, messages in by_category.items():
        for message in messages:
            if IS_FIX_RE.search(message):
                bugfixes[name].append(message)
            else:
                new_features[name].append(message)
    # Output new features
    out, new_features_lines = make_out(new_features, indent="  ")
    for name in sorted(new_features.keys()):
        out(name)
    # Output bugfixes
    out, bugfix_lines = make_out(bugfixes, indent="  ")
    for name in sorted(bugfixes.keys()):
        out(name)
    # Read old changelog and split it at the "# Changelog" heading
    with open("docs/content/changelog.md") as fd:
        old_changelog = fd.read()
    heading = "# Changelog"
    i = old_changelog.find(heading)
    if i < 0:
        raise AssertionError("Couldn't find heading in old changelog")
    i += len(heading)
    old_head, old_tail = old_changelog[:i], old_changelog[i:]
    # Update the build date
    old_head = re.sub(r"\d\d\d\d-\d\d-\d\d", str(datetime.date.today()), old_head)
    # Output combined changelog with new part
    sys.stdout.write(old_head)
    sys.stdout.write("""
## %s - %s
* New backends
* New commands
* New Features
%s
* Bug Fixes
%s
%s""" % (next_version, datetime.date.today(), "\n".join(new_features_lines), "\n".join(bugfix_lines), "\n".join(backend_lines)))
    sys.stdout.write(old_tail)
if __name__ == "__main__":
main()
| 31.171429 | 128 | 0.6033 |
import os
import sys
import re
import datetime
import subprocess
from collections import defaultdict
IGNORE_RES = [
r"^Add .* to contributors$",
r"^Start v\d+\.\d+(\.\d+)?-DEV development$",
r"^Version v\d+\.\d+(\.\d+)?$",
]
IGNORE_RE = re.compile("(?:" + "|".join(IGNORE_RES) + ")")
CATEGORY = re.compile(r"(^[\w/ ]+(?:, *[\w/ ]+)*):\s*(.*)$")
backends = [ x for x in os.listdir("backend") if x != "all"]
backend_aliases = {
"amazon cloud drive" : "amazonclouddrive",
"acd" : "amazonclouddrive",
"google cloud storage" : "googlecloudstorage",
"gcs" : "googlecloudstorage",
"azblob" : "azureblob",
"mountlib": "mount",
"cmount": "mount",
"mount/cmount": "mount",
}
backend_titles = {
"amazonclouddrive": "Amazon Cloud Drive",
"googlecloudstorage": "Google Cloud Storage",
"azureblob": "Azure Blob",
"ftp": "FTP",
"sftp": "SFTP",
"http": "HTTP",
"webdav": "WebDAV",
}
STRIP_FIX_RE = re.compile(r"(\s+-)?\s+((fixes|addresses)\s+)?#\d+", flags=re.I)
STRIP_PATH_RE = re.compile(r"^(backend|fs)/")
IS_FIX_RE = re.compile(r"\b(fix|fixes)\b", flags=re.I)
def make_out(data, indent=""):
out_lines = []
def out(category, title=None):
if title == None:
title = category
lines = data.get(category)
if not lines:
return
del(data[category])
if indent != "" and len(lines) == 1:
out_lines.append(indent+"* " + title+": " + lines[0])
return
out_lines.append(indent+"* " + title)
for line in lines:
out_lines.append(indent+" * " + line)
return out, out_lines
def process_log(log):
by_category = defaultdict(list)
for log_line in reversed(log.split("\n")):
log_line = log_line.strip()
hash, author, timestamp, message = log_line.split("|", 3)
message = message.strip()
if IGNORE_RE.search(message):
continue
match = CATEGORY.search(message)
categories = "UNKNOWN"
if match:
categories = match.group(1).lower()
message = match.group(2)
message = STRIP_FIX_RE.sub("", message)
message = message +" ("+author+")"
message = message[0].upper()+message[1:]
seen = set()
for category in categories.split(","):
category = category.strip()
category = STRIP_PATH_RE.sub("", category)
category = backend_aliases.get(category, category)
if category in seen:
continue
by_category[category].append(message)
seen.add(category)
return by_category
def main():
if len(sys.argv) != 3:
print("Syntax: %s vX.XX vX.XY" % sys.argv[0], file=sys.stderr)
sys.exit(1)
version, next_version = sys.argv[1], sys.argv[2]
log = subprocess.check_output(["git", "log", '''--pretty=format:%H|%an|%aI|%s'''] + [version+".."+next_version])
log = log.decode("utf-8")
by_category = process_log(log)
out, backend_lines = make_out(by_category)
out("mount", title="Mount")
out("vfs", title="VFS")
out("local", title="Local")
out("cache", title="Cache")
out("crypt", title="Crypt")
backend_names = sorted(x for x in list(by_category.keys()) if x in backends)
for backend_name in backend_names:
if backend_name in backend_titles:
backend_title = backend_titles[backend_name]
else:
backend_title = backend_name.title()
out(backend_name, title=backend_title)
new_features = defaultdict(list)
bugfixes = defaultdict(list)
for name, messages in by_category.items():
for message in messages:
if IS_FIX_RE.search(message):
bugfixes[name].append(message)
else:
new_features[name].append(message)
out, new_features_lines = make_out(new_features, indent=" ")
for name in sorted(new_features.keys()):
out(name)
out, bugfix_lines = make_out(bugfixes, indent=" ")
for name in sorted(bugfixes.keys()):
out(name)
with open("docs/content/changelog.md") as fd:
old_changelog = fd.read()
heading = "# Changelog"
i = old_changelog.find(heading)
if i < 0:
raise AssertionError("Couldn't find heading in old changelog")
i += len(heading)
old_head, old_tail = old_changelog[:i], old_changelog[i:]
# Update the build date
old_head = re.sub(r"\d\d\d\d-\d\d-\d\d", str(datetime.date.today()), old_head)
# Output combined changelog with new part
sys.stdout.write(old_head)
sys.stdout.write("""
## %s - %s
* New backends
* New commands
* New Features
%s
* Bug Fixes
%s
%s""" % (next_version, datetime.date.today(), "\n".join(new_features_lines), "\n".join(bugfix_lines), "\n".join(backend_lines)))
sys.stdout.write(old_tail)
if __name__ == "__main__":
main()
| true | true |
f726ef153fc15bb0f73f2ddd0be42d2221822c43 | 12,328 | py | Python | dnnutil/training.py | catalys1/dnnutil | a55a73ae59c5ac0117f58d8d8136bdd32902141f | [
"MIT"
] | null | null | null | dnnutil/training.py | catalys1/dnnutil | a55a73ae59c5ac0117f58d8d8136bdd32902141f | [
"MIT"
] | 9 | 2018-07-31T02:53:23.000Z | 2019-03-28T16:57:45.000Z | dnnutil/training.py | catalys1/dnnutil | a55a73ae59c5ac0117f58d8d8136bdd32902141f | [
"MIT"
] | null | null | null | import torch
import numpy as np
import dnnutil.network as network
import time
__all__ = ['calculate_accuracy', 'Trainer', 'ClassifierTrainer', 'AutoencoderTrainer']
def calculate_accuracy(prediction, label, axis=1):
    '''calculate_accuracy(prediction, label)

    Fraction of correct predictions over a batch.

    Args:
        prediction (Tensor): Batch of class scores with shape
            [batch-size, nclasses, [d0, d1, ...]]; the class dimension is
            selected by ``axis``.
        label (LongTensor): Ground-truth class indexes, shaped like
            ``prediction`` with the class dimension removed.
        axis (int): Dimension of ``prediction`` holding the class scores.

    Returns:
        accuracy (float): Fraction of positions where the argmax of
        ``prediction`` equals ``label``, as a value between 0 and 1.
    '''
    predicted_classes = prediction.argmax(axis)
    hits = (predicted_classes == label).float()
    return hits.mean().item()
class Trainer(object):
    '''Trainer(net, optim, loss_fn, accuracy_metric=None)
    Base class for all network trainers. Network trainer classes provide
    methods to facilitate training and testing deep network models. The goal
    is to encapsulate the common functionality, to reduce the boilerplate
    code that needs to be repeated across projects.
    Args:
        net (torch.nn.Module): An instance of a network that inherits from
            torch.nn.Module.
        optim (torch.optim.Optimizer): An instance of an optimizer that
            inherits from torch.optim.Optimizer.
        loss_fn (callable): A callable that calculates and returns a loss
            value. The loss value should be a single-element Tensor.
        accuracy_metric (callable): A callable that calculates and returns
            an accuracy value. Usually this will be a floating point number
            in [0, 1].
    '''
    def __init__(self, net, optim, loss_fn, accuracy_metric=None):
        self.net = net
        self.loss_fn = loss_fn
        self.optim = optim
        if accuracy_metric is not None:
            self.measure_accuracy = accuracy_metric
        else:
            self.measure_accuracy = calculate_accuracy
        self.train_loss = 0.
        self.train_acc = 0.
        self.test_loss = 0.
        self.test_acc = 0.
    def _set_train_stats(self, stats):
        '''Record the (loss, accuracy) pair returned by a training epoch.
        '''
        self.train_loss = stats[0]
        self.train_acc = stats[1]
    def _set_test_stats(self, stats):
        '''Record the (loss, accuracy) pair returned by an evaluation epoch.
        '''
        self.test_loss = stats[0]
        self.test_acc = stats[1]
    def get_stats(self):
        '''Return the most recent statistics as a 4-tuple
        (train_loss, train_acc, test_loss, test_acc).
        '''
        return (self.train_loss, self.train_acc,
                self.test_loss, self.test_acc)
    def train(self, dataloader, epoch):
        '''Train the Trainer's network.
        Args:
            dataloader (torch.utils.data.DataLoader): An instance of a
                DataLoader, which will provide access to the training data.
            epoch (int): The current epoch.
        Returns:
            loss (float): The mean loss over the epoch.
            accuracy (float): The mean accuracy over the epoch (in [0, 1]).
        '''
        self.net.train()
        stats = self._run_epoch(dataloader, epoch)
        self._set_train_stats(stats)
        return stats
    def eval(self, dataloader, epoch):
        '''Evaluate the Trainer's network.
        Args:
            dataloader (torch.utils.data.DataLoader): An instance of a
                DataLoader, which will provide access to the testing data.
            epoch (int): The current epoch.
        Returns:
            loss (float): The mean loss over the epoch.
            accuracy (float): The mean accuracy over the epoch (in [0, 1]).
        '''
        self.net.eval()
        stats = self._run_epoch(dataloader, epoch)
        self._set_test_stats(stats)
        return stats
    def _run_epoch(self, dataloader, epoch):
        '''Perform a single epoch of either training or evaluation.
        Args:
            dataloader (torch.utils.data.DataLoader): An instance of a
                DataLoader, which will provide access to the testing data.
            epoch (int): The current epoch.
        Returns:
            loss (float): The mean loss over the epoch.
            accuracy (float): The mean accuracy over the epoch (in [0, 1]).
        '''
        N = len(dataloader.batch_sampler)
        # Dispatch on the network's mode set by train()/eval().
        msg = 'train' if self.net.training else 'test'
        func = self.train_batch if self.net.training else self.test_batch
        loss = []
        acc = []
        at = 0
        for i, batch in enumerate(dataloader):
            t = time.time()
            if self.net.training:
                # Global iteration count for batch-level lr scheduling.
                self.update_lr(epoch * N + i + 1)
            batch_loss, batch_acc = func(batch)
            t = time.time() - t
            if i == 0:
                at = t
            else:
                # Running mean of per-batch wall time.
                at = at * i / (i + 1) + t / (i + 1)
            loss.append(batch_loss)
            acc.append(batch_acc)
            print(f'\rEPOCH {epoch}: {msg} '
                  f'batch {i + 1:04d}/{N} '
                  f'lr[ {self.optim.param_groups[0]["lr"]:1.3e} ] '
                  f'[ {t:.3f} ({at:.3f}) secs ]'
                  f'{" "*10}',
                  end='', flush=True)
        loss = np.mean(loss)
        acc = np.mean(acc)
        return loss, acc
    def update_lr(self, i=None):
        '''Update the optimizer's learning rate. Used for batch-level
        learning rate scheduling. If using an epoch-level scheduler,
        define and use it in the epoch loop. If the iteration number is
        not provided (None) or the Trainer has no lr_schedule attribute,
        this function does nothing and returns.
        Args:
            i (int): iteration number (starts at 1 for the first batch).
        '''
        if i is None or not hasattr(self, 'lr_schedule'):
            return
        self.lr_schedule.step(i)
    def train_batch(self, batch):
        '''Train the Trainer's network on a single training batch.
        Subclasses must implement this.
        '''
        raise NotImplementedError()
    def test_batch(self, batch):
        '''Test the Trainer's network on a single testing batch.
        Subclasses must implement this.
        '''
        raise NotImplementedError()
class ClassifierTrainer(Trainer):
    '''ClassifierTrainer(net, optim, loss_fn, accuracy_metric=None)

    Trainer specialization for image classification. Each batch is an
    (images, labels) pair; the wrapped network maps images to class
    scores, which are scored by the trainer's loss function and
    accuracy metric.

    Args:
        net (torch.nn.Module): An instance of a network that inherits from
            torch.nn.Module.
        optim (torch.optim.Optimizer): An instance of an optimizer that
            inherits from torch.optim.Optimizer.
        loss_fn (callable): A callable that calculates and returns a loss
            value. The loss value should be a single-element Tensor.
        accuracy_metric (callable): A callable that calculates and returns
            an accuracy value, usually a float in [0, 1].
    '''
    def train_batch(self, batch):
        '''Run one optimization step on a single training batch.

        Args:
            batch (iterable): (images, labels) pair. Images is a 4-d
                Tensor of shape (BxCxHxW); labels matches it along the
                batch dimension.

        Returns:
            loss (float): The mean loss over the batch.
            accuracy (float): The mean accuracy over the batch (in [0, 1]).
        '''
        images, targets = network.tocuda(batch)
        self.optim.zero_grad()
        scores = self.net(images)
        batch_loss = self.loss_fn(scores, targets)
        batch_loss.backward()
        self.optim.step()
        with torch.no_grad():
            # Accuracy is a metric only -- no gradients needed.
            batch_acc = self.measure_accuracy(scores, targets)
        return batch_loss.item(), batch_acc

    @torch.no_grad()
    def test_batch(self, batch):
        '''Evaluate the network on a single batch without updating weights.

        Args:
            batch (iterable): (images, labels) pair (see ``train_batch``).

        Returns:
            loss (float): The mean loss over the batch.
            accuracy (float): The mean accuracy over the batch (in [0, 1]).
        '''
        images, targets = network.tocuda(batch)
        scores = self.net(images)
        batch_loss = self.loss_fn(scores, targets).item()
        batch_acc = self.measure_accuracy(scores, targets)
        return batch_loss, batch_acc
class AutoencoderTrainer(Trainer):
    '''AutoencoderTrainer(net, optim, loss_fn)

    Trainer for training an autoencoder network. Batches are plain image
    Tensors (no labels): the network's output is compared against its own
    input by the loss function, and no accuracy metric is tracked.

    Args:
        net (torch.nn.Module): An instance of a network that inherits from
            torch.nn.Module.
        optim (torch.optim.Optimizer): An instance of an optimizer that
            inherits from torch.optim.Optimizer.
        loss_fn (callable): A callable that calculates and returns a loss
            value. The loss value should be a single-element Tensor.
    '''
    def __init__(self, net, optim, loss_fn):
        super(AutoencoderTrainer, self).__init__(
            net, optim, loss_fn, None)
        # Reconstruction has no notion of accuracy, so drop the metric.
        delattr(self, 'measure_accuracy')

    def _set_train_stats(self, stats):
        '''Record the epoch's training loss.

        Overridden because this trainer's ``_run_epoch`` returns a scalar
        loss (there is no accuracy), while the base-class implementation
        expects a (loss, accuracy) tuple and would raise a TypeError when
        indexing the scalar.
        '''
        self.train_loss = stats

    def _set_test_stats(self, stats):
        '''Record the epoch's test loss (see ``_set_train_stats``).'''
        self.test_loss = stats

    def train_batch(self, batch):
        '''Train the Trainer's network on a single training batch.

        Args:
            batch (Tensor): A batch of images of shape (BxCxHxW). The
                images serve as both the input and the reconstruction
                target.

        Returns:
            loss (float): The mean loss over the batch.
        '''
        self.optim.zero_grad()
        imgs = network.tocuda(batch)
        predictions = self.net(imgs)
        loss = self.loss_fn(predictions, imgs)
        loss.backward()
        self.optim.step()
        loss = loss.item()
        return loss

    @torch.no_grad()
    def test_batch(self, batch):
        '''Evaluate the Trainer's network on a single testing batch.

        Args:
            batch (Tensor): A batch of images of shape (BxCxHxW).

        Returns:
            loss (float): The mean loss over the batch.
        '''
        imgs = network.tocuda(batch)
        predictions = self.net(imgs)
        loss = self.loss_fn(predictions, imgs).item()
        return loss

    def _run_epoch(self, dataloader, epoch):
        '''Perform a single epoch of either training or evaluation.

        Args:
            dataloader (torch.utils.data.DataLoader): An instance of a
                DataLoader, which will provide access to the data.
            epoch (int): The current epoch.

        Returns:
            loss (float): The mean loss over the epoch.
        '''
        N = int(np.ceil(len(dataloader.dataset) / dataloader.batch_size))
        msg = 'train' if self.net.training else 'test'
        func = self.train_batch if self.net.training else self.test_batch
        loss = []
        for i, batch in enumerate(dataloader):
            batch_loss = func(batch)
            loss.append(batch_loss)
            # i + 1 so the progress display counts 1..N like the base class.
            print(f'\rEPOCH {epoch}: {msg} batch {i + 1:04d}/{N}{" "*10}',
                  end='', flush=True)
        loss = np.mean(loss)
        return loss
| 36.473373 | 86 | 0.596042 | import torch
import numpy as np
import dnnutil.network as network
import time
__all__ = ['calculate_accuracy', 'Trainer', 'ClassifierTrainer', 'AutoencoderTrainer']
def calculate_accuracy(prediction, label, axis=1):
return torch.eq(prediction.argmax(axis), label).float().mean().item()
class Trainer(object):
def __init__(self, net, optim, loss_fn, accuracy_metric=None):
self.net = net
self.loss_fn = loss_fn
self.optim = optim
if accuracy_metric is not None:
self.measure_accuracy = accuracy_metric
else:
self.measure_accuracy = calculate_accuracy
self.train_loss = 0.
self.train_acc = 0.
self.test_loss = 0.
self.test_acc = 0.
def _set_train_stats(self, stats):
self.train_loss = stats[0]
self.train_acc = stats[1]
def _set_test_stats(self, stats):
self.test_loss = stats[0]
self.test_acc = stats[1]
def get_stats(self):
return (self.train_loss, self.train_acc,
self.test_loss, self.test_acc)
def train(self, dataloader, epoch):
self.net.train()
stats = self._run_epoch(dataloader, epoch)
self._set_train_stats(stats)
return stats
def eval(self, dataloader, epoch):
self.net.eval()
stats = self._run_epoch(dataloader, epoch)
self._set_test_stats(stats)
return stats
def _run_epoch(self, dataloader, epoch):
N = len(dataloader.batch_sampler)
msg = 'train' if self.net.training else 'test'
func = self.train_batch if self.net.training else self.test_batch
loss = []
acc = []
at = 0
for i, batch in enumerate(dataloader):
t = time.time()
if self.net.training:
self.update_lr(epoch * N + i + 1)
batch_loss, batch_acc = func(batch)
t = time.time() - t
if i == 0:
at = t
else:
at = at * i / (i + 1) + t / (i + 1)
loss.append(batch_loss)
acc.append(batch_acc)
print(f'\rEPOCH {epoch}: {msg} '
f'batch {i + 1:04d}/{N} '
f'lr[ {self.optim.param_groups[0]["lr"]:1.3e} ] '
f'[ {t:.3f} ({at:.3f}) secs ]'
f'{" "*10}',
end='', flush=True)
loss = np.mean(loss)
acc = np.mean(acc)
return loss, acc
def update_lr(self, i=None):
if i is None or not hasattr(self, 'lr_schedule'):
return
self.lr_schedule.step(i)
def train_batch(self, batch):
raise NotImplementedError()
def test_batch(self, batch):
raise NotImplementedError()
class ClassifierTrainer(Trainer):
def train_batch(self, batch):
self.optim.zero_grad()
imgs, labels = network.tocuda(batch)
predictions = self.net(imgs)
loss = self.loss_fn(predictions, labels)
loss.backward()
self.optim.step()
loss = loss.item()
with torch.no_grad():
accuracy = self.measure_accuracy(predictions, labels)
return loss, accuracy
@torch.no_grad()
def test_batch(self, batch):
imgs, labels = network.tocuda(batch)
predictions = self.net(imgs)
loss = self.loss_fn(predictions, labels).item()
accuracy = self.measure_accuracy(predictions, labels)
return loss, accuracy
class AutoencoderTrainer(Trainer):
def __init__(self, net, optim, loss_fn):
super(AutoencoderTrainer, self).__init__(
net, optim, loss_fn, None)
delattr(self, 'measure_accuracy')
def train_batch(self, batch):
self.optim.zero_grad()
imgs = network.tocuda(batch)
predictions = self.net(imgs)
loss = self.loss_fn(predictions, imgs)
loss.backward()
self.optim.step()
loss = loss.item()
return loss
@torch.no_grad()
def test_batch(self, batch):
imgs = network.tocuda(batch)
predictions = self.net(imgs)
loss = self.loss_fn(predictions, imgs).item()
return loss
def _run_epoch(self, dataloader, epoch):
N = int(np.ceil(len(dataloader.dataset) / dataloader.batch_size))
msg = 'train' if self.net.training else 'test'
func = self.train_batch if self.net.training else self.test_batch
loss = []
for i, batch in enumerate(dataloader):
batch_loss = func(batch)
loss.append(batch_loss)
print(f'\rEPOCH {epoch}: {msg} batch {i:04d}/{N}{" "*10}',
end='', flush=True)
loss = np.mean(loss)
return loss
| true | true |
f726efc91697481d09f75f4837fbbf66b5fd0535 | 4,736 | py | Python | build_nec_file.py | crumpstrr33/NEC_scripts | fcb88afc538c884dab141ac26529ed3adf53e81e | [
"MIT"
] | null | null | null | build_nec_file.py | crumpstrr33/NEC_scripts | fcb88afc538c884dab141ac26529ed3adf53e81e | [
"MIT"
] | null | null | null | build_nec_file.py | crumpstrr33/NEC_scripts | fcb88afc538c884dab141ac26529ed3adf53e81e | [
"MIT"
] | null | null | null | """
This script uses python to build a `.nec` file. This allows
for the use of variables and other arithmetic which is much
easier in python. For information on the cards specified by the
arguments, e.g. EX or RP, check out https://www.nec2.org/part_3/cards/
"""
from datetime import datetime as dt
from math import *
def build_nec_file(
    comments,
    wires,
    constants,
    frequency=(),
    excitations=(),
    rad_pattern=(),
    output="output",
    lims=(2, 5, 10, 20, 30, 40, 50, 60, 70, 80),
    sig_figs=2,
    verbose=0,
):
    """
    Creates a `.nec` file. The values can contain arithmetic in it. Anything
    that Python's `eval` can handle and any function in the `math` package,
    so trig functions, exponentials, etc.

    Warning: every parameter string is passed to `eval`, so only call this
    function with trusted input.

    Parameters:
        comments - The comments that are found on CM cards, added as a list
        wires - The wire data found on GW cards, a list of lists where the
            elements of the sublist are each parameter for the wire. Can use
            constants defined in the `constants` argument and basic arithmetic
            (or any function defined in Python's `math` package).
        constants - A dictionary of constants to be substituted into the nec
            file. Constant names may not be such that one is found in another.
            For example, you cannot have 'offset' and 'origin_offset' because
            'offset' can be found (via Python's `replace` method) in
            'origin_offset'.
        frequency (default ()) - Defines the FR card, the frequency range and
            step for calculations.
        excitations (default ()) - List for EX cards, cards that define
            excitations, e.g. voltage sources.
        rad_pattern (default ()) - The RP card which defines how to calculate
            the radiation pattern.
        output (default 'output') - The name of the output `.nec` file, the
            extension is automatically added.
        lims (default (2, 5, 10, 20, 30, 40, 50, 60, 70, 80)) - The character
            number that each column ends on. For example, for the default,
            we allocate 2 characters for the first argument (the card name),
            3 for the next column, 5 for the third, and 10 for the rest.
        sig_figs (default 2) - The number of significant figures used for the
            numbers written in scientific notation (i.e. how many digits after
            the decimal point).
        verbose (default 0) - If 0, will not print out anything. If 1, will
            print out just info on the number of wires, file location and time
            taken to create the file. If 2, will also print out the comments
            in the .nec file.
    """
    # scinot_ind tells this function at which column of a row to
    # start using scientific notation
    def _format_rows(rows, card, scinot_ind):
        for row in rows:
            row_str = card
            for ind, param in enumerate(row):
                # Replace constants with values
                for const_key, const_val in constants.items():
                    param = param.replace(const_key, str(const_val))
                # Add to line correctly formatted
                rlim = lims[ind + 1] - lims[ind]
                if ind > (scinot_ind - 1):
                    # Change to rounded scientific notation
                    val = f"{eval(param):.{sig_figs}e}"
                else:
                    # Otherwise just evaluate, e.g. tag number
                    val = str(eval(param))
                # Add to string and push the rightmost it can go
                row_str += f"{val.rjust(rlim):<{rlim}}"
            nec_file.append(row_str)

    dt_start = dt.now()
    nec_file = []
    # Add comments
    for comment in comments:
        nec_file.append(f"CM {comment}")
    # Comment end
    nec_file.append("CE")
    # Add wires
    _format_rows(rows=wires, card="GW", scinot_ind=2)
    # Wire end
    nec_file.append(f"GE{(lims[1] - lims[0] - 1)*' '}0")
    # Frequency
    if frequency:
        _format_rows(rows=[frequency], card="FR", scinot_ind=4)
    # Excitations
    if excitations:
        _format_rows(rows=excitations, card="EX", scinot_ind=4)
    # Radiation pattern
    if rad_pattern:
        _format_rows(rows=[rad_pattern], card="RP", scinot_ind=8)
    # File end
    nec_file.append("EN\n")
    # Write to new file
    with open(f"{output}.nec", "w") as f:
        f.write("\n".join(nec_file))
    dt_end = dt.now()
    if verbose:
        if verbose == 2:
            print("\nComments:")
            for comment in comments:
                print(" " * 8 + f"{comment}")
        print(
            f"Wrote {len(wires)} wires to {output}.nec in "
            + f"{(dt_end - dt_start).total_seconds() * 1000:.3f}ms."
        )
| 39.798319 | 81 | 0.618243 | from datetime import datetime as dt
from math import *
def build_nec_file(
comments,
wires,
constants,
frequency=[],
excitations=[],
rad_pattern=[],
output="output",
lims=[2, 5, 10, 20, 30, 40, 50, 60, 70, 80],
sig_figs=2,
verbose=0,
):
def _format_rows(rows, card, scinot_ind):
for row in rows:
row_str = card
for ind, param in enumerate(row):
for const_key, const_val in constants.items():
param = param.replace(const_key, str(const_val))
rlim = lims[ind + 1] - lims[ind]
if ind > (scinot_ind - 1):
val = f"{eval(param):.{sig_figs}e}"
else:
val = str(eval(param))
row_str += f"{val.rjust(rlim):<{rlim}}"
nec_file.append(row_str)
dt_start = dt.now()
nec_file = []
for comment in comments:
nec_file.append(f"CM {comment}")
nec_file.append("CE")
_format_rows(rows=wires, card="GW", scinot_ind=2)
nec_file.append(f"GE{(lims[1] - lims[0] - 1)*' '}0")
if frequency:
_format_rows(rows=[frequency], card="FR", scinot_ind=4)
if excitations:
_format_rows(rows=excitations, card="EX", scinot_ind=4)
if rad_pattern:
_format_rows(rows=[rad_pattern], card="RP", scinot_ind=8)
nec_file.append("EN\n")
with open(f"{output}.nec", "w") as f:
f.write("\n".join(nec_file))
dt_end = dt.now()
if verbose:
if verbose == 2:
print("\nComments:")
for comment in comments:
print(" " * 8 + f"{comment}")
print(
f"Wrote {len(wires)} wires to {output}.nec in "
+ f"{(dt_end - dt_start).total_seconds() * 1000:.3f}ms."
)
| true | true |
f726f0ecbf1474170ae42090ca93cfbcb7385ec8 | 1,735 | py | Python | flightServices/flightApp/views.py | saibottrenham/djangorest | 45efadabb19cf421a282b98f3480cf49789eaae1 | [
"MIT"
] | null | null | null | flightServices/flightApp/views.py | saibottrenham/djangorest | 45efadabb19cf421a282b98f3480cf49789eaae1 | [
"MIT"
] | null | null | null | flightServices/flightApp/views.py | saibottrenham/djangorest | 45efadabb19cf421a282b98f3480cf49789eaae1 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from flightApp.models import Flight, Passenger, Reservation
from flightApp.serializers import FlightSerializer, PassengerSerializer, ReservationSerializer
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
@api_view(['POST'])
def find_flights(request):
    """POST endpoint: list flights for a route on a given departure date.

    Expects 'departureCity', 'arrivalCity' and 'dateOfDeparture' in the
    request body and returns the serialized matching flights.
    """
    criteria = {
        'departureCity': request.data['departureCity'],
        'arrivalCity': request.data['arrivalCity'],
        'dateOfDeparture': request.data['dateOfDeparture'],
    }
    matches = Flight.objects.filter(**criteria)
    return Response(FlightSerializer(matches, many=True).data)
@api_view(['POST'])
def save_reservation(request):
    """POST endpoint: create a passenger and book them onto a flight.

    Looks up the flight by 'flightId', creates the Passenger from the
    posted fields, stores a Reservation linking the two, and returns the
    serialized reservation with HTTP 201.
    """
    payload = request.data
    # Resolve the flight before creating the passenger so an unknown
    # flight id leaves no orphan Passenger row.
    flight = Flight.objects.get(id=payload['flightId'])
    passenger = Passenger.objects.create(
        firstName=payload['firstName'],
        lastName=payload['lastName'],
        middleName=payload['middleName'],
        email=payload['email'],
        phone=payload['phone'],
    )
    reservation = Reservation.objects.create(flight=flight, passenger=passenger)
    return Response(status=status.HTTP_201_CREATED, data=ReservationSerializer(reservation).data)
class FlightViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Flight records; restricted to authenticated users."""
    queryset = Flight.objects.all()
    serializer_class = FlightSerializer
    permission_classes = (IsAuthenticated,)
class PassengerViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Passenger records.

    NOTE(review): unlike FlightViewSet there are no permission_classes
    here, so these endpoints are open to unauthenticated clients --
    confirm that is intended.
    """
    queryset = Passenger.objects.all()
    serializer_class = PassengerSerializer
class ReservationViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Reservation records.

    NOTE(review): no permission_classes set (unlike FlightViewSet) --
    confirm unauthenticated access is intended.
    """
    queryset = Reservation.objects.all()
    serializer_class = ReservationSerializer
| 34.019608 | 97 | 0.748703 | from django.shortcuts import render
from flightApp.models import Flight, Passenger, Reservation
from flightApp.serializers import FlightSerializer, PassengerSerializer, ReservationSerializer
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
@api_view(['POST'])
def find_flights(request):
flights = Flight.objects.filter(
departureCity=request.data['departureCity'],
arrivalCity=request.data['arrivalCity'],
dateOfDeparture=request.data['dateOfDeparture'],
)
serializer = FlightSerializer(flights, many=True)
return Response(serializer.data)
@api_view(['POST'])
def save_reservation(request):
reservation = Reservation.objects.create(
flight=Flight.objects.get(id=request.data['flightId']),
passenger=Passenger.objects.create(
firstName=request.data['firstName'],
lastName=request.data['lastName'],
middleName=request.data['middleName'],
email=request.data['email'],
phone=request.data['phone'],
),
)
return Response(status=status.HTTP_201_CREATED, data=ReservationSerializer(reservation).data)
class FlightViewSet(viewsets.ModelViewSet):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = (IsAuthenticated,)
class PassengerViewSet(viewsets.ModelViewSet):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class ReservationViewSet(viewsets.ModelViewSet):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
| true | true |
f726f0f2c8fef30d17ac352da7f4d08edb92adb8 | 15,583 | py | Python | nemo/collections/asr/models/classification_models.py | vinayphadnis/NeMo | 9dc7773c48e164b8a82051bb558a728c6eeb85ec | [
"Apache-2.0"
] | 2 | 2020-10-08T13:38:46.000Z | 2020-10-14T15:09:34.000Z | nemo/collections/asr/models/classification_models.py | vinayphadnis/NeMo | 9dc7773c48e164b8a82051bb558a728c6eeb85ec | [
"Apache-2.0"
] | null | null | null | nemo/collections/asr/models/classification_models.py | vinayphadnis/NeMo | 9dc7773c48e164b8a82051bb558a728c6eeb85ec | [
"Apache-2.0"
] | 1 | 2020-12-18T14:23:37.000Z | 2020-12-18T14:23:37.000Z | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from typing import Dict, List, Optional, Union
import torch
from omegaconf import DictConfig, ListConfig, OmegaConf
from pytorch_lightning import Trainer
from nemo.collections.asr.data.audio_to_text import AudioLabelDataset
from nemo.collections.asr.models.asr_model import ASRModel
from nemo.collections.asr.parts.features import WaveformFeaturizer
from nemo.collections.asr.parts.perturb import process_augmentations
from nemo.collections.common.losses import CrossEntropyLoss
from nemo.collections.common.metrics import TopKClassificationAccuracy, compute_topk_accuracy
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types import *
from nemo.utils import logging
__all__ = ['EncDecClassificationModel', 'MatchboxNet']
class EncDecClassificationModel(ASRModel):
"""Encoder decoder CTC-based models."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
super().__init__(cfg=cfg, trainer=trainer)
self._update_decoder_config(self.cfg.decoder)
self.preprocessor = EncDecClassificationModel.from_config_dict(self._cfg.preprocessor)
self.encoder = EncDecClassificationModel.from_config_dict(self._cfg.encoder)
self.decoder = EncDecClassificationModel.from_config_dict(self._cfg.decoder)
self.loss = CrossEntropyLoss()
if hasattr(self._cfg, 'spec_augment') and self._cfg.spec_augment is not None:
self.spec_augmentation = EncDecClassificationModel.from_config_dict(self._cfg.spec_augment)
else:
self.spec_augmentation = None
if hasattr(self._cfg, 'crop_or_pad_augment') and self._cfg.crop_or_pad_augment is not None:
self.crop_or_pad = EncDecClassificationModel.from_config_dict(self._cfg.crop_or_pad_augment)
else:
self.crop_or_pad = None
# Setup metric objects
self._accuracy = TopKClassificationAccuracy()
def transcribe(self, paths2audio_files: str) -> str:
raise NotImplementedError("Classification models do not transcribe audio.")
def _setup_dataloader_from_config(self, config: Optional[Dict]):
if config.get('manifest_filepath') is None:
return
if 'augmentor' in config:
augmentor = process_augmentations(config['augmentor'])
else:
augmentor = None
featurizer = WaveformFeaturizer(
sample_rate=config['sample_rate'], int_values=config.get('int_values', False), augmentor=augmentor
)
dataset = AudioLabelDataset(
manifest_filepath=config['manifest_filepath'],
labels=config['labels'],
featurizer=featurizer,
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
trim=config.get('trim_silence', True),
load_audio=config.get('load_audio', True),
)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=config['batch_size'],
collate_fn=dataset.collate_fn,
drop_last=config.get('drop_last', False),
shuffle=config['shuffle'],
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def setup_training_data(self, train_data_config: Optional[Union[DictConfig, Dict]]):
if 'shuffle' not in train_data_config:
train_data_config['shuffle'] = True
self._train_dl = self._setup_dataloader_from_config(config=train_data_config)
def setup_validation_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):
if 'shuffle' not in val_data_config:
val_data_config['shuffle'] = False
self._validation_dl = self._setup_dataloader_from_config(config=val_data_config)
def setup_test_data(self, test_data_config: Optional[Union[DictConfig, Dict]]):
if 'shuffle' not in test_data_config:
test_data_config['shuffle'] = False
self._test_dl = self._setup_dataloader_from_config(config=test_data_config)
def test_dataloader(self):
if self._test_dl is not None:
return self._test_dl
@classmethod
def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x1x64-v1",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x1x64-v1.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v1, 30 classes) which obtains 97.32% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x2x64-v1",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x2x64-v1.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v1, 30 classes) which obtains 97.68% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x1x64-v2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x1x64-v2.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v2, 35 classes) which obtains 97.12% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x1x64-v2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x1x64-v2.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v2, 30 classes) which obtains 97.29% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x1x64-v2-subset-task",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x1x64-v2-subset-task.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v2, 10+2 classes) which obtains 98.2% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x2x64-v2-subset-task",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x2x64-v2-subset-task.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v2, 10+2 classes) which obtains 98.4% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-VAD-3x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet_VAD_3x2.nemo",
description="Voice Activity Detection MatchboxNet model trained on google speech command (v2) and freesound background data, which obtains 0.992 accuracy on testset from same source and 0.852 TPR for FPR=0.315 on testset (ALL) of AVA movie data",
)
result.append(model)
return result
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
if hasattr(self.preprocessor, '_sample_rate'):
audio_eltype = AudioSignal(freq=self.preprocessor._sample_rate)
else:
audio_eltype = AudioSignal()
return {
"input_signal": NeuralType(('B', 'T'), audio_eltype),
"input_signal_length": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"outputs": NeuralType(('B', 'D'), LogitsType())}
@typecheck()
def forward(self, input_signal, input_signal_length):
processed_signal, processed_signal_len = self.preprocessor(
input_signal=input_signal, length=input_signal_length,
)
# Crop or pad is always applied
if self.crop_or_pad is not None:
processed_signal, processed_signal_len = self.crop_or_pad(
input_signal=processed_signal, length=processed_signal_len
)
# Spec augment is not applied during evaluation/testing
if self.spec_augmentation is not None and self.training:
processed_signal = self.spec_augmentation(input_spec=processed_signal)
encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_len)
logits = self.decoder(encoder_output=encoded)
return logits
# PTL-specific methods
def training_step(self, batch, batch_nb):
self.training_step_end()
audio_signal, audio_signal_len, labels, labels_len = batch
logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss_value = self.loss(logits=logits, labels=labels)
tensorboard_logs = {
'train_loss': loss_value,
'learning_rate': self._optimizer.param_groups[0]['lr'],
}
correct_counts, total_counts = self._accuracy(logits=logits, labels=labels)
for ki in range(correct_counts.shape[-1]):
correct_count = correct_counts[ki]
total_count = total_counts[ki]
top_k = self._accuracy.top_k[ki]
tensorboard_logs['training_batch_accuracy_top@{}'.format(top_k)] = correct_count / float(total_count)
return {'loss': loss_value, 'log': tensorboard_logs}
def validation_step(self, batch, batch_idx, dataloader_idx=0):
audio_signal, audio_signal_len, labels, labels_len = batch
logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss_value = self.loss(logits=logits, labels=labels)
correct_counts, total_counts = self._accuracy(logits=logits, labels=labels)
return {'val_loss': loss_value, 'val_correct_counts': correct_counts, 'val_total_counts': total_counts}
def test_step(self, batch, batch_idx, dataloader_idx=0):
audio_signal, audio_signal_len, labels, labels_len = batch
logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss_value = self.loss(logits=logits, labels=labels)
correct_counts, total_counts = self._accuracy(logits=logits, labels=labels)
return {'test_loss': loss_value, 'test_correct_counts': correct_counts, 'test_total_counts': total_counts}
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
correct_counts = torch.stack([x['val_correct_counts'] for x in outputs])
total_counts = torch.stack([x['val_total_counts'] for x in outputs])
topk_scores = compute_topk_accuracy(correct_counts, total_counts)
tensorboard_log = {'val_loss': val_loss_mean}
for top_k, score in zip(self._accuracy.top_k, topk_scores):
tensorboard_log['val_epoch_top@{}'.format(top_k)] = score
return {'log': tensorboard_log}
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()
correct_counts = torch.stack([x['test_correct_counts'].unsqueeze(0) for x in outputs])
total_counts = torch.stack([x['test_total_counts'].unsqueeze(0) for x in outputs])
topk_scores = compute_topk_accuracy(correct_counts, total_counts)
tensorboard_log = {'test_loss': test_loss_mean}
for top_k, score in zip(self._accuracy.top_k, topk_scores):
tensorboard_log['test_epoch_top@{}'.format(top_k)] = score
return {'log': tensorboard_log}
def change_labels(self, new_labels: List[str]):
"""
Changes labels used by the decoder model. Use this method when fine-tuning on from pre-trained model.
This method changes only decoder and leaves encoder and pre-processing modules unchanged. For example, you would
use it if you want to use pretrained encoder when fine-tuning on a data in another dataset.
If new_labels == self.decoder.vocabulary then nothing will be changed.
Args:
new_labels: list with new labels. Must contain at least 2 elements. Typically, \
this is set of labels for the dataset.
Returns: None
"""
if new_labels is not None and not isinstance(new_labels, ListConfig):
new_labels = ListConfig(new_labels)
if self._cfg.labels == new_labels:
logging.warning(
f"Old labels ({self._cfg.labels}) and new labels ({new_labels}) match. Not changing anything"
)
else:
if new_labels is None or len(new_labels) == 0:
raise ValueError(f'New labels must be non-empty list of labels. But I got: {new_labels}')
# Update config
self._cfg.labels = new_labels
decoder_config = self.decoder.to_config_dict()
new_decoder_config = copy.deepcopy(decoder_config)
self._update_decoder_config(new_decoder_config)
del self.decoder
self.decoder = EncDecClassificationModel.from_config_dict(new_decoder_config)
OmegaConf.set_struct(self._cfg.decoder, False)
self._cfg.decoder = new_decoder_config
OmegaConf.set_struct(self._cfg.decoder, True)
if 'train_ds' in self._cfg and self._cfg.train_ds is not None:
self._cfg.train_ds.labels = new_labels
if 'validation_ds' in self._cfg and self._cfg.validation_ds is not None:
self._cfg.validation_ds.labels = new_labels
if 'test_ds' in self._cfg and self._cfg.test_ds is not None:
self._cfg.test_ds.labels = new_labels
logging.info(f"Changed decoder output to {self.decoder.num_classes} labels.")
def _update_decoder_config(self, cfg):
"""
Update the number of classes in the decoder based on labels provided.
Args:
cfg: The config of the decoder which will be updated.
"""
OmegaConf.set_struct(cfg, False)
labels = self.cfg.labels
if 'params' in cfg:
cfg.params.num_classes = len(labels)
else:
cfg.num_classes = len(labels)
OmegaConf.set_struct(cfg, True)
class MatchboxNet(EncDecClassificationModel):
pass
| 46.10355 | 258 | 0.686774 |
import copy
from typing import Dict, List, Optional, Union
import torch
from omegaconf import DictConfig, ListConfig, OmegaConf
from pytorch_lightning import Trainer
from nemo.collections.asr.data.audio_to_text import AudioLabelDataset
from nemo.collections.asr.models.asr_model import ASRModel
from nemo.collections.asr.parts.features import WaveformFeaturizer
from nemo.collections.asr.parts.perturb import process_augmentations
from nemo.collections.common.losses import CrossEntropyLoss
from nemo.collections.common.metrics import TopKClassificationAccuracy, compute_topk_accuracy
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types import *
from nemo.utils import logging
__all__ = ['EncDecClassificationModel', 'MatchboxNet']
class EncDecClassificationModel(ASRModel):
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
super().__init__(cfg=cfg, trainer=trainer)
self._update_decoder_config(self.cfg.decoder)
self.preprocessor = EncDecClassificationModel.from_config_dict(self._cfg.preprocessor)
self.encoder = EncDecClassificationModel.from_config_dict(self._cfg.encoder)
self.decoder = EncDecClassificationModel.from_config_dict(self._cfg.decoder)
self.loss = CrossEntropyLoss()
if hasattr(self._cfg, 'spec_augment') and self._cfg.spec_augment is not None:
self.spec_augmentation = EncDecClassificationModel.from_config_dict(self._cfg.spec_augment)
else:
self.spec_augmentation = None
if hasattr(self._cfg, 'crop_or_pad_augment') and self._cfg.crop_or_pad_augment is not None:
self.crop_or_pad = EncDecClassificationModel.from_config_dict(self._cfg.crop_or_pad_augment)
else:
self.crop_or_pad = None
self._accuracy = TopKClassificationAccuracy()
def transcribe(self, paths2audio_files: str) -> str:
raise NotImplementedError("Classification models do not transcribe audio.")
def _setup_dataloader_from_config(self, config: Optional[Dict]):
if config.get('manifest_filepath') is None:
return
if 'augmentor' in config:
augmentor = process_augmentations(config['augmentor'])
else:
augmentor = None
featurizer = WaveformFeaturizer(
sample_rate=config['sample_rate'], int_values=config.get('int_values', False), augmentor=augmentor
)
dataset = AudioLabelDataset(
manifest_filepath=config['manifest_filepath'],
labels=config['labels'],
featurizer=featurizer,
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
trim=config.get('trim_silence', True),
load_audio=config.get('load_audio', True),
)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=config['batch_size'],
collate_fn=dataset.collate_fn,
drop_last=config.get('drop_last', False),
shuffle=config['shuffle'],
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def setup_training_data(self, train_data_config: Optional[Union[DictConfig, Dict]]):
if 'shuffle' not in train_data_config:
train_data_config['shuffle'] = True
self._train_dl = self._setup_dataloader_from_config(config=train_data_config)
def setup_validation_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):
if 'shuffle' not in val_data_config:
val_data_config['shuffle'] = False
self._validation_dl = self._setup_dataloader_from_config(config=val_data_config)
def setup_test_data(self, test_data_config: Optional[Union[DictConfig, Dict]]):
if 'shuffle' not in test_data_config:
test_data_config['shuffle'] = False
self._test_dl = self._setup_dataloader_from_config(config=test_data_config)
def test_dataloader(self):
if self._test_dl is not None:
return self._test_dl
@classmethod
def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:
result = []
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x1x64-v1",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x1x64-v1.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v1, 30 classes) which obtains 97.32% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x2x64-v1",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x2x64-v1.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v1, 30 classes) which obtains 97.68% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x1x64-v2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x1x64-v2.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v2, 35 classes) which obtains 97.12% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x1x64-v2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x1x64-v2.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v2, 30 classes) which obtains 97.29% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x1x64-v2-subset-task",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x1x64-v2-subset-task.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v2, 10+2 classes) which obtains 98.2% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x2x64-v2-subset-task",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x2x64-v2-subset-task.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v2, 10+2 classes) which obtains 98.4% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-VAD-3x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet_VAD_3x2.nemo",
description="Voice Activity Detection MatchboxNet model trained on google speech command (v2) and freesound background data, which obtains 0.992 accuracy on testset from same source and 0.852 TPR for FPR=0.315 on testset (ALL) of AVA movie data",
)
result.append(model)
return result
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
if hasattr(self.preprocessor, '_sample_rate'):
audio_eltype = AudioSignal(freq=self.preprocessor._sample_rate)
else:
audio_eltype = AudioSignal()
return {
"input_signal": NeuralType(('B', 'T'), audio_eltype),
"input_signal_length": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"outputs": NeuralType(('B', 'D'), LogitsType())}
@typecheck()
def forward(self, input_signal, input_signal_length):
processed_signal, processed_signal_len = self.preprocessor(
input_signal=input_signal, length=input_signal_length,
)
if self.crop_or_pad is not None:
processed_signal, processed_signal_len = self.crop_or_pad(
input_signal=processed_signal, length=processed_signal_len
)
if self.spec_augmentation is not None and self.training:
processed_signal = self.spec_augmentation(input_spec=processed_signal)
encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_len)
logits = self.decoder(encoder_output=encoded)
return logits
def training_step(self, batch, batch_nb):
self.training_step_end()
audio_signal, audio_signal_len, labels, labels_len = batch
logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss_value = self.loss(logits=logits, labels=labels)
tensorboard_logs = {
'train_loss': loss_value,
'learning_rate': self._optimizer.param_groups[0]['lr'],
}
correct_counts, total_counts = self._accuracy(logits=logits, labels=labels)
for ki in range(correct_counts.shape[-1]):
correct_count = correct_counts[ki]
total_count = total_counts[ki]
top_k = self._accuracy.top_k[ki]
tensorboard_logs['training_batch_accuracy_top@{}'.format(top_k)] = correct_count / float(total_count)
return {'loss': loss_value, 'log': tensorboard_logs}
def validation_step(self, batch, batch_idx, dataloader_idx=0):
audio_signal, audio_signal_len, labels, labels_len = batch
logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss_value = self.loss(logits=logits, labels=labels)
correct_counts, total_counts = self._accuracy(logits=logits, labels=labels)
return {'val_loss': loss_value, 'val_correct_counts': correct_counts, 'val_total_counts': total_counts}
def test_step(self, batch, batch_idx, dataloader_idx=0):
audio_signal, audio_signal_len, labels, labels_len = batch
logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss_value = self.loss(logits=logits, labels=labels)
correct_counts, total_counts = self._accuracy(logits=logits, labels=labels)
return {'test_loss': loss_value, 'test_correct_counts': correct_counts, 'test_total_counts': total_counts}
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
correct_counts = torch.stack([x['val_correct_counts'] for x in outputs])
total_counts = torch.stack([x['val_total_counts'] for x in outputs])
topk_scores = compute_topk_accuracy(correct_counts, total_counts)
tensorboard_log = {'val_loss': val_loss_mean}
for top_k, score in zip(self._accuracy.top_k, topk_scores):
tensorboard_log['val_epoch_top@{}'.format(top_k)] = score
return {'log': tensorboard_log}
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()
correct_counts = torch.stack([x['test_correct_counts'].unsqueeze(0) for x in outputs])
total_counts = torch.stack([x['test_total_counts'].unsqueeze(0) for x in outputs])
topk_scores = compute_topk_accuracy(correct_counts, total_counts)
tensorboard_log = {'test_loss': test_loss_mean}
for top_k, score in zip(self._accuracy.top_k, topk_scores):
tensorboard_log['test_epoch_top@{}'.format(top_k)] = score
return {'log': tensorboard_log}
def change_labels(self, new_labels: List[str]):
if new_labels is not None and not isinstance(new_labels, ListConfig):
new_labels = ListConfig(new_labels)
if self._cfg.labels == new_labels:
logging.warning(
f"Old labels ({self._cfg.labels}) and new labels ({new_labels}) match. Not changing anything"
)
else:
if new_labels is None or len(new_labels) == 0:
raise ValueError(f'New labels must be non-empty list of labels. But I got: {new_labels}')
self._cfg.labels = new_labels
decoder_config = self.decoder.to_config_dict()
new_decoder_config = copy.deepcopy(decoder_config)
self._update_decoder_config(new_decoder_config)
del self.decoder
self.decoder = EncDecClassificationModel.from_config_dict(new_decoder_config)
OmegaConf.set_struct(self._cfg.decoder, False)
self._cfg.decoder = new_decoder_config
OmegaConf.set_struct(self._cfg.decoder, True)
if 'train_ds' in self._cfg and self._cfg.train_ds is not None:
self._cfg.train_ds.labels = new_labels
if 'validation_ds' in self._cfg and self._cfg.validation_ds is not None:
self._cfg.validation_ds.labels = new_labels
if 'test_ds' in self._cfg and self._cfg.test_ds is not None:
self._cfg.test_ds.labels = new_labels
logging.info(f"Changed decoder output to {self.decoder.num_classes} labels.")
def _update_decoder_config(self, cfg):
OmegaConf.set_struct(cfg, False)
labels = self.cfg.labels
if 'params' in cfg:
cfg.params.num_classes = len(labels)
else:
cfg.num_classes = len(labels)
OmegaConf.set_struct(cfg, True)
class MatchboxNet(EncDecClassificationModel):
pass
| true | true |
f726f228726b7b2b3f9d488b6ba21009b47132f1 | 881 | py | Python | website/addons/s3/__init__.py | sf2ne/Playground | 95b2d222d7ac43baca0249acbfc34e043d6a95b3 | [
"Apache-2.0"
] | null | null | null | website/addons/s3/__init__.py | sf2ne/Playground | 95b2d222d7ac43baca0249acbfc34e043d6a95b3 | [
"Apache-2.0"
] | 13 | 2020-03-24T15:29:41.000Z | 2022-03-11T23:15:28.000Z | website/addons/s3/__init__.py | sf2ne/Playground | 95b2d222d7ac43baca0249acbfc34e043d6a95b3 | [
"Apache-2.0"
] | null | null | null | import os
from . import model
from . import routes
from . import views
MODELS = [model.AddonS3UserSettings, model.AddonS3NodeSettings]
USER_SETTINGS_MODEL = model.AddonS3UserSettings
NODE_SETTINGS_MODEL = model.AddonS3NodeSettings
ROUTES = [routes.settings_routes]
SHORT_NAME = 's3'
FULL_NAME = 'Amazon S3'
OWNERS = ['user', 'node']
ADDED_DEFAULT = []
ADDED_MANDATORY = []
VIEWS = []
CONFIGS = ['accounts', 'node']
CATEGORIES = ['storage']
INCLUDE_JS = {}
INCLUDE_CSS = {
'widget': [],
'page': [],
}
HAS_HGRID_FILES = True
GET_HGRID_DATA = views.hgrid.s3_hgrid_data
# 1024 ** 1024 # There really shouldnt be a limit...
MAX_FILE_SIZE = 128 # MB
HERE = os.path.dirname(os.path.abspath(__file__))
NODE_SETTINGS_TEMPLATE = os.path.join(HERE, 'templates', 's3_node_settings.mako')
USER_SETTINGS_TEMPLATE = os.path.join(HERE, 'templates', 's3_user_settings.mako')
| 20.97619 | 81 | 0.727582 | import os
from . import model
from . import routes
from . import views
MODELS = [model.AddonS3UserSettings, model.AddonS3NodeSettings]
USER_SETTINGS_MODEL = model.AddonS3UserSettings
NODE_SETTINGS_MODEL = model.AddonS3NodeSettings
ROUTES = [routes.settings_routes]
SHORT_NAME = 's3'
FULL_NAME = 'Amazon S3'
OWNERS = ['user', 'node']
ADDED_DEFAULT = []
ADDED_MANDATORY = []
VIEWS = []
CONFIGS = ['accounts', 'node']
CATEGORIES = ['storage']
INCLUDE_JS = {}
INCLUDE_CSS = {
'widget': [],
'page': [],
}
HAS_HGRID_FILES = True
GET_HGRID_DATA = views.hgrid.s3_hgrid_data
h.dirname(os.path.abspath(__file__))
NODE_SETTINGS_TEMPLATE = os.path.join(HERE, 'templates', 's3_node_settings.mako')
USER_SETTINGS_TEMPLATE = os.path.join(HERE, 'templates', 's3_user_settings.mako')
| true | true |
f726f26f0db530e1d6ac7228dc6da3573ce0200f | 237 | py | Python | CodingTestForEmployment/Part3/implementation/implementation1.py | lkc263/Algorithm_Study_Python | 5b9a74ecf7e864c861df2280a1bf4b393b0fcbca | [
"MIT"
] | null | null | null | CodingTestForEmployment/Part3/implementation/implementation1.py | lkc263/Algorithm_Study_Python | 5b9a74ecf7e864c861df2280a1bf4b393b0fcbca | [
"MIT"
] | null | null | null | CodingTestForEmployment/Part3/implementation/implementation1.py | lkc263/Algorithm_Study_Python | 5b9a74ecf7e864c861df2280a1bf4b393b0fcbca | [
"MIT"
] | null | null | null | n = input()
front_n = n[0:len(n)//2]
back_n = n[len(n)//2:len(n)]
front_n = map(int,front_n)
back_n = map(int,back_n)
result_f = sum(front_n)
result_b = sum(back_n)
if result_f == result_b:
print('LUCKY')
else:
print('READY') | 15.8 | 28 | 0.637131 | n = input()
front_n = n[0:len(n)//2]
back_n = n[len(n)//2:len(n)]
front_n = map(int,front_n)
back_n = map(int,back_n)
result_f = sum(front_n)
result_b = sum(back_n)
if result_f == result_b:
print('LUCKY')
else:
print('READY') | true | true |
f726f3e6b68297e13227e122ba85506dd2bb46e5 | 2,762 | py | Python | pyclustering/samples/__init__.py | JosephChataignon/pyclustering | bf4f51a472622292627ec8c294eb205585e50f52 | [
"BSD-3-Clause"
] | 1,013 | 2015-01-26T19:50:14.000Z | 2022-03-31T07:38:48.000Z | pyclustering/samples/__init__.py | peterlau0626/pyclustering | bf4f51a472622292627ec8c294eb205585e50f52 | [
"BSD-3-Clause"
] | 542 | 2015-01-20T16:44:32.000Z | 2022-01-29T14:57:20.000Z | pyclustering/samples/__init__.py | peterlau0626/pyclustering | bf4f51a472622292627ec8c294eb205585e50f52 | [
"BSD-3-Clause"
] | 262 | 2015-03-19T07:28:12.000Z | 2022-03-30T07:28:24.000Z | """!
@brief pyclustering module for samples.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
class answer_reader:
"""!
@brief Answer reader for samples that are used by pyclustering library.
"""
def __init__(self, answer_path):
"""!
@brief Creates instance of answer reader to read proper clustering results of samples.
@param[in] answer_path (string): Path to clustering results (answers).
"""
self.__answer_path = answer_path
self.__clusters = None
self.__noise = None
def get_clusters(self):
"""!
@brief Read proper clustering results.
@return (list) Clusters where each cluster is represented by list of index point from dataset.
"""
self.__read_answer()
return self.__clusters
def get_noise(self):
"""!
@brief Read proper clustering results
@return (list) Noise where each outlier is represented by index point from dataset.
"""
self.__read_answer()
return self.__noise
def get_cluster_lengths(self):
"""!
@brief Read proper cluster lengths.
@details Cluster length means amount of point in a cluster.
@return (list) Cluster lengths where each length means amount of points in a cluster.
"""
clusters = self.get_clusters()
return [len(cluster) for cluster in clusters]
def __read_answer_from_line(self, index_point, line):
"""!
@brief Read information about point from the specific line and place it to cluster or noise in line with that
information.
@param[in] index_point (uint): Index point that should be placed to cluster or noise.
@param[in] line (string): Line where information about point should be read.
"""
if line[0] == 'n':
self.__noise.append(index_point)
else:
index_cluster = int(line)
if index_cluster >= len(self.__clusters):
self.__clusters.append([index_point])
else:
self.__clusters[index_cluster].append(index_point)
def __read_answer(self):
"""!
@brief Read information about proper clusters and noises from the file.
"""
if self.__clusters is not None:
return
file = open(self.__answer_path, 'r')
self.__clusters, self.__noise = [], []
index_point = 0
for line in file:
self.__read_answer_from_line(index_point, line)
index_point += 1
file.close()
| 26.815534 | 118 | 0.589428 |
class answer_reader:
def __init__(self, answer_path):
self.__answer_path = answer_path
self.__clusters = None
self.__noise = None
def get_clusters(self):
self.__read_answer()
return self.__clusters
def get_noise(self):
self.__read_answer()
return self.__noise
def get_cluster_lengths(self):
clusters = self.get_clusters()
return [len(cluster) for cluster in clusters]
def __read_answer_from_line(self, index_point, line):
if line[0] == 'n':
self.__noise.append(index_point)
else:
index_cluster = int(line)
if index_cluster >= len(self.__clusters):
self.__clusters.append([index_point])
else:
self.__clusters[index_cluster].append(index_point)
def __read_answer(self):
if self.__clusters is not None:
return
file = open(self.__answer_path, 'r')
self.__clusters, self.__noise = [], []
index_point = 0
for line in file:
self.__read_answer_from_line(index_point, line)
index_point += 1
file.close()
| true | true |
f726f53393a6475b58d999e2bc14d087f34c543e | 1,919 | py | Python | calendar_events/views.py | alexkyllo/django-calendar-events | f1ad2c2b858f93a1256604ff9f7b223914acf29e | [
"Apache-2.0"
] | 1 | 2016-09-09T04:16:10.000Z | 2016-09-09T04:16:10.000Z | calendar_events/views.py | alexkyllo/django-calendar-events | f1ad2c2b858f93a1256604ff9f7b223914acf29e | [
"Apache-2.0"
] | null | null | null | calendar_events/views.py | alexkyllo/django-calendar-events | f1ad2c2b858f93a1256604ff9f7b223914acf29e | [
"Apache-2.0"
] | 2 | 2018-04-19T19:29:46.000Z | 2018-09-21T00:18:22.000Z | from django.shortcuts import render, render_to_response
from django.http import Http404
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.views.decorators.http import require_GET, require_POST, require_http_methods
from models import *
from forms import *
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from dateutil import parser
import json
# Create your views here.
def show_calendar(request, *args, **kwargs):
return render_to_response('calendar_events/show_calendar.html', context_instance=RequestContext(request))
@require_GET
def view_all_events_between(request, **kwargs):
    '''
    View for the jquery-ui fullcalendar widget.  Takes a GET request with
    'start' and 'end' dates (YYYY-MM-DD, interpreted at midnight UTC) and
    returns every event occurrence inside the range as the JSON format
    fullcalendar expects.
    '''
    startdatetime = parser.parse(request.GET['start'] + 'T00:00:00.0+00:00')
    enddatetime = parser.parse(request.GET['end'] + 'T00:00:00.0+00:00')
    # NOTE: the original guarded `event_occurrences is None`, but a list
    # comprehension can never be None, so that branch was dead.  An empty
    # flattened list serializes to "[]" anyway, matching the old output.
    fullcalendar_events = [
        occurrence.to_fullcalendar()
        for event in Event.objects.all()
        for occurrence in event.get_occurrences(startdatetime, enddatetime)
    ]
    return HttpResponse(json.dumps(fullcalendar_events))
class EventList(ListView):
    # Generic list page over all Event objects (default queryset).
    model = Event
    # def get_queryset(self):
    #     return Event.objects.all()


class EventCreate(CreateView):
    # Creation form for a single Event.
    model = Event
    form_class = EventForm


class EventDelete(DeleteView):
    # Confirmation page + delete for a single Event.
    model = Event


class EventUpdate(UpdateView):
    # Edit form for an existing Event.
    model = Event
    form_class = EventForm


class EventDetail(DetailView):
    # Read-only detail page for one Event.
    model = Event
from django.http import Http404
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.views.decorators.http import require_GET, require_POST, require_http_methods
from models import *
from forms import *
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from dateutil import parser
import json
def show_calendar(request, *args, **kwargs):
return render_to_response('calendar_events/show_calendar.html', context_instance=RequestContext(request))
@require_GET
def view_all_events_between(request, **kwargs):
startdatetime = parser.parse(request.GET['start']+'T00:00:00.0+00:00')
enddatetime = parser.parse(request.GET['end']+'T00:00:00.0+00:00')
events = Event.objects.all()
event_occurrences = [event.get_occurrences(startdatetime,enddatetime) for event in events]
if event_occurrences is None:
return HttpResponse("[]")
else:
event_occurrences_flat = [item for sublist in event_occurrences for item in sublist]
fullcalendar_events = [occurrence.to_fullcalendar() for occurrence in event_occurrences_flat]
return HttpResponse(json.dumps(fullcalendar_events))
class EventList(ListView):
model = Event
class EventCreate(CreateView):
model = Event
form_class = EventForm
class EventDelete(DeleteView):
model = Event
class EventUpdate(UpdateView):
model = Event
form_class = EventForm
class EventDetail(DetailView):
model = Event | true | true |
f726f57c229488230500b5d7998e2d3d8ba1a490 | 176 | py | Python | regular-expressions-tutorial/verify_email.py | dapopov-st/python-youtube-code | 770c9291988898f259ad28bbab5989acee8fb830 | [
"MIT"
] | 262 | 2020-03-17T03:24:35.000Z | 2022-03-22T12:50:02.000Z | regular-expressions-tutorial/verify_email.py | dapopov-st/python-youtube-code | 770c9291988898f259ad28bbab5989acee8fb830 | [
"MIT"
] | 14 | 2020-07-12T14:17:36.000Z | 2022-03-21T09:38:45.000Z | regular-expressions-tutorial/verify_email.py | dapopov-st/python-youtube-code | 770c9291988898f259ad28bbab5989acee8fb830 | [
"MIT"
] | 583 | 2020-02-12T17:54:21.000Z | 2022-03-30T03:59:21.000Z | import re
import re


def is_valid_email(address):
    """Return True when *address* is a simple name@domain.tld email.

    Only com/edu/net TLDs are accepted.  re.fullmatch anchors the whole
    string, so partial matches ('x y@z.com') are rejected -- the original
    used re.search, which accepted any string containing an email.
    """
    # The original class [a-zA-z] also matched '[', '\\', ']', '^', '_'
    # and '`' (the ASCII range between 'Z' and 'a'); fixed to real letters.
    pattern = r"[a-zA-Z0-9]+@[a-zA-Z]+\.(com|edu|net)"
    return re.fullmatch(pattern, address) is not None


if __name__ == "__main__":
    if is_valid_email(input()):
        print("valid email")
    else:
        print("invalid email")
pattern = "[a-zA-Z0-9]+@[a-zA-z]+\.(com|edu|net)"
user_input = input()
if(re.search(pattern, user_input)):
print("valid email")
else:
print("invalid email")
| true | true |
f726f5c4e2f692389ad5a170f072d70c27e57734 | 3,838 | py | Python | reservation_rest_api.py | usc-isi-i2/wikidata-reservation | 1298ec2a7b347ed88bc93fa30531fa9b10c651a7 | [
"MIT"
] | null | null | null | reservation_rest_api.py | usc-isi-i2/wikidata-reservation | 1298ec2a7b347ed88bc93fa30531fa9b10c651a7 | [
"MIT"
] | null | null | null | reservation_rest_api.py | usc-isi-i2/wikidata-reservation | 1298ec2a7b347ed88bc93fa30531fa9b10c651a7 | [
"MIT"
] | null | null | null | from flask import Flask, request
from reservation_service import get_qnode, read_data, register, delete_namespace
import json
import logging
from tabulate import tabulate
app = Flask(__name__)
# Upload extensions the service is willing to accept.
ALLOWED_EXTENSIONS = {'xls', 'yaml', 'csv', 'json'}
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Log everything (DEBUG and up) to a file, truncated on each start.
logging.basicConfig(level=logging.DEBUG,
                    format="%(asctime)s [%(levelname)s] %(name)s %(lineno)d -- %(message)s",
                    datefmt='%m-%d %H:%M:%S',
                    filename='reservation_rest_api.log',
                    filemode='w')
# Mirror DEBUG-and-up records to the console as well.
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
# Same format as the file handler.
formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(name)s %(lineno)d -- %(message)s", '%m-%d %H:%M:%S')
console.setFormatter(formatter)
# Attach the console handler to the root logger.
logging.getLogger('').addHandler(console)
def allowed_file(filename):
    """Return True when *filename* carries an extension in ALLOWED_EXTENSIONS.

    The comparison is case-insensitive ('DATA.CSV' is accepted); the
    original compared the raw extension and rejected upper-case variants.
    """
    return ('.' in filename and
            filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS)
@app.route('/<namespace>', methods=['GET'])
def get_ns_list(namespace):
    """Render a psql-style text table of one namespace, or of all
    namespaces when the path segment is 'all'.

    Raises Exception when the named namespace is unknown; returns a
    plain hint string when no satellite is registered at all.
    """
    data = read_data()
    if data:
        table = []
        headers = ['Satellite', 'Satellite URI', 'Latest qnode number', 'Prefix', 'num_of_0']
        if namespace == 'all':
            logger.debug('return all namespaces')
            for k, v in data.items():
                table.append([k, v['uri'], v['latest'], v['prefix'], v['num_of_0']])
        else:
            if namespace in data.keys():
                logger.debug('return ' + namespace + ' namespace')
                table.append([namespace, data[namespace]['uri'], data[namespace]['latest'],
                              data[namespace]['prefix'], data[namespace]['num_of_0']])
            else:
                raise Exception('Namespace does not exist in satellite.')
        return tabulate(table, headers, tablefmt="psql")
    return 'There is no satellite. Please register your satellite at first.'
@app.route('/<namespace>/reservation', methods=['GET', 'POST'])
def get_qnode_by_ns(namespace):
    """Reserve and return the next qnode in *namespace* as JSON.

    Raises Exception when the namespace has not been registered.
    """
    if namespace:
        data = get_qnode(namespace)
        if data:
            logger.debug('reserve a qnode in ' + namespace + ' namespace')
            return json.dumps({'Latest qnode': data}, indent=2)
        else:
            raise Exception('Please register your satellite at first.')
    return 'Welcome to the reservation service.'
@app.route('/delete', methods=['GET', 'POST'])
def delete_ns():
    """Delete the namespace named by the 'namespace' request parameter.

    Raises Exception when the namespace does not exist.
    """
    namespace = request.values.get('namespace')
    if namespace:
        flag = delete_namespace(namespace)
        if flag:
            logger.debug('delete ' + namespace + ' namespace success.')
            return 'Success'
        else:
            raise Exception('Namespace does not exist in satellite.')
    return 'Welcome to the reservation service.'
@app.route('/register', methods=['GET', 'POST'])
def register_ns():
    """Register a new namespace from 'namespace', 'uri', 'prefix' and
    optional 'num_of_0' request parameters.

    Raises Exception when the satellite already exists.
    """
    namespace = request.values.get('namespace')
    uri = request.values.get('uri')
    prefix = request.values.get('prefix')
    num_of_0 = request.values.get('num_of_0')
    if not num_of_0:
        # NOTE(review): the request value is a str but the default is an
        # int -- presumably register() tolerates both; confirm.
        num_of_0 = 7
    if namespace and uri and prefix:
        flag = register(namespace, uri, prefix, num_of_0)
        if flag:
            logger.debug('register ' + namespace + ' namespace success.')
            return 'Register successfully and you are ready to use this satellite. '
        else:
            raise Exception('This satellite already exists.')
    return 'Welcome to the reservation service.'
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
from reservation_service import get_qnode, read_data, register, delete_namespace
import json
import logging
from tabulate import tabulate
app = Flask(__name__)
ALLOWED_EXTENSIONS = {'xls', 'yaml', 'csv', 'json'}
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s [%(levelname)s] %(name)s %(lineno)d -- %(message)s",
datefmt='%m-%d %H:%M:%S',
filename='reservation_rest_api.log',
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
levelname)s] %(name)s %(lineno)d -- %(message)s", '%m-%d %H:%M:%S')
e)
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@app.route('/<namespace>', methods=['GET'])
def get_ns_list(namespace):
data = read_data()
if data:
table = []
headers = ['Satellite', 'Satellite URI', 'Latest qnode number', 'Prefix', 'num_of_0']
if namespace == 'all':
logger.debug('return all namespaces')
for k, v in data.items():
table.append([k, v['uri'], v['latest'], v['prefix'], v['num_of_0']])
else:
if namespace in data.keys():
logger.debug('return ' + namespace + ' namespace')
table.append([namespace, data[namespace]['uri'], data[namespace]['latest'],
data[namespace]['prefix'], data[namespace]['num_of_0']])
else:
raise Exception('Namespace does not exist in satellite.')
return tabulate(table, headers, tablefmt="psql")
return 'There is no satellite. Please register your satellite at first.'
@app.route('/<namespace>/reservation', methods=['GET', 'POST'])
def get_qnode_by_ns(namespace):
if namespace:
data = get_qnode(namespace)
if data:
logger.debug('reserve a qnode in ' + namespace + ' namespace')
return json.dumps({'Latest qnode': data}, indent=2)
else:
raise Exception('Please register your satellite at first.')
return 'Welcome to the reservation service.'
@app.route('/delete', methods=['GET', 'POST'])
def delete_ns():
namespace = request.values.get('namespace')
if namespace:
flag = delete_namespace(namespace)
if flag:
logger.debug('delete ' + namespace + ' namespace success.')
return 'Success'
else:
raise Exception('Namespace does not exist in satellite.')
return 'Welcome to the reservation service.'
@app.route('/register', methods=['GET', 'POST'])
def register_ns():
namespace = request.values.get('namespace')
uri = request.values.get('uri')
prefix = request.values.get('prefix')
num_of_0 = request.values.get('num_of_0')
if not num_of_0:
num_of_0 = 7
if namespace and uri and prefix:
flag = register(namespace, uri, prefix, num_of_0)
if flag:
logger.debug('register ' + namespace + ' namespace success.')
return 'Register successfully and you are ready to use this satellite. '
else:
raise Exception('This satellite already exists.')
return 'Welcome to the reservation service.'
if __name__ == '__main__':
app.run() | true | true |
f726f5c80de9e071ad05e77e26f7512ec0cee0dd | 5,269 | py | Python | models.py | gautamMalu/Aesthetic_attributes_maps | f2462c92d414f9457a3babd32171b071e4703515 | [
"MIT"
] | 22 | 2017-07-14T02:53:27.000Z | 2021-03-19T20:13:12.000Z | models.py | gautamMalu/Aesthetic_attributes_maps | f2462c92d414f9457a3babd32171b071e4703515 | [
"MIT"
] | 3 | 2017-07-25T03:01:23.000Z | 2018-06-27T14:03:43.000Z | models.py | gautamMalu/Aesthetic_attributes_maps | f2462c92d414f9457a3babd32171b071e4703515 | [
"MIT"
] | 11 | 2017-07-14T08:23:33.000Z | 2021-11-24T09:18:48.000Z | from keras.applications.resnet50 import ResNet50
from keras.applications.vgg16 import VGG16
from keras.layers import Flatten, Dropout, Lambda, GlobalAveragePooling2D, merge, Input, Dense
from keras.models import Model
import keras.backend as K
#from keras.utils.visualize_util import plot
#from SpatialPyramidPooling import SpatialPyramidPooling
def l2_normalize(x):
    """L2-normalize tensor *x* along axis 0 (Keras backend op)."""
    return K.l2_normalize(x, 0)
def l2_normalize_output_shape(input_shape):
    """L2 normalization does not change the tensor shape."""
    return input_shape
def squared_root_normalization(x):
    """Reduce a convolution feature map to a per-channel vector.

    Despite the name, only global average pooling is applied: the
    squared-root and L2-normalization steps are commented out below,
    so the output is just the channel-wise mean, shape (None, nc).

    :param x: input tensor, output of a convolution layer
    :return: pooled tensor
    """
    x = GlobalAveragePooling2D()(x)
    # output shape = (None, nc)
    # x = K.sqrt(x)
    # x = K.l2_normalize(x, axis=0)
    return x
def squared_root_normalization_output_shape(input_shape):
    """Map a convolution output shape to the pooled shape (batch, channels).

    :param input_shape: shape tuple of the convolution output
    :return: (batch, channels) shape tuple
    """
    batch, channels = input_shape[0], input_shape[-1]
    return (batch, channels)
def model1(weights_path=None):
    '''
    Basic ResNet-FT baseline: finetunes ResNet50 with one small head per
    aesthetic attribute plus the overall aesthetic score.

    tanh heads cover attributes that can be negative; sigmoid heads cover
    the non-negative ones (Repetition, Symmetry, score).

    :param weights_path: optional path of a weight file to load
    :return: Keras Model instance with 12 scalar outputs
    '''
    _input = Input(shape=(299, 299, 3))
    resnet = ResNet50(include_top=False, weights='imagenet', input_tensor=_input)
    # Pool the final ResNet activation into one feature vector.
    last_layer_output = GlobalAveragePooling2D()(resnet.get_layer('activation_49').output)
    # output of model
    outputs = []
    attrs = ['BalacingElements', 'ColorHarmony', 'Content', 'DoF',
             'Light', 'MotionBlur', 'Object', 'RuleOfThirds', 'VividColor']
    for attribute in attrs:
        outputs.append(Dense(1, init='glorot_uniform', activation='tanh', name=attribute)(last_layer_output))
    non_negative_attrs = ['Repetition', 'Symmetry', 'score']
    for attribute in non_negative_attrs:
        outputs.append(Dense(1, init='glorot_uniform', activation='sigmoid', name=attribute)(last_layer_output))
    model = Model(input=_input, output=outputs)
    if weights_path:
        model.load_weights(weights_path)
    return model
def model2(weights_path=None):
    '''
    Creates a model by concatenating the features from lower layers
    with high level convolution features for all aesthetic attributes along
    with overall aesthetic score.  This is the model used in the paper.

    :param weights_path: optional path of a weight file to load
    :return: Keras Model instance with 12 scalar outputs
    '''
    _input = Input(shape=(299, 299, 3))
    resnet = ResNet50(include_top=False, weights='imagenet', input_tensor=_input)
    # Collect every ReLU activation layer of the ResNet backbone.
    activation_layers = []
    layers = resnet.layers
    for layer in layers:
        # print layer.name, layer.input_shape, layer.output_shape
        if 'activation' in layer.name:
            activation_layers.append(layer)
    activations = 0
    activation_plus_squared_outputs = []
    # Remove last activation layer so
    # it can be used with spatial pooling layer if required
    nlayers = len(activation_layers) - 1
    for i in range(1, nlayers):
        layer = activation_layers[i]
        # Only tap layers where the channel count grows (stage boundaries).
        if layer.output_shape[-1] > activation_layers[i - 1].output_shape[-1]:
            # print layer.name, layer.input_shape, layer.output_shape
            activations += layer.output_shape[-1]
            _out = Lambda(squared_root_normalization,
                          output_shape=squared_root_normalization_output_shape, name=layer.name + '_normalized')(layer.output)
            activation_plus_squared_outputs.append(_out)
    # print "sum of all activations should be {}".format(activations)
    # Final activation: pool and L2-normalize separately.
    last_layer_output = GlobalAveragePooling2D()(activation_layers[-1].output)
    # last_layer_output = Lambda(K.sqrt, output_shape=squared_root_normalization_output_shape)(last_layer_output)
    last_layer_output = Lambda(l2_normalize, output_shape=l2_normalize_output_shape,
                               name=activation_layers[-1].name+'_normalized')(last_layer_output)
    activation_plus_squared_outputs.append(last_layer_output)
    # Concatenate all tapped feature vectors and normalize the result.
    merged = merge(activation_plus_squared_outputs, mode='concat', concat_axis=1)
    merged = Lambda(l2_normalize, output_shape=l2_normalize_output_shape, name='merge')(merged)
    # output of model
    outputs = []
    attrs = ['BalacingElements', 'ColorHarmony', 'Content', 'DoF',
             'Light', 'MotionBlur', 'Object', 'RuleOfThirds', 'VividColor']
    for attribute in attrs:
        outputs.append(Dense(1, init='glorot_uniform', activation='tanh', name=attribute)(merged))
    non_negative_attrs = ['Repetition', 'Symmetry', 'score']
    for attribute in non_negative_attrs:
        outputs.append(Dense(1, init='glorot_uniform', activation='sigmoid', name=attribute)(merged))
    model = Model(input=_input, output=outputs)
    if weights_path:
        model.load_weights(weights_path)
    return model
if __name__ == '__main__':
    # Build the paper's model and print its layer summary.
    model = model2()
    model.summary()
    # plot(model, to_file='model2.png', show_shapes=True)
| 37.368794 | 126 | 0.707155 | from keras.applications.resnet50 import ResNet50
from keras.applications.vgg16 import VGG16
from keras.layers import Flatten, Dropout, Lambda, GlobalAveragePooling2D, merge, Input, Dense
from keras.models import Model
import keras.backend as K
def l2_normalize(x):
return K.l2_normalize(x, 0)
def l2_normalize_output_shape(input_shape):
return input_shape
def squared_root_normalization(x):
x = GlobalAveragePooling2D()(x)
return x
def squared_root_normalization_output_shape(input_shape):
return (input_shape[0], input_shape[-1])
def model1(weights_path=None):
_input = Input(shape=(299, 299, 3))
resnet = ResNet50(include_top=False, weights='imagenet', input_tensor=_input)
last_layer_output = GlobalAveragePooling2D()(resnet.get_layer('activation_49').output)
outputs = []
attrs = ['BalacingElements', 'ColorHarmony', 'Content', 'DoF',
'Light', 'MotionBlur', 'Object', 'RuleOfThirds', 'VividColor']
for attribute in attrs:
outputs.append(Dense(1, init='glorot_uniform', activation='tanh', name=attribute)(last_layer_output))
non_negative_attrs = ['Repetition', 'Symmetry', 'score']
for attribute in non_negative_attrs:
outputs.append(Dense(1, init='glorot_uniform', activation='sigmoid', name=attribute)(last_layer_output))
model = Model(input=_input, output=outputs)
if weights_path:
model.load_weights(weights_path)
return model
def model2(weights_path=None):
_input = Input(shape=(299, 299, 3))
resnet = ResNet50(include_top=False, weights='imagenet', input_tensor=_input)
activation_layers = []
layers = resnet.layers
for layer in layers:
if 'activation' in layer.name:
activation_layers.append(layer)
activations = 0
activation_plus_squared_outputs = []
nlayers = len(activation_layers) - 1
for i in range(1, nlayers):
layer = activation_layers[i]
if layer.output_shape[-1] > activation_layers[i - 1].output_shape[-1]:
activations += layer.output_shape[-1]
_out = Lambda(squared_root_normalization,
output_shape=squared_root_normalization_output_shape, name=layer.name + '_normalized')(layer.output)
activation_plus_squared_outputs.append(_out)
last_layer_output = GlobalAveragePooling2D()(activation_layers[-1].output)
last_layer_output = Lambda(l2_normalize, output_shape=l2_normalize_output_shape,
name=activation_layers[-1].name+'_normalized')(last_layer_output)
activation_plus_squared_outputs.append(last_layer_output)
merged = merge(activation_plus_squared_outputs, mode='concat', concat_axis=1)
merged = Lambda(l2_normalize, output_shape=l2_normalize_output_shape, name='merge')(merged)
outputs = []
attrs = ['BalacingElements', 'ColorHarmony', 'Content', 'DoF',
'Light', 'MotionBlur', 'Object', 'RuleOfThirds', 'VividColor']
for attribute in attrs:
outputs.append(Dense(1, init='glorot_uniform', activation='tanh', name=attribute)(merged))
non_negative_attrs = ['Repetition', 'Symmetry', 'score']
for attribute in non_negative_attrs:
outputs.append(Dense(1, init='glorot_uniform', activation='sigmoid', name=attribute)(merged))
model = Model(input=_input, output=outputs)
if weights_path:
model.load_weights(weights_path)
return model
if __name__ == '__main__':
model = model2()
model.summary()
| true | true |
f726f884a93cc0f5b265ff93d535edd969498ccd | 518 | py | Python | Models/initialize.py | jeffrey-clark/gender_in_academia | 25f76abfccb06ee7d6a630ee1d4cecdbf6dbe21d | [
"MIT"
] | null | null | null | Models/initialize.py | jeffrey-clark/gender_in_academia | 25f76abfccb06ee7d6a630ee1d4cecdbf6dbe21d | [
"MIT"
] | null | null | null | Models/initialize.py | jeffrey-clark/gender_in_academia | 25f76abfccb06ee7d6a630ee1d4cecdbf6dbe21d | [
"MIT"
] | null | null | null | # import dependencies
import os, re, io, sys
import pandas as pd
#import mysql.connector
import json
import numpy as np
# import function collections
from Functions.j_functions import *
from Functions.language import *
from Functions.functions import *
# set universal variables
# Absolute path to the repository root (one level above this module).
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
# Presumably the preferred column ordering for records -- confirm usage.
pref_order = ['app_id', 'name', 'surname', 'financier', 'keywords', 'keyword_lang']
# String values treated as missing/None when cleaning raw data.
nonelist = ['None', 'NA', 'N/A', '-', '', ' ', '--', "null", "N.A.", ]
| 24.666667 | 83 | 0.69305 |
import os, re, io, sys
import pandas as pd
import json
import numpy as np
from Functions.j_functions import *
from Functions.language import *
from Functions.functions import *
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
pref_order = ['app_id', 'name', 'surname', 'financier', 'keywords', 'keyword_lang']
nonelist = ['None', 'NA', 'N/A', '-', '', ' ', '--', "null", "N.A.", ]
| true | true |
f726f911716ec981e91b4ea974dc3e14779424c2 | 29,285 | py | Python | zdd.py | sonecabr/marathon-lb-rsyslog | 1e4f6a738b7b7afaa0b2a70c67963b95f8ee54c8 | [
"Apache-2.0"
] | null | null | null | zdd.py | sonecabr/marathon-lb-rsyslog | 1e4f6a738b7b7afaa0b2a70c67963b95f8ee54c8 | [
"Apache-2.0"
] | null | null | null | zdd.py | sonecabr/marathon-lb-rsyslog | 1e4f6a738b7b7afaa0b2a70c67963b95f8ee54c8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import argparse
import csv
import json
import logging
import math
import socket
import subprocess
import sys
import time
import traceback
from datetime import datetime
from collections import namedtuple
import requests
import six.moves.urllib as urllib
from common import (get_marathon_auth_params, set_logging_args,
set_marathon_auth_args, setup_logging)
from utils import get_task_ip_and_ports
from zdd_exceptions import (
AppCreateException, AppDeleteException, AppScaleException,
InvalidArgException, MarathonEndpointException,
MarathonLbEndpointException, MissingFieldException)
logger = logging.getLogger('zdd')
def query_yes_no(question, default="yes"):
    # Thanks stackoverflow:
    # https://stackoverflow.com/questions/3041986/python-command-line-yes-no-input
    """Ask *question* on stdout and read a yes/no answer from stdin.

    "default" is the answer assumed on a bare <Enter>: "yes" (the
    default), "no", or None (an explicit answer is then required).
    Returns True for yes, False for no, re-prompting until understood.
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    prompts = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}
    if default not in prompts:
        raise ValueError("invalid default answer: '%s'" % default)
    prompt = prompts[default]
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
def marathon_get_request(args, path):
    """GET *path* from the Marathon API and return the response.

    Wraps any transport or HTTP error in MarathonEndpointException.
    """
    url = args.marathon + path
    try:
        response = requests.get(url, auth=get_marathon_auth_params(args))
        response.raise_for_status()
    except requests.exceptions.RequestException:
        raise MarathonEndpointException(
            "Error while querying marathon", url, traceback.format_exc())
    return response
def list_marathon_apps(args):
    """Return every app definition currently known to Marathon."""
    return marathon_get_request(args, "/v2/apps").json()['apps']
def fetch_marathon_app(args, app_id):
    """Fetch a single Marathon app definition by its id."""
    payload = marathon_get_request(args, "/v2/apps" + app_id).json()
    return payload['app']
def _get_alias_records(hostname):
    """Return all IPv4 A records for a given hostname.

    gethostbyname_ex returns (name, aliaslist, ipaddrlist); element [2]
    is the address list.
    """
    return socket.gethostbyname_ex(hostname)[2]
def _unparse_url_alias(url, addr):
    """Reassemble a urlparse result into a string, swapping its host for
    *addr* while keeping the original port and every other component."""
    scheme, _, path, params, query, fragment = url
    netloc = addr + ":" + str(url.port)
    return urllib.parse.urlunparse(
        (scheme, netloc, path, params, query, fragment))
def get_marathon_lb_urls(args):
    """Return a list of urls for all Aliases of the
    marathon_lb url passed in as an argument.

    Resolves the hostname via DNS and rebuilds one url per A record, so
    every HAProxy instance behind the name can be queried individually.
    """
    url = urllib.parse.urlparse(args.marathon_lb)
    addrs = _get_alias_records(url.hostname)
    return [_unparse_url_alias(url, addr) for addr in addrs]
def fetch_haproxy_pids(haproxy_url):
    """GET marathon-lb's /_haproxy_getpids endpoint and return the pid
    strings.  Logs and re-raises any request failure."""
    try:
        response = requests.get(haproxy_url + "/_haproxy_getpids")
        response.raise_for_status()
    except requests.exceptions.RequestException:
        logger.exception("Caught exception when retrieving HAProxy"
                         " pids from " + haproxy_url)
        raise
    return response.text.split()
def check_haproxy_reloading(haproxy_url):
    """Return False if haproxy has only one pid (not reloading).

    Return True if the pid endpoint cannot be queried or more than one
    pid is reported (a reload is in flight).
    """
    try:
        pids = fetch_haproxy_pids(haproxy_url)
    except requests.exceptions.RequestException:
        # Assume reloading on any error; callers bound this with a timeout.
        return True
    reloading = len(pids) > 1
    if reloading:
        logger.info("Waiting for {} pids on {}".format(len(pids), haproxy_url))
    return reloading
def any_marathon_lb_reloading(marathon_lb_urls):
    """True when at least one HAProxy instance is mid-reload."""
    return any(check_haproxy_reloading(url) for url in marathon_lb_urls)
def fetch_haproxy_stats(haproxy_url):
    """GET the raw CSV stats dump from one HAProxy instance.
    Logs and re-raises any request failure."""
    try:
        response = requests.get(haproxy_url + "/haproxy?stats;csv")
        response.raise_for_status()
    except requests.exceptions.RequestException:
        logger.exception("Caught exception when retrieving HAProxy"
                         " stats from " + haproxy_url)
        raise
    return response.text
def fetch_combined_haproxy_stats(marathon_lb_urls):
    """Concatenate the stats CSV of every instance and parse the lot."""
    combined = ''.join(fetch_haproxy_stats(url) for url in marathon_lb_urls)
    return parse_haproxy_stats(combined)
def parse_haproxy_stats(csv_data):
    """Parse HAProxy's CSV stats dump into a list of namedtuple rows.

    The first line supplies the field names ('# pxname,svname,...,').
    Every data row ends with a trailing comma, producing an empty last
    field which is dropped; repeated '#'-prefixed header lines (from
    concatenated dumps) are skipped.
    """
    lines = csv_data.splitlines()
    header = lines.pop(0)
    field_names = header.lstrip('# ').rstrip(',\n').split(',')
    Row = namedtuple('Row', field_names)
    parsed = csv.reader(lines, delimiter=',', quotechar="'")
    return [Row(*fields[:-1]) for fields in parsed if fields[0][0] != '#']
def get_deployment_label(app):
    """HAProxy backend name for *app*: '<deployment group>_<port label>'."""
    group = get_deployment_group(app)
    port = app['labels']['HAPROXY_0_PORT']
    return group + "_" + port
def _if_app_listener(app, listener):
    """True when *listener* is a real server row of *app*'s backend,
    excluding HAProxy's aggregate BACKEND/FRONTEND rows."""
    return (listener.pxname == get_deployment_label(app) and
            listener.svname not in ['BACKEND', 'FRONTEND'])
def fetch_app_listeners(app, marathon_lb_urls):
    """Collect this app's server rows from every HAProxy instance."""
    all_rows = fetch_combined_haproxy_stats(marathon_lb_urls)
    return [row for row in all_rows if _if_app_listener(app, row)]
def waiting_for_listeners(new_app, old_app, listeners, haproxy_count):
    """True until every instance of both apps is registered on every
    HAProxy (per-instance listener count matches the combined total)."""
    expected = new_app['instances'] + old_app['instances']
    per_haproxy = len(listeners) / haproxy_count
    return per_haproxy != expected
def get_deployment_target(app):
    """Final instance count for the deployment: the
    HAPROXY_DEPLOYMENT_TARGET_INSTANCES label when present, otherwise
    the app's current instance count."""
    target = app['labels'].get('HAPROXY_DEPLOYMENT_TARGET_INSTANCES')
    return app['instances'] if target is None else int(target)
def get_new_instance_count(app):
    """Pinned instance count for the new app in a hybrid deployment;
    0 when the HAPROXY_DEPLOYMENT_NEW_INSTANCES label is absent."""
    count = app['labels'].get('HAPROXY_DEPLOYMENT_NEW_INSTANCES')
    return 0 if count is None else int(count)
def waiting_for_up_listeners(app, listeners, haproxy_count):
    """True until the per-HAProxy count of 'UP' listeners reaches the
    deployment target."""
    up_count = sum(1 for l in listeners if l.status == 'UP')
    return (up_count / haproxy_count) < get_deployment_target(app)
def select_draining_listeners(listeners):
    """Listeners currently in maintenance mode (being drained)."""
    return list(filter(lambda l: l.status == 'MAINT', listeners))
def select_drained_listeners(listeners):
    """Draining listeners with no queued or active sessions left."""
    return [l for l in select_draining_listeners(listeners)
            if not _has_pending_requests(l)]
def get_svnames_from_task(app, task):
    """Yield the HAProxy server names (svname) for one Marathon task.

    Names are '<host>_<port>' when the task IP equals the agent host
    (host networking) and '<host>_<taskip>_<port>' otherwise, with dots
    replaced by underscores to match marathon-lb's server naming.
    """
    prefix = task['host'].replace('.', '_')
    task_ip, task_port = get_task_ip_and_ports(app, task)
    if task['host'] == task_ip:
        for port in task['ports']:
            yield('{}_{}'.format(prefix, port))
    else:
        for port in task['ports']:
            yield('{}_{}_{}'.format(prefix, task_ip.replace('.', '_'), port))
def get_svnames_from_tasks(app, tasks):
    """Flatten the svnames of every task into one list."""
    return [svname
            for task in tasks
            for svname in get_svnames_from_task(app, task)]
def _has_pending_requests(listener):
    """True when the listener still has queued (qcur) or current (scur)
    sessions; empty stat fields count as zero."""
    queued = int(listener.qcur or 0)
    active = int(listener.scur or 0)
    return queued > 0 or active > 0
def is_hybrid_deployment(args, app):
    """True when the app pins HAPROXY_DEPLOYMENT_NEW_INSTANCES and
    neither --complete-cur nor --complete-prev was requested, i.e. old
    and new app are meant to keep running side by side."""
    return (get_new_instance_count(app) != 0 and
            not args.complete_cur and
            not args.complete_prev)
def find_drained_task_ids(app, listeners, haproxy_count):
    """Return app tasks which have all haproxy listeners down and draining
    of any pending sessions or connections.
    """
    # NOTE(review): zip pairs the flattened svname list positionally with
    # tasks -- only exact when each task exposes one port; confirm.
    drained = select_drained_listeners(listeners)
    svnames = get_svnames_from_tasks(app, app['tasks'])
    drained_ids = []
    for svname, task in zip(svnames, app['tasks']):
        matching = [l for l in drained if l.svname == svname]
        if len(matching) == haproxy_count:
            # Drained on every HAProxy instance -> safe to kill.
            drained_ids.append(task['id'])
    return drained_ids
def find_draining_task_ids(app, listeners, haproxy_count):
    """Return app tasks which have all haproxy listeners draining.
    """
    draining = select_draining_listeners(listeners)
    svnames = get_svnames_from_tasks(app, app['tasks'])
    draining_ids = []
    for svname, task in zip(svnames, app['tasks']):
        matching = [l for l in draining if l.svname == svname]
        if len(matching) == haproxy_count:
            draining_ids.append(task['id'])
    return draining_ids
def max_wait_not_exceeded(max_wait, timestamp):
    """True while fewer than *max_wait* seconds have passed since
    *timestamp* (an epoch time from time.time())."""
    elapsed = time.time() - timestamp
    return elapsed < max_wait
def find_tasks_to_kill(args, new_app, old_app, timestamp):
    """Poll HAProxy until old-app tasks have drained, then return their ids.

    Loops (sleeping args.step_delay between rounds) until args.max_wait
    seconds after *timestamp*: skips rounds while any HAProxy reloads,
    while listeners are still registering or coming UP, and while none
    has drained.  On success returns fully-drained old-app task ids; on
    timeout falls back to merely-draining ones.  Raises
    MarathonLbEndpointException if the initial stats query fails.
    """
    marathon_lb_urls = get_marathon_lb_urls(args)
    haproxy_count = len(marathon_lb_urls)
    try:
        listeners = fetch_app_listeners(new_app, marathon_lb_urls)
    except requests.exceptions.RequestException:
        raise MarathonLbEndpointException(
            "Error while querying Marathon-LB",
            marathon_lb_urls,
            traceback.format_exc())
    while max_wait_not_exceeded(args.max_wait, timestamp):
        time.sleep(args.step_delay)
        logger.info("Existing app running {} instances, "
                    "new app running {} instances"
                    .format(old_app['instances'], new_app['instances']))
        if any_marathon_lb_reloading(marathon_lb_urls):
            continue
        try:
            listeners = fetch_app_listeners(new_app, marathon_lb_urls)
        except requests.exceptions.RequestException:
            # Restart loop if we hit an exception while loading listeners,
            # this may be normal behaviour
            continue
        logger.info("Found {} app listeners across {} HAProxy instances"
                    .format(len(listeners), haproxy_count))
        if waiting_for_listeners(new_app, old_app, listeners, haproxy_count):
            continue
        if waiting_for_up_listeners(new_app, listeners, haproxy_count):
            continue
        if waiting_for_drained_listeners(listeners):
            continue
        return find_drained_task_ids(old_app, listeners, haproxy_count)
    logger.info('Timed out waiting for tasks to fully drain, find any draining'
                ' tasks and continue with deployment...')
    return find_draining_task_ids(old_app, listeners, haproxy_count)
def deployment_in_progress(app):
    """True while Marathon still reports active deployments for *app*."""
    return bool(app['deployments'])
def execute_pre_kill_hook(args, old_app, tasks_to_kill, new_app):
    """Run the operator-supplied --pre-kill-hook script (if any), passing
    the old app, the doomed task ids and the new app as JSON argv values.

    check_call raises CalledProcessError if the hook exits non-zero,
    aborting the deployment.
    """
    if args.pre_kill_hook is not None:
        logger.info("Calling pre-kill hook '{}'".format(args.pre_kill_hook))
        subprocess.check_call([args.pre_kill_hook,
                               json.dumps(old_app),
                               json.dumps(tasks_to_kill),
                               json.dumps(new_app)])
def swap_zdd_apps(args, new_app, old_app):
    """Drive the zero-downtime swap loop to completion.

    _swap_zdd_apps returns either a bool (True on success, False if the
    operator aborted) or a fresh (args, new_app, old_app) tuple to retry
    with; loop until a bool comes back.
    """
    state = (args, new_app, old_app)
    while True:
        outcome = _swap_zdd_apps(state[0], state[1], state[2])
        if isinstance(outcome, bool):
            return outcome
        state = outcome
def _swap_zdd_apps(args, new_app, old_app):
    """One iteration of the zero-downtime swap.

    Returns True/False when the deployment finished/was aborted, or an
    (args, new_app, old_app) tuple when another iteration is needed.
    """
    # Refresh both app definitions from Marathon each round.
    old_app = fetch_marathon_app(args, old_app['id'])
    new_app = fetch_marathon_app(args, new_app['id'])
    if deployment_in_progress(new_app):
        # Let Marathon finish its own deployment before interfering.
        time.sleep(args.step_delay)
        return args, new_app, old_app
    tasks_to_kill = find_tasks_to_kill(args, new_app, old_app, time.time())
    if ready_to_delete_old_app(args, new_app, old_app, tasks_to_kill):
        return safe_delete_app(args, old_app, new_app)
    if len(tasks_to_kill) > 0:
        execute_pre_kill_hook(args, old_app, tasks_to_kill, new_app)
        logger.info("There are {} draining listeners, "
                    "about to kill the following tasks:\n - {}"
                    .format(len(tasks_to_kill),
                            "\n - ".join(tasks_to_kill)))
        if args.force or query_yes_no("Continue?"):
            logger.info("Scaling down old app by {} instances"
                        .format(len(tasks_to_kill)))
            kill_marathon_tasks(args, tasks_to_kill)
        else:
            # Operator declined -- abort the whole deployment.
            return False
    if is_hybrid_deployment(args, new_app):
        if new_app['instances'] < get_new_instance_count(new_app):
            scale_new_app_instances(args, new_app, old_app)
    else:
        if new_app['instances'] < get_deployment_target(new_app):
            scale_new_app_instances(args, new_app, old_app)
    return (args, new_app, old_app)
def ready_to_delete_old_app(args, new_app, old_app, draining_task_ids):
    """Decide whether the swap is complete and the old app can go.

    Hybrid: done once the new app holds its pinned count and the old app
    holds target-minus-new instances.  Normal: done once the new app is
    at full target and every remaining old task is draining.
    """
    new_instances = get_new_instance_count(new_app)
    if is_hybrid_deployment(args, new_app):
        return (int(new_app['instances']) == new_instances and
                int(old_app['instances']) == (
                    get_deployment_target(old_app) - new_instances))
    else:
        return (int(new_app['instances']) == get_deployment_target(new_app) and
                len(draining_task_ids) == int(old_app['instances']))
def waiting_for_drained_listeners(listeners):
    """True until at least one listener has fully drained."""
    drained = select_drained_listeners(listeners)
    return len(drained) < 1
def scale_new_app_instances(args, new_app, old_app):
"""Scale the app by 50% of its existing instances until we
meet or surpass instances deployed for old_app.
At which point go right to the new_app deployment target
"""
instances = (math.floor(new_app['instances'] +
(new_app['instances'] + 1) / 2))
if is_hybrid_deployment(args, new_app):
if instances > get_new_instance_count(new_app):
instances = get_new_instance_count(new_app)
else:
if instances >= old_app['instances']:
instances = get_deployment_target(new_app)
logger.info("Scaling new app up to {} instances".format(instances))
return scale_marathon_app_instances(args, new_app, instances)
def safe_delete_app(args, app, new_app):
if is_hybrid_deployment(args, new_app):
logger.info("Not deleting old app, as its hybrid configuration")
return True
else:
logger.info("About to delete old app {}".format(app['id']))
if args.force or query_yes_no("Continue?"):
delete_marathon_app(args, app)
return True
else:
return False
def delete_marathon_app(args, app):
url = args.marathon + '/v2/apps' + app['id']
try:
response = requests.delete(url,
auth=get_marathon_auth_params(args))
response.raise_for_status()
except requests.exceptions.RequestException:
raise AppDeleteException(
"Error while deleting the app", url, traceback.format_exc())
return response
def kill_marathon_tasks(args, ids):
data = json.dumps({'ids': ids})
url = args.marathon + "/v2/tasks/delete?scale=true"
headers = {'Content-Type': 'application/json'}
try:
response = requests.post(url, headers=headers, data=data,
auth=get_marathon_auth_params(args))
response.raise_for_status()
except requests.exceptions.RequestException:
# This is App Scale Down, so raising AppScale Exception
raise AppScaleException(
"Error while scaling the app", url, data, traceback.format_exc())
return response
def scale_marathon_app_instances(args, app, instances):
url = args.marathon + "/v2/apps" + app['id']
data = json.dumps({'instances': instances})
headers = {'Content-Type': 'application/json'}
try:
response = requests.put(url, headers=headers, data=data,
auth=get_marathon_auth_params(args))
response.raise_for_status()
except requests.exceptions.RequestException:
# This is App Scale Up, so raising AppScale Exception
raise AppScaleException(
"Error while scaling the app", url, data, traceback.format_exc())
return response
def deploy_marathon_app(args, app):
url = args.marathon + "/v2/apps"
data = json.dumps(app)
headers = {'Content-Type': 'application/json'}
try:
response = requests.post(url, headers=headers, data=data,
auth=get_marathon_auth_params(args))
response.raise_for_status()
except requests.exceptions.RequestException:
raise AppCreateException(
"Error while creating the app", url, data, traceback.format_exc())
return response
def get_service_port(app):
try:
return \
int(app['container']['docker']['portMappings'][0]['servicePort'])
except KeyError:
try:
return \
int(app['portDefinitions'][0]['port'])
except KeyError:
return int(app['ports'][0])
def set_service_port(app, servicePort):
try:
app['container']['docker']['portMappings'][0]['servicePort'] = \
int(servicePort)
except KeyError:
app['ports'][0] = int(servicePort)
return app
def validate_app(app):
if app['id'] is None:
raise MissingFieldException("App doesn't contain a valid App ID",
'id')
if 'labels' not in app:
raise MissingFieldException("No labels found. Please define the"
" HAPROXY_DEPLOYMENT_GROUP label",
'label')
if 'HAPROXY_DEPLOYMENT_GROUP' not in app['labels']:
raise MissingFieldException("Please define the "
"HAPROXY_DEPLOYMENT_GROUP label",
'HAPROXY_DEPLOYMENT_GROUP')
if 'HAPROXY_DEPLOYMENT_ALT_PORT' not in app['labels']:
raise MissingFieldException("Please define the "
"HAPROXY_DEPLOYMENT_ALT_PORT label",
'HAPROXY_DEPLOYMENT_ALT_PORT')
def set_app_ids(app, colour):
app['labels']['HAPROXY_APP_ID'] = app['id']
app['id'] = app['id'] + '-' + colour
if app['id'][0] != '/':
app['id'] = '/' + app['id']
return app
def set_service_ports(app, servicePort):
app['labels']['HAPROXY_0_PORT'] = str(get_service_port(app))
try:
app['container']['docker']['portMappings'][0]['servicePort'] = \
int(servicePort)
return app
except KeyError:
app['ports'][0] = int(servicePort)
return app
def select_next_port(app):
alt_port = int(app['labels']['HAPROXY_DEPLOYMENT_ALT_PORT'])
if int(app['ports'][0]) == alt_port:
return int(app['labels']['HAPROXY_0_PORT'])
else:
return alt_port
def select_next_colour(app):
if app['labels'].get('HAPROXY_DEPLOYMENT_COLOUR') == 'blue':
return 'green'
else:
return 'blue'
def sort_deploys(apps):
return sorted(apps, key=lambda a: a.get('labels', {})
.get('HAPROXY_DEPLOYMENT_STARTED_AT', '0'))
def select_last_deploy(apps):
return sort_deploys(apps).pop()
def select_last_two_deploys(apps):
return sort_deploys(apps)[:-3:-1]
def get_deployment_group(app):
return app.get('labels', {}).get('HAPROXY_DEPLOYMENT_GROUP')
def fetch_previous_deploys(args, app):
apps = list_marathon_apps(args)
app_deployment_group = get_deployment_group(app)
return [a for a in apps if get_deployment_group(a) == app_deployment_group]
def prepare_deploy(args, previous_deploys, app):
""" Return a blue or a green version of `app` based on preexisting deploys
"""
if len(previous_deploys) > 0:
last_deploy = select_last_deploy(previous_deploys)
next_colour = select_next_colour(last_deploy)
next_port = select_next_port(last_deploy)
deployment_target_instances = last_deploy['instances']
if args.new_instances > deployment_target_instances:
args.new_instances = deployment_target_instances
if args.new_instances and args.new_instances > 0:
if args.initial_instances > args.new_instances:
app['instances'] = args.new_instances
else:
app['instances'] = args.initial_instances
else:
if args.initial_instances > deployment_target_instances:
app['instances'] = deployment_target_instances
else:
app['instances'] = args.initial_instances
app['labels']['HAPROXY_DEPLOYMENT_NEW_INSTANCES'] = str(
args.new_instances)
else:
next_colour = 'blue'
next_port = get_service_port(app)
deployment_target_instances = app['instances']
app['labels']['HAPROXY_DEPLOYMENT_NEW_INSTANCES'] = "0"
app = set_app_ids(app, next_colour)
app = set_service_ports(app, next_port)
app['labels']['HAPROXY_DEPLOYMENT_TARGET_INSTANCES'] = \
str(deployment_target_instances)
app['labels']['HAPROXY_DEPLOYMENT_COLOUR'] = next_colour
app['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'] = datetime.now().isoformat()
return app
def load_app_json(args):
with open(args.json) as content_file:
return json.load(content_file)
def safe_resume_deploy(args, previous_deploys):
if args.complete_cur:
logger.info("Converting all instances to current config")
new_app, old_app = select_last_two_deploys(previous_deploys)
logger.info("Current config color is %s" % new_app[
'labels']['HAPROXY_DEPLOYMENT_COLOUR'])
logger.info("Considering %s color as existing app"
% old_app['labels']['HAPROXY_DEPLOYMENT_COLOUR'] +
" and %s color as new app"
% new_app['labels']['HAPROXY_DEPLOYMENT_COLOUR'])
return swap_zdd_apps(args, new_app, old_app)
elif args.complete_prev:
logger.info("Converting all instances to previous config")
old_app, new_app = select_last_two_deploys(previous_deploys)
logger.info("Previous config color is %s" % new_app[
'labels']['HAPROXY_DEPLOYMENT_COLOUR'])
logger.info("Considering %s color as existing app"
% old_app['labels']['HAPROXY_DEPLOYMENT_COLOUR'] +
" and %s color as new app"
% new_app['labels']['HAPROXY_DEPLOYMENT_COLOUR'])
return swap_zdd_apps(args, new_app, old_app)
elif args.resume:
logger.info("Found previous deployment, resuming")
new_app, old_app = select_last_two_deploys(previous_deploys)
return swap_zdd_apps(args, new_app, old_app)
else:
raise Exception("There appears to be an"
" existing deployment in progress")
def do_zdd(args, out=sys.stdout):
app = load_app_json(args)
validate_app(app)
previous_deploys = fetch_previous_deploys(args, app)
if len(previous_deploys) > 1:
# There is a stuck deploy or hybrid deploy
return safe_resume_deploy(args, previous_deploys)
if args.complete_cur or args.complete_prev:
raise InvalidArgException("Cannot use --complete-cur, --complete-prev"
" flags when config is not hybrid")
new_app = prepare_deploy(args, previous_deploys, app)
logger.info('Final app definition:')
out.write(json.dumps(new_app, sort_keys=True, indent=2))
out.write("\n")
if args.dry_run:
return True
if args.force or query_yes_no("Continue with deployment?"):
deploy_marathon_app(args, new_app)
if len(previous_deploys) == 0:
# This was the first deploy, nothing to swap
return True
else:
# This is a standard blue/green deploy, swap new app with old
old_app = select_last_deploy(previous_deploys)
return swap_zdd_apps(args, new_app, old_app)
def get_arg_parser():
parser = argparse.ArgumentParser(
description="Zero-downtime deployment orchestrator for marathon-lb",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--longhelp",
help="Print out configuration details",
action="store_true"
)
parser.add_argument("--marathon", "-m",
help="[required] Marathon endpoint, eg. -m " +
"http://marathon1:8080"
)
parser.add_argument("--marathon-lb", "-l",
help="[required] Marathon-lb stats endpoint, eg. -l " +
"http://marathon-lb.marathon.mesos:9090"
)
parser.add_argument("--json", "-j",
help="[required] App JSON"
)
parser.add_argument("--dry-run", "-d",
help="Perform a dry run",
action="store_true"
)
parser.add_argument("--force", "-f",
help="Perform deployment un-prompted",
action="store_true"
)
parser.add_argument("--step-delay", "-s",
help="Delay (in seconds) between each successive"
" deployment step",
type=int, default=5
)
parser.add_argument("--initial-instances", "-i",
help="Initial number of app instances to launch."
" If this number is greater than total number of"
" existing instances, then this will be overridden"
" by the latter number",
type=int, default=1
)
parser.add_argument("--resume", "-r",
help="Resume from a previous deployment",
action="store_true"
)
parser.add_argument("--max-wait", "-w",
help="Maximum amount of time (in seconds) to wait"
" for HAProxy to drain connections",
type=int, default=300
)
parser.add_argument("--new-instances", "-n",
help="Number of new instances to replace the existing"
" instances. This is for having instances of both blue"
" and green at the same time",
type=int, default=0)
parser.add_argument("--complete-cur", "-c",
help="Change hybrid app entirely to"
" current (new) app's instances", action="store_true")
parser.add_argument("--complete-prev", "-p",
help="Change hybrid app entirely to"
" previous (old) app's instances", action="store_true")
parser.add_argument("--pre-kill-hook",
help="A path to an executable (such as a script) "
"which will be called before killing any tasks marked "
"for draining at each step. The script will be called "
"with 3 arguments (in JSON): the old app definition, "
"the list of tasks which will be killed, "
"and the new app definition. An exit "
"code of 0 indicates the deploy may continue. "
"If the hook returns a non-zero exit code, the deploy "
"will stop, and an operator must intervene."
)
parser = set_logging_args(parser)
parser = set_marathon_auth_args(parser)
return parser
def set_request_retries():
s = requests.Session()
a = requests.adapters.HTTPAdapter(max_retries=3)
s.mount('http://', a)
def process_arguments():
# Process arguments
arg_parser = get_arg_parser()
args = arg_parser.parse_args()
if args.longhelp:
print(__doc__)
sys.exit()
# otherwise make sure that a Marathon URL was specified
else:
if args.marathon is None:
arg_parser.error('argument --marathon/-m is required')
if args.marathon_lb is None:
arg_parser.error('argument --marathon-lb/-l is required')
if args.json is None:
arg_parser.error('argument --json/-j is required')
return args
if __name__ == '__main__':
args = process_arguments()
set_request_retries()
setup_logging(logger, args.syslog_socket, args.log_format, args.log_level)
try:
if do_zdd(args):
sys.exit(0)
else:
sys.exit(1)
except Exception as e:
if hasattr(e, 'zdd_exit_status'):
if hasattr(e, 'error'):
logger.exception(str(e.error))
else:
logger.exception(traceback.print_exc())
sys.exit(e.zdd_exit_status)
else:
# For Unknown Exceptions
logger.exception(traceback.print_exc())
sys.exit(2)
| 35.325694 | 82 | 0.623937 |
import argparse
import csv
import json
import logging
import math
import socket
import subprocess
import sys
import time
import traceback
from datetime import datetime
from collections import namedtuple
import requests
import six.moves.urllib as urllib
from common import (get_marathon_auth_params, set_logging_args,
set_marathon_auth_args, setup_logging)
from utils import get_task_ip_and_ports
from zdd_exceptions import (
AppCreateException, AppDeleteException, AppScaleException,
InvalidArgException, MarathonEndpointException,
MarathonLbEndpointException, MissingFieldException)
logger = logging.getLogger('zdd')
def query_yes_no(question, default="yes"):
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def marathon_get_request(args, path):
url = args.marathon + path
try:
response = requests.get(url, auth=get_marathon_auth_params(args))
response.raise_for_status()
except requests.exceptions.RequestException:
raise MarathonEndpointException(
"Error while querying marathon", url, traceback.format_exc())
return response
def list_marathon_apps(args):
response = marathon_get_request(args, "/v2/apps")
return response.json()['apps']
def fetch_marathon_app(args, app_id):
response = marathon_get_request(args, "/v2/apps" + app_id)
return response.json()['app']
def _get_alias_records(hostname):
return socket.gethostbyname_ex(hostname)[2]
def _unparse_url_alias(url, addr):
return urllib.parse.urlunparse((url[0],
addr + ":" + str(url.port),
url[2],
url[3],
url[4],
url[5]))
def get_marathon_lb_urls(args):
url = urllib.parse.urlparse(args.marathon_lb)
addrs = _get_alias_records(url.hostname)
return [_unparse_url_alias(url, addr) for addr in addrs]
def fetch_haproxy_pids(haproxy_url):
try:
response = requests.get(haproxy_url + "/_haproxy_getpids")
response.raise_for_status()
except requests.exceptions.RequestException:
logger.exception("Caught exception when retrieving HAProxy"
" pids from " + haproxy_url)
raise
return response.text.split()
def check_haproxy_reloading(haproxy_url):
try:
pids = fetch_haproxy_pids(haproxy_url)
except requests.exceptions.RequestException:
return True
if len(pids) > 1:
logger.info("Waiting for {} pids on {}".format(len(pids), haproxy_url))
return True
return False
def any_marathon_lb_reloading(marathon_lb_urls):
return any([check_haproxy_reloading(url) for url in marathon_lb_urls])
def fetch_haproxy_stats(haproxy_url):
try:
response = requests.get(haproxy_url + "/haproxy?stats;csv")
response.raise_for_status()
except requests.exceptions.RequestException:
logger.exception("Caught exception when retrieving HAProxy"
" stats from " + haproxy_url)
raise
return response.text
def fetch_combined_haproxy_stats(marathon_lb_urls):
raw = ''.join([fetch_haproxy_stats(url) for url in marathon_lb_urls])
return parse_haproxy_stats(raw)
def parse_haproxy_stats(csv_data):
rows = csv_data.splitlines()
headings = rows.pop(0).lstrip('# ').rstrip(',\n').split(',')
csv_reader = csv.reader(rows, delimiter=',', quotechar="'")
Row = namedtuple('Row', headings)
return [Row(*row[0:-1]) for row in csv_reader if row[0][0] != '
def get_deployment_label(app):
return get_deployment_group(app) + "_" + app['labels']['HAPROXY_0_PORT']
def _if_app_listener(app, listener):
return (listener.pxname == get_deployment_label(app) and
listener.svname not in ['BACKEND', 'FRONTEND'])
def fetch_app_listeners(app, marathon_lb_urls):
haproxy_stats = fetch_combined_haproxy_stats(marathon_lb_urls)
return [l for l in haproxy_stats if _if_app_listener(app, l)]
def waiting_for_listeners(new_app, old_app, listeners, haproxy_count):
listener_count = (len(listeners) / haproxy_count)
return listener_count != new_app['instances'] + old_app['instances']
def get_deployment_target(app):
if 'HAPROXY_DEPLOYMENT_TARGET_INSTANCES' in app['labels']:
return int(app['labels']['HAPROXY_DEPLOYMENT_TARGET_INSTANCES'])
else:
return app['instances']
def get_new_instance_count(app):
if 'HAPROXY_DEPLOYMENT_NEW_INSTANCES' in app['labels']:
return int(app['labels']['HAPROXY_DEPLOYMENT_NEW_INSTANCES'])
else:
return 0
def waiting_for_up_listeners(app, listeners, haproxy_count):
up_listeners = [l for l in listeners if l.status == 'UP']
up_listener_count = (len(up_listeners) / haproxy_count)
return up_listener_count < get_deployment_target(app)
def select_draining_listeners(listeners):
return [l for l in listeners if l.status == 'MAINT']
def select_drained_listeners(listeners):
draining_listeners = select_draining_listeners(listeners)
return [l for l in draining_listeners if not _has_pending_requests(l)]
def get_svnames_from_task(app, task):
prefix = task['host'].replace('.', '_')
task_ip, task_port = get_task_ip_and_ports(app, task)
if task['host'] == task_ip:
for port in task['ports']:
yield('{}_{}'.format(prefix, port))
else:
for port in task['ports']:
yield('{}_{}_{}'.format(prefix, task_ip.replace('.', '_'), port))
def get_svnames_from_tasks(app, tasks):
svnames = []
for task in tasks:
svnames += get_svnames_from_task(app, task)
return svnames
def _has_pending_requests(listener):
return int(listener.qcur or 0) > 0 or int(listener.scur or 0) > 0
def is_hybrid_deployment(args, app):
if (get_new_instance_count(app) != 0 and not args.complete_cur and
not args.complete_prev):
return True
else:
return False
def find_drained_task_ids(app, listeners, haproxy_count):
tasks = zip(get_svnames_from_tasks(app, app['tasks']), app['tasks'])
drained_listeners = select_drained_listeners(listeners)
drained_task_ids = []
for svname, task in tasks:
task_listeners = [l for l in drained_listeners if l.svname == svname]
if len(task_listeners) == haproxy_count:
drained_task_ids.append(task['id'])
return drained_task_ids
def find_draining_task_ids(app, listeners, haproxy_count):
tasks = zip(get_svnames_from_tasks(app, app['tasks']), app['tasks'])
draining_listeners = select_draining_listeners(listeners)
draining_task_ids = []
for svname, task in tasks:
task_listeners = [l for l in draining_listeners if l.svname == svname]
if len(task_listeners) == haproxy_count:
draining_task_ids.append(task['id'])
return draining_task_ids
def max_wait_not_exceeded(max_wait, timestamp):
return time.time() - timestamp < max_wait
def find_tasks_to_kill(args, new_app, old_app, timestamp):
marathon_lb_urls = get_marathon_lb_urls(args)
haproxy_count = len(marathon_lb_urls)
try:
listeners = fetch_app_listeners(new_app, marathon_lb_urls)
except requests.exceptions.RequestException:
raise MarathonLbEndpointException(
"Error while querying Marathon-LB",
marathon_lb_urls,
traceback.format_exc())
while max_wait_not_exceeded(args.max_wait, timestamp):
time.sleep(args.step_delay)
logger.info("Existing app running {} instances, "
"new app running {} instances"
.format(old_app['instances'], new_app['instances']))
if any_marathon_lb_reloading(marathon_lb_urls):
continue
try:
listeners = fetch_app_listeners(new_app, marathon_lb_urls)
except requests.exceptions.RequestException:
# Restart loop if we hit an exception while loading listeners,
# this may be normal behaviour
continue
logger.info("Found {} app listeners across {} HAProxy instances"
.format(len(listeners), haproxy_count))
if waiting_for_listeners(new_app, old_app, listeners, haproxy_count):
continue
if waiting_for_up_listeners(new_app, listeners, haproxy_count):
continue
if waiting_for_drained_listeners(listeners):
continue
return find_drained_task_ids(old_app, listeners, haproxy_count)
logger.info('Timed out waiting for tasks to fully drain, find any draining'
' tasks and continue with deployment...')
return find_draining_task_ids(old_app, listeners, haproxy_count)
def deployment_in_progress(app):
return len(app['deployments']) > 0
def execute_pre_kill_hook(args, old_app, tasks_to_kill, new_app):
if args.pre_kill_hook is not None:
logger.info("Calling pre-kill hook '{}'".format(args.pre_kill_hook))
subprocess.check_call([args.pre_kill_hook,
json.dumps(old_app),
json.dumps(tasks_to_kill),
json.dumps(new_app)])
def swap_zdd_apps(args, new_app, old_app):
func_args = (args, new_app, old_app)
while True:
res = _swap_zdd_apps(func_args[0], func_args[1], func_args[2])
if isinstance(res, bool):
return res
func_args = res
def _swap_zdd_apps(args, new_app, old_app):
old_app = fetch_marathon_app(args, old_app['id'])
new_app = fetch_marathon_app(args, new_app['id'])
if deployment_in_progress(new_app):
time.sleep(args.step_delay)
return args, new_app, old_app
tasks_to_kill = find_tasks_to_kill(args, new_app, old_app, time.time())
if ready_to_delete_old_app(args, new_app, old_app, tasks_to_kill):
return safe_delete_app(args, old_app, new_app)
if len(tasks_to_kill) > 0:
execute_pre_kill_hook(args, old_app, tasks_to_kill, new_app)
logger.info("There are {} draining listeners, "
"about to kill the following tasks:\n - {}"
.format(len(tasks_to_kill),
"\n - ".join(tasks_to_kill)))
if args.force or query_yes_no("Continue?"):
logger.info("Scaling down old app by {} instances"
.format(len(tasks_to_kill)))
kill_marathon_tasks(args, tasks_to_kill)
else:
return False
if is_hybrid_deployment(args, new_app):
if new_app['instances'] < get_new_instance_count(new_app):
scale_new_app_instances(args, new_app, old_app)
else:
if new_app['instances'] < get_deployment_target(new_app):
scale_new_app_instances(args, new_app, old_app)
return (args, new_app, old_app)
def ready_to_delete_old_app(args, new_app, old_app, draining_task_ids):
new_instances = get_new_instance_count(new_app)
if is_hybrid_deployment(args, new_app):
return (int(new_app['instances']) == new_instances and
int(old_app['instances']) == (
get_deployment_target(old_app) - new_instances))
else:
return (int(new_app['instances']) == get_deployment_target(new_app) and
len(draining_task_ids) == int(old_app['instances']))
def waiting_for_drained_listeners(listeners):
return len(select_drained_listeners(listeners)) < 1
def scale_new_app_instances(args, new_app, old_app):
instances = (math.floor(new_app['instances'] +
(new_app['instances'] + 1) / 2))
if is_hybrid_deployment(args, new_app):
if instances > get_new_instance_count(new_app):
instances = get_new_instance_count(new_app)
else:
if instances >= old_app['instances']:
instances = get_deployment_target(new_app)
logger.info("Scaling new app up to {} instances".format(instances))
return scale_marathon_app_instances(args, new_app, instances)
def safe_delete_app(args, app, new_app):
if is_hybrid_deployment(args, new_app):
logger.info("Not deleting old app, as its hybrid configuration")
return True
else:
logger.info("About to delete old app {}".format(app['id']))
if args.force or query_yes_no("Continue?"):
delete_marathon_app(args, app)
return True
else:
return False
def delete_marathon_app(args, app):
url = args.marathon + '/v2/apps' + app['id']
try:
response = requests.delete(url,
auth=get_marathon_auth_params(args))
response.raise_for_status()
except requests.exceptions.RequestException:
raise AppDeleteException(
"Error while deleting the app", url, traceback.format_exc())
return response
def kill_marathon_tasks(args, ids):
data = json.dumps({'ids': ids})
url = args.marathon + "/v2/tasks/delete?scale=true"
headers = {'Content-Type': 'application/json'}
try:
response = requests.post(url, headers=headers, data=data,
auth=get_marathon_auth_params(args))
response.raise_for_status()
except requests.exceptions.RequestException:
# This is App Scale Down, so raising AppScale Exception
raise AppScaleException(
"Error while scaling the app", url, data, traceback.format_exc())
return response
def scale_marathon_app_instances(args, app, instances):
url = args.marathon + "/v2/apps" + app['id']
data = json.dumps({'instances': instances})
headers = {'Content-Type': 'application/json'}
try:
response = requests.put(url, headers=headers, data=data,
auth=get_marathon_auth_params(args))
response.raise_for_status()
except requests.exceptions.RequestException:
# This is App Scale Up, so raising AppScale Exception
raise AppScaleException(
"Error while scaling the app", url, data, traceback.format_exc())
return response
def deploy_marathon_app(args, app):
url = args.marathon + "/v2/apps"
data = json.dumps(app)
headers = {'Content-Type': 'application/json'}
try:
response = requests.post(url, headers=headers, data=data,
auth=get_marathon_auth_params(args))
response.raise_for_status()
except requests.exceptions.RequestException:
raise AppCreateException(
"Error while creating the app", url, data, traceback.format_exc())
return response
def get_service_port(app):
try:
return \
int(app['container']['docker']['portMappings'][0]['servicePort'])
except KeyError:
try:
return \
int(app['portDefinitions'][0]['port'])
except KeyError:
return int(app['ports'][0])
def set_service_port(app, servicePort):
try:
app['container']['docker']['portMappings'][0]['servicePort'] = \
int(servicePort)
except KeyError:
app['ports'][0] = int(servicePort)
return app
def validate_app(app):
if app['id'] is None:
raise MissingFieldException("App doesn't contain a valid App ID",
'id')
if 'labels' not in app:
raise MissingFieldException("No labels found. Please define the"
" HAPROXY_DEPLOYMENT_GROUP label",
'label')
if 'HAPROXY_DEPLOYMENT_GROUP' not in app['labels']:
raise MissingFieldException("Please define the "
"HAPROXY_DEPLOYMENT_GROUP label",
'HAPROXY_DEPLOYMENT_GROUP')
if 'HAPROXY_DEPLOYMENT_ALT_PORT' not in app['labels']:
raise MissingFieldException("Please define the "
"HAPROXY_DEPLOYMENT_ALT_PORT label",
'HAPROXY_DEPLOYMENT_ALT_PORT')
def set_app_ids(app, colour):
app['labels']['HAPROXY_APP_ID'] = app['id']
app['id'] = app['id'] + '-' + colour
if app['id'][0] != '/':
app['id'] = '/' + app['id']
return app
def set_service_ports(app, servicePort):
app['labels']['HAPROXY_0_PORT'] = str(get_service_port(app))
try:
app['container']['docker']['portMappings'][0]['servicePort'] = \
int(servicePort)
return app
except KeyError:
app['ports'][0] = int(servicePort)
return app
def select_next_port(app):
alt_port = int(app['labels']['HAPROXY_DEPLOYMENT_ALT_PORT'])
if int(app['ports'][0]) == alt_port:
return int(app['labels']['HAPROXY_0_PORT'])
else:
return alt_port
def select_next_colour(app):
if app['labels'].get('HAPROXY_DEPLOYMENT_COLOUR') == 'blue':
return 'green'
else:
return 'blue'
def sort_deploys(apps):
return sorted(apps, key=lambda a: a.get('labels', {})
.get('HAPROXY_DEPLOYMENT_STARTED_AT', '0'))
def select_last_deploy(apps):
return sort_deploys(apps).pop()
def select_last_two_deploys(apps):
return sort_deploys(apps)[:-3:-1]
def get_deployment_group(app):
return app.get('labels', {}).get('HAPROXY_DEPLOYMENT_GROUP')
def fetch_previous_deploys(args, app):
apps = list_marathon_apps(args)
app_deployment_group = get_deployment_group(app)
return [a for a in apps if get_deployment_group(a) == app_deployment_group]
def prepare_deploy(args, previous_deploys, app):
if len(previous_deploys) > 0:
last_deploy = select_last_deploy(previous_deploys)
next_colour = select_next_colour(last_deploy)
next_port = select_next_port(last_deploy)
deployment_target_instances = last_deploy['instances']
if args.new_instances > deployment_target_instances:
args.new_instances = deployment_target_instances
if args.new_instances and args.new_instances > 0:
if args.initial_instances > args.new_instances:
app['instances'] = args.new_instances
else:
app['instances'] = args.initial_instances
else:
if args.initial_instances > deployment_target_instances:
app['instances'] = deployment_target_instances
else:
app['instances'] = args.initial_instances
app['labels']['HAPROXY_DEPLOYMENT_NEW_INSTANCES'] = str(
args.new_instances)
else:
next_colour = 'blue'
next_port = get_service_port(app)
deployment_target_instances = app['instances']
app['labels']['HAPROXY_DEPLOYMENT_NEW_INSTANCES'] = "0"
app = set_app_ids(app, next_colour)
app = set_service_ports(app, next_port)
app['labels']['HAPROXY_DEPLOYMENT_TARGET_INSTANCES'] = \
str(deployment_target_instances)
app['labels']['HAPROXY_DEPLOYMENT_COLOUR'] = next_colour
app['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'] = datetime.now().isoformat()
return app
def load_app_json(args):
with open(args.json) as content_file:
return json.load(content_file)
def safe_resume_deploy(args, previous_deploys):
if args.complete_cur:
logger.info("Converting all instances to current config")
new_app, old_app = select_last_two_deploys(previous_deploys)
logger.info("Current config color is %s" % new_app[
'labels']['HAPROXY_DEPLOYMENT_COLOUR'])
logger.info("Considering %s color as existing app"
% old_app['labels']['HAPROXY_DEPLOYMENT_COLOUR'] +
" and %s color as new app"
% new_app['labels']['HAPROXY_DEPLOYMENT_COLOUR'])
return swap_zdd_apps(args, new_app, old_app)
elif args.complete_prev:
logger.info("Converting all instances to previous config")
old_app, new_app = select_last_two_deploys(previous_deploys)
logger.info("Previous config color is %s" % new_app[
'labels']['HAPROXY_DEPLOYMENT_COLOUR'])
logger.info("Considering %s color as existing app"
% old_app['labels']['HAPROXY_DEPLOYMENT_COLOUR'] +
" and %s color as new app"
% new_app['labels']['HAPROXY_DEPLOYMENT_COLOUR'])
return swap_zdd_apps(args, new_app, old_app)
elif args.resume:
logger.info("Found previous deployment, resuming")
new_app, old_app = select_last_two_deploys(previous_deploys)
return swap_zdd_apps(args, new_app, old_app)
else:
raise Exception("There appears to be an"
" existing deployment in progress")
def do_zdd(args, out=sys.stdout):
app = load_app_json(args)
validate_app(app)
previous_deploys = fetch_previous_deploys(args, app)
if len(previous_deploys) > 1:
return safe_resume_deploy(args, previous_deploys)
if args.complete_cur or args.complete_prev:
raise InvalidArgException("Cannot use --complete-cur, --complete-prev"
" flags when config is not hybrid")
new_app = prepare_deploy(args, previous_deploys, app)
logger.info('Final app definition:')
out.write(json.dumps(new_app, sort_keys=True, indent=2))
out.write("\n")
if args.dry_run:
return True
if args.force or query_yes_no("Continue with deployment?"):
deploy_marathon_app(args, new_app)
if len(previous_deploys) == 0:
return True
else:
old_app = select_last_deploy(previous_deploys)
return swap_zdd_apps(args, new_app, old_app)
def get_arg_parser():
parser = argparse.ArgumentParser(
description="Zero-downtime deployment orchestrator for marathon-lb",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--longhelp",
help="Print out configuration details",
action="store_true"
)
parser.add_argument("--marathon", "-m",
help="[required] Marathon endpoint, eg. -m " +
"http://marathon1:8080"
)
parser.add_argument("--marathon-lb", "-l",
help="[required] Marathon-lb stats endpoint, eg. -l " +
"http://marathon-lb.marathon.mesos:9090"
)
parser.add_argument("--json", "-j",
help="[required] App JSON"
)
parser.add_argument("--dry-run", "-d",
help="Perform a dry run",
action="store_true"
)
parser.add_argument("--force", "-f",
help="Perform deployment un-prompted",
action="store_true"
)
parser.add_argument("--step-delay", "-s",
help="Delay (in seconds) between each successive"
" deployment step",
type=int, default=5
)
parser.add_argument("--initial-instances", "-i",
help="Initial number of app instances to launch."
" If this number is greater than total number of"
" existing instances, then this will be overridden"
" by the latter number",
type=int, default=1
)
parser.add_argument("--resume", "-r",
help="Resume from a previous deployment",
action="store_true"
)
parser.add_argument("--max-wait", "-w",
help="Maximum amount of time (in seconds) to wait"
" for HAProxy to drain connections",
type=int, default=300
)
parser.add_argument("--new-instances", "-n",
help="Number of new instances to replace the existing"
" instances. This is for having instances of both blue"
" and green at the same time",
type=int, default=0)
parser.add_argument("--complete-cur", "-c",
help="Change hybrid app entirely to"
" current (new) app's instances", action="store_true")
parser.add_argument("--complete-prev", "-p",
help="Change hybrid app entirely to"
" previous (old) app's instances", action="store_true")
parser.add_argument("--pre-kill-hook",
help="A path to an executable (such as a script) "
"which will be called before killing any tasks marked "
"for draining at each step. The script will be called "
"with 3 arguments (in JSON): the old app definition, "
"the list of tasks which will be killed, "
"and the new app definition. An exit "
"code of 0 indicates the deploy may continue. "
"If the hook returns a non-zero exit code, the deploy "
"will stop, and an operator must intervene."
)
parser = set_logging_args(parser)
parser = set_marathon_auth_args(parser)
return parser
def set_request_retries():
    """Build a requests Session whose HTTP transport retries up to 3 times.

    Returns:
        requests.Session: session with an HTTPAdapter(max_retries=3) mounted
        for ``http://`` URLs.

    Bug fix: previously the session was created and immediately discarded, so
    the mounted retry adapter never applied to anything.  Mounting an adapter
    only affects that Session object (module-level ``requests.get`` calls are
    unaffected), so the session is now returned for callers to use.  Callers
    that ignore the return value behave exactly as before.
    """
    s = requests.Session()
    a = requests.adapters.HTTPAdapter(max_retries=3)
    s.mount('http://', a)
    return s
def process_arguments():
    """Parse the CLI arguments and enforce the "required" options.

    ``--longhelp`` prints the module docstring and exits; otherwise the
    options that argparse cannot mark required (because --longhelp must
    work without them) are validated here.
    """
    arg_parser = get_arg_parser()
    args = arg_parser.parse_args()

    if args.longhelp:
        print(__doc__)
        sys.exit()

    # Manually-enforced required options, checked in a fixed order so the
    # first missing one is reported (arg_parser.error exits).
    required = (
        ("marathon", "--marathon/-m"),
        ("marathon_lb", "--marathon-lb/-l"),
        ("json", "--json/-j"),
    )
    for attr, flags in required:
        if getattr(args, attr) is None:
            arg_parser.error("argument %s is required" % flags)

    return args
if __name__ == '__main__':
    # Script entry point: parse args, configure HTTP retries and logging,
    # then run the zero-downtime deployment.
    args = process_arguments()
    set_request_retries()
    setup_logging(logger, args.syslog_socket, args.log_format, args.log_level)
    try:
        # do_zdd performs the deployment; truthy result means success.
        if do_zdd(args):
            sys.exit(0)
        else:
            sys.exit(1)
    except Exception as e:
        # Exceptions carrying a zdd_exit_status propagate their own exit code.
        if hasattr(e, 'zdd_exit_status'):
            if hasattr(e, 'error'):
                logger.exception(str(e.error))
            else:
                # Bug fix: the old code called
                # logger.exception(traceback.print_exc()), but print_exc()
                # writes to stderr and returns None, so the log message was
                # the literal string "None".  logger.exception already
                # attaches the active traceback to the record.
                logger.exception(str(e))
            sys.exit(e.zdd_exit_status)
        else:
            logger.exception(str(e))
            sys.exit(2)
| true | true |
f726fa1e73e6103ef46be0193e0b17c20617c6fb | 2,819 | py | Python | generator/modules/caffe.py | kklemon/deepo | 038063faf9a4c883a853aac77471e859f61b0d0a | [
"MIT"
] | null | null | null | generator/modules/caffe.py | kklemon/deepo | 038063faf9a4c883a853aac77471e859f61b0d0a | [
"MIT"
] | null | null | null | generator/modules/caffe.py | kklemon/deepo | 038063faf9a4c883a853aac77471e859f61b0d0a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .__module__ import Module, dependency, source
from .tools import Tools
from .boost import Boost
from .python import Python
from .opencv import Opencv
@dependency(Tools, Python, Boost, Opencv)
@source('git')
class Caffe(Module):
    """Composer module emitting the Dockerfile shell fragment that clones,
    configures (via sed edits to Makefile.config) and builds BVLC Caffe
    from source, then installs it under /usr/local.
    """

    def build(self):
        # Python version selected for the image, e.g. '2.7', '3.5' or '3.6'.
        pyver = self.composer.ver(Python)
        # No CUDA version configured for the image -> CPU-only Caffe build.
        cpu_only = self.composer.cuda_ver is None
        # The fragment is assembled from conditional pieces: CPU/cuDNN
        # toggle, Python-version patches, optional NCCL/arch tweaks for GPU
        # builds, HDF5 path fixes, the make step, and the final install.
        return (r'''
    $GIT_CLONE https://github.com/BVLC/caffe ~/caffe && \
    cp ~/caffe/Makefile.config.example ~/caffe/Makefile.config && \
    sed -i 's/# %s/%s/g' ~/caffe/Makefile.config && \
''' % (
            ('CPU_ONLY', 'CPU_ONLY') if cpu_only else \
            ('USE_CUDNN', 'USE_CUDNN') \
        )).rstrip() + (
            '' if pyver == '2.7' else r'''
    sed -i 's/# PYTHON_LIBRARIES/PYTHON_LIBRARIES/g' '''
            + r'''~/caffe/Makefile.config && \
'''.rstrip()
        ) + r'''
    sed -i 's/# WITH_PYTHON_LAYER/WITH_PYTHON_LAYER/g' ''' \
            + r'''~/caffe/Makefile.config && \
    sed -i 's/# OPENCV_VERSION/OPENCV_VERSION/g' ''' \
            + r'''~/caffe/Makefile.config && \
'''.rstrip() + (
            r'' if cpu_only else r'''
    sed -i 's/# USE_NCCL/USE_NCCL/g' ~/caffe/Makefile.config && \
    sed -i 's/-gencode arch=compute_20,code=sm_20//g' ~/caffe/Makefile.config && \
    sed -i 's/-gencode arch=compute_20,code=sm_21//g' ~/caffe/Makefile.config && \
'''.rstrip()
        ) + (r'''
    sed -i 's/2\.7/3\.5/g' ~/caffe/Makefile.config && \
''' if pyver == '3.5' else (
            r'''
    sed -i 's/2\.7/3\.6/g' ~/caffe/Makefile.config && \
    sed -i 's/3\.5/3\.6/g' ~/caffe/Makefile.config && \
''' if pyver == '3.6' else
            r'''
'''
        )).rstrip() + r'''
    sed -i 's/\/usr\/lib\/python/\/usr\/local\/lib\/python/g' ''' \
            + r'''~/caffe/Makefile.config && \
    sed -i 's/\/usr\/local\/include/\/usr\/local\/include ''' \
            + r'''\/usr\/include\/hdf5\/serial/g' ~/caffe/Makefile.config && \
    sed -i 's/hdf5/hdf5_serial/g' ~/caffe/Makefile && \
    cd ~/caffe && \
    make -j"$(nproc)" -Wno-deprecated-gpu-targets distribute && \
    # fix ValueError caused by python-dateutil 1.x
    sed -i 's/,<2//g' ~/caffe/python/requirements.txt && \
    $PIP_INSTALL \
        -r ~/caffe/python/requirements.txt && \
    cd ~/caffe/distribute/bin && \
    for file in *.bin; do mv "$file" "${file%%%%.bin}"; done && \
    cd ~/caffe/distribute && \
    cp -r bin include lib proto /usr/local/ && \
    cp -r python/caffe /usr/local/lib/python%s/dist-packages/ && \
''' % pyver
| 40.855072 | 90 | 0.496985 |
from .__module__ import Module, dependency, source
from .tools import Tools
from .boost import Boost
from .python import Python
from .opencv import Opencv
@dependency(Tools, Python, Boost, Opencv)
@source('git')
class Caffe(Module):
    """Composer module emitting the Dockerfile shell fragment that clones,
    configures (via sed edits to Makefile.config) and builds BVLC Caffe
    from source, then installs it under /usr/local.
    """

    def build(self):
        # Python version selected for the image, e.g. '2.7', '3.5' or '3.6'.
        pyver = self.composer.ver(Python)
        # No CUDA version configured for the image -> CPU-only Caffe build.
        cpu_only = self.composer.cuda_ver is None
        # The fragment is assembled from conditional pieces: CPU/cuDNN
        # toggle, Python-version patches, optional NCCL/arch tweaks for GPU
        # builds, HDF5 path fixes, the make step, and the final install.
        return (r'''
    $GIT_CLONE https://github.com/BVLC/caffe ~/caffe && \
    cp ~/caffe/Makefile.config.example ~/caffe/Makefile.config && \
    sed -i 's/# %s/%s/g' ~/caffe/Makefile.config && \
''' % (
            ('CPU_ONLY', 'CPU_ONLY') if cpu_only else \
            ('USE_CUDNN', 'USE_CUDNN') \
        )).rstrip() + (
            '' if pyver == '2.7' else r'''
    sed -i 's/# PYTHON_LIBRARIES/PYTHON_LIBRARIES/g' '''
            + r'''~/caffe/Makefile.config && \
'''.rstrip()
        ) + r'''
    sed -i 's/# WITH_PYTHON_LAYER/WITH_PYTHON_LAYER/g' ''' \
            + r'''~/caffe/Makefile.config && \
    sed -i 's/# OPENCV_VERSION/OPENCV_VERSION/g' ''' \
            + r'''~/caffe/Makefile.config && \
'''.rstrip() + (
            r'' if cpu_only else r'''
    sed -i 's/# USE_NCCL/USE_NCCL/g' ~/caffe/Makefile.config && \
    sed -i 's/-gencode arch=compute_20,code=sm_20//g' ~/caffe/Makefile.config && \
    sed -i 's/-gencode arch=compute_20,code=sm_21//g' ~/caffe/Makefile.config && \
'''.rstrip()
        ) + (r'''
    sed -i 's/2\.7/3\.5/g' ~/caffe/Makefile.config && \
''' if pyver == '3.5' else (
            r'''
    sed -i 's/2\.7/3\.6/g' ~/caffe/Makefile.config && \
    sed -i 's/3\.5/3\.6/g' ~/caffe/Makefile.config && \
''' if pyver == '3.6' else
            r'''
'''
        )).rstrip() + r'''
    sed -i 's/\/usr\/lib\/python/\/usr\/local\/lib\/python/g' ''' \
            + r'''~/caffe/Makefile.config && \
    sed -i 's/\/usr\/local\/include/\/usr\/local\/include ''' \
            + r'''\/usr\/include\/hdf5\/serial/g' ~/caffe/Makefile.config && \
    sed -i 's/hdf5/hdf5_serial/g' ~/caffe/Makefile && \
    cd ~/caffe && \
    make -j"$(nproc)" -Wno-deprecated-gpu-targets distribute && \
    # fix ValueError caused by python-dateutil 1.x
    sed -i 's/,<2//g' ~/caffe/python/requirements.txt && \
    $PIP_INSTALL \
        -r ~/caffe/python/requirements.txt && \
    cd ~/caffe/distribute/bin && \
    for file in *.bin; do mv "$file" "${file%%%%.bin}"; done && \
    cd ~/caffe/distribute && \
    cp -r bin include lib proto /usr/local/ && \
    cp -r python/caffe /usr/local/lib/python%s/dist-packages/ && \
''' % pyver
| true | true |
f726fac98d42191736a2bb1553a3990d3286b9b1 | 4,770 | py | Python | surfpy/simplegribmessage.py | mjmayank1/surfpy | 969b1a626db7606a42fab0eae445fcb351d6cbcd | [
"MIT"
] | 46 | 2018-04-08T15:56:32.000Z | 2022-01-05T17:36:55.000Z | surfpy/simplegribmessage.py | mjmayank1/surfpy | 969b1a626db7606a42fab0eae445fcb351d6cbcd | [
"MIT"
] | 13 | 2017-08-15T13:12:10.000Z | 2021-03-23T09:09:04.000Z | surfpy/simplegribmessage.py | mjmayank1/surfpy | 969b1a626db7606a42fab0eae445fcb351d6cbcd | [
"MIT"
try:
    # grippy is an optional dependency; without it, GRIB messages cannot
    # be constructed (Message stays None).
    from grippy.message import Message
except ImportError:
    # Bug fix: a bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only a failed import should be tolerated here.
    Message = None
from .location import Location
import math
import datetime
from . import tools
class SimpleGribMessage(Message):
    """Convenience wrapper around a single GRIB2 message.

    Exposes fields of the underlying grippy ``Message`` sections as simple
    properties and adds helpers to map between flat data indices and
    geographic locations on the message's lat/lon grid.
    """

    def __init__(self, data, offset):
        # data: raw bytes of a GRIB file; offset: byte offset of this message
        super(SimpleGribMessage, self).__init__(data, offset)

    @property
    def model_time(self):
        """Reference (model run) datetime from the identification section."""
        return self.sections[self.IDENTIFICATION_SECTION_INDEX].reference_date

    @property
    def hour(self):
        """Forecast lead time in hours from the product definition section."""
        return self.sections[self.PRODUCT_DEFINITION_SECTION_INDEX].template.forecast_time

    @property
    def forecast_time(self):
        """Valid datetime of this message: model time plus lead hours."""
        forc_time = self.model_time
        return forc_time + datetime.timedelta(hours=self.hour)

    @property
    def var(self):
        """Abbreviated GRIB parameter name."""
        return self.sections[self.PRODUCT_DEFINITION_SECTION_INDEX].template.parameter_number.abbrev

    @property
    def is_array_var(self):
        """True when the first fixed surface type is 241 (sequence member)."""
        return self.sections[self.PRODUCT_DEFINITION_SECTION_INDEX].template.first_fixed_surface_type_value == 241

    @property
    def var_index(self):
        """Index within an array variable, or -1 for scalar variables."""
        if not self.is_array_var:
            return -1
        return self.sections[self.PRODUCT_DEFINITION_SECTION_INDEX].template.first_fixed_surface_scaled_value

    @property
    def lat_count(self):
        """Number of grid points along a meridian."""
        return self.sections[self.GRID_DEFINITION_SECTION_INDEX].template.meridian_point_count

    @property
    def lon_count(self):
        """Number of grid points along a parallel."""
        return self.sections[self.GRID_DEFINITION_SECTION_INDEX].template.parallel_point_count

    @property
    def start_lat(self):
        return self.sections[self.GRID_DEFINITION_SECTION_INDEX].template.start_latitude

    @property
    def start_lon(self):
        return self.sections[self.GRID_DEFINITION_SECTION_INDEX].template.start_longitude

    @property
    def lat_step(self):
        # NOTE(review): i_direction increment is taken as the latitude step
        # and j_direction as the longitude step below -- confirm with grippy.
        return self.sections[self.GRID_DEFINITION_SECTION_INDEX].template.i_direction_increment

    @property
    def lon_step(self):
        return self.sections[self.GRID_DEFINITION_SECTION_INDEX].template.j_direction_increment

    @property
    def end_lat(self):
        return self.sections[self.GRID_DEFINITION_SECTION_INDEX].template.end_latitude

    @property
    def end_lon(self):
        return self.sections[self.GRID_DEFINITION_SECTION_INDEX].template.end_longitude

    @property
    def lat_indices(self):
        """All grid latitudes: start_lat advanced in lat_step increments."""
        start = self.start_lat
        step = self.lat_step
        count = self.lat_count
        return list([start + x*step for x in range(0, count)])

    @property
    def lon_indices(self):
        """All grid longitudes: start_lon advanced in lon_step increments."""
        start = self.start_lon
        step = self.lon_step
        count = self.lon_count
        return list([start + x*step for x in range(0, count)])

    @property
    def origin_location(self):
        """Center of the grid: midpoint of the lat/lon extents."""
        lat = (self.start_lat + self.end_lat) * 0.5
        lon = (self.start_lon + self.end_lon) * 0.5
        return Location(lat, lon)

    def location_for_index(self, index):
        """Map a flat data index to its grid Location.

        Returns a NaN Location named 'invalid' when index is out of range.
        """
        if index >= self.lat_count*self.lon_count:
            return Location(float('NaN'), float('NaN'), 'invalid')
        # NOTE(review): this divides and mods by lat_count while
        # index_for_location strides by lon_count; the two only agree when
        # lat_count == lon_count -- verify the intended row-major layout.
        lat_index = int(index/self.lat_count)
        lon_index = index % self.lat_count
        return Location(self.start_lat + (lat_index*self.lat_step), self.start_lon + (lon_index*self.lon_step))

    def index_for_location(self, location):
        """Map a Location to the flat index of the nearest grid point.

        Returns -1 when the location falls outside the grid extents.
        """
        if location.latitude < self.start_lat or location.latitude > self.end_lat:
            return -1
        elif location.absolute_longitude < self.start_lon or location.absolute_longitude > self.end_lon:
            return -1
        closest_lat_index = tools.closest_index(self.lat_indices, location.latitude)
        closest_lon_index = tools.closest_index(self.lon_indices, location.absolute_longitude)
        return closest_lat_index*self.lon_count+closest_lon_index

    @property
    def data(self):
        """All scaled data values with the bitmap applied.

        NOTE(review): data_mean below filters NaNs, which suggests masked
        points come back as NaN from all_scaled_values -- confirm in grippy.
        """
        return self.sections[self.DATA_SECTION_INDEX].all_scaled_values(self.sections[self.BITMAP_SECTION_INDEX].all_bit_truths)

    @property
    def data_mean(self):
        """Mean of all non-NaN data values; 0 when every value is missing."""
        all_data = [x for x in self.data if not math.isnan(x)]
        if len(all_data) < 1:
            return 0
        return sum(all_data)/float(len(all_data))
def read_simple_grib_messages_raw(all_data, count=-1):
    """Parse consecutive GRIB messages from an in-memory bytes buffer.

    Args:
        all_data: raw bytes containing back-to-back GRIB messages.
        count: maximum number of messages to parse; <= 0 means all.

    Returns:
        list of SimpleGribMessage parsed in file order.
    """
    messages = []
    total = len(all_data)
    offset = 0
    # Each message records its own length, which advances the cursor.
    while offset < total and (count <= 0 or len(messages) < count):
        message = SimpleGribMessage(all_data, offset)
        messages.append(message)
        offset += message.length
    return messages
def read_simple_grib_messages(filename, count=-1):
    """Read up to ``count`` GRIB messages from a file on disk.

    Args:
        filename: path to a GRIB file.
        count: maximum number of messages to parse; <= 0 means all.

    Returns:
        list of SimpleGribMessage parsed in file order.

    The parsing loop previously duplicated read_simple_grib_messages_raw
    verbatim; it now delegates to it instead.
    """
    with open(filename, 'rb') as stream:
        all_data = stream.read()
    return read_simple_grib_messages_raw(all_data, count)
try:
    # grippy is an optional dependency; without it, GRIB messages cannot
    # be constructed (Message stays None).
    from grippy.message import Message
except ImportError:
    # Bug fix: a bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only a failed import should be tolerated here.
    Message = None
from .location import Location
import math
import datetime
from . import tools
class SimpleGribMessage(Message):
    """Convenience wrapper around a single GRIB2 message.

    Exposes fields of the underlying grippy ``Message`` sections as simple
    properties and adds helpers to map between flat data indices and
    geographic locations on the message's lat/lon grid.
    """

    def __init__(self, data, offset):
        # data: raw bytes of a GRIB file; offset: byte offset of this message
        super(SimpleGribMessage, self).__init__(data, offset)

    @property
    def model_time(self):
        """Reference (model run) datetime from the identification section."""
        return self.sections[self.IDENTIFICATION_SECTION_INDEX].reference_date

    @property
    def hour(self):
        """Forecast lead time in hours from the product definition section."""
        return self.sections[self.PRODUCT_DEFINITION_SECTION_INDEX].template.forecast_time

    @property
    def forecast_time(self):
        """Valid datetime of this message: model time plus lead hours."""
        forc_time = self.model_time
        return forc_time + datetime.timedelta(hours=self.hour)

    @property
    def var(self):
        """Abbreviated GRIB parameter name."""
        return self.sections[self.PRODUCT_DEFINITION_SECTION_INDEX].template.parameter_number.abbrev

    @property
    def is_array_var(self):
        """True when the first fixed surface type is 241 (sequence member)."""
        return self.sections[self.PRODUCT_DEFINITION_SECTION_INDEX].template.first_fixed_surface_type_value == 241

    @property
    def var_index(self):
        """Index within an array variable, or -1 for scalar variables."""
        if not self.is_array_var:
            return -1
        return self.sections[self.PRODUCT_DEFINITION_SECTION_INDEX].template.first_fixed_surface_scaled_value

    @property
    def lat_count(self):
        """Number of grid points along a meridian."""
        return self.sections[self.GRID_DEFINITION_SECTION_INDEX].template.meridian_point_count

    @property
    def lon_count(self):
        """Number of grid points along a parallel."""
        return self.sections[self.GRID_DEFINITION_SECTION_INDEX].template.parallel_point_count

    @property
    def start_lat(self):
        return self.sections[self.GRID_DEFINITION_SECTION_INDEX].template.start_latitude

    @property
    def start_lon(self):
        return self.sections[self.GRID_DEFINITION_SECTION_INDEX].template.start_longitude

    @property
    def lat_step(self):
        # NOTE(review): i_direction increment is taken as the latitude step
        # and j_direction as the longitude step below -- confirm with grippy.
        return self.sections[self.GRID_DEFINITION_SECTION_INDEX].template.i_direction_increment

    @property
    def lon_step(self):
        return self.sections[self.GRID_DEFINITION_SECTION_INDEX].template.j_direction_increment

    @property
    def end_lat(self):
        return self.sections[self.GRID_DEFINITION_SECTION_INDEX].template.end_latitude

    @property
    def end_lon(self):
        return self.sections[self.GRID_DEFINITION_SECTION_INDEX].template.end_longitude

    @property
    def lat_indices(self):
        """All grid latitudes: start_lat advanced in lat_step increments."""
        start = self.start_lat
        step = self.lat_step
        count = self.lat_count
        return list([start + x*step for x in range(0, count)])

    @property
    def lon_indices(self):
        """All grid longitudes: start_lon advanced in lon_step increments."""
        start = self.start_lon
        step = self.lon_step
        count = self.lon_count
        return list([start + x*step for x in range(0, count)])

    @property
    def origin_location(self):
        """Center of the grid: midpoint of the lat/lon extents."""
        lat = (self.start_lat + self.end_lat) * 0.5
        lon = (self.start_lon + self.end_lon) * 0.5
        return Location(lat, lon)

    def location_for_index(self, index):
        """Map a flat data index to its grid Location.

        Returns a NaN Location named 'invalid' when index is out of range.
        """
        if index >= self.lat_count*self.lon_count:
            return Location(float('NaN'), float('NaN'), 'invalid')
        # NOTE(review): this divides and mods by lat_count while
        # index_for_location strides by lon_count; the two only agree when
        # lat_count == lon_count -- verify the intended row-major layout.
        lat_index = int(index/self.lat_count)
        lon_index = index % self.lat_count
        return Location(self.start_lat + (lat_index*self.lat_step), self.start_lon + (lon_index*self.lon_step))

    def index_for_location(self, location):
        """Map a Location to the flat index of the nearest grid point.

        Returns -1 when the location falls outside the grid extents.
        """
        if location.latitude < self.start_lat or location.latitude > self.end_lat:
            return -1
        elif location.absolute_longitude < self.start_lon or location.absolute_longitude > self.end_lon:
            return -1
        closest_lat_index = tools.closest_index(self.lat_indices, location.latitude)
        closest_lon_index = tools.closest_index(self.lon_indices, location.absolute_longitude)
        return closest_lat_index*self.lon_count+closest_lon_index

    @property
    def data(self):
        """All scaled data values with the bitmap applied.

        NOTE(review): data_mean below filters NaNs, which suggests masked
        points come back as NaN from all_scaled_values -- confirm in grippy.
        """
        return self.sections[self.DATA_SECTION_INDEX].all_scaled_values(self.sections[self.BITMAP_SECTION_INDEX].all_bit_truths)

    @property
    def data_mean(self):
        """Mean of all non-NaN data values; 0 when every value is missing."""
        all_data = [x for x in self.data if not math.isnan(x)]
        if len(all_data) < 1:
            return 0
        return sum(all_data)/float(len(all_data))
def read_simple_grib_messages_raw(all_data, count=-1):
    """Parse consecutive GRIB messages from an in-memory bytes buffer.

    Args:
        all_data: raw bytes containing back-to-back GRIB messages.
        count: maximum number of messages to parse; <= 0 means all.

    Returns:
        list of SimpleGribMessage parsed in file order.
    """
    messages = []
    total = len(all_data)
    offset = 0
    # Each message records its own length, which advances the cursor.
    while offset < total and (count <= 0 or len(messages) < count):
        message = SimpleGribMessage(all_data, offset)
        messages.append(message)
        offset += message.length
    return messages
def read_simple_grib_messages(filename, count=-1):
    """Read up to ``count`` GRIB messages from a file on disk.

    Args:
        filename: path to a GRIB file.
        count: maximum number of messages to parse; <= 0 means all.

    Returns:
        list of SimpleGribMessage parsed in file order.

    The parsing loop previously duplicated read_simple_grib_messages_raw
    verbatim; it now delegates to it instead.
    """
    with open(filename, 'rb') as stream:
        all_data = stream.read()
    return read_simple_grib_messages_raw(all_data, count)
| true | true |
f726fb4d2abc77279d107a4f456ba056c71958e4 | 2,801 | py | Python | tests/test_lsp.py | Zhu-Liu/cp2k-input-tools | 3c84e82554bc5cde687395499e3d6f9e2b50e13b | [
"MIT"
] | null | null | null | tests/test_lsp.py | Zhu-Liu/cp2k-input-tools | 3c84e82554bc5cde687395499e3d6f9e2b50e13b | [
"MIT"
] | null | null | null | tests/test_lsp.py | Zhu-Liu/cp2k-input-tools | 3c84e82554bc5cde687395499e3d6f9e2b50e13b | [
"MIT"
] | 1 | 2020-12-22T19:20:53.000Z | 2020-12-22T19:20:53.000Z | from pathlib import Path
from time import sleep
import io
import sys
import pytest
from . import TEST_DIR
try:
from pygls.features import INITIALIZE, TEXT_DOCUMENT_DID_OPEN
from pygls.types import DidOpenTextDocumentParams, TextDocumentItem, InitializeParams
except ImportError:
pytest.skip("pygls unavailable", allow_module_level=True)
if hasattr(sys, "pypy_version_info"):
# the LSP implementation seems to behave completely different on pypy
pytest.skip("pypy is currently not supported", allow_module_level=True)
CALL_TIMEOUT = 2
def _initialize_server(server):
    """Initialize the LSP server directly (no client round-trip needed)."""
    server.lsp.bf_initialize(InitializeParams(process_id=1234, root_uri=Path(__file__).parent.as_uri(), capabilities=None))
def test_initialize(client_server):
    """Simple initialization of the LSP server and single request"""
    client, server = client_server
    root_uri = Path(__file__).parent.as_uri()
    process_id = 1234

    response = client.lsp.send_request(INITIALIZE, {"processId": process_id, "rootUri": root_uri, "capabilities": None}).result(
        timeout=CALL_TIMEOUT
    )

    # the server must have recorded the client's parameters ...
    assert server.process_id == process_id
    assert server.workspace.root_uri == root_uri
    # ... and answered the initialize request with its capabilities
    assert hasattr(response, "capabilities")
def test_text_document_did_open(client_server):
    """Check that the server opens and validates a well-formed input file"""
    client, server = client_server
    _initialize_server(server)

    testpath = TEST_DIR / "inputs" / "test01.inp"
    with testpath.open("r") as fhandle:
        content = fhandle.read()

    client.lsp.notify(TEXT_DOCUMENT_DID_OPEN, DidOpenTextDocumentParams(TextDocumentItem(str(testpath), "cp2k", 1, content)))
    # didOpen is a notification (no reply); give the server time to process
    sleep(1)

    assert len(server.lsp.workspace.documents) == 1
    assert "Validating CP2K input..." in client.msg
def test_text_document_did_open_error(client_server):
    """Check that the server opens an input file with a syntax error"""
    client, server = client_server
    _initialize_server(server)

    testpath = TEST_DIR / "inputs" / "unterminated_string.inp"
    with testpath.open("r") as fhandle:
        content = fhandle.read()

    client.lsp.notify(TEXT_DOCUMENT_DID_OPEN, DidOpenTextDocumentParams(TextDocumentItem(str(testpath), "cp2k", 1, content)))
    # didOpen is a notification (no reply); give the server time to process
    sleep(1)

    assert len(server.lsp.workspace.documents) == 1
    assert "Validating CP2K input..." in client.msg
    # the broken input must surface as a published diagnostic
    assert "Syntax error: unterminated string detected" in client.diagnostics[0].message
@pytest.mark.script_launch_mode("subprocess")
def test_cli(script_runner):
    """Simply check whether the server reacts to an exit notification"""
    # minimal LSP message on stdin: an "exit" request, which must terminate
    # the server cleanly without writing anything to stderr
    stdin = io.StringIO('Content-Length: 45\r\n\r\n{"method":"exit","jsonrpc":"2.0","params":{}}')
    ret = script_runner.run("cp2k-language-server", stdin=stdin)
    assert ret.stderr == ""
    assert ret.success
| 32.952941 | 128 | 0.735809 | from pathlib import Path
from time import sleep
import io
import sys
import pytest
from . import TEST_DIR
try:
from pygls.features import INITIALIZE, TEXT_DOCUMENT_DID_OPEN
from pygls.types import DidOpenTextDocumentParams, TextDocumentItem, InitializeParams
except ImportError:
pytest.skip("pygls unavailable", allow_module_level=True)
if hasattr(sys, "pypy_version_info"):
pytest.skip("pypy is currently not supported", allow_module_level=True)
CALL_TIMEOUT = 2
def _initialize_server(server):
    """Initialize the LSP server directly (no client round-trip needed)."""
    server.lsp.bf_initialize(InitializeParams(process_id=1234, root_uri=Path(__file__).parent.as_uri(), capabilities=None))
def test_initialize(client_server):
    """Simple initialization of the LSP server and single request"""
    client, server = client_server
    root_uri = Path(__file__).parent.as_uri()
    process_id = 1234

    response = client.lsp.send_request(INITIALIZE, {"processId": process_id, "rootUri": root_uri, "capabilities": None}).result(
        timeout=CALL_TIMEOUT
    )

    # the server must have recorded the client's parameters ...
    assert server.process_id == process_id
    assert server.workspace.root_uri == root_uri
    # ... and answered the initialize request with its capabilities
    assert hasattr(response, "capabilities")
def test_text_document_did_open(client_server):
    """Check that the server opens and validates a well-formed input file"""
    client, server = client_server
    _initialize_server(server)

    testpath = TEST_DIR / "inputs" / "test01.inp"
    with testpath.open("r") as fhandle:
        content = fhandle.read()

    client.lsp.notify(TEXT_DOCUMENT_DID_OPEN, DidOpenTextDocumentParams(TextDocumentItem(str(testpath), "cp2k", 1, content)))
    # didOpen is a notification (no reply); give the server time to process
    sleep(1)

    assert len(server.lsp.workspace.documents) == 1
    assert "Validating CP2K input..." in client.msg
def test_text_document_did_open_error(client_server):
    """Check that the server opens an input file with a syntax error"""
    client, server = client_server
    _initialize_server(server)

    testpath = TEST_DIR / "inputs" / "unterminated_string.inp"
    with testpath.open("r") as fhandle:
        content = fhandle.read()

    client.lsp.notify(TEXT_DOCUMENT_DID_OPEN, DidOpenTextDocumentParams(TextDocumentItem(str(testpath), "cp2k", 1, content)))
    # didOpen is a notification (no reply); give the server time to process
    sleep(1)

    assert len(server.lsp.workspace.documents) == 1
    assert "Validating CP2K input..." in client.msg
    # the broken input must surface as a published diagnostic
    assert "Syntax error: unterminated string detected" in client.diagnostics[0].message
@pytest.mark.script_launch_mode("subprocess")
def test_cli(script_runner):
    """Simply check whether the server reacts to an exit notification"""
    # minimal LSP message on stdin: an "exit" request, which must terminate
    # the server cleanly without writing anything to stderr
    stdin = io.StringIO('Content-Length: 45\r\n\r\n{"method":"exit","jsonrpc":"2.0","params":{}}')
    ret = script_runner.run("cp2k-language-server", stdin=stdin)
    assert ret.stderr == ""
    assert ret.success
| true | true |
f726fcafaecf7a7db97b64adcecf290f5e75fcde | 862 | py | Python | emailnetwork/tests/test_graph.py | utomoreza/emailnetwork | 5b9e3532173256be6e766e216d54aaa895210adc | [
"MIT"
] | 8 | 2021-03-26T12:36:47.000Z | 2022-03-16T22:48:05.000Z | emailnetwork/tests/test_graph.py | utomoreza/emailnetwork | 5b9e3532173256be6e766e216d54aaa895210adc | [
"MIT"
] | 8 | 2021-02-20T08:47:21.000Z | 2022-01-21T10:18:50.000Z | emailnetwork/tests/test_graph.py | utomoreza/emailnetwork | 5b9e3532173256be6e766e216d54aaa895210adc | [
"MIT"
] | 17 | 2021-01-28T02:38:38.000Z | 2022-03-27T08:07:49.000Z | import os
from unittest import TestCase, mock
from emailnetwork.extract import MBoxReader
# from emailnetwork.graph import plot_single_email
import emailnetwork.graph as graph
MBOX_PATH = f'{os.path.dirname(__file__)}/test.mbox'
@mock.patch(f"{__name__}.graph.plt")
def test_plot_single_directed(mock_plt):
    """plot_single_directed draws a figure and sets the expected title."""
    reader = MBoxReader(MBOX_PATH)
    # plot the second email (index 1) of the fixture mbox; matplotlib is
    # patched out, so only the calls made to it are verified
    graph.plot_single_directed(reader, 1, True)
    mock_plt.title.assert_called_once_with("Three tips to get the most out of Gmail\n Delivery date: 04/17/2020", fontdict={'fontname': 'Helvetica', 'color': 'k', 'fontweight': 'bold', 'fontsize': 8})
    assert mock_plt.figure.called
class TestGraph(TestCase):
    """Fixture-backed unit tests for emailnetwork.graph."""

    def setUp(self):
        # parse the bundled test mbox once per test method
        self.reader = MBoxReader(MBOX_PATH)
        self.emails = self.reader.extract()

    def test_single_graph(self):
        # TODO: to be implemented later
        pass
| 31.925926 | 200 | 0.722738 | import os
from unittest import TestCase, mock
from emailnetwork.extract import MBoxReader
import emailnetwork.graph as graph
MBOX_PATH = f'{os.path.dirname(__file__)}/test.mbox'
@mock.patch(f"{__name__}.graph.plt")
def test_plot_single_directed(mock_plt):
    """plot_single_directed draws a figure and sets the expected title."""
    reader = MBoxReader(MBOX_PATH)
    # plot the second email (index 1) of the fixture mbox; matplotlib is
    # patched out, so only the calls made to it are verified
    graph.plot_single_directed(reader, 1, True)
    mock_plt.title.assert_called_once_with("Three tips to get the most out of Gmail\n Delivery date: 04/17/2020", fontdict={'fontname': 'Helvetica', 'color': 'k', 'fontweight': 'bold', 'fontsize': 8})
    assert mock_plt.figure.called
class TestGraph(TestCase):
    """Fixture-backed unit tests for emailnetwork.graph."""

    def setUp(self):
        # parse the bundled test mbox once per test method
        self.reader = MBoxReader(MBOX_PATH)
        self.emails = self.reader.extract()

    def test_single_graph(self):
        # TODO: to be implemented later
        pass
| true | true |
f726fcb5e7e57b3c5f279ecd143cbfc0329a5cc9 | 5,866 | py | Python | globus_cli/parsing/command_state.py | glentner/globus-cli | a6542d6824cc123f60088bf2602cd7a0fdb0e64e | [
"Apache-2.0"
] | null | null | null | globus_cli/parsing/command_state.py | glentner/globus-cli | a6542d6824cc123f60088bf2602cd7a0fdb0e64e | [
"Apache-2.0"
] | null | null | null | globus_cli/parsing/command_state.py | glentner/globus-cli | a6542d6824cc123f60088bf2602cd7a0fdb0e64e | [
"Apache-2.0"
] | null | null | null | import warnings
import click
import jmespath
from globus_cli import config
# Format Enum for output formatting
# could use a namedtuple, but that's overkill
JSON_FORMAT = "json"
TEXT_FORMAT = "text"
UNIX_FORMAT = "unix"
class CommandState:
    """Mutable per-invocation state shared by the CLI's global options."""

    def __init__(self):
        # default is config value, or TEXT if it's not set
        self.output_format = config.get_output_format() or TEXT_FORMAT
        # a jmespath expression to process on the json output
        self.jmespath_expr = None
        # default is always False
        self.debug = False
        # default is 0
        self.verbosity = 0
        # by default, empty dict
        self.http_status_map = {}

    def outformat_is_text(self):
        """True when stdout should be human-readable text."""
        return self.output_format == TEXT_FORMAT

    def outformat_is_json(self):
        """True when stdout should be JSON."""
        return self.output_format == JSON_FORMAT

    def outformat_is_unix(self):
        """True when stdout should be line-oriented 'unix' output."""
        return self.output_format == UNIX_FORMAT

    def is_verbose(self):
        """True when at least one -v/--verbose was given."""
        return self.verbosity > 0
def format_option(f):
    """Decorator adding --format/-F and --jmespath/--jq options to *f*.

    Both options only mutate the shared CommandState (expose_value=False);
    a --jq expression forces JSON output regardless of --format=text.
    """
    def callback(ctx, param, value):
        # option not supplied -> leave the state's default alone
        if not value:
            return

        state = ctx.ensure_object(CommandState)

        # when a jmespath expr is set, ignore --format=text
        if value == TEXT_FORMAT and state.jmespath_expr:
            return

        state.output_format = value.lower()

    def jmespath_callback(ctx, param, value):
        if value is None:
            return

        state = ctx.ensure_object(CommandState)
        # compile eagerly so a malformed expression fails before any request
        state.jmespath_expr = jmespath.compile(value)

        if state.output_format == TEXT_FORMAT:
            state.output_format = JSON_FORMAT

    f = click.option(
        "-F",
        "--format",
        type=click.Choice(
            [UNIX_FORMAT, JSON_FORMAT, TEXT_FORMAT], case_sensitive=False
        ),
        help="Output format for stdout. Defaults to text",
        expose_value=False,
        callback=callback,
    )(f)
    f = click.option(
        "--jmespath",
        "--jq",
        help=(
            "A JMESPath expression to apply to json output. "
            "Takes precedence over any specified '--format' and forces "
            "the format to be json processed by this expression"
        ),
        expose_value=False,
        callback=jmespath_callback,
    )(f)
    return f
def debug_option(f):
    """Decorator adding the hidden --debug flag to *f*.

    Eagerly evaluated so DEBUG logging is active for later callbacks.
    """
    def callback(ctx, param, value):
        if not value or ctx.resilient_parsing:
            # turn off warnings altogether
            warnings.simplefilter("ignore")
            return

        warnings.simplefilter("default")
        state = ctx.ensure_object(CommandState)
        state.debug = True
        config.setup_logging(level="DEBUG")

    return click.option(
        "--debug",
        is_flag=True,
        hidden=True,
        expose_value=False,
        callback=callback,
        is_eager=True,
    )(f)
def verbose_option(f):
    """Decorator adding the counting -v/--verbose option to *f*.

    Each repetition raises the verbosity tier (warnings filter plus
    logging level); 3 or more also enables debug mode.
    """
    def callback(ctx, param, value):
        # set state verbosity value from option
        state = ctx.ensure_object(CommandState)
        state.verbosity = value

        # no verbosity
        # all warnings are ignored
        # logging is not turned on
        if value == 0:
            warnings.simplefilter("ignore")

        # verbosity level 1
        # warnings set to once
        # logging set to error
        if value == 1:
            warnings.simplefilter("once")
            config.setup_logging(level="ERROR")

        # verbosity level 2
        # warnings set to default
        # logging set to info
        if value == 2:
            warnings.simplefilter("default")
            config.setup_logging(level="INFO")

        # verbosity level 3+
        # warnings set to always
        # logging set to debug
        # sets debug flag to true
        if value >= 3:
            warnings.simplefilter("always")
            state.debug = True
            config.setup_logging(level="DEBUG")

    return click.option(
        "--verbose",
        "-v",
        count=True,
        expose_value=False,
        callback=callback,
        is_eager=True,
        help="Control level of output",
    )(f)
def map_http_status_option(f):
    """Decorator adding the repeatable --map-http-status option to *f*.

    Each occurrence is a comma-separated "HTTP=EXIT" list; parsed pairs
    accumulate in CommandState.http_status_map.
    """
    # exit codes an HTTP status may legally be mapped to: 0, 1, or 50-99
    exit_stat_set = [0, 1] + list(range(50, 100))

    def per_val_callback(ctx, value):
        if value is None:
            return None
        state = ctx.ensure_object(CommandState)
        try:
            # we may be given a comma-delimited list of values
            # any cases of empty strings are dropped
            pairs = [x for x in (y.strip() for y in value.split(",")) if len(x)]
            # iterate over those pairs, splitting them on `=` signs
            for http_stat, exit_stat in (pair.split("=") for pair in pairs):
                # "parse" as ints
                http_stat, exit_stat = int(http_stat), int(exit_stat)
                # force into the desired range
                if exit_stat not in exit_stat_set:
                    raise ValueError()
                # map the status
                state.http_status_map[http_stat] = exit_stat
        # two conditions can cause ValueError: split didn't give right number
        # of args, or results weren't int()-able
        except ValueError:
            raise click.UsageError(
                "--map-http-status must have an argument of the form "
                '"INT=INT,INT=INT,..." and values of exit codes must be in '
                "0,1,50-99"
            )

    def callback(ctx, param, value):
        """
        Wrap the per-value callback -- multiple=True means that the value is
        always a tuple of given vals.
        """
        for v in value:
            per_val_callback(ctx, v)

    return click.option(
        "--map-http-status",
        help=(
            "Map HTTP statuses to any of these exit codes: 0,1,50-99. "
            'e.g. "404=50,403=51"'
        ),
        expose_value=False,
        callback=callback,
        multiple=True,
    )(f)
| 28.896552 | 80 | 0.579782 | import warnings
import click
import jmespath
from globus_cli import config
JSON_FORMAT = "json"
TEXT_FORMAT = "text"
UNIX_FORMAT = "unix"
class CommandState:
    """Mutable per-invocation state shared by the CLI's global options."""

    def __init__(self):
        # default is config value, or TEXT if it's not set
        self.output_format = config.get_output_format() or TEXT_FORMAT
        # compiled jmespath expression applied to JSON output (None = off)
        self.jmespath_expr = None
        self.debug = False
        self.verbosity = 0
        # HTTP status -> process exit code overrides
        self.http_status_map = {}

    def outformat_is_text(self):
        """True when stdout should be human-readable text."""
        return self.output_format == TEXT_FORMAT

    def outformat_is_json(self):
        """True when stdout should be JSON."""
        return self.output_format == JSON_FORMAT

    def outformat_is_unix(self):
        """True when stdout should be line-oriented 'unix' output."""
        return self.output_format == UNIX_FORMAT

    def is_verbose(self):
        """True when at least one -v/--verbose was given."""
        return self.verbosity > 0
def format_option(f):
    """Decorator adding --format/-F and --jmespath/--jq options to *f*.

    Both options only mutate the shared CommandState (expose_value=False);
    a --jq expression forces JSON output regardless of --format=text.
    """
    def callback(ctx, param, value):
        # option not supplied -> leave the state's default alone
        if not value:
            return

        state = ctx.ensure_object(CommandState)

        # when a jmespath expression is set, --format=text is ignored
        if value == TEXT_FORMAT and state.jmespath_expr:
            return

        state.output_format = value.lower()

    def jmespath_callback(ctx, param, value):
        if value is None:
            return

        state = ctx.ensure_object(CommandState)
        # compile eagerly so a malformed expression fails before any request
        state.jmespath_expr = jmespath.compile(value)

        if state.output_format == TEXT_FORMAT:
            state.output_format = JSON_FORMAT

    f = click.option(
        "-F",
        "--format",
        type=click.Choice(
            [UNIX_FORMAT, JSON_FORMAT, TEXT_FORMAT], case_sensitive=False
        ),
        help="Output format for stdout. Defaults to text",
        expose_value=False,
        callback=callback,
    )(f)
    f = click.option(
        "--jmespath",
        "--jq",
        help=(
            "A JMESPath expression to apply to json output. "
            "Takes precedence over any specified '--format' and forces "
            "the format to be json processed by this expression"
        ),
        expose_value=False,
        callback=jmespath_callback,
    )(f)
    return f
def debug_option(f):
    """Decorator adding the hidden --debug flag to *f*.

    Eagerly evaluated so DEBUG logging is active for later callbacks.
    """
    def callback(ctx, param, value):
        if not value or ctx.resilient_parsing:
            # flag absent (or shell completion in progress): silence warnings
            warnings.simplefilter("ignore")
            return

        warnings.simplefilter("default")
        state = ctx.ensure_object(CommandState)
        state.debug = True
        config.setup_logging(level="DEBUG")

    return click.option(
        "--debug",
        is_flag=True,
        hidden=True,
        expose_value=False,
        callback=callback,
        is_eager=True,
    )(f)
def verbose_option(f):
    """Decorator adding the counting -v/--verbose option to *f*.

    Each repetition raises the verbosity tier (warnings filter plus
    logging level); 3 or more also enables debug mode.
    """
    def callback(ctx, param, value):
        state = ctx.ensure_object(CommandState)
        state.verbosity = value

        # 0: warnings ignored, logging untouched
        if value == 0:
            warnings.simplefilter("ignore")

        # 1: warnings shown once, ERROR-level logging
        if value == 1:
            warnings.simplefilter("once")
            config.setup_logging(level="ERROR")

        # 2: default warnings filter, INFO-level logging
        if value == 2:
            warnings.simplefilter("default")
            config.setup_logging(level="INFO")

        # 3+: always show warnings, DEBUG logging, debug flag on
        if value >= 3:
            warnings.simplefilter("always")
            state.debug = True
            config.setup_logging(level="DEBUG")

    return click.option(
        "--verbose",
        "-v",
        count=True,
        expose_value=False,
        callback=callback,
        is_eager=True,
        help="Control level of output",
    )(f)
def map_http_status_option(f):
    """Decorator adding the repeatable --map-http-status option to *f*.

    Each occurrence is a comma-separated "HTTP=EXIT" list; parsed pairs
    accumulate in CommandState.http_status_map.
    """
    # exit codes an HTTP status may legally be mapped to: 0, 1, or 50-99
    exit_stat_set = [0, 1] + list(range(50, 100))

    def per_val_callback(ctx, value):
        if value is None:
            return None
        state = ctx.ensure_object(CommandState)
        try:
            # comma-delimited list; empty fragments are dropped
            pairs = [x for x in (y.strip() for y in value.split(",")) if len(x)]
            for http_stat, exit_stat in (pair.split("=") for pair in pairs):
                http_stat, exit_stat = int(http_stat), int(exit_stat)
                # reject exit codes outside the allowed set
                if exit_stat not in exit_stat_set:
                    raise ValueError()
                state.http_status_map[http_stat] = exit_stat
        # ValueError: split gave the wrong number of parts, values weren't
        # int()-able, or the exit code was out of range
        except ValueError:
            raise click.UsageError(
                "--map-http-status must have an argument of the form "
                '"INT=INT,INT=INT,..." and values of exit codes must be in '
                "0,1,50-99"
            )

    def callback(ctx, param, value):
        # multiple=True means value is always a tuple of raw strings
        for v in value:
            per_val_callback(ctx, v)

    return click.option(
        "--map-http-status",
        help=(
            "Map HTTP statuses to any of these exit codes: 0,1,50-99. "
            'e.g. "404=50,403=51"'
        ),
        expose_value=False,
        callback=callback,
        multiple=True,
    )(f)
f726fe1931108c84f05c321fc08cb81032045981 | 272 | py | Python | accounts/views.py | Monkasen/blog_project | fac6618007d03e4f127f0c0c302a90595054ff12 | [
"CC0-1.0"
] | null | null | null | accounts/views.py | Monkasen/blog_project | fac6618007d03e4f127f0c0c302a90595054ff12 | [
"CC0-1.0"
] | null | null | null | accounts/views.py | Monkasen/blog_project | fac6618007d03e4f127f0c0c302a90595054ff12 | [
"CC0-1.0"
] | null | null | null | from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views import generic
class SignUpView(generic.CreateView):
    """Self-service registration view.

    Renders Django's built-in UserCreationForm and redirects to the
    login page after a successful signup.
    """
    # reuse Django's stock username/password creation form
    form_class = UserCreationForm
    # lazy: the URLconf may not be loaded at import time
    success_url = reverse_lazy('login')
    template_name = 'signup.html'
| 30.222222 | 54 | 0.797794 | from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views import generic
class SignUpView(generic.CreateView):
    """Self-service registration view.

    Renders Django's built-in UserCreationForm and redirects to the
    login page after a successful signup.
    """
    # reuse Django's stock username/password creation form
    form_class = UserCreationForm
    # lazy: the URLconf may not be loaded at import time
    success_url = reverse_lazy('login')
    template_name = 'signup.html'
| true | true |
f726fee2b9520f0732bc657c2498044fa21cf593 | 6,213 | py | Python | human_eval.py | nlindqv/pytorch_RVAE | d9e58134965f69aad557fb3bd2478500a51210f8 | [
"MIT"
] | null | null | null | human_eval.py | nlindqv/pytorch_RVAE | d9e58134965f69aad557fb3bd2478500a51210f8 | [
"MIT"
] | null | null | null | human_eval.py | nlindqv/pytorch_RVAE | d9e58134965f69aad557fb3bd2478500a51210f8 | [
"MIT"
] | null | null | null | import argparse
import os
import pandas as pd
import numpy as np
import torch as t
from torch.optim import Adam
import pickle5 as pickle
import json
import random
from sample import sample_with_input, sample_with_beam
from utils.batch_loader import BatchLoader, clean_str
from model.paraphraser import Paraphraser
from model.generator import Generator
from synonym_paraphraser import SynonymParaphraser
def main():
parser = argparse.ArgumentParser(description='Paraphraser')
parser.add_argument('--use-cuda', type=bool, default=False, metavar='CUDA', help='use cuda (default: False)')
parser.add_argument('--seq-len', default=30, metavar='SL', help='max length of sequence (default: 30)')
parser.add_argument('--ml', type=bool, default=True, metavar='ML', help='sample by maximum likelihood')
args = parser.parse_args()
# Read data
if not os.path.exists('datasets/human_test.csv'):
source_file = 'datasets/test.csv'
source_data = pd.read_csv(source_file)[['question1', 'question2']]
sentence_categories = [[] for _ in range(5)]
for i in range(len(source_data)):
sent = clean_str(source_data['question1'][i])
sent_len = len(sent.split())
if sent_len < 6:
j = 0
elif sent_len < 11:
j = 1
elif sent_len < 16:
j = 2
elif sent_len < 21:
j = 3
else:
j = 4
sentence_categories[j].append([source_data['question1'][i], source_data['question2'][i]])
sample_data = []
for category in sentence_categories:
sample_data += random.sample(category, 20)
source_data = pd.DataFrame(sample_data, columns=['question1', 'question2'])
source_data.to_csv('datasets/human_test.csv')
else:
source_data = pd.read_csv('datasets/human_test_1.csv')[['question1', 'question2']]
# Sample from Guptas original model
batch_loader = BatchLoader()
from model.parameters import Parameters
parameters = Parameters(batch_loader.max_seq_len, batch_loader.vocab_size)
paraphraser = Paraphraser(parameters)
paraphraser.load_state_dict(t.load('saved_models/trained_paraphraser_ori_32', map_location=t.device('cpu')))
samples_ori, target, source_ori = sample_with_input(batch_loader, paraphraser, args,
decoder_only=True,
file_name='datasets/human_test.csv')
ref_items = generate_items(source_ori, target, 'ref')
ori_items = generate_items(source_ori, samples_ori[0], 'ori')
# Sample from Guptas model with two-path-loss
batch_loader = BatchLoader()
parameters = Parameters(batch_loader.max_seq_len, batch_loader.vocab_size, use_two_path_loss=True)
paraphraser = Paraphraser(parameters)
paraphraser.load_state_dict(t.load('saved_models/trained_paraphraser_tpl_16_32', map_location=t.device('cpu')))
samples_tpl, target, source_tpl = sample_with_input(batch_loader, paraphraser, args,
decoder_only=False,
file_name='datasets/human_test.csv')
tpl_items = generate_items(source_tpl, samples_tpl[0], 'tpl')
# Sample from GAN model
batch_loader = BatchLoader()
from model.parametersGAN import Parameters
parameters = Parameters(batch_loader.max_seq_len, batch_loader.vocab_size)
paraphraser = Generator(parameters)
paraphraser.load_state_dict(t.load('saved_models/trained_generator_gan_140k', map_location=t.device('cpu')))
samples_gan, target, source_gan = sample_with_input(batch_loader, paraphraser, args,
decoder_only=False,
file_name='datasets/human_test.csv')
gan_items = generate_items(source_gan, samples_gan[0], 'gan')
# Sample from synonym model
paraphraser = SynonymParaphraser()
samples_synonym = paraphraser.generate_paraphrases('datasets/human_test.csv')
base_items = generate_items(source_data['question1'], samples_synonym, 'base')
all_items = ref_items + ori_items + tpl_items + gan_items + base_items
eval_results = {'name' : 'Paraphrase Survey Full Ordered', 'items' : all_items}
res = json.dumps(eval_results, ensure_ascii=False)
with open('datasets/human_test_ordered.json', 'w') as f:
f.write(res)
random.shuffle(all_items)
eval_results = {'name' : 'Paraphrase Survey Full Shuffled', 'items' : all_items}
res = json.dumps(eval_results, ensure_ascii=False)
with open('datasets/human_test_shuffled.json', 'w') as f:
f.write(res)
for i in range(10):
eval_results = {'name' : f'Paraphrase Survey Part {i+1}/{10}', 'items' : all_items[i*50:((i+1)*50)-1]}
res = json.dumps(eval_results, ensure_ascii=False)
with open(f'datasets/human_test_p_{i}_{10}.json', 'w') as f:
f.write(res)
def generate_items(original, paraphrase, model):
items = []
for i in range(len(original)):
questions = 'Fråga 1: ' + original[i] + '?<br>Fråga 2: ' + paraphrase[i] + '?'
item = {
'question' : questions,
'required' : True,
'extra' : {'model' : model},
'order': -1,
'answer_sets' : [
{
"type": "radio",
"name": "Fråga 1 är grammatiskt korrekt: ",
"choices": [ "0", "1", "2", "3"]
},
{
"type": "radio",
"name": "Fråga 2 är grammatiskt korrekt: ",
"choices": [ "0", "1", "2", "3"]
},
{
"type": "radio",
"name": "Fråga 2 är betyder samma sak som Fråga 1: ",
"choices": [ "0", "1", "2", "3"]
}]
}
items.append(item)
return items
if __name__ == '__main__':
main()
| 40.607843 | 116 | 0.597779 | import argparse
import os
import pandas as pd
import numpy as np
import torch as t
from torch.optim import Adam
import pickle5 as pickle
import json
import random
from sample import sample_with_input, sample_with_beam
from utils.batch_loader import BatchLoader, clean_str
from model.paraphraser import Paraphraser
from model.generator import Generator
from synonym_paraphraser import SynonymParaphraser
def main():
parser = argparse.ArgumentParser(description='Paraphraser')
parser.add_argument('--use-cuda', type=bool, default=False, metavar='CUDA', help='use cuda (default: False)')
parser.add_argument('--seq-len', default=30, metavar='SL', help='max length of sequence (default: 30)')
parser.add_argument('--ml', type=bool, default=True, metavar='ML', help='sample by maximum likelihood')
args = parser.parse_args()
if not os.path.exists('datasets/human_test.csv'):
source_file = 'datasets/test.csv'
source_data = pd.read_csv(source_file)[['question1', 'question2']]
sentence_categories = [[] for _ in range(5)]
for i in range(len(source_data)):
sent = clean_str(source_data['question1'][i])
sent_len = len(sent.split())
if sent_len < 6:
j = 0
elif sent_len < 11:
j = 1
elif sent_len < 16:
j = 2
elif sent_len < 21:
j = 3
else:
j = 4
sentence_categories[j].append([source_data['question1'][i], source_data['question2'][i]])
sample_data = []
for category in sentence_categories:
sample_data += random.sample(category, 20)
source_data = pd.DataFrame(sample_data, columns=['question1', 'question2'])
source_data.to_csv('datasets/human_test.csv')
else:
source_data = pd.read_csv('datasets/human_test_1.csv')[['question1', 'question2']]
batch_loader = BatchLoader()
from model.parameters import Parameters
parameters = Parameters(batch_loader.max_seq_len, batch_loader.vocab_size)
paraphraser = Paraphraser(parameters)
paraphraser.load_state_dict(t.load('saved_models/trained_paraphraser_ori_32', map_location=t.device('cpu')))
samples_ori, target, source_ori = sample_with_input(batch_loader, paraphraser, args,
decoder_only=True,
file_name='datasets/human_test.csv')
ref_items = generate_items(source_ori, target, 'ref')
ori_items = generate_items(source_ori, samples_ori[0], 'ori')
batch_loader = BatchLoader()
parameters = Parameters(batch_loader.max_seq_len, batch_loader.vocab_size, use_two_path_loss=True)
paraphraser = Paraphraser(parameters)
paraphraser.load_state_dict(t.load('saved_models/trained_paraphraser_tpl_16_32', map_location=t.device('cpu')))
samples_tpl, target, source_tpl = sample_with_input(batch_loader, paraphraser, args,
decoder_only=False,
file_name='datasets/human_test.csv')
tpl_items = generate_items(source_tpl, samples_tpl[0], 'tpl')
batch_loader = BatchLoader()
from model.parametersGAN import Parameters
parameters = Parameters(batch_loader.max_seq_len, batch_loader.vocab_size)
paraphraser = Generator(parameters)
paraphraser.load_state_dict(t.load('saved_models/trained_generator_gan_140k', map_location=t.device('cpu')))
samples_gan, target, source_gan = sample_with_input(batch_loader, paraphraser, args,
decoder_only=False,
file_name='datasets/human_test.csv')
gan_items = generate_items(source_gan, samples_gan[0], 'gan')
paraphraser = SynonymParaphraser()
samples_synonym = paraphraser.generate_paraphrases('datasets/human_test.csv')
base_items = generate_items(source_data['question1'], samples_synonym, 'base')
all_items = ref_items + ori_items + tpl_items + gan_items + base_items
eval_results = {'name' : 'Paraphrase Survey Full Ordered', 'items' : all_items}
res = json.dumps(eval_results, ensure_ascii=False)
with open('datasets/human_test_ordered.json', 'w') as f:
f.write(res)
random.shuffle(all_items)
eval_results = {'name' : 'Paraphrase Survey Full Shuffled', 'items' : all_items}
res = json.dumps(eval_results, ensure_ascii=False)
with open('datasets/human_test_shuffled.json', 'w') as f:
f.write(res)
for i in range(10):
eval_results = {'name' : f'Paraphrase Survey Part {i+1}/{10}', 'items' : all_items[i*50:((i+1)*50)-1]}
res = json.dumps(eval_results, ensure_ascii=False)
with open(f'datasets/human_test_p_{i}_{10}.json', 'w') as f:
f.write(res)
def generate_items(original, paraphrase, model):
items = []
for i in range(len(original)):
questions = 'Fråga 1: ' + original[i] + '?<br>Fråga 2: ' + paraphrase[i] + '?'
item = {
'question' : questions,
'required' : True,
'extra' : {'model' : model},
'order': -1,
'answer_sets' : [
{
"type": "radio",
"name": "Fråga 1 är grammatiskt korrekt: ",
"choices": [ "0", "1", "2", "3"]
},
{
"type": "radio",
"name": "Fråga 2 är grammatiskt korrekt: ",
"choices": [ "0", "1", "2", "3"]
},
{
"type": "radio",
"name": "Fråga 2 är betyder samma sak som Fråga 1: ",
"choices": [ "0", "1", "2", "3"]
}]
}
items.append(item)
return items
if __name__ == '__main__':
main()
| true | true |
f726fef4bfca13a95ea4893f0812a453b7a6ce20 | 727 | py | Python | setup.py | krajasek/pyjama | e8cfd7ac07cfca37a73f8060ff28867a0e35909e | [
"MIT"
] | null | null | null | setup.py | krajasek/pyjama | e8cfd7ac07cfca37a73f8060ff28867a0e35909e | [
"MIT"
] | null | null | null | setup.py | krajasek/pyjama | e8cfd7ac07cfca37a73f8060ff28867a0e35909e | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from pyjamaparty.strutils.string_builder import StringBuilder
description = 'Set of casual python utilities'
long_description = StringBuilder('{}, written standing on shoulders of giants.'.format(description))
long_description += ' Tools include a string builder, singleton decorator'
requirements = []
setup(
name='pyjamaparty',
version='0.2',
description=description,
license="MIT",
long_description=str(long_description),
author='Karthik Rajasekaran',
author_email='krajasek@gmail.com',
url="http://github.com/krajasek/pyjamaparty",
install_requires=requirements,
packages=find_packages(exclude=('pyjamaparty.tests',)),
python_requires='>=2.7'
) | 34.619048 | 100 | 0.763411 | from setuptools import setup, find_packages
from pyjamaparty.strutils.string_builder import StringBuilder
description = 'Set of casual python utilities'
long_description = StringBuilder('{}, written standing on shoulders of giants.'.format(description))
long_description += ' Tools include a string builder, singleton decorator'
requirements = []
setup(
name='pyjamaparty',
version='0.2',
description=description,
license="MIT",
long_description=str(long_description),
author='Karthik Rajasekaran',
author_email='krajasek@gmail.com',
url="http://github.com/krajasek/pyjamaparty",
install_requires=requirements,
packages=find_packages(exclude=('pyjamaparty.tests',)),
python_requires='>=2.7'
) | true | true |
f726ff12eef650ff5b72b0281b3558b574845521 | 2,507 | py | Python | app.py | jleclanche/quassel-weblog | 127de4f13f61e424fad4e33c89c288a64cef9b61 | [
"MIT"
] | 5 | 2016-08-08T17:32:52.000Z | 2019-06-04T13:21:18.000Z | app.py | quassel/quassel-weblog | 127de4f13f61e424fad4e33c89c288a64cef9b61 | [
"MIT"
] | null | null | null | app.py | quassel/quassel-weblog | 127de4f13f61e424fad4e33c89c288a64cef9b61 | [
"MIT"
] | null | null | null | import hashlib
import re
from datetime import date, timedelta
from flask import Flask, render_template, request, abort
from jinja2.utils import urlize
from sqlalchemy import asc, desc
from sqlalchemy.orm import joinedload
from quassel import quassel_session, Message, Buffer, Sender, Network
import settings
app = Flask(__name__)
app.config["PROPAGATE_EXCEPTIONS"] = True
## Quassel Connection
session = quassel_session(settings.uri)
def hash_nick(nick):
hash = hashlib.sha1(nick.encode("utf-8"))
return int(hash.hexdigest(), 16)
def process_message(message):
# NOTE: Working around jinja2.utils.urlize being far too greedy on matches
if not message:
return ""
message = message.replace("\x0f", " \x0f")
message = urlize(message)
message = message.replace(" \x0f", "\x0f")
message = re.sub("\x03(\\d\\d)", r'<span class="color\1">', message)
message = message.replace("\x03", "</span>")
message = message.replace("\x0f", "</b></em></u></span>") # Nasty.
while "\x02" in message:
message = message.replace("\x02", "<b>", 1)
message = message.replace("\x02", "</b>", 1)
while "\x1d" in message:
message = message.replace("\x1d", "<em>", 1)
message = message.replace("\x1d", "</em>", 1)
while "\x1f" in message:
message = message.replace("\x1f", "<u>", 1)
message = message.replace("\x1f", "</u>", 1)
return message
@app.route("/<name>/")
def channel_index(name):
if name not in settings.channels:
abort(404)
days = request.args.get("days", "")
if days.isdigit():
days = min(int(days), 200)
else:
days = settings.days
query = session.query(Message).join(Sender)
query = query.order_by(asc(Message.time))
query = query.filter(Message.time >= date.today() - timedelta(days))
#query = query.options(joinedload(Message.sender))
#query = query.options(joinedload(Message.buffer))
query = query.join(Message.buffer)
query = query.filter(Buffer.userid == 1)
channel_name = "#" + name # XXX
query = query.filter(Buffer.name == channel_name)
nick = request.args.get("nick")
if nick:
query = query.filter(Sender.name.startswith(nick))
search = request.args.get("search")
if search:
query = query.filter(Message.message.contains(search))
context = {
"channel": channel_name,
"highlight": request.args.get("highlight", "").lower(),
"messages": list(query),
"hash": hash_nick,
"process_message": process_message,
}
return render_template("backlog.html", **context)
if __name__ == "__main__":
app.debug = True
app.run()
session.close()
| 28.168539 | 75 | 0.691264 | import hashlib
import re
from datetime import date, timedelta
from flask import Flask, render_template, request, abort
from jinja2.utils import urlize
from sqlalchemy import asc, desc
from sqlalchemy.orm import joinedload
from quassel import quassel_session, Message, Buffer, Sender, Network
import settings
app = Flask(__name__)
app.config["PROPAGATE_EXCEPTIONS"] = True
ession(settings.uri)
def hash_nick(nick):
hash = hashlib.sha1(nick.encode("utf-8"))
return int(hash.hexdigest(), 16)
def process_message(message):
if not message:
return ""
message = message.replace("\x0f", " \x0f")
message = urlize(message)
message = message.replace(" \x0f", "\x0f")
message = re.sub("\x03(\\d\\d)", r'<span class="color\1">', message)
message = message.replace("\x03", "</span>")
message = message.replace("\x0f", "</b></em></u></span>")
while "\x02" in message:
message = message.replace("\x02", "<b>", 1)
message = message.replace("\x02", "</b>", 1)
while "\x1d" in message:
message = message.replace("\x1d", "<em>", 1)
message = message.replace("\x1d", "</em>", 1)
while "\x1f" in message:
message = message.replace("\x1f", "<u>", 1)
message = message.replace("\x1f", "</u>", 1)
return message
@app.route("/<name>/")
def channel_index(name):
if name not in settings.channels:
abort(404)
days = request.args.get("days", "")
if days.isdigit():
days = min(int(days), 200)
else:
days = settings.days
query = session.query(Message).join(Sender)
query = query.order_by(asc(Message.time))
query = query.filter(Message.time >= date.today() - timedelta(days))
query = query.join(Message.buffer)
query = query.filter(Buffer.userid == 1)
channel_name = "#" + name
query = query.filter(Buffer.name == channel_name)
nick = request.args.get("nick")
if nick:
query = query.filter(Sender.name.startswith(nick))
search = request.args.get("search")
if search:
query = query.filter(Message.message.contains(search))
context = {
"channel": channel_name,
"highlight": request.args.get("highlight", "").lower(),
"messages": list(query),
"hash": hash_nick,
"process_message": process_message,
}
return render_template("backlog.html", **context)
if __name__ == "__main__":
app.debug = True
app.run()
session.close()
| true | true |
f727011bf8d2cc213c21de27b98b3b27c47d249a | 520 | py | Python | tests/nnapi/specs/V1_2/reduce_any_2D_nnfw.mod.py | bogus-sudo/ONE-1 | 7052a817eff661ec2854ed2e7ee0de5e8ba82b55 | [
"Apache-2.0"
] | 255 | 2020-05-22T07:45:29.000Z | 2022-03-29T23:58:22.000Z | tests/nnapi/specs/V1_2/reduce_any_2D_nnfw.mod.py | bogus-sudo/ONE-1 | 7052a817eff661ec2854ed2e7ee0de5e8ba82b55 | [
"Apache-2.0"
] | 5,102 | 2020-05-22T07:48:33.000Z | 2022-03-31T23:43:39.000Z | tests/nnapi/specs/V1_2/reduce_any_2D_nnfw.mod.py | bogus-sudo/ONE-1 | 7052a817eff661ec2854ed2e7ee0de5e8ba82b55 | [
"Apache-2.0"
] | 120 | 2020-05-22T07:51:08.000Z | 2022-02-16T19:08:05.000Z | # model
model = Model()
i1 = Input("input", "TENSOR_BOOL8", "{3, 4}")
axis = Int32Scalar("axis", 1)
keepDims = False
out1 = Output("output", "TENSOR_BOOL8", "{3}")
model = model.Operation("REDUCE_ANY", i1, axis, keepDims).To(out1)
# Example 1. Input in operand 0, 1
input0 = {i1: # input 0
[False, False, False, False,
False, True, False, False,
True, False, True, False]}
output0 = {out1: # output 0
[False, True, True]}
# Instantiate an example
Example((input0, output0))
| 26 | 66 | 0.611538 |
model = Model()
i1 = Input("input", "TENSOR_BOOL8", "{3, 4}")
axis = Int32Scalar("axis", 1)
keepDims = False
out1 = Output("output", "TENSOR_BOOL8", "{3}")
model = model.Operation("REDUCE_ANY", i1, axis, keepDims).To(out1)
input0 = {i1:
[False, False, False, False,
False, True, False, False,
True, False, True, False]}
output0 = {out1:
[False, True, True]}
Example((input0, output0))
| true | true |
f727017762f29818a9fcaf162bb13d318487b8a6 | 1,219 | py | Python | var/spack/repos/builtin/packages/relax/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/relax/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/relax/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Relax(CMakePackage):
"""A set of Reflex libraries for the most common used general data types in
the LHC Computing Grid"""
homepage = "https://twiki.cern.ch/twiki/bin/view/LCG/RELAX"
url = "http://lcgpackages.web.cern.ch/lcgpackages/tarFiles/sources/RELAX-1.tar.gz"
tags = ['hep']
version('root6', sha256='1d24b1a0884bbe99d60f7d02fea45d59695c158ab5e53516ac3fb780eb460bb4')
depends_on('clhep')
depends_on('gsl')
depends_on('hepmc@:2')
depends_on('root@6.0.0:')
def cmake_args(self):
spec = self.spec
cxxstd = self.spec['root'].variants['cxxstd'].value
hepmc_lib = spec['hepmc'].prefix.lib.join('libHepMC.so')
args = [
'-DCMAKE_CXX_STANDARD={0}'.format(cxxstd),
'-DROOT_BINARY_PATH={0}'.format(spec['root'].prefix.bin),
'-DHEPMC_INCLUDE_DIR={0}'.format(spec['hepmc'].prefix.include),
'-DHEPMC_LIBRARIES={0}'.format(hepmc_lib)
]
return args
| 32.078947 | 95 | 0.656276 |
from spack import *
class Relax(CMakePackage):
homepage = "https://twiki.cern.ch/twiki/bin/view/LCG/RELAX"
url = "http://lcgpackages.web.cern.ch/lcgpackages/tarFiles/sources/RELAX-1.tar.gz"
tags = ['hep']
version('root6', sha256='1d24b1a0884bbe99d60f7d02fea45d59695c158ab5e53516ac3fb780eb460bb4')
depends_on('clhep')
depends_on('gsl')
depends_on('hepmc@:2')
depends_on('root@6.0.0:')
def cmake_args(self):
spec = self.spec
cxxstd = self.spec['root'].variants['cxxstd'].value
hepmc_lib = spec['hepmc'].prefix.lib.join('libHepMC.so')
args = [
'-DCMAKE_CXX_STANDARD={0}'.format(cxxstd),
'-DROOT_BINARY_PATH={0}'.format(spec['root'].prefix.bin),
'-DHEPMC_INCLUDE_DIR={0}'.format(spec['hepmc'].prefix.include),
'-DHEPMC_LIBRARIES={0}'.format(hepmc_lib)
]
return args
| true | true |
f72701a8444dcb76142a4a452fafb56971989631 | 4,930 | py | Python | Fashion_Test.py | denis19973/Keras-RFCN | e62670c2e01ac1e942f513d324642cf8d6aee368 | [
"MIT"
] | 88 | 2018-05-04T08:04:02.000Z | 2022-01-05T02:57:28.000Z | Fashion_Test.py | denis19973/Keras-RFCN | e62670c2e01ac1e942f513d324642cf8d6aee368 | [
"MIT"
] | 16 | 2018-07-03T11:58:51.000Z | 2021-07-12T04:49:05.000Z | Fashion_Test.py | mitulrm/FaceRFCN | 5e1fdaf197b3a93c22a82d9476a3f9a1c804e398 | [
"MIT"
] | 33 | 2018-05-04T08:02:32.000Z | 2022-01-09T14:39:06.000Z | """
Keras RFCN
Copyright (c) 2018
Licensed under the MIT License (see LICENSE for details)
Written by parap1uie-s@github.com
"""
'''
This is a demo to Eval a RFCN model with DeepFashion Dataset
http://mmlab.ie.cuhk.edu.hk/projects/DeepFashion.html
'''
from KerasRFCN.Model.Model import RFCN_Model
from KerasRFCN.Config import Config
import KerasRFCN.Utils
import os
from keras.preprocessing import image
import pickle
import numpy as np
import argparse
import matplotlib.pyplot as plt
import matplotlib.patches as patches
class RFCNNConfig(Config):
"""Configuration for training on the toy shapes dataset.
Derives from the base Config class and overrides values specific
to the toy shapes dataset.
"""
# Give the configuration a recognizable name
NAME = "Fashion"
# Backbone model
# choose one from ['resnet50', 'resnet101', 'resnet50_dilated', 'resnet101_dilated']
BACKBONE = "resnet101"
# Train on 1 GPU and 8 images per GPU. We can put multiple images on each
# GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 1
# Number of classes (including background)
C = 1 + 46 # background + 2 tags
NUM_CLASSES = C
# Use small images for faster training. Set the limits of the small side
# the large side, and that determines the image shape.
IMAGE_MIN_DIM = 640
IMAGE_MAX_DIM = 768
# Use smaller anchors because our image and objects are small
RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512) # anchor side in pixels
# Use same strides on stage 4-6 if use dilated resnet of DetNet
# Like BACKBONE_STRIDES = [4, 8, 16, 16, 16]
BACKBONE_STRIDES = [4, 8, 16, 32, 64]
# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 200
# Use a small epoch since the data is simple
STEPS_PER_EPOCH = 100
# use small validation steps since the epoch is small
VALIDATION_STEPS = 5
RPN_NMS_THRESHOLD = 0.7
DETECTION_MIN_CONFIDENCE = 0.4
POOL_SIZE = 7
def Test(model, loadpath, savepath):
assert not loadpath == savepath, "loadpath should'n same with savepath"
model_path = model.find_last()[1]
# Load trained weights (fill in path to trained weights here)
model.load_weights(model_path, by_name=True)
print("Loading weights from ", model_path)
if os.path.isdir(loadpath):
for idx, imgname in enumerate(os.listdir(loadpath)):
if not imgname.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
continue
print(imgname)
imageoriChannel = np.array(plt.imread( os.path.join(loadpath, imgname) )) / 255.0
img = image.img_to_array( image.load_img(os.path.join(loadpath, imgname)) )
TestSinglePic(img, imageoriChannel, model, savepath=savepath, imgname=imgname)
elif os.path.isfile(loadpath):
if not loadpath.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
print("not image file!")
return
print(loadpath)
imageoriChannel = np.array(plt.imread( loadpath )) / 255.0
img = image.img_to_array( image.load_img(loadpath) )
(filename,extension) = os.path.splitext(loadpath)
TestSinglePic(img, imageoriChannel, model, savepath=savepath, imgname=filename)
def TestSinglePic(image, image_ori, model, savepath, imgname):
r = model.detect([image], verbose=1)[0]
print(r)
def get_ax(rows=1, cols=1, size=8):
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
ax = get_ax(1)
assert not savepath == "", "empty save path"
assert not imgname == "", "empty image file name"
for box in r['rois']:
y1, x1, y2, x2 = box
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor="red", facecolor='none')
ax.add_patch(p)
ax.imshow(image_ori)
plt.savefig(os.path.join(savepath, imgname),bbox_inches='tight')
plt.clf()
if __name__ == '__main__':
ROOT_DIR = os.getcwd()
parser = argparse.ArgumentParser()
parser.add_argument('--loadpath', required=False,
default="images/",
metavar="evaluate images loadpath",
help="evaluate images loadpath")
parser.add_argument('--savepath', required=False,
default="result/",
metavar="evaluate images savepath",
help="evaluate images savepath")
config = RFCNNConfig()
args = parser.parse_args()
model = RFCN_Model(mode="inference", config=config,
model_dir=os.path.join(ROOT_DIR, "logs") )
Test(model, args.loadpath, args.savepath) | 35.214286 | 96 | 0.650913 |
from KerasRFCN.Model.Model import RFCN_Model
from KerasRFCN.Config import Config
import KerasRFCN.Utils
import os
from keras.preprocessing import image
import pickle
import numpy as np
import argparse
import matplotlib.pyplot as plt
import matplotlib.patches as patches
class RFCNNConfig(Config):
NAME = "Fashion"
BACKBONE = "resnet101"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
C = 1 + 46
NUM_CLASSES = C
IMAGE_MIN_DIM = 640
IMAGE_MAX_DIM = 768
RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)
BACKBONE_STRIDES = [4, 8, 16, 32, 64]
TRAIN_ROIS_PER_IMAGE = 200
STEPS_PER_EPOCH = 100
VALIDATION_STEPS = 5
RPN_NMS_THRESHOLD = 0.7
DETECTION_MIN_CONFIDENCE = 0.4
POOL_SIZE = 7
def Test(model, loadpath, savepath):
assert not loadpath == savepath, "loadpath should'n same with savepath"
model_path = model.find_last()[1]
# Load trained weights (fill in path to trained weights here)
model.load_weights(model_path, by_name=True)
print("Loading weights from ", model_path)
if os.path.isdir(loadpath):
for idx, imgname in enumerate(os.listdir(loadpath)):
if not imgname.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
continue
print(imgname)
imageoriChannel = np.array(plt.imread( os.path.join(loadpath, imgname) )) / 255.0
img = image.img_to_array( image.load_img(os.path.join(loadpath, imgname)) )
TestSinglePic(img, imageoriChannel, model, savepath=savepath, imgname=imgname)
elif os.path.isfile(loadpath):
if not loadpath.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
print("not image file!")
return
print(loadpath)
imageoriChannel = np.array(plt.imread( loadpath )) / 255.0
img = image.img_to_array( image.load_img(loadpath) )
(filename,extension) = os.path.splitext(loadpath)
TestSinglePic(img, imageoriChannel, model, savepath=savepath, imgname=filename)
def TestSinglePic(image, image_ori, model, savepath, imgname):
r = model.detect([image], verbose=1)[0]
print(r)
def get_ax(rows=1, cols=1, size=8):
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
ax = get_ax(1)
assert not savepath == "", "empty save path"
assert not imgname == "", "empty image file name"
for box in r['rois']:
y1, x1, y2, x2 = box
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor="red", facecolor='none')
ax.add_patch(p)
ax.imshow(image_ori)
plt.savefig(os.path.join(savepath, imgname),bbox_inches='tight')
plt.clf()
if __name__ == '__main__':
ROOT_DIR = os.getcwd()
parser = argparse.ArgumentParser()
parser.add_argument('--loadpath', required=False,
default="images/",
metavar="evaluate images loadpath",
help="evaluate images loadpath")
parser.add_argument('--savepath', required=False,
default="result/",
metavar="evaluate images savepath",
help="evaluate images savepath")
config = RFCNNConfig()
args = parser.parse_args()
model = RFCN_Model(mode="inference", config=config,
model_dir=os.path.join(ROOT_DIR, "logs") )
Test(model, args.loadpath, args.savepath) | true | true |
f72701ca82258a63b2f05eaaa0b57d341079e90e | 13,760 | py | Python | mhdb/write_ttl.py | charlie42/mhdb-tables2turtles | b289cc79b85e7c5d63bdf1b718e4e1d7bf188864 | [
"Apache-2.0"
] | 1 | 2020-04-15T14:22:14.000Z | 2020-04-15T14:22:14.000Z | mhdb/write_ttl.py | charlie42/mhdb-tables2turtles | b289cc79b85e7c5d63bdf1b718e4e1d7bf188864 | [
"Apache-2.0"
] | 3 | 2020-03-03T17:49:04.000Z | 2020-03-09T18:40:26.000Z | mhdb/write_ttl.py | charlie42/mhdb-tables2turtles | b289cc79b85e7c5d63bdf1b718e4e1d7bf188864 | [
"Apache-2.0"
] | 1 | 2020-04-20T15:05:42.000Z | 2020-04-20T15:05:42.000Z | #!/usr/bin/env python3
"""
This program contains generic functions to build a Turtle (Terse RDF Triple Language) document.
Authors:
- Arno Klein, 2017-2020 (arno@childmind.org) http://binarybottle.com
- Jon Clucas, 2017–2018 (jon.clucas@childmind.org)
Copyright 2020, Child Mind Institute (http://childmind.org), Apache v2.0 License
"""
import os
import sys
# Make the repository root importable when this file is run directly:
# __file__/../.. resolves to the parent of this module's directory
# (the directory containing the mhdb package).
top_dir = os.path.abspath(os.path.join(
    (__file__),
    os.pardir,
    os.pardir
))
if top_dir not in sys.path:
    sys.path.append(top_dir)
import numpy as np
def language_string(s, lang="en"):
    """
    Encode a literal as being in a specific language.

    Double quotes in the literal are swapped for single quotes (via
    return_string) so the value can be safely wrapped in Turtle's
    triple-quote delimiters.

    Parameters
    ----------
    s : string
    lang : string
        ISO character code, default="en"

    Returns
    -------
    s : string
        triple quoted Turtle literal with language encoding

    Example
    -------
    >>> print(language_string("Canada goose"))
    \"""Canada goose\"""@en
    """
    literal = return_string(s, ['"'], ["'"])
    return '"""{0}"""@{1}'.format(literal, lang)
def return_string(input_string, replace=(), replace_with=()):
    """
    Return a stripped string with optional character replacements.

    Newlines are converted to spaces and double quotes are escaped
    (for embedding in Turtle literals) before the optional
    replace/replace_with substitutions are applied.

    Parameters
    ----------
    input_string : string
        arbitrary string (non-strings are converted with str())
    replace : sequence of strings
        strings to substitute
    replace_with : sequence of strings
        strings with which to substitute 'replace' strings

    Returns
    -------
    output_string : string
        stripped input_string ("" if input_string is empty/falsy)

    Raises
    ------
    Exception
        if replace and replace_with differ in length
    """
    # Empty/None/0 inputs all yield an empty string, as before.
    if not input_string:
        return ""
    if not isinstance(input_string, str):
        input_string = str(input_string)
    output_string = input_string.replace(
        "\n",
        " "
    ).replace(
        "\"",
        "\\\""
    ).strip()
    if replace:
        # Fail fast on mismatched substitution lists.
        if len(replace) != len(replace_with):
            raise Exception("replace and replace_with should be the same length.")
        for old, new in zip(replace, replace_with):
            output_string = output_string.replace(old, new)
    return output_string
def create_label(input_string):
    """
    Clean up a string and create a corresponding (shortened) label.

    Parameters
    ----------
    input_string : string
        arbitrary string

    Returns
    -------
    output_string : string
        stripped input_string
    label_string : string
        alphanumeric characters of input_string

    Raises
    ------
    Exception
        if input_string is empty/None or not a string
    """
    from mhdb.spreadsheet_io import return_string
    from mhdb.spreadsheet_io import convert_string_to_label

    # Guard clauses replace the original nested if/else pyramid.
    if not input_string:
        raise Exception('input_string is None!')
    if not isinstance(input_string, str):
        raise Exception('input_string is not a string!')

    output_string = return_string(input_string,
                                  replace=['"', '\n'],
                                  replace_with=['', ''])
    if not output_string:
        return '', ''
    return output_string, convert_string_to_label(output_string)
def convert_string_to_label(input_string, label_type='delimited'):
    """
    Remove all non-alphanumeric characters from a string.

    Parameters
    ----------
    input_string : string
        input string
    label_type : string
        'PascalCase', 'camelCase', or 'delimited'
        ('delimited' uses '_' delimiters and keeps hyphens)

    Returns
    -------
    output_string : string
        label containing only alphanumerics, hyphens, and underscores

    Raises
    ------
    Exception
        if input_string is empty/None or label_type is unrecognized
    """
    def toPascal(s):
        """
        Usage: toPascal("WRITE this in pascalcase")
        'WriteThisInPascalcase'
        """
        return ''.join(x for x in s.title() if not x.isspace())

    def toCamel(s):
        """
        Usage: toCamel("WRITE this in camelcase")
        'writeThisInCamelcase'
        (from: https://stackoverflow.com/questions/8347048/
        how-to-convert-string-to-title-case-in-python)
        """
        ret = s.split(' ')
        return ret[0].lower() + \
               ''.join(x.title() for x in ret[1:] if not x.isspace())

    def toDelimit(s):
        """
        Usage: toDelimit("WRITE this-in delimited")
        'WRITE_this-in_delimited'
        """
        # Collapse runs of spaces/underscores/hyphens into single
        # delimiters and fold "_-_" into a plain hyphen.
        while " " in s:
            s = s.replace(" ", "_")
        while "__" in s:
            s = s.replace("__", "_")
        s = s.replace("_-_", "-")
        while "--" in s:
            s = s.replace("--", "-")
        return s

    if not input_string:
        raise Exception('"{0}" is not a string!'.format(input_string))

    if label_type == 'PascalCase':
        output_string = toPascal(input_string)
    elif label_type == 'camelCase':
        output_string = toCamel(input_string)
    elif label_type == 'delimited':
        output_string = toDelimit(input_string)
    else:
        # Bug fix: the original constructed this Exception without
        # raising it, which caused an UnboundLocalError on
        # output_string below for unknown label types.
        raise Exception('label_type input is incorrect')

    keep_chars = ('-', '_')
    return "".join(c for c in str(output_string) if c.isalnum()
                   or c in keep_chars).rstrip()
def check_iri(iri, label_type='delimited'):
    """
    Format an IRI by type, producing <iri>, prefix:iri, or :label.

    Parameters
    ----------
    iri: string
    label_type: string
        'PascalCase', 'camelCase', or 'delimited'
        ('delimited' uses '_' delimiters and keeps hyphens)

    Returns
    -------
    iri: string
    """
    iri = str(iri).strip()
    has_space = any(ch.isspace() for ch in iri)
    if ":" not in iri or has_space:
        # No prefix (or embedded whitespace): coerce to a local label.
        return ":" + convert_string_to_label(iri, label_type)
    if iri.endswith(":"):
        # Trailing colon: drop it and reformat.
        return check_iri(iri[:-1], label_type)
    if ":/" in iri and not iri.startswith('<') and not iri.endswith('>'):
        # Looks like a full URL: wrap in angle brackets.
        return "<{0}>".format(convert_string_to_label(iri, label_type))
    return iri
def turtle_from_dict(ttl_dict):
    """
    Convert a dictionary to a Terse Triple Language string.

    Parameters
    ----------
    ttl_dict: dictionary
        key: string
            RDF subject
        value: dictionary
            key: string
                RDF predicate
            value: {string}
                set of RDF objects

    Returns
    -------
    ttl_string: str
        ttl

    Example
    -------
    >>> turtle_from_dict({
    ...     "duck": {"continues": {"sitting"}},
    ...     "goose": {"begins": {"chasing"}}
    ... })
    'duck continues sitting .\\n\\ngoose begins chasing .'
    """
    # The original version built an unused list of null-ish markers
    # (":None", ":nan", nan, None); that dead code is removed here.
    statements = []
    for subject, predicate_map in ttl_dict.items():
        predications = " ;\n\t".join(
            "{0} {1}".format(predicate, obj)
            for predicate, objects in predicate_map.items()
            for obj in objects)
        statements.append("{0} {1} .".format(subject, predications))
    return "\n\n".join(statements)
def write_about_statement(subject, predicate, object, predicates):
    """
    Function to write one or more rdf statements in terse triple format.
    Parameters
    ----------
    subject: string
        subject of this statement
    predicate: string
        predicate of this statement
    object: string
        object of this statement
    predicates: iterable of 2-tuples
        predicate: string
            nth property
        object: string
            nth object
    Returns
    -------
    ttl_string: string
        Turtle string
    Example
    -------
    >>> statement = {"duck": {"continues": {"sitting"}}}
    >>> predicates = {
    ...     ("source", '"Duck Duck Goose"'),
    ...     ("statementType", "role")
    ... }
    >>> for subject in statement:
    ...     for predicate in statement[subject]:
    ...         for object in statement[subject][predicate]:
    ...             print(len(write_about_statement(
    ...                 subject, predicate, object, predicates
    ...             )))
    168
    """
    # NOTE(review): create_label returns a (string, label) 2-tuple, so
    # the blank-node id below formats the tuple's repr into the name;
    # confirm whether convert_string_to_label was intended instead.
    return(
        write_ttl(
            "_:{0}".format(create_label("_".join([
                subject,
                predicate,
                object
            ]))),
            [
                ("rdf:type", "rdf:Statement"),
                ("rdf:subject", subject),
                ("rdf:predicate", predicate),
                ("rdf:object", object),
                *predicates
            ]
        )
    )
def write_header(base_uri, base_prefix, version, label, comment, prefixes):
    """
    Print out the beginning of an RDF text file.
    Parameters
    ----------
    base_uri : string
        base URI
    base_prefix : string
        base prefix
    version : string
        version
    label : string
        label
    comment : string
        comment
    prefixes : list
        list of 2-or-3-tuples of TTL prefix strings and prefix IRIs
        each tuple is
        [0] a prefix string
        [1] an iri string
        [2] an optional import URL
        eg, ("owl", "http://www.w3.org/2002/07/owl#")
    REMOVED:
    imports : Boolean, optional, default=False
        import external ontologies?
    Returns
    -------
    header : string
        owl header
    """
    header = write_header_prefixes(base_uri, base_prefix, prefixes)
    # {4} places the PREFIX block ahead of the ontology declaration;
    # header is deliberately rebound to the combined string.
    header = """{4}<{0}> a owl:Ontology ;
    owl:versionIRI <{0}/{1}> ;
    owl:versionInfo "{1}"^^rdfs:Literal ;
    rdfs:label "{2}"^^rdfs:Literal ;
    rdfs:comment \"\"\"{3}\"\"\"@en .
""".format(base_uri, version, label, comment, header)
    return header
def write_header_prefixes(base_uri, base_prefix, prefixes):
    """
    Write a turtle-formatted header prefix string for (prefix, iri) tuples.

    Parameter
    ---------
    base_uri : string
        base URI
    base_prefix : string
        base prefix (unused; kept for interface compatibility)
    prefixes: list of 2 or 3-tuples
        each tuple is
        [0] a prefix string
        [1] an iri string
        [2] an optional import URL

    Returns
    -------
    header_prefix: string
    """
    lines = ["PREFIX {0}: <{1}> \n".format(name, iri)
             for name, iri, *_ in prefixes]
    # The default (empty) prefix maps to the base URI.
    return "{0}\nPREFIX : <{1}#> \n".format("".join(lines), base_uri)
def write_ttl(subject, predicates, common_statements=None):
    """
    Write one or more rdf statements in terse triple format.

    Parameters
    ----------
    subject: string
        subject of all triples in these statements
    predicates: iterable of 2-tuples
        (predicate, object) statements about the subject
    common_statements: iterable of 2-tuples, optional
        (predicate, object) statements reified about each statement above

    Returns
    -------
    ttl_string: string
        Turtle string
    """
    parts = []
    if common_statements:
        # Reify each (predicate, object) pair with the shared statements.
        about = "\n\n".join(
            write_about_statement(subject, p[0], p[1], common_statements)
            for p in predicates)
        if about:
            parts.append("{0}\n\n".format(about))
    body = " ;\n\t".join("{0} {1}".format(p[0], p[1]) for p in predicates)
    parts.append("{0} {1} .".format(subject, body))
    return "".join(parts)
| 25.063752 | 95 | 0.50952 |
import os
import sys
top_dir = os.path.abspath(os.path.join(
    (__file__),
    os.pardir,
    os.pardir
))
# Ensure the directory two levels above this file is importable.
if top_dir not in sys.path:
    sys.path.append(top_dir)
import numpy as np
def language_string(s, lang="en"):
    """
    Return a TTL triple-quoted literal tagged with a language code.

    Double quotes in s are converted to single quotes via return_string.
    """
    literal = return_string(s, ['"'], ["'"])
    return "\"\"\"{0}\"\"\"@{1}".format(literal, lang)
def return_string(input_string, replace=[], replace_with=[]):
    """
    Return a stripped string with optional character replacements.

    Newlines become spaces and double quotes are escaped before the
    optional substitutions are applied.

    Parameters
    ----------
    input_string : string
        arbitrary string (non-strings are converted with str())
    replace : list of strings
        strings to substitute
    replace_with : list of strings
        strings with which to substitute 'replace' strings

    Returns
    -------
    output_string : string
        stripped input_string ("" if input_string is falsy)
    """
    if input_string:
        if not isinstance(input_string, str):
            input_string = str(input_string)
        output_string = input_string.replace(
            "\n",
            " "
        ).replace(
            "\"",
            "\\\""
        ).strip()
        if replace:
            if len(replace) == len(replace_with):
                for i, s in enumerate(replace):
                    output_string = output_string.replace(s, replace_with[i])
                return output_string
            else:
                raise Exception("replace and replace_with should be the same length.")
        else:
            return output_string
    else:
        return ""
def create_label(input_string):
    """
    Clean up a string and create a corresponding (shortened) label.

    Parameters
    ----------
    input_string : string
        arbitrary string

    Returns
    -------
    output_string : string
        stripped input_string
    label_string : string
        alphanumeric characters of input_string
    """
    # NOTE(review): both helpers are defined in this module; this
    # mhdb.spreadsheet_io import appears redundant — confirm.
    from mhdb.spreadsheet_io import return_string
    from mhdb.spreadsheet_io import convert_string_to_label
    if input_string:
        if isinstance(input_string, str):
            output_string = return_string(input_string,
                                          replace=['"', '\n'],
                                          replace_with=['', ''])
            if output_string:
                label_string = convert_string_to_label(output_string)
                return output_string, label_string
            else:
                return '', ''
        else:
            raise Exception('input_string is not a string!')
    else:
        raise Exception('input_string is None!')
def convert_string_to_label(input_string, label_type='delimited'):
    """
    Remove all non-alphanumeric characters from a string.

    Parameters
    ----------
    input_string : string
        input string
    label_type: string
        'PascalCase', 'camelCase', or 'delimited'
        ('delimited' uses '_' delimiters and keeps hyphens)

    Returns
    -------
    output_string : string
        output string

    Raises
    ------
    Exception
        if input_string is falsy or label_type is not recognized
    """
    def toPascal(s):
        # "WRITE this" -> "WriteThis"
        return ''.join(x for x in s.title() if not x.isspace())

    def toCamel(s):
        # "WRITE this" -> "writeThis"
        ret = s.split(' ')
        return ret[0].lower() + \
               ''.join(x.title() for x in ret[1:] if not x.isspace())

    def toDelimit(s):
        # "WRITE this-in delimited" -> "WRITE_this-in_delimited"
        while " " in s:
            s = s.replace(" ", "_")
        while "__" in s:
            s = s.replace("__", "_")
        s = s.replace("_-_", "-")
        while "--" in s:
            s = s.replace("--", "-")
        return s

    if not input_string:
        raise Exception('"{0}" is not a string!'.format(input_string))
    if label_type == 'PascalCase':
        output_string = toPascal(input_string)
    elif label_type == 'camelCase':
        output_string = toCamel(input_string)
    elif label_type == 'delimited':
        output_string = toDelimit(input_string)
    else:
        # Bug fix: the original constructed this Exception without
        # raising it, which then caused a NameError on output_string.
        raise Exception('label_type input is incorrect')
    keep_chars = ('-', '_')
    return "".join(c for c in str(output_string)
                   if c.isalnum() or c in keep_chars).rstrip()
def check_iri(iri, label_type='delimited'):
    """
    Format an IRI by type, producing <iri>, prefix:iri, or :label.

    Parameters
    ----------
    iri: string
    label_type: string
        'PascalCase', 'camelCase', or 'delimited'

    Returns
    -------
    iri: string
    """
    iri = str(iri).strip()
    contains_space = any(ch.isspace() for ch in iri)
    if ":" not in iri or contains_space:
        # No prefix (or embedded whitespace): coerce to a local label.
        return ":" + convert_string_to_label(iri, label_type)
    if iri.endswith(":"):
        # Trailing colon: drop it and reformat.
        return check_iri(iri[:-1], label_type)
    if ":/" in iri and not iri.startswith('<') and not iri.endswith('>'):
        # Looks like a full URL: wrap in angle brackets.
        return "<{0}>".format(convert_string_to_label(iri, label_type))
    return iri
def turtle_from_dict(ttl_dict):
    """
    Convert a dictionary to a Terse Triple Language string.

    Parameters
    ----------
    ttl_dict: dictionary
        {subject: {predicate: {objects}}}

    Returns
    -------
    ttl_string: str
        ttl, e.g. 'duck continues sitting .\\n\\ngoose begins chasing .'
    """
    # The original version built an unused list of null-ish markers
    # (":None", ":nan", nan, None); that dead code is removed here.
    statements = []
    for subject, predicate_map in ttl_dict.items():
        predications = " ;\n\t".join(
            "{0} {1}".format(predicate, obj)
            for predicate, objects in predicate_map.items()
            for obj in objects)
        statements.append("{0} {1} .".format(subject, predications))
    return "\n\n".join(statements)
def write_about_statement(subject, predicate, object, predicates):
    """
    Write one reified rdf statement in terse triple format.

    Parameters
    ----------
    subject: string
    predicate: string
    object: string
    predicates: iterable of 2-tuples
        additional (predicate, object) pairs about the statement

    Returns
    -------
    ttl_string: string
        Turtle string
    """
    # NOTE(review): create_label returns a (string, label) 2-tuple, so
    # the blank-node id below formats the tuple's repr into the name;
    # confirm whether convert_string_to_label was intended instead.
    return(
        write_ttl(
            "_:{0}".format(create_label("_".join([
                subject,
                predicate,
                object
            ]))),
            [
                ("rdf:type", "rdf:Statement"),
                ("rdf:subject", subject),
                ("rdf:predicate", predicate),
                ("rdf:object", object),
                *predicates
            ]
        )
    )
def write_header(base_uri, base_prefix, version, label, comment, prefixes):
    """
    Write the beginning of an RDF (Turtle) text file.

    Combines the prefix block from write_header_prefixes with an
    owl:Ontology declaration for base_uri.

    Parameters
    ----------
    base_uri : string
    base_prefix : string
    version : string
    label : string
    comment : string
    prefixes : list of 2-or-3-tuples of (prefix, iri[, import URL])

    Returns
    -------
    header : string
        owl header
    """
    header = write_header_prefixes(base_uri, base_prefix, prefixes)
    # {4} places the PREFIX block ahead of the ontology declaration;
    # header is deliberately rebound to the combined string.
    header = """{4}<{0}> a owl:Ontology ;
    owl:versionIRI <{0}/{1}> ;
    owl:versionInfo "{1}"^^rdfs:Literal ;
    rdfs:label "{2}"^^rdfs:Literal ;
    rdfs:comment \"\"\"{3}\"\"\"@en .
""".format(base_uri, version, label, comment, header)
    return header
def write_header_prefixes(base_uri, base_prefix, prefixes):
    """
    Write a turtle-formatted header prefix string for (prefix, iri) tuples.

    Parameters
    ----------
    base_uri : string
    base_prefix : string
        unused; kept for interface compatibility
    prefixes: list of 2 or 3-tuples of (prefix, iri[, import URL])

    Returns
    -------
    header_prefix: string
    """
    chunks = ["PREFIX {0}: <{1}> \n".format(name, iri)
              for name, iri, *_ in prefixes]
    # The default (empty) prefix maps to the base URI.
    return "{0}\nPREFIX : <{1}#> \n".format("".join(chunks), base_uri)
def write_ttl(subject, predicates, common_statements=None):
    """
    Write one or more rdf statements in terse triple format.

    Parameters
    ----------
    subject: string
        subject of all triples in these statements
    predicates: iterable of 2-tuples
        (predicate, object) statements about the subject
    common_statements: iterable of 2-tuples, optional
        (predicate, object) statements reified about each statement above

    Returns
    -------
    ttl_string: string
        Turtle string
    """
    pieces = []
    if common_statements:
        # Reify each (predicate, object) pair with the shared statements.
        reified = "\n\n".join(
            write_about_statement(subject, p[0], p[1], common_statements)
            for p in predicates)
        if reified:
            pieces.append("{0}\n\n".format(reified))
    joined = " ;\n\t".join("{0} {1}".format(p[0], p[1]) for p in predicates)
    pieces.append("{0} {1} .".format(subject, joined))
    return "".join(pieces)
| true | true |
f72703a3d0c01193efa4ecd4a94ed6ea309de133 | 3,106 | py | Python | Question_prepare/answers/answer_rotation.py | KuKuXia/DeepLearningMugenKnock | 979cf05e65e352da36453337380a418a2a2fdccb | [
"MIT"
] | null | null | null | Question_prepare/answers/answer_rotation.py | KuKuXia/DeepLearningMugenKnock | 979cf05e65e352da36453337380a418a2a2fdccb | [
"MIT"
] | null | null | null | Question_prepare/answers/answer_rotation.py | KuKuXia/DeepLearningMugenKnock | 979cf05e65e352da36453337380a418a2a2fdccb | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
np.random.seed(0)  # make shuffling/augmentation order reproducible
num_classes = 2
# All images are resized to this height/width before use.
img_height, img_width = 64, 64
CLS = ['akahara', 'madara']  # class names; list order defines label indices
# get train data
def data_load(path, hf=False, vf=False, rot=None):
    """Load images under path, with optional flip/rotation augmentation.

    Parameters
    ----------
    path : str
        directory containing one sub-directory per class
    hf : bool
        also add horizontally flipped copies
    vf : bool
        also add vertically flipped copies
    rot : int or None
        if given, add copies rotated every `rot` degrees (also plotted)

    Returns
    -------
    xs : np.ndarray, (N, 3, H, W) float32 in [0, 1], RGB
    ts : np.ndarray, (N,) int class indices (index into CLS)
    paths : list of str
    """
    xs = []
    ts = []
    paths = []

    for dir_path in glob(path + '/*'):
        for path in glob(dir_path + '/*'):
            x = cv2.imread(path)
            x = cv2.resize(x, (img_width, img_height)).astype(np.float32)
            x /= 255.
            x = x[..., ::-1]  # BGR -> RGB
            xs.append(x)

            # Label from the class name contained in the file path.
            for i, cls in enumerate(CLS):
                if cls in path:
                    t = i

            ts.append(t)
            paths.append(path)

            if hf:
                xs.append(x[:, ::-1])
                ts.append(t)
                paths.append(path)
            if vf:
                xs.append(x[::-1])
                ts.append(t)
                paths.append(path)
            if hf and vf:
                xs.append(x[::-1, ::-1])
                ts.append(t)
                paths.append(path)
            if rot is not None:
                angle = rot
                scale = 1

                # show
                a_num = 360 // rot
                w_num = np.ceil(np.sqrt(a_num))
                h_num = np.ceil(a_num / w_num)
                count = 1
                plt.subplot(h_num, w_num, count)
                plt.axis('off')
                plt.imshow(x)
                plt.title("angle=0")

                while angle < 360:
                    _h, _w, _c = x.shape
                    max_side = max(_h, _w)
                    # Pad to a square so rotation does not clip corners.
                    tmp = np.zeros((max_side, max_side, _c))
                    tx = int((max_side - _w) / 2)
                    ty = int((max_side - _h) / 2)
                    tmp[ty: ty+_h, tx: tx+_w] = x.copy()

                    M = cv2.getRotationMatrix2D((max_side/2, max_side/2), angle, scale)

                    _x = cv2.warpAffine(tmp, M, (max_side, max_side))

                    # Bug fix: crop rows with ty/_h and columns with
                    # tx/_w (the original swapped the axes).
                    _x = _x[ty:ty+_h, tx:tx+_w]

                    # Bug fix: append the rotated image (the original
                    # appended the unrotated x).
                    xs.append(_x)
                    ts.append(t)
                    paths.append(path)

                    # show
                    count += 1
                    plt.subplot(h_num, w_num, count)
                    plt.imshow(_x)
                    plt.axis('off')
                    plt.title("angle={}".format(angle))

                    angle += rot

                plt.show()

    xs = np.array(xs, dtype=np.float32)
    # np.int was removed from NumPy; plain int is the same dtype here.
    ts = np.array(ts, dtype=int)

    xs = xs.transpose(0, 3, 1, 2)

    return xs, ts, paths
xs, ts, paths = data_load("../Dataset/train/images/", hf=True, vf=True, rot=1)
# Mini-batch index iteration demo: draw batches of size mb from a
# shuffled index order, reshuffling and wrapping when exhausted.
mb = 3
mbi = 0
train_ind = np.arange(len(xs))
np.random.seed(0)
np.random.shuffle(train_ind)
for i in range(10):
    if mbi + mb > len(xs):
        # Not enough indices left: take the remainder, reshuffle, and
        # top the batch up from the start of the new order.
        mb_ind = train_ind[mbi:]
        np.random.shuffle(train_ind)
        mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
        mbi = mb - (len(xs) - mbi)
    else:
        mb_ind = train_ind[mbi: mbi+mb]
        mbi += mb
    print(mb_ind)
| 26.775862 | 87 | 0.433033 | import cv2
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
np.random.seed(0)  # make shuffling/augmentation order reproducible
num_classes = 2
# All images are resized to this height/width before use.
img_height, img_width = 64, 64
CLS = ['akahara', 'madara']  # class names; list order defines label indices
def data_load(path, hf=False, vf=False, rot=None):
    """Load images under path, with optional flip/rotation augmentation.

    Parameters
    ----------
    path : str
        directory containing one sub-directory per class
    hf : bool
        also add horizontally flipped copies
    vf : bool
        also add vertically flipped copies
    rot : int or None
        if given, add copies rotated every `rot` degrees (also plotted)

    Returns
    -------
    xs : np.ndarray, (N, 3, H, W) float32 in [0, 1], RGB
    ts : np.ndarray, (N,) int class indices (index into CLS)
    paths : list of str
    """
    xs = []
    ts = []
    paths = []

    for dir_path in glob(path + '/*'):
        for path in glob(dir_path + '/*'):
            x = cv2.imread(path)
            x = cv2.resize(x, (img_width, img_height)).astype(np.float32)
            x /= 255.
            x = x[..., ::-1]  # BGR -> RGB
            xs.append(x)

            # Label from the class name contained in the file path.
            for i, cls in enumerate(CLS):
                if cls in path:
                    t = i

            ts.append(t)
            paths.append(path)

            if hf:
                xs.append(x[:, ::-1])
                ts.append(t)
                paths.append(path)
            if vf:
                xs.append(x[::-1])
                ts.append(t)
                paths.append(path)
            if hf and vf:
                xs.append(x[::-1, ::-1])
                ts.append(t)
                paths.append(path)
            if rot is not None:
                angle = rot
                scale = 1

                a_num = 360 // rot
                w_num = np.ceil(np.sqrt(a_num))
                h_num = np.ceil(a_num / w_num)
                count = 1
                plt.subplot(h_num, w_num, count)
                plt.axis('off')
                plt.imshow(x)
                plt.title("angle=0")

                while angle < 360:
                    _h, _w, _c = x.shape
                    max_side = max(_h, _w)
                    # Pad to a square so rotation does not clip corners.
                    tmp = np.zeros((max_side, max_side, _c))
                    tx = int((max_side - _w) / 2)
                    ty = int((max_side - _h) / 2)
                    tmp[ty: ty+_h, tx: tx+_w] = x.copy()

                    M = cv2.getRotationMatrix2D((max_side/2, max_side/2), angle, scale)

                    _x = cv2.warpAffine(tmp, M, (max_side, max_side))

                    # Bug fix: crop rows with ty/_h and columns with
                    # tx/_w (the original swapped the axes).
                    _x = _x[ty:ty+_h, tx:tx+_w]

                    # Bug fix: append the rotated image (the original
                    # appended the unrotated x).
                    xs.append(_x)
                    ts.append(t)
                    paths.append(path)

                    count += 1
                    plt.subplot(h_num, w_num, count)
                    plt.imshow(_x)
                    plt.axis('off')
                    plt.title("angle={}".format(angle))

                    angle += rot

                plt.show()

    xs = np.array(xs, dtype=np.float32)
    # np.int was removed from NumPy; plain int is the same dtype here.
    ts = np.array(ts, dtype=int)

    xs = xs.transpose(0, 3, 1, 2)

    return xs, ts, paths
xs, ts, paths = data_load("../Dataset/train/images/", hf=True, vf=True, rot=1)
# Mini-batch index iteration demo: draw batches of size mb from a
# shuffled index order, reshuffling and wrapping when exhausted.
mb = 3
mbi = 0
train_ind = np.arange(len(xs))
np.random.seed(0)
np.random.shuffle(train_ind)
for i in range(10):
    if mbi + mb > len(xs):
        # Not enough indices left: take the remainder, reshuffle, and
        # top the batch up from the start of the new order.
        mb_ind = train_ind[mbi:]
        np.random.shuffle(train_ind)
        mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
        mbi = mb - (len(xs) - mbi)
    else:
        mb_ind = train_ind[mbi: mbi+mb]
        mbi += mb
    print(mb_ind)
| true | true |
f72703e878cca7379abbf6d41d3989ee572b5ae9 | 283 | py | Python | app/user/urls.py | Eslamhathout/restuarant_reservation_api | 67292e95eed13b5bee423a443180230b9de4c036 | [
"MIT"
] | null | null | null | app/user/urls.py | Eslamhathout/restuarant_reservation_api | 67292e95eed13b5bee423a443180230b9de4c036 | [
"MIT"
] | null | null | null | app/user/urls.py | Eslamhathout/restuarant_reservation_api | 67292e95eed13b5bee423a443180230b9de4c036 | [
"MIT"
] | null | null | null | from django.urls import path
from user import views
app_name = 'user'  # URL namespace for reversing, e.g. 'user:create'
urlpatterns = [
    # Routes map to class-based views in user.views.
    path('create/', views.createUserView.as_view(), name='create'),
    path('token/', views.CreateTokenView.as_view(), name='token'),
    path('me/', views.ManageUserView.as_view(), name='me'),
] | 31.444444 | 67 | 0.689046 | from django.urls import path
from user import views
app_name = 'user'  # URL namespace for reversing, e.g. 'user:create'
urlpatterns = [
    # Routes map to class-based views in user.views.
    path('create/', views.createUserView.as_view(), name='create'),
    path('token/', views.CreateTokenView.as_view(), name='token'),
    path('me/', views.ManageUserView.as_view(), name='me'),
] | true | true |
f7270451a42cc428358813a37592ce306c2d3a9e | 263 | py | Python | courses/templatetags/course_tags.py | pauljherrera/avantiweb | 40b87e754e68a0e2adcf5e1640d5e2e0c8637d0a | [
"MIT"
] | null | null | null | courses/templatetags/course_tags.py | pauljherrera/avantiweb | 40b87e754e68a0e2adcf5e1640d5e2e0c8637d0a | [
"MIT"
] | null | null | null | courses/templatetags/course_tags.py | pauljherrera/avantiweb | 40b87e754e68a0e2adcf5e1640d5e2e0c8637d0a | [
"MIT"
] | null | null | null | from django import template
register = template.Library()  # registry for this module's template filters
@register.filter
def model_name(obj):
    """Template filter: return obj's model name, or None if unavailable."""
    meta = getattr(obj, '_meta', None)
    return getattr(meta, 'model_name', None)
@register.filter
def filter_course_id(obj, filter_):
    """Template filter: return obj.filter(course_id=filter_)."""
return obj.filter(course_id=filter_) | 18.785714 | 37 | 0.790875 | from django import template
register = template.Library()  # registry for this module's template filters
@register.filter
def model_name(obj):
    """Template filter: return obj's model name, or None if unavailable."""
    try:
        return obj._meta.model_name
    except AttributeError:
        return None
@register.filter
def filter_course_id(obj, filter_):
    """Template filter: return obj.filter(course_id=filter_)."""
return obj.filter(course_id=filter_) | true | true |
f72704eca60cfb15f7653086792eaae9dad19395 | 21,810 | py | Python | backend/opnreco/syncbase.py | OpenPaymentNetwork/opnreco | 99c8955d7e200fe11fc23c3568879c543940b168 | [
"MIT"
] | null | null | null | backend/opnreco/syncbase.py | OpenPaymentNetwork/opnreco | 99c8955d7e200fe11fc23c3568879c543940b168 | [
"MIT"
] | null | null | null | backend/opnreco/syncbase.py | OpenPaymentNetwork/opnreco | 99c8955d7e200fe11fc23c3568879c543940b168 | [
"MIT"
] | null | null | null |
from decimal import Decimal
from opnreco.models.db import File
from opnreco.models.db import Movement
from opnreco.models.db import now_func
from opnreco.models.db import OwnerLog
from opnreco.models.db import Peer
from opnreco.models.db import TransferDownloadRecord
from opnreco.models.db import TransferRecord
from opnreco.mvinterp import MovementInterpreter
from opnreco.util import check_requests_response
from opnreco.util import to_datetime
from pyramid.decorator import reify
import collections
import logging
import os
import requests
log = logging.getLogger(__name__)  # module-level logger
zero = Decimal()  # shared Decimal zero constant
null = None  # alias for None
class VerificationFailure(Exception):
    """A transfer failed verification."""

    def __init__(self, msg, transfer_id):
        super().__init__(msg)
        # ID of the transfer that failed verification.
        self.transfer_id = transfer_id
class SyncBase:
    """Base class for views that sync with OPN.
    This is a base class for either downloading all transfers and movements
    since the last sync or for verifying that existing transfers and
    movements have not changed.
    """
    # Gates every database write below; verification-only runs can
    # disable writes in a subclass.
    write_enabled = True
    # Optional 'limit' POST parameter for each history_sync batch.
    batch_limit = None
    def __init__(self, request):
        """Capture the request, the owner, and fresh per-sync state."""
        self.request = request
        self.owner = owner = request.owner
        self.owner_id = owner.id
        # Base URL of the OPN API, taken from the environment.
        self.api_url = os.environ['opn_api_url']
        # Accumulates event dicts describing what this sync changed.
        self.change_log = []
        # peers is a cache of {peer_id: Peer}.
        self.peers = {}
    def download_batch(self, sync_ts_iso, sync_transfer_id, count_remain):
        """Request one batch of transfers from OPN's history_sync endpoint.

        Returns the decoded JSON response; check_requests_response()
        raises on a bad HTTP response.
        """
        url = '%s/wallet/history_sync' % self.api_url
        postdata = {
            'sync_ts': sync_ts_iso,
            'transfer_id': sync_transfer_id,
        }
        if count_remain:
            # Per the parameter name, asks OPN to report how many
            # transfers remain — confirm against the OPN API docs.
            postdata['count_remain'] = 'true'
        if self.batch_limit:
            postdata['limit'] = self.batch_limit
        r = requests.post(
            url,
            data=postdata,
            headers={'Authorization': 'Bearer %s' % self.request.access_token})
        check_requests_response(r)
        return r.json()
    def import_transfer_records(self, transfers_download):
        """Add and update TransferRecord rows.

        transfers_download is a decoded history_sync response; its
        'results' list holds one summary dict per transfer. Raises
        VerificationFailure if an immutable attribute of a previously
        downloaded transfer has changed.
        """
        dbsession = self.request.dbsession
        owner_id = self.owner_id
        write_enabled = self.write_enabled
        change_log = self.change_log
        transfer_ids = [item['id'] for item in transfers_download['results']]
        if not transfer_ids:
            return
        # Load the already-downloaded records for these transfers.
        record_list = (
            dbsession.query(TransferRecord)
            .filter(
                TransferRecord.owner_id == owner_id,
                TransferRecord.transfer_id.in_(transfer_ids),
            )
            .all())
        record_map = {record.transfer_id: record for record in record_list}
        existing_movements_map = self.get_existing_movements_map(transfer_ids)
        # peer_ids is the set of all peer IDs referenced by the transfers.
        peer_ids = set()
        peer_ids.add(self.owner_id)
        for tsum in transfers_download['results']:
            sender_id = tsum['sender_id']
            if sender_id:
                peer_ids.add(sender_id)
            recipient_id = tsum['recipient_id']
            if recipient_id:
                peer_ids.add(recipient_id)
            for m in tsum['movements']:
                from_id = m['from_id']
                if from_id:
                    peer_ids.add(from_id)
                peer_ids.add(m['to_id'])
                for loop in m['loops']:
                    peer_ids.add(loop['issuer_id'])
        # Warm the self.peers cache with the known Peer rows.
        peer_rows = (
            dbsession.query(Peer)
            .filter(
                Peer.owner_id == owner_id,
                Peer.peer_id.in_(peer_ids),
            ).all())
        for peer in peer_rows:
            self.peers[peer.peer_id] = peer
        if write_enabled:
            self.import_peer(self.owner_id, None)
        for tsum in transfers_download['results']:
            if write_enabled:
                self.import_peer(tsum['sender_id'], tsum['sender_info'])
            if tsum.get('recipient_is_dfi_account'):
                # Copy the info so the flag can be added without
                # mutating the downloaded dict.
                recipient_info = {}
                recipient_info.update(tsum['recipient_info'])
                recipient_info['is_dfi_account'] = True
            else:
                recipient_info = tsum['recipient_info']
            if write_enabled:
                self.import_peer(tsum['recipient_id'], recipient_info)
            transfer_id = tsum['id']
            bundled_transfers = tsum.get('bundled_transfers')
            if (bundled_transfers is not None and
                    not isinstance(bundled_transfers, list)):
                # Don't let something weird get into the database.
                raise ValueError(
                    "Transfer %s: bundled_transfers should be None or a list, "
                    "not %s" % (transfer_id, repr(bundled_transfers)))
            bundle_transfer_id = tsum.get('bundle_transfer_id')
            if bundle_transfer_id:
                bundle_transfer_id = str(bundle_transfer_id)
            changed = []
            # kw holds the mutable TransferRecord attributes.
            kw = {
                'workflow_type': tsum['workflow_type'],
                'start': to_datetime(tsum['start']),
                'currency': tsum['currency'],
                'amount': Decimal(tsum['amount']),
                'timestamp': to_datetime(tsum['timestamp']),
                'next_activity': tsum['next_activity'],
                'completed': tsum['completed'],
                'canceled': tsum['canceled'],
                'sender_id': tsum['sender_id'] or None,
                'sender_uid': tsum['sender_uid'] or None,
                'sender_info': tsum['sender_info'],
                'recipient_id': tsum['recipient_id'] or None,
                'recipient_uid': tsum['recipient_uid'] or None,
                'recipient_info': tsum['recipient_info'],
                'bundled_transfers': bundled_transfers,
                'bundle_transfer_id': bundle_transfer_id,
            }
            record = record_map.get(transfer_id)
            if record is None:
                # Add a TransferRecord.
                is_new_record = True
                if write_enabled:
                    record = TransferRecord(
                        transfer_id=transfer_id,
                        owner_id=owner_id,
                        **kw)
                    changed.append(kw)
                    dbsession.add(record)
                    dbsession.flush()  # Assign record.id
                    record_map[transfer_id] = record
                    change_log.append({
                        'event_type': 'transfer_add',
                        'transfer_id': transfer_id,
                    })
            else:
                # Update a TransferRecord.
                is_new_record = False
                immutable_attrs = ('workflow_type', 'start')
                for attr in immutable_attrs:
                    if kw[attr] != getattr(record, attr):
                        msg = (
                            "Verification failure in transfer %s. "
                            "Immutable attribute changed. "
                            "Old %s was %s, new %s is %s" %
                            (transfer_id, attr, repr(getattr(record, attr)),
                                attr, repr(kw[attr])))
                        log.error(msg)
                        raise VerificationFailure(msg, transfer_id=transfer_id)
                changed_map = {}
                for attr, value in sorted(kw.items()):
                    if getattr(record, attr) != value:
                        if write_enabled:
                            setattr(record, attr, value)
                        changed_map[attr] = value
                if changed_map:
                    changed.append(changed_map)
                    change_log.append({
                        'event_type': 'transfer_changes',
                        'transfer_id': transfer_id,
                        'changes': sorted(changed_map.keys()),
                    })
            if write_enabled:
                # NOTE(review): self.opn_download_id is not set in this
                # base class; presumably the subclass assigns it before
                # calling — confirm.
                dbsession.add(TransferDownloadRecord(
                    opn_download_id=self.opn_download_id,
                    transfer_record_id=record.id,
                    transfer_id=transfer_id,
                    changed=changed))
            if record is not None:
                self.import_movements(
                    record, tsum,
                    is_new_record=is_new_record,
                    existing_movements=existing_movements_map[record.id])
        dbsession.flush()
def get_existing_movements_map(self, transfer_ids):
"""List all movements recorded for the given transfer IDs.
Return a defaultdict: {transfer_record_id: [Movement]}.
"""
dbsession = self.request.dbsession
owner_id = self.owner_id
all_movements = (
dbsession.query(Movement)
.join(
TransferRecord,
TransferRecord.id == Movement.transfer_record_id)
.filter(
TransferRecord.owner_id == owner_id,
TransferRecord.transfer_id.in_(transfer_ids))
.all())
res = collections.defaultdict(list)
for m in all_movements:
res[m.transfer_record_id].append(m)
return res
@reify
def account_map(self):
# Get the map of accounts from /wallet/info.
account_list = self.request.wallet_info['profile']['accounts']
return {a['id']: a for a in account_list}
    def import_peer(self, peer_id, info):
        """Import a peer from a transfer record or other source.

        Adds a Peer row (plus an OwnerLog entry) for an unknown peer,
        or updates title/username and the one-shot DFI flags of a
        cached peer. No-op when peer_id is falsy or writing is
        disabled. info is a dict of peer attributes; it may be None
        when peer_id == self.owner_id (a better dict is built below).
        """
        if not peer_id:
            # A transfer's sender or recipient is not yet known.
            # There's nothing to import.
            return
        if not self.write_enabled:
            # This method doesn't need to do anything when writing is
            # disabled.
            return
        if peer_id == self.owner_id:
            # Get better info from the owner profile.
            info = {
                'title': self.owner.title,
                'screen_name': self.owner.username,
                'is_dfi_account': False,
                'is_own_dfi_account': False,
            }
        else:
            # Is the peer an account held by the user? If so, get
            # better info from the account map.
            account = self.account_map.get(peer_id)
            if account:
                title = '%s at %s' % (
                    account['redacted_account_num'],
                    account['rdfi_name'],
                )
                if account['alias']:
                    title += ' (%s)' % account['alias']
                info = {
                    'title': title,
                    'screen_name': '',
                    'is_dfi_account': True,
                    'is_own_dfi_account': True,
                }
        dbsession = self.request.dbsession
        peer = self.peers.get(peer_id)
        if peer is None:
            # First time seeing this peer: insert it and log the add.
            peer = Peer(
                owner_id=self.owner_id,
                peer_id=peer_id,
                title=info.get('title'),
                username=info.get('screen_name'),
                is_dfi_account=info.get('is_dfi_account'),
                is_own_dfi_account=info.get('is_own_dfi_account'),
                last_update=now_func,
            )
            dbsession.add(peer)
            self.change_log.append({
                'event_type': 'peer_add',
                'peer_id': peer_id,
            })
            self.peers[peer_id] = peer
            dbsession.add(OwnerLog(
                owner_id=self.owner_id,
                personal_id=self.request.personal_id,
                event_type='peer_add',
                content={
                    'peer_id': peer_id,
                    'info': info,
                }))
        else:
            # attrs_found counts how many attributes info supplied;
            # any supplied attribute refreshes last_update below.
            attrs_found = 0
            changes = {}
            # Changeable attrs
            attrs = (
                ('title', 'title'),
                ('screen_name', 'username'),
            )
            for source_attr, dest_attr in attrs:
                value = info.get(source_attr)
                if value:
                    attrs_found += 1
                    if getattr(peer, dest_attr) != value:
                        changes[dest_attr] = value
                        setattr(peer, dest_attr, value)
            # One-shot boolean attrs (once set, stay set)
            attrs = (
                ('is_dfi_account', 'is_dfi_account'),
                ('is_own_dfi_account', 'is_own_dfi_account'),
            )
            for source_attr, dest_attr in attrs:
                value = info.get(source_attr)
                if value is not None:
                    attrs_found += 1
                    if value and not getattr(peer, dest_attr):
                        changes[dest_attr] = True
                        setattr(peer, dest_attr, True)
            if attrs_found:
                peer.last_update = now_func
            if changes:
                self.change_log.append({
                    'event_type': 'peer_update',
                    'peer_id': peer_id,
                })
                dbsession.add(OwnerLog(
                    owner_id=self.owner_id,
                    personal_id=self.request.personal_id,
                    event_type='peer_update',
                    content={
                        'peer_id': peer_id,
                        'changes': changes,
                    }))
    def import_movements(
            self, record, item, is_new_record, existing_movements):
        """Add Movement rows for one transfer and verify existing ones.

        record is the TransferRecord; item is the downloaded transfer
        summary dict; existing_movements is the list of Movement rows
        already stored for this record. Raises VerificationFailure if a
        previously recorded movement changed or disappeared.
        """
        transfer_id = item['id']
        dbsession = self.request.dbsession
        write_enabled = self.write_enabled
        change_log = self.change_log
        # Prepare movement_dict, a dict of movements already imported.
        # movement_dict: {
        #     (number, amount_index, loop_id, currency, issuer_id): Movement
        # }
        movement_dict = {}
        for movement in existing_movements:
            row_key = (
                movement.number,
                movement.amount_index,
                movement.loop_id,
                movement.currency,
                movement.issuer_id,
            )
            movement_dict[row_key] = movement
        # Any key still in this set afterward marks a vanished movement.
        movements_unseen = set(movement_dict.keys())
        item_movements = item['movements'] or ()
        for movement in item_movements:
            number = movement.get('number')
            if not number:
                raise ValueError(
                    "The OPN service needs to be migrated to support "
                    "movement numbers. (OPN: upgrade and run bin/resummarize)")
            ts = to_datetime(movement['timestamp'])
            action = movement['action']
            from_id = movement['from_id']
            to_id = movement['to_id']
            by_loop = self.summarize_movement(
                movement=movement, transfer_id=transfer_id, ts=ts)
            # Add movement records based on the by_ploop dict.
            for loop_key, delta_list in sorted(by_loop.items()):
                loop_id, currency, issuer_id = loop_key
                for amount_index, amount in enumerate(delta_list):
                    row_key = (number, amount_index) + loop_key
                    old_movement = movement_dict.get(row_key)
                    if old_movement is not None:
                        # The movement is already recorded.
                        movements_unseen.discard(row_key)
                        # Verify it has not changed, then continue.
                        self.verify_old_movement(
                            transfer_id=transfer_id,
                            number=number,
                            old_movement=old_movement,
                            ts=ts,
                            from_id=from_id,
                            to_id=to_id,
                            action=action,
                            amount=amount,
                            loop_id=loop_id,
                            currency=currency,
                            issuer_id=issuer_id,
                        )
                        continue
                    if write_enabled:
                        # Record the new movement.
                        movement = Movement(
                            transfer_record_id=record.id,
                            owner_id=self.owner_id,
                            number=number,
                            amount_index=amount_index,
                            loop_id=loop_id,
                            currency=currency,
                            issuer_id=issuer_id,
                            from_id=from_id,
                            to_id=to_id,
                            amount=amount,
                            action=action,
                            ts=ts,
                        )
                        dbsession.add(movement)
                        movement_dict[row_key] = movement
                        existing_movements.append(movement)
                        change_log.append({
                            'event_type': 'movement_add',
                            'transfer_id': transfer_id,
                            'movement_number': number,
                        })
        if movements_unseen:
            # A previously stored movement is gone from the download.
            old_movement_numbers = sorted(
                row_key[0] for row_key in movement_dict.keys())
            new_movement_numbers = sorted(
                movement['number'] for movement in item_movements)
            msg = (
                "Verification failure in transfer %s. "
                "Previously downloaded movement(s) are no longer available. "
                "Old movement numbers: %s, new movement numbers: %s" %
                (transfer_id, old_movement_numbers, new_movement_numbers))
            log.error(msg)
            raise VerificationFailure(msg, transfer_id=transfer_id)
        if write_enabled:
            dbsession.flush()  # Assign the movement IDs and log the movements
            for interpreter in self.interpreters:
                interpreter.sync_file_movements(
                    record=record,
                    movements=list(movement_dict.values()),
                    is_new_record=is_new_record)
def summarize_movement(self, movement, transfer_id, ts):
"""Summarize a movement.
Return {(loop_id, currency, issuer_id): [amount]}.
"""
if not movement['to_id']:
number = movement['number']
raise AssertionError(
"Movement %s in transfer %s has no to_id"
% (number, transfer_id))
# res: {(loop_id, currency, issuer_id): [amount]}
res = collections.defaultdict(list)
for loop in movement['loops']:
loop_id = loop['loop_id']
currency = loop['currency']
issuer_id = loop['issuer_id']
amount = Decimal(loop['amount'])
res[(loop_id, currency, issuer_id)].append(amount)
return res
def verify_old_movement(
self, old_movement, transfer_id, number,
ts, from_id, to_id, action,
amount, issuer_id, loop_id, currency):
if old_movement.ts != ts:
msg = (
"Verification failure in transfer %s. "
"Movement %s has changed: "
"recorded timestamp is %s, "
"new timestamp is %s" % (
transfer_id, number,
old_movement.ts.isoformat(),
ts.isoformat()))
raise VerificationFailure(msg, transfer_id=transfer_id)
if (old_movement.from_id != from_id or
old_movement.to_id != to_id):
msg = (
"Verification failure in transfer %s. "
"Movement %s has changed: "
"movement was from %s to %s, "
"new movement is from %s to %s" % (
transfer_id, number,
old_movement.from_id,
old_movement.to_id,
from_id,
to_id))
raise VerificationFailure(msg, transfer_id=transfer_id)
for attr, new_value in (
('currency', currency),
('loop_id', loop_id),
('amount', amount),
('issuer_id', issuer_id),
('action', action),
):
old_value = getattr(old_movement, attr)
if new_value != old_value:
msg = (
"Verification failure in transfer %s. "
"Movement %s has changed: "
"recorded %s is %s, new %s is %s" % (
transfer_id, number,
attr, old_value,
attr, new_value))
raise VerificationFailure(msg, transfer_id=transfer_id)
@reify
def interpreters(self):
"""Prepare the owner's file-specific movement interpreters.
Ignore all archived Files.
"""
request = self.request
dbsession = request.dbsession
owner_id = self.owner_id
files = (
dbsession.query(File)
.filter(File.owner_id == owner_id, ~File.archived)
.order_by(File.id)
.all())
return [
MovementInterpreter(
request=self.request,
file=file,
change_log=self.change_log)
for file in files]
def sync_missing(self):
"""Fill in any missing transfer interpretations for the user's Files.
"""
for interpreter in self.interpreters:
interpreter.sync_missing()
| 36.966102 | 79 | 0.509078 |
from decimal import Decimal
from opnreco.models.db import File
from opnreco.models.db import Movement
from opnreco.models.db import now_func
from opnreco.models.db import OwnerLog
from opnreco.models.db import Peer
from opnreco.models.db import TransferDownloadRecord
from opnreco.models.db import TransferRecord
from opnreco.mvinterp import MovementInterpreter
from opnreco.util import check_requests_response
from opnreco.util import to_datetime
from pyramid.decorator import reify
import collections
import logging
import os
import requests
log = logging.getLogger(__name__)
# Module-level constants.  Their call sites are elsewhere in this file;
# the names suggest zero-amount comparisons and SQL NULL respectively.
zero = Decimal()  # Decimal('0')
null = None  # NOTE(review): apparently a readability alias for None
class VerificationFailure(Exception):
    """Raised when re-downloaded OPN data contradicts recorded data."""

    def __init__(self, msg, transfer_id):
        super().__init__(msg)
        # Keep the transfer identifier so callers can report which
        # transfer failed verification.
        self.transfer_id = transfer_id
class SyncBase:
    """Base for OPN sync: downloads transfer batches and records them.

    Subclasses (not visible here) presumably drive download_batch /
    import_transfer_records against the OPN API — TODO confirm.
    """

    # When False the sync runs dry: verification still happens but no
    # rows are added or mutated (see the write_enabled checks below).
    write_enabled = True
    # Optional cap on transfers per history_sync request (see
    # download_batch).
    batch_limit = None

    def __init__(self, request):
        # request: web request providing .owner, .dbsession,
        # .access_token, .personal_id, .wallet_info.
        self.request = request
        self.owner = owner = request.owner
        self.owner_id = owner.id
        # Base URL of the OPN API, taken from the environment.
        self.api_url = os.environ['opn_api_url']
        # Accumulates change events for reporting (see import_* methods).
        self.change_log = []
        # Cache of Peer rows keyed by peer_id.
        self.peers = {}
def download_batch(self, sync_ts_iso, sync_transfer_id, count_remain):
url = '%s/wallet/history_sync' % self.api_url
postdata = {
'sync_ts': sync_ts_iso,
'transfer_id': sync_transfer_id,
}
if count_remain:
postdata['count_remain'] = 'true'
if self.batch_limit:
postdata['limit'] = self.batch_limit
r = requests.post(
url,
data=postdata,
headers={'Authorization': 'Bearer %s' % self.request.access_token})
check_requests_response(r)
return r.json()
    def import_transfer_records(self, transfers_download):
        """Import a batch of downloaded transfer summaries into the DB.

        ``transfers_download`` is a decoded /wallet/history_sync
        response.  For each transfer summary this adds or updates a
        TransferRecord, imports the peers it mentions, records a
        TransferDownloadRecord, and imports the transfer's movements.
        Raises VerificationFailure when immutable recorded data has
        changed upstream.
        """
        dbsession = self.request.dbsession
        owner_id = self.owner_id
        write_enabled = self.write_enabled
        change_log = self.change_log
        transfer_ids = [item['id'] for item in transfers_download['results']]
        if not transfer_ids:
            # Nothing in this batch.
            return
        # Load the TransferRecords already stored for this batch.
        record_list = (
            dbsession.query(TransferRecord)
            .filter(
                TransferRecord.owner_id == owner_id,
                TransferRecord.transfer_id.in_(transfer_ids),
            )
            .all())
        record_map = {record.transfer_id: record for record in record_list}
        existing_movements_map = self.get_existing_movements_map(transfer_ids)
        # Collect every peer id the batch mentions: the owner, senders,
        # recipients, movement endpoints, and loop issuers.
        peer_ids = set()
        peer_ids.add(self.owner_id)
        for tsum in transfers_download['results']:
            sender_id = tsum['sender_id']
            if sender_id:
                peer_ids.add(sender_id)
            recipient_id = tsum['recipient_id']
            if recipient_id:
                peer_ids.add(recipient_id)
            for m in tsum['movements']:
                from_id = m['from_id']
                if from_id:
                    peer_ids.add(from_id)
                peer_ids.add(m['to_id'])
                for loop in m['loops']:
                    peer_ids.add(loop['issuer_id'])
        # Prime self.peers with the Peer rows that already exist.
        peer_rows = (
            dbsession.query(Peer)
            .filter(
                Peer.owner_id == owner_id,
                Peer.peer_id.in_(peer_ids),
            ).all())
        for peer in peer_rows:
            self.peers[peer.peer_id] = peer
        if write_enabled:
            self.import_peer(self.owner_id, None)
        for tsum in transfers_download['results']:
            if write_enabled:
                self.import_peer(tsum['sender_id'], tsum['sender_info'])
            if tsum.get('recipient_is_dfi_account'):
                # Copy before annotating so the downloaded data stays
                # untouched.
                recipient_info = {}
                recipient_info.update(tsum['recipient_info'])
                recipient_info['is_dfi_account'] = True
            else:
                recipient_info = tsum['recipient_info']
            if write_enabled:
                self.import_peer(tsum['recipient_id'], recipient_info)
            transfer_id = tsum['id']
            bundled_transfers = tsum.get('bundled_transfers')
            if (bundled_transfers is not None and
                    not isinstance(bundled_transfers, list)):
                raise ValueError(
                    "Transfer %s: bundled_transfers should be None or a list, "
                    "not %s" % (transfer_id, repr(bundled_transfers)))
            bundle_transfer_id = tsum.get('bundle_transfer_id')
            if bundle_transfer_id:
                bundle_transfer_id = str(bundle_transfer_id)
            changed = []
            # Attributes to store on the TransferRecord.
            kw = {
                'workflow_type': tsum['workflow_type'],
                'start': to_datetime(tsum['start']),
                'currency': tsum['currency'],
                'amount': Decimal(tsum['amount']),
                'timestamp': to_datetime(tsum['timestamp']),
                'next_activity': tsum['next_activity'],
                'completed': tsum['completed'],
                'canceled': tsum['canceled'],
                'sender_id': tsum['sender_id'] or None,
                'sender_uid': tsum['sender_uid'] or None,
                'sender_info': tsum['sender_info'],
                'recipient_id': tsum['recipient_id'] or None,
                'recipient_uid': tsum['recipient_uid'] or None,
                'recipient_info': tsum['recipient_info'],
                'bundled_transfers': bundled_transfers,
                'bundle_transfer_id': bundle_transfer_id,
            }
            record = record_map.get(transfer_id)
            if record is None:
                # Add a TransferRecord.
                is_new_record = True
                if write_enabled:
                    record = TransferRecord(
                        transfer_id=transfer_id,
                        owner_id=owner_id,
                        **kw)
                    changed.append(kw)
                    dbsession.add(record)
                    dbsession.flush()  # Assign record.id
                    record_map[transfer_id] = record
                change_log.append({
                    'event_type': 'transfer_add',
                    'transfer_id': transfer_id,
                })
            else:
                # Update a TransferRecord.
                is_new_record = False
                # These attributes must never change once recorded.
                immutable_attrs = ('workflow_type', 'start')
                for attr in immutable_attrs:
                    if kw[attr] != getattr(record, attr):
                        msg = (
                            "Verification failure in transfer %s. "
                            "Immutable attribute changed. "
                            "Old %s was %s, new %s is %s" %
                            (transfer_id, attr, repr(getattr(record, attr)),
                             attr, repr(kw[attr])))
                        log.error(msg)
                        raise VerificationFailure(msg, transfer_id=transfer_id)
                changed_map = {}
                for attr, value in sorted(kw.items()):
                    if getattr(record, attr) != value:
                        if write_enabled:
                            setattr(record, attr, value)
                        changed_map[attr] = value
                if changed_map:
                    changed.append(changed_map)
                    change_log.append({
                        'event_type': 'transfer_changes',
                        'transfer_id': transfer_id,
                        'changes': sorted(changed_map.keys()),
                    })
            if write_enabled:
                # Record what this download changed, for auditing.
                dbsession.add(TransferDownloadRecord(
                    opn_download_id=self.opn_download_id,
                    transfer_record_id=record.id,
                    transfer_id=transfer_id,
                    changed=changed))
            if record is not None:
                # record stays None only in dry-run mode for new transfers.
                self.import_movements(
                    record, tsum,
                    is_new_record=is_new_record,
                    existing_movements=existing_movements_map[record.id])
        dbsession.flush()
def get_existing_movements_map(self, transfer_ids):
dbsession = self.request.dbsession
owner_id = self.owner_id
all_movements = (
dbsession.query(Movement)
.join(
TransferRecord,
TransferRecord.id == Movement.transfer_record_id)
.filter(
TransferRecord.owner_id == owner_id,
TransferRecord.transfer_id.in_(transfer_ids))
.all())
res = collections.defaultdict(list)
for m in all_movements:
res[m.transfer_record_id].append(m)
return res
@reify
def account_map(self):
# Get the map of accounts from /wallet/info.
account_list = self.request.wallet_info['profile']['accounts']
return {a['id']: a for a in account_list}
    def import_peer(self, peer_id, info):
        """Add or refresh the Peer row for ``peer_id``.

        ``info`` is the peer info dict from the download (title,
        screen_name, is_dfi_account flags); it is replaced wholesale for
        the owner and for accounts found in account_map.
        """
        if not peer_id:
            # A transfer's sender or recipient is not yet known.
            return

        if not self.write_enabled:
            # This method doesn't need to do anything when writing is
            # disabled: it only adds/updates Peer and OwnerLog rows.
            return

        if peer_id == self.owner_id:
            # The owner's own profile is authoritative.
            info = {
                'title': self.owner.title,
                'screen_name': self.owner.username,
                'is_dfi_account': False,
                'is_own_dfi_account': False,
            }
        else:
            account = self.account_map.get(peer_id)
            if account:
                # This peer is one of the owner's own DFI accounts.
                title = '%s at %s' % (
                    account['redacted_account_num'],
                    account['rdfi_name'],
                )
                if account['alias']:
                    title += ' (%s)' % account['alias']
                info = {
                    'title': title,
                    'screen_name': '',
                    'is_dfi_account': True,
                    'is_own_dfi_account': True,
                }
        dbsession = self.request.dbsession
        peer = self.peers.get(peer_id)
        if peer is None:
            # First sighting of this peer: add a Peer row and log it.
            peer = Peer(
                owner_id=self.owner_id,
                peer_id=peer_id,
                title=info.get('title'),
                username=info.get('screen_name'),
                is_dfi_account=info.get('is_dfi_account'),
                is_own_dfi_account=info.get('is_own_dfi_account'),
                last_update=now_func,
            )
            dbsession.add(peer)
            self.change_log.append({
                'event_type': 'peer_add',
                'peer_id': peer_id,
            })
            self.peers[peer_id] = peer
            dbsession.add(OwnerLog(
                owner_id=self.owner_id,
                personal_id=self.request.personal_id,
                event_type='peer_add',
                content={
                    'peer_id': peer_id,
                    'info': info,
                }))
        else:
            # Known peer: apply only non-empty updates.
            attrs_found = 0
            changes = {}
            # Text attributes update whenever a non-empty value differs.
            attrs = (
                ('title', 'title'),
                ('screen_name', 'username'),
            )
            for source_attr, dest_attr in attrs:
                value = info.get(source_attr)
                if value:
                    attrs_found += 1
                    if getattr(peer, dest_attr) != value:
                        changes[dest_attr] = value
                        setattr(peer, dest_attr, value)
            # Boolean flags only ever latch from False to True here.
            attrs = (
                ('is_dfi_account', 'is_dfi_account'),
                ('is_own_dfi_account', 'is_own_dfi_account'),
            )
            for source_attr, dest_attr in attrs:
                value = info.get(source_attr)
                if value is not None:
                    attrs_found += 1
                    if value and not getattr(peer, dest_attr):
                        changes[dest_attr] = True
                        setattr(peer, dest_attr, True)
            if attrs_found:
                peer.last_update = now_func
            if changes:
                self.change_log.append({
                    'event_type': 'peer_update',
                    'peer_id': peer_id,
                })
                dbsession.add(OwnerLog(
                    owner_id=self.owner_id,
                    personal_id=self.request.personal_id,
                    event_type='peer_update',
                    content={
                        'peer_id': peer_id,
                        'changes': changes,
                    }))
    def import_movements(
            self, record, item, is_new_record, existing_movements):
        """Import the movements of one downloaded transfer summary.

        Verifies already-recorded movements still match the download,
        adds new Movement rows (when writing is enabled), and raises
        VerificationFailure if a previously recorded movement has
        disappeared from the download.
        """
        transfer_id = item['id']
        dbsession = self.request.dbsession
        write_enabled = self.write_enabled
        change_log = self.change_log
        # Index the recorded movements by their natural key.
        movement_dict = {}
        for movement in existing_movements:
            row_key = (
                movement.number,
                movement.amount_index,
                movement.loop_id,
                movement.currency,
                movement.issuer_id,
            )
            movement_dict[row_key] = movement
        # Track recorded movements not seen in this download.
        movements_unseen = set(movement_dict.keys())
        item_movements = item['movements'] or ()
        for movement in item_movements:
            number = movement.get('number')
            if not number:
                raise ValueError(
                    "The OPN service needs to be migrated to support "
                    "movement numbers. (OPN: upgrade and run bin/resummarize)")
            ts = to_datetime(movement['timestamp'])
            action = movement['action']
            from_id = movement['from_id']
            to_id = movement['to_id']
            # by_loop: {(loop_id, currency, issuer_id): [amount, ...]}
            by_loop = self.summarize_movement(
                movement=movement, transfer_id=transfer_id, ts=ts)
            for loop_key, delta_list in sorted(by_loop.items()):
                loop_id, currency, issuer_id = loop_key
                for amount_index, amount in enumerate(delta_list):
                    row_key = (number, amount_index) + loop_key
                    old_movement = movement_dict.get(row_key)
                    if old_movement is not None:
                        # Already recorded: verify it hasn't changed.
                        movements_unseen.discard(row_key)
                        self.verify_old_movement(
                            transfer_id=transfer_id,
                            number=number,
                            old_movement=old_movement,
                            ts=ts,
                            from_id=from_id,
                            to_id=to_id,
                            action=action,
                            amount=amount,
                            loop_id=loop_id,
                            currency=currency,
                            issuer_id=issuer_id,
                        )
                        continue
                    if write_enabled:
                        # Record the new movement.
                        movement = Movement(
                            transfer_record_id=record.id,
                            owner_id=self.owner_id,
                            number=number,
                            amount_index=amount_index,
                            loop_id=loop_id,
                            currency=currency,
                            issuer_id=issuer_id,
                            from_id=from_id,
                            to_id=to_id,
                            amount=amount,
                            action=action,
                            ts=ts,
                        )
                        dbsession.add(movement)
                        movement_dict[row_key] = movement
                        existing_movements.append(movement)
                    change_log.append({
                        'event_type': 'movement_add',
                        'transfer_id': transfer_id,
                        'movement_number': number,
                    })
        if movements_unseen:
            # A recorded movement vanished from the download: refuse.
            old_movement_numbers = sorted(
                row_key[0] for row_key in movement_dict.keys())
            new_movement_numbers = sorted(
                movement['number'] for movement in item_movements)
            msg = (
                "Verification failure in transfer %s. "
                "Previously downloaded movement(s) are no longer available. "
                "Old movement numbers: %s, new movement numbers: %s" %
                (transfer_id, old_movement_numbers, new_movement_numbers))
            log.error(msg)
            raise VerificationFailure(msg, transfer_id=transfer_id)
        if write_enabled:
            dbsession.flush()  # Assign the movement IDs
            # Let each file interpreter react to the movements.
            for interpreter in self.interpreters:
                interpreter.sync_file_movements(
                    record=record,
                    movements=list(movement_dict.values()),
                    is_new_record=is_new_record)
    def summarize_movement(self, movement, transfer_id, ts):
        """Return {(loop_id, currency, issuer_id): [Decimal amount]}.

        Raises AssertionError when the movement lacks a to_id.
        ``ts`` is accepted but unused here.
        """
        if not movement['to_id']:
            number = movement['number']
            raise AssertionError(
                "Movement %s in transfer %s has no to_id"
                % (number, transfer_id))
        # Group the loop amounts by (loop_id, currency, issuer_id).
        res = collections.defaultdict(list)
        for loop in movement['loops']:
            loop_id = loop['loop_id']
            currency = loop['currency']
            issuer_id = loop['issuer_id']
            amount = Decimal(loop['amount'])
            res[(loop_id, currency, issuer_id)].append(amount)
        return res
    def verify_old_movement(
            self, old_movement, transfer_id, number,
            ts, from_id, to_id, action,
            amount, issuer_id, loop_id, currency):
        """Verify a re-downloaded movement matches the recorded Movement.

        Raises VerificationFailure on the first mismatch; returns None
        when everything matches.
        """
        # Timestamp gets a dedicated message with ISO formatting.
        if old_movement.ts != ts:
            msg = (
                "Verification failure in transfer %s. "
                "Movement %s has changed: "
                "recorded timestamp is %s, "
                "new timestamp is %s" % (
                    transfer_id, number,
                    old_movement.ts.isoformat(),
                    ts.isoformat()))
            raise VerificationFailure(msg, transfer_id=transfer_id)
        # The two endpoints are compared together.
        if (old_movement.from_id != from_id or
                old_movement.to_id != to_id):
            msg = (
                "Verification failure in transfer %s. "
                "Movement %s has changed: "
                "movement was from %s to %s, "
                "new movement is from %s to %s" % (
                    transfer_id, number,
                    old_movement.from_id,
                    old_movement.to_id,
                    from_id,
                    to_id))
            raise VerificationFailure(msg, transfer_id=transfer_id)
        # The remaining attributes share one message template.
        for attr, new_value in (
                ('currency', currency),
                ('loop_id', loop_id),
                ('amount', amount),
                ('issuer_id', issuer_id),
                ('action', action),
                ):
            old_value = getattr(old_movement, attr)
            if new_value != old_value:
                msg = (
                    "Verification failure in transfer %s. "
                    "Movement %s has changed: "
                    "recorded %s is %s, new %s is %s" % (
                        transfer_id, number,
                        attr, old_value,
                        attr, new_value))
                raise VerificationFailure(msg, transfer_id=transfer_id)
    @reify
    def interpreters(self):
        """One MovementInterpreter per non-archived File, ordered by id.

        Cached on the instance after first access (via @reify).
        """
        request = self.request
        dbsession = request.dbsession
        owner_id = self.owner_id
        files = (
            dbsession.query(File)
            .filter(File.owner_id == owner_id, ~File.archived)
            .order_by(File.id)
            .all())
        return [
            MovementInterpreter(
                request=self.request,
                file=file,
                change_log=self.change_log)
            for file in files]
    def sync_missing(self):
        """Fill in missing transfer interpretations for the user's Files."""
        for interpreter in self.interpreters:
            interpreter.sync_missing()
| true | true |
f727055625800f39e74865dd3234c711f006f0de | 23,966 | py | Python | electroncash/tests/test_transaction.py | christroutner/Electron-Cash | d5217ed3e878bd56977181f022f9e5c43f449241 | [
"MIT"
] | 208 | 2017-07-25T19:52:15.000Z | 2018-09-21T13:44:58.000Z | electroncash/tests/test_transaction.py | christroutner/Electron-Cash | d5217ed3e878bd56977181f022f9e5c43f449241 | [
"MIT"
] | 1,478 | 2018-09-24T09:30:13.000Z | 2022-03-29T15:48:17.000Z | electroncash/tests/test_transaction.py | christroutner/Electron-Cash | d5217ed3e878bd56977181f022f9e5c43f449241 | [
"MIT"
] | 159 | 2018-09-24T12:56:47.000Z | 2022-03-28T23:52:17.000Z | import unittest
from pprint import pprint
from .. import transaction
from ..address import Address, ScriptOutput, PublicKey
from ..bitcoin import TYPE_ADDRESS, TYPE_PUBKEY, TYPE_SCRIPT
from ..keystore import xpubkey_to_address
from ..util import bh2u
# Transaction fixtures: ``unsigned_blob`` and ``signed_blob`` are the same
# 1-input/1-output p2pkh transaction before and after signing; ``v2_blob``
# is a version-2 transaction; ``nonmin_blob`` has a non-minimal
# (PUSHDATA1) push in its scriptSig.
unsigned_blob = '010000000149f35e43fefd22d8bb9e4b3ff294c6286154c25712baf6ab77b646e5074d6aed010000005701ff4c53ff0488b21e0000000000000000004f130d773e678a58366711837ec2e33ea601858262f8eaef246a7ebd19909c9a03c3b30e38ca7d797fee1223df1c9827b2a9f3379768f520910260220e0560014600002300feffffffd8e43201000000000118e43201000000001976a914e158fb15c888037fdc40fb9133b4c1c3c688706488ac5fbd0700'
signed_blob = '010000000149f35e43fefd22d8bb9e4b3ff294c6286154c25712baf6ab77b646e5074d6aed010000006a473044022025bdc804c6fe30966f6822dc25086bc6bb0366016e68e880cf6efd2468921f3202200e665db0404f6d6d9f86f73838306ac55bb0d0f6040ac6047d4e820f24f46885412103b5bbebceeb33c1b61f649596b9c3611c6b2853a1f6b48bce05dd54f667fa2166feffffff0118e43201000000001976a914e158fb15c888037fdc40fb9133b4c1c3c688706488ac5fbd0700'
v2_blob = "0200000001191601a44a81e061502b7bfbc6eaa1cef6d1e6af5308ef96c9342f71dbf4b9b5000000006b483045022100a6d44d0a651790a477e75334adfb8aae94d6612d01187b2c02526e340a7fd6c8022028bdf7a64a54906b13b145cd5dab21a26bd4b85d6044e9b97bceab5be44c2a9201210253e8e0254b0c95776786e40984c1aa32a7d03efa6bdacdea5f421b774917d346feffffff026b20fa04000000001976a914024db2e87dd7cfd0e5f266c5f212e21a31d805a588aca0860100000000001976a91421919b94ae5cefcdf0271191459157cdb41c4cbf88aca6240700"
nonmin_blob = '010000000142b88360bd83813139af3a251922b7f3d2ac88e45a2a703c28db8ee8580dc3a300000000654c41151dc44bece88c5933d737176499209a0b1688d5eb51eb6f1fd9fcf2fb32d138c94b96a4311673b75a31c054210b2058735ce6c12e529ddea4a6b91e4a3786d94121034a29987f30ad5d23d79ed5215e034c51f6825bdb2aa595c2bdeb37902960b3d1feffffff012e030000000000001976a914480d1be8ab76f8cdd85ce4077f51d35b0baaa25a88ac4b521400'
class TestBCDataStream(unittest.TestCase):
    """Exercise the BCDataStream serialization helper."""

    def test_compact_size(self):
        s = transaction.BCDataStream()
        # Boundary values around the 1-, 3-, 5- and 9-byte compact-size
        # encodings.
        values = [0, 1, 252, 253, 2**16-1, 2**16, 2**32-1, 2**32, 2**64-1]
        for v in values:
            s.write_compact_size(v)
        # Negative sizes are rejected.
        with self.assertRaises(transaction.SerializationError):
            s.write_compact_size(-1)
        self.assertEqual(bh2u(s.input),
                         '0001fcfdfd00fdfffffe00000100feffffffffff0000000001000000ffffffffffffffffff')
        # Values round-trip in write order.
        for v in values:
            self.assertEqual(s.read_compact_size(), v)
        # Reading past the end raises.
        with self.assertRaises(transaction.SerializationError):
            s.read_compact_size()

    def test_string(self):
        s = transaction.BCDataStream()
        # Reading from an empty stream raises.
        with self.assertRaises(transaction.SerializationError):
            s.read_string()
        msgs = ['Hello', ' ', 'World', '', '!']
        for msg in msgs:
            s.write_string(msg)
        # Strings round-trip in write order, including empty strings.
        for msg in msgs:
            self.assertEqual(s.read_string(), msg)
        with self.assertRaises(transaction.SerializationError):
            s.read_string()

    def test_bytes(self):
        s = transaction.BCDataStream()
        s.write(b'foobar')
        self.assertEqual(s.read_bytes(3), b'foo')
        self.assertEqual(s.read_bytes(2), b'ba')
        # Reading more than remains returns only what is left, then b''.
        self.assertEqual(s.read_bytes(4), b'r')
        self.assertEqual(s.read_bytes(1), b'')
class TestTransaction(unittest.TestCase):
    """Deserialization, signing, and txid tests for Transaction."""

    def test_tx_unsigned(self):
        # Expected deserialized form of ``unsigned_blob``: one p2pkh
        # input whose signature is still missing (signatures: [None]),
        # one p2pkh output.
        expected = {
            'inputs': [{'address': Address.from_string('13Vp8Y3hD5Cb6sERfpxePz5vGJizXbWciN'),
                        'num_sig': 1,
                        'prevout_hash': 'ed6a4d07e546b677abf6ba1257c2546128c694f23f4b9ebbd822fdfe435ef349',
                        'prevout_n': 1,
                        'pubkeys': ['03b5bbebceeb33c1b61f649596b9c3611c6b2853a1f6b48bce05dd54f667fa2166'],
                        'sequence': 4294967294,
                        'signatures': [None],
                        'type': 'p2pkh',
                        'value': 20112600,
                        'x_pubkeys': ['ff0488b21e0000000000000000004f130d773e678a58366711837ec2e33ea601858262f8eaef246a7ebd19909c9a03c3b30e38ca7d797fee1223df1c9827b2a9f3379768f520910260220e0560014600002300']}],
            'lockTime': 507231,
            'outputs': [{'address': Address.from_string('1MYXdf4moacvaEKZ57ozerpJ3t9xSeN6LK'),
                         'prevout_n': 0,
                         'scriptPubKey': '76a914e158fb15c888037fdc40fb9133b4c1c3c688706488ac',
                         'type': 0,
                         'value': 20112408}],
            'version': 1}
        tx = transaction.Transaction(unsigned_blob)
        # deserialize() parses once and returns None on later calls.
        calc = tx.deserialize()
        self.assertEqual(calc, expected)
        self.assertEqual(tx.deserialize(), None)
        self.assertEqual(tx.as_dict(), {'hex': unsigned_blob, 'complete': False, 'final': True})
        self.assertEqual(tx.get_outputs(), [(Address.from_string('1MYXdf4moacvaEKZ57ozerpJ3t9xSeN6LK'), 20112408)])
        self.assertEqual(tx.get_output_addresses(), [Address.from_string('1MYXdf4moacvaEKZ57ozerpJ3t9xSeN6LK')])
        self.assertTrue(tx.has_address(Address.from_string('1MYXdf4moacvaEKZ57ozerpJ3t9xSeN6LK')))
        self.assertTrue(tx.has_address(Address.from_string('13Vp8Y3hD5Cb6sERfpxePz5vGJizXbWciN')))
        self.assertFalse(tx.has_address(Address.from_string('1CQj15y1N7LDHp7wTt28eoD1QhHgFgxECH')))
        self.assertEqual(tx.serialize(), unsigned_blob)
        # Adding the missing signature yields the fully signed blob.
        tx.update_signatures(['3044022025bdc804c6fe30966f6822dc25086bc6bb0366016e68e880cf6efd2468921f3202200e665db0404f6d6d9f86f73838306ac55bb0d0f6040ac6047d4e820f24f46885'])
        self.assertEqual(tx.raw, signed_blob)
        # Re-serializing after resetting the cached raw reproduces the
        # same deserialized structure.
        tx.update(unsigned_blob)
        tx.raw = None
        blob = str(tx)
        self.assertEqual(transaction.deserialize(blob), expected)
    def test_tx_signed(self):
        # Expected deserialized form of ``signed_blob``: same transaction
        # as test_tx_unsigned but with the scriptSig/signature present.
        expected = {
            'inputs': [{'address': Address.from_string('13Vp8Y3hD5Cb6sERfpxePz5vGJizXbWciN'),
                        'num_sig': 1,
                        'prevout_hash': 'ed6a4d07e546b677abf6ba1257c2546128c694f23f4b9ebbd822fdfe435ef349',
                        'prevout_n': 1,
                        'pubkeys': ['03b5bbebceeb33c1b61f649596b9c3611c6b2853a1f6b48bce05dd54f667fa2166'],
                        'scriptSig': '473044022025bdc804c6fe30966f6822dc25086bc6bb0366016e68e880cf6efd2468921f3202200e665db0404f6d6d9f86f73838306ac55bb0d0f6040ac6047d4e820f24f46885412103b5bbebceeb33c1b61f649596b9c3611c6b2853a1f6b48bce05dd54f667fa2166',
                        'sequence': 4294967294,
                        'signatures': ['3044022025bdc804c6fe30966f6822dc25086bc6bb0366016e68e880cf6efd2468921f3202200e665db0404f6d6d9f86f73838306ac55bb0d0f6040ac6047d4e820f24f4688541'],
                        'type': 'p2pkh',
                        'x_pubkeys': ['03b5bbebceeb33c1b61f649596b9c3611c6b2853a1f6b48bce05dd54f667fa2166']}],
            'lockTime': 507231,
            'outputs': [{'address': Address.from_string('1MYXdf4moacvaEKZ57ozerpJ3t9xSeN6LK'),
                         'prevout_n': 0,
                         'scriptPubKey': '76a914e158fb15c888037fdc40fb9133b4c1c3c688706488ac',
                         'type': 0,
                         'value': 20112408}],
            'version': 1
        }
        tx = transaction.Transaction(signed_blob)
        self.assertEqual(tx.deserialize(), expected)
        self.assertEqual(tx.deserialize(), None)
        self.assertEqual(tx.as_dict(), {'hex': signed_blob, 'complete': True, 'final': True})
        self.assertEqual(tx.serialize(), signed_blob)
        # Re-applying a (truncated) signature must not break size
        # estimation.
        tx.update_signatures([expected['inputs'][0]['signatures'][0][:-2]])
        self.assertEqual(tx.estimated_size(), 191)
    def test_tx_nonminimal_scriptSig(self):
        # The nonminimal push is the '4c41...' (PUSHDATA1 length=0x41 [...]) at
        # the start of the scriptSig. Minimal is '41...' (PUSH0x41 [...]).
        expected = {
            'inputs': [{'address': Address.from_pubkey('034a29987f30ad5d23d79ed5215e034c51f6825bdb2aa595c2bdeb37902960b3d1'),
                        'num_sig': 1,
                        'prevout_hash': 'a3c30d58e88edb283c702a5ae488acd2f3b72219253aaf39318183bd6083b842',
                        'prevout_n': 0,
                        'pubkeys': ['034a29987f30ad5d23d79ed5215e034c51f6825bdb2aa595c2bdeb37902960b3d1'],
                        'scriptSig': '4c41151dc44bece88c5933d737176499209a0b1688d5eb51eb6f1fd9fcf2fb32d138c94b96a4311673b75a31c054210b2058735ce6c12e529ddea4a6b91e4a3786d94121034a29987f30ad5d23d79ed5215e034c51f6825bdb2aa595c2bdeb37902960b3d1',
                        'sequence': 4294967294,
                        'signatures': ['151dc44bece88c5933d737176499209a0b1688d5eb51eb6f1fd9fcf2fb32d138c94b96a4311673b75a31c054210b2058735ce6c12e529ddea4a6b91e4a3786d941'],
                        'type': 'p2pkh',
                        'x_pubkeys': ['034a29987f30ad5d23d79ed5215e034c51f6825bdb2aa595c2bdeb37902960b3d1']}],
            'lockTime': 1331787,
            'outputs': [{'address': Address.from_pubkey('034a29987f30ad5d23d79ed5215e034c51f6825bdb2aa595c2bdeb37902960b3d1'),
                         'prevout_n': 0,
                         'scriptPubKey': '76a914480d1be8ab76f8cdd85ce4077f51d35b0baaa25a88ac',
                         'type': 0,
                         'value': 814}],
            'version': 1
        }
        tx = transaction.Transaction(nonmin_blob)
        self.assertEqual(tx.deserialize(), expected)
        self.assertEqual(tx.deserialize(), None)
        self.assertEqual(tx.as_dict(), {'hex': nonmin_blob, 'complete': True, 'final': True})
        self.assertEqual(tx.serialize(), nonmin_blob)
        # The original push must be preserved so the txid stays correct.
        # if original push is lost, will wrongly be e64808c1eb86e8cab68fcbd8b7f3b01f8cc8f39bd05722f1cf2d7cd9b35fb4e3
        self.assertEqual(tx.txid(), '66020177ae3273d874728667b6a24e0a1c0200079119f3d0c294da40f0e85d34')
        # cause it to lose the original push, and reserialize with minimal
        del tx.inputs()[0]['scriptSig']
        self.assertEqual(tx.txid(), 'e64808c1eb86e8cab68fcbd8b7f3b01f8cc8f39bd05722f1cf2d7cd9b35fb4e3')
    def test_errors(self):
        # A bad output type and an empty extended pubkey must both raise.
        with self.assertRaises(TypeError):
            transaction.Transaction.pay_script(output_type=None, addr='')
        with self.assertRaises(BaseException):
            xpubkey_to_address('')

    def test_parse_xpub(self):
        # An extended x_pubkey (the 'fe...' serialized form) resolves to
        # an uncompressed pubkey plus its p2pkh address.
        res = xpubkey_to_address('fe4e13b0f311a55b8a5db9a32e959da9f011b131019d4cebe6141b9e2c93edcbfc0954c358b062a9f94111548e50bde5847a3096b8b7872dcffadb0e9579b9017b01000200')
        self.assertEqual(res, ('04ee98d63800824486a1cf5b4376f2f574d86e0a3009a6448105703453f3368e8e1d8d090aaecdd626a45cc49876709a3bbb6dc96a4311b3cac03e225df5f63dfc', Address.from_string('19h943e4diLc68GXW7G75QNe2KWuMu7BaJ')))

    def test_version_field(self):
        # txid must incorporate the version field (version 2 here).
        tx = transaction.Transaction(v2_blob)
        self.assertEqual(tx.txid(), "b97f9180173ab141b61b9f944d841e60feec691d6daab4d4d932b24dd36606fe")
    # The following tests pin the computed txid for one real transaction
    # of each common input/output script combination.

    def test_txid_coinbase_to_p2pk(self):
        tx = transaction.Transaction('01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4103400d0302ef02062f503253482f522cfabe6d6dd90d39663d10f8fd25ec88338295d4c6ce1c90d4aeb368d8bdbadcc1da3b635801000000000000000474073e03ffffffff013c25cf2d01000000434104b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e6537a576782eba668a7ef8bd3b3cfb1edb7117ab65129b8a2e681f3c1e0908ef7bac00000000')
        self.assertEqual('dbaf14e1c476e76ea05a8b71921a46d6b06f0a950f17c5f9f1a03b8fae467f10', tx.txid())

    def test_txid_coinbase_to_p2pkh(self):
        tx = transaction.Transaction('01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff25033ca0030400001256124d696e656420627920425443204775696c640800000d41000007daffffffff01c00d1298000000001976a91427a1f12771de5cc3b73941664b2537c15316be4388ac00000000')
        self.assertEqual('4328f9311c6defd9ae1bd7f4516b62acf64b361eb39dfcf09d9925c5fd5c61e8', tx.txid())

    def test_txid_p2pk_to_p2pkh(self):
        tx = transaction.Transaction('010000000118231a31d2df84f884ced6af11dc24306319577d4d7c340124a7e2dd9c314077000000004847304402200b6c45891aed48937241907bc3e3868ee4c792819821fcde33311e5a3da4789a02205021b59692b652a01f5f009bd481acac2f647a7d9c076d71d85869763337882e01fdffffff016c95052a010000001976a9149c4891e7791da9e622532c97f43863768264faaf88ac00000000')
        self.assertEqual('90ba90a5b115106d26663fce6c6215b8699c5d4b2672dd30756115f3337dddf9', tx.txid())

    def test_txid_p2pk_to_p2sh(self):
        tx = transaction.Transaction('0100000001e4643183d6497823576d17ac2439fb97eba24be8137f312e10fcc16483bb2d070000000048473044022032bbf0394dfe3b004075e3cbb3ea7071b9184547e27f8f73f967c4b3f6a21fa4022073edd5ae8b7b638f25872a7a308bb53a848baa9b9cc70af45fcf3c683d36a55301fdffffff011821814a0000000017a9143c640bc28a346749c09615b50211cb051faff00f8700000000')
        self.assertEqual('172bdf5a690b874385b98d7ab6f6af807356f03a26033c6a65ab79b4ac2085b5', tx.txid())

    def test_txid_p2pkh_to_p2pkh(self):
        tx = transaction.Transaction('0100000001f9dd7d33f315617530dd72264b5d9c69b815626cce3f66266d1015b1a590ba90000000006a4730440220699bfee3d280a499daf4af5593e8750b54fef0557f3c9f717bfa909493a84f60022057718eec7985b7796bb8630bf6ea2e9bf2892ac21bd6ab8f741a008537139ffe012103b4289890b40590447b57f773b5843bf0400e9cead08be225fac587b3c2a8e973fdffffff01ec24052a010000001976a914ce9ff3d15ed5f3a3d94b583b12796d063879b11588ac00000000')
        self.assertEqual('24737c68f53d4b519939119ed83b2a8d44d716d7f3ca98bcecc0fbb92c2085ce', tx.txid())

    def test_txid_p2pkh_to_p2sh(self):
        tx = transaction.Transaction('010000000195232c30f6611b9f2f82ec63f5b443b132219c425e1824584411f3d16a7a54bc000000006b4830450221009f39ac457dc8ff316e5cc03161c9eff6212d8694ccb88d801dbb32e85d8ed100022074230bb05e99b85a6a50d2b71e7bf04d80be3f1d014ea038f93943abd79421d101210317be0f7e5478e087453b9b5111bdad586038720f16ac9658fd16217ffd7e5785fdffffff0200e40b540200000017a914d81df3751b9e7dca920678cc19cac8d7ec9010b08718dfd63c2c0000001976a914303c42b63569ff5b390a2016ff44651cd84c7c8988acc7010000')
        self.assertEqual('155e4740fa59f374abb4e133b87247dccc3afc233cb97c2bf2b46bba3094aedc', tx.txid())

    def test_txid_p2sh_to_p2pkh(self):
        tx = transaction.Transaction('0100000001b98d550fa331da21038952d6931ffd3607c440ab2985b75477181b577de118b10b000000fdfd0000483045022100a26ea637a6d39aa27ea7a0065e9691d477e23ad5970b5937a9b06754140cf27102201b00ed050b5c468ee66f9ef1ff41dfb3bd64451469efaab1d4b56fbf92f9df48014730440220080421482a37cc9a98a8dc3bf9d6b828092ad1a1357e3be34d9c5bbdca59bb5f02206fa88a389c4bf31fa062977606801f3ea87e86636da2625776c8c228bcd59f8a014c69522102420e820f71d17989ed73c0ff2ec1c1926cf989ad6909610614ee90cf7db3ef8721036eae8acbae031fdcaf74a824f3894bf54881b42911bd3ad056ea59a33ffb3d312103752669b75eb4dc0cca209af77a59d2c761cbb47acc4cf4b316ded35080d92e8253aeffffffff0101ac3a00000000001976a914a6b6bcc85975bf6a01a0eabb2ac97d5a418223ad88ac00000000')
        self.assertEqual('0ea982e8e601863e604ef6d9acf9317ae59d3eac9cafee6dd946abadafd35af8', tx.txid())

    def test_txid_p2sh_to_p2sh(self):
        tx = transaction.Transaction('01000000018695eef2250b3a3b6ef45fe065e601610e69dd7a56de742092d40e6276e6c9ec00000000fdfd000047304402203199bf8e49f7203e8bcbfd754aa356c6ba61643a3490f8aef3888e0aaa7c048c02201e7180bfd670f4404e513359b4020fbc85d6625e3e265e0c357e8611f11b83e401483045022100e60f897db114679f9a310a032a22e9a7c2b8080affe2036c480ff87bf6f45ada02202dbd27af38dd97d418e24d89c3bb7a97e359dd927c1094d8c9e5cac57df704fb014c69522103adc563b9f5e506f485978f4e913c10da208eac6d96d49df4beae469e81a4dd982102c52bc9643a021464a31a3bfa99cfa46afaa4b3acda31e025da204b4ee44cc07a2103a1c8edcc3310b3d7937e9e4179e7bd9cdf31c276f985f4eb356f21b874225eb153aeffffffff02b8ce05000000000017a9145c9c158430b7b79c3ad7ef9bdf981601eda2412d87b82400000000000017a9146bf3ff89019ecc5971a39cdd4f1cabd3b647ad5d8700000000')
        self.assertEqual('2caab5a11fa1ec0f5bb014b8858d00fecf2c001e15d22ad04379ad7b36fef305', tx.txid())
    # The following tests pin how each scriptPubKey form is classified.
    # Minimal-push scripts parse to their typed output (address/pubkey);
    # non-minimal or malformed variants fall back to TYPE_SCRIPT.

    def test_parse_output_p2pkh(self):
        tx = transaction.Transaction('010000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000001976a914aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa88ac00000000')
        self.assertEqual(tx.outputs(), [(TYPE_ADDRESS, Address.from_P2PKH_hash(b'\xaa'*20), 0)])
        self.assertEqual('7a0e3fcbdaa9ecc6ccce1ad325b6b661e774a57f2e8519c679964e2dd32e200f', tx.txid())

    def test_parse_output_p2pkh_nonmin(self):
        # Non-minimal push inside the script: not a recognized address.
        tx = transaction.Transaction('010000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000001a76a94c14aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa88ac00000000')
        self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(bytes.fromhex('76a94c14aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa88ac')), 0)])
        self.assertEqual('69706667959fd2e6aa3385acdcd2c478e875344422e1f4c94eb06065268540d1', tx.txid())

    def test_parse_output_p2sh(self):
        tx = transaction.Transaction('0100000001000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000017a914aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa8700000000')
        self.assertEqual(tx.outputs(), [(TYPE_ADDRESS, Address.from_P2SH_hash(b'\xaa'*20), 0)])
        self.assertEqual('d33750908965d24a411d94371fdc64ebb06f13bf4d19e73372347e6b4eeca49f', tx.txid())

    def test_parse_output_p2sh_nonmin(self):
        tx = transaction.Transaction('0100000001000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000018a94c14aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa8700000000')
        self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(bytes.fromhex('a94c14aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa87')), 0)])
        self.assertEqual('dd4b174d7094c63c9f530703702a8d76c7b3fe5fc278ba2837dbd75bc5b0b296', tx.txid())

    def test_parse_output_p2pk(self):
        tx = transaction.Transaction('010000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000002321030000000000000000000000000000000000000000000000000000000000000000ac00000000')
        self.assertEqual(tx.outputs(), [(TYPE_PUBKEY, PublicKey.from_pubkey(b'\x03' + b'\x00'*32), 0)])
        self.assertEqual('78afa0576a4ee6e7db663a58202f11bab8e860dd4a2226f856a2490187046b3d', tx.txid())

    def test_parse_output_p2pk_badpubkey(self):
        # '04' prefix with only 33 bytes: invalid pubkey, so TYPE_SCRIPT.
        tx = transaction.Transaction('010000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000002321040000000000000000000000000000000000000000000000000000000000000000ac00000000')
        self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(bytes.fromhex('21040000000000000000000000000000000000000000000000000000000000000000ac')), 0)])
        self.assertEqual('8e57f026081b6589570dc5e6e339b706d2ac75e6cbd1896275dee176b8d35ba6', tx.txid())

    def test_parse_output_p2pk_nonmin(self):
        tx = transaction.Transaction('01000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000244c21030000000000000000000000000000000000000000000000000000000000000000ac00000000')
        self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(bytes.fromhex('4c21030000000000000000000000000000000000000000000000000000000000000000ac'))، 0)])
        self.assertEqual('730d77384d7bfc965caa338b501e7b071092474320af6ea19052859c93bfaf98', tx.txid())

    def test_parse_output_p2pk_uncomp(self):
        tx = transaction.Transaction('0100000001000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000043410400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ac00000000')
        self.assertEqual(tx.outputs(), [(TYPE_PUBKEY, PublicKey.from_pubkey(b'\x04' + b'\x00'*64), 0)])
        self.assertEqual('053626542393dd957a14bb2bcbfdcf3564a5f438e923799e1b9714c4a8e70a7c', tx.txid())

    def test_parse_output_p2pk_uncomp_badpubkey(self):
        # '03' prefix with 65 bytes: invalid pubkey, so TYPE_SCRIPT.
        tx = transaction.Transaction('0100000001000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000043410300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ac00000000')
        self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(b'\x41\x03' + b'\x00'*64 + b'\xac'), 0)])
        self.assertEqual('a15a9f86f5a47ef7efc28ae701f5b2a353aff76a21cb22ff08b77759533fb59b', tx.txid())
def test_parse_output_p2pk_uncomp_nonmin(self):
tx = transaction.Transaction('01000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000444c410400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ac00000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(b'\x4c\x41\x04' + b'\x00'*64 + b'\xac'), 0)])
self.assertEqual('bd8e0827c8bacd6bac10dd28d5fc6ad52f3fef3f91200c7c1d8698531c9325e9', tx.txid())
def test_parse_output_baremultisig(self):
# no special support for recognizing bare multisig outputs
tx = transaction.Transaction('0100000001000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000025512103000000000000000000000000000000000000000000000000000000000000000051ae00000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(b'\x51\x21\x03' + b'\x00'*32 + b'\x51\xae'), 0)])
self.assertEqual('b1f66fde0aa3d5af03be3c69f599069aad217e939f36cacc2372ea4fece7d57b', tx.txid())
def test_parse_output_baremultisig_nonmin(self):
# even if bare multisig support is added, note that this case should still remain unrecognized
tx = transaction.Transaction('0100000001000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000026514c2103000000000000000000000000000000000000000000000000000000000000000051ae00000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(b'\x51\x4c\x21\x03' + b'\x00'*32 + b'\x51\xae'), 0)])
self.assertEqual('eb0b69c86a05499cabc42b12d4706b18eab97ed6155fc966e488a433edf05932', tx.txid())
def test_parse_output_truncated1(self):
# truncated in middle of PUSHDATA2's first argument
tx = transaction.Transaction('01000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000024d0100000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(b'\x4d\x01'), 0)])
self.assertIn("Invalid script", tx.outputs()[0][1].to_ui_string())
self.assertEqual('72d8af8edcc603c6c64390ac5eb913b97a80efe0f5ae7c00ad5397eb5786cd33', tx.txid())
def test_parse_output_truncated1(self):
# truncated in middle of PUSHDATA2's second argument
tx = transaction.Transaction('01000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000044d0200ff00000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(b'\x4d\x02\x00\xff'), 0)])
self.assertIn("Invalid script", tx.outputs()[0][1].to_ui_string())
self.assertEqual('976667816c4955189973cc56ac839844da4ed32a8bd22a8c6217c2c04e69e9d7', tx.txid())
def test_parse_output_empty(self):
# nothing wrong with empty output script
tx = transaction.Transaction('010000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(b''), 0)])
self.assertEqual("", tx.outputs()[0][1].to_ui_string())
self.assertEqual('50fa7bd4e5e2d3220fd2e84effec495b9845aba379d853408779d59a4b0b4f59', tx.txid())
class NetworkMock(object):
    """Minimal stand-in for the wallet's network client.

    Whatever request is made, ``synchronous_get`` answers with the canned
    ``unspent`` payload supplied at construction time.
    """

    def __init__(self, unspent):
        # Fixed response returned for every request.
        self.unspent = unspent

    def synchronous_get(self, arg):
        # The request argument is deliberately ignored: the fixture is static.
        return self.unspent
| 78.320261 | 780 | 0.791747 | import unittest
from pprint import pprint
from .. import transaction
from ..address import Address, ScriptOutput, PublicKey
from ..bitcoin import TYPE_ADDRESS, TYPE_PUBKEY, TYPE_SCRIPT
from ..keystore import xpubkey_to_address
from ..util import bh2u
unsigned_blob = '010000000149f35e43fefd22d8bb9e4b3ff294c6286154c25712baf6ab77b646e5074d6aed010000005701ff4c53ff0488b21e0000000000000000004f130d773e678a58366711837ec2e33ea601858262f8eaef246a7ebd19909c9a03c3b30e38ca7d797fee1223df1c9827b2a9f3379768f520910260220e0560014600002300feffffffd8e43201000000000118e43201000000001976a914e158fb15c888037fdc40fb9133b4c1c3c688706488ac5fbd0700'
signed_blob = '010000000149f35e43fefd22d8bb9e4b3ff294c6286154c25712baf6ab77b646e5074d6aed010000006a473044022025bdc804c6fe30966f6822dc25086bc6bb0366016e68e880cf6efd2468921f3202200e665db0404f6d6d9f86f73838306ac55bb0d0f6040ac6047d4e820f24f46885412103b5bbebceeb33c1b61f649596b9c3611c6b2853a1f6b48bce05dd54f667fa2166feffffff0118e43201000000001976a914e158fb15c888037fdc40fb9133b4c1c3c688706488ac5fbd0700'
v2_blob = "0200000001191601a44a81e061502b7bfbc6eaa1cef6d1e6af5308ef96c9342f71dbf4b9b5000000006b483045022100a6d44d0a651790a477e75334adfb8aae94d6612d01187b2c02526e340a7fd6c8022028bdf7a64a54906b13b145cd5dab21a26bd4b85d6044e9b97bceab5be44c2a9201210253e8e0254b0c95776786e40984c1aa32a7d03efa6bdacdea5f421b774917d346feffffff026b20fa04000000001976a914024db2e87dd7cfd0e5f266c5f212e21a31d805a588aca0860100000000001976a91421919b94ae5cefcdf0271191459157cdb41c4cbf88aca6240700"
nonmin_blob = '010000000142b88360bd83813139af3a251922b7f3d2ac88e45a2a703c28db8ee8580dc3a300000000654c41151dc44bece88c5933d737176499209a0b1688d5eb51eb6f1fd9fcf2fb32d138c94b96a4311673b75a31c054210b2058735ce6c12e529ddea4a6b91e4a3786d94121034a29987f30ad5d23d79ed5215e034c51f6825bdb2aa595c2bdeb37902960b3d1feffffff012e030000000000001976a914480d1be8ab76f8cdd85ce4077f51d35b0baaa25a88ac4b521400'
class TestBCDataStream(unittest.TestCase):
    """Round-trip tests for the BCDataStream binary (de)serializer."""
    def test_compact_size(self):
        # Write Bitcoin "compact size" varints spanning every encoding width
        # (1, 3, 5 and 9 bytes), then read them back in order.
        s = transaction.BCDataStream()
        values = [0, 1, 252, 253, 2**16-1, 2**16, 2**32-1, 2**32, 2**64-1]
        for v in values:
            s.write_compact_size(v)
        # Negative sizes are invalid and must be rejected.
        with self.assertRaises(transaction.SerializationError):
            s.write_compact_size(-1)
        # The concatenated encodings match the expected byte sequence.
        self.assertEqual(bh2u(s.input),
                         '0001fcfdfd00fdfffffe00000100feffffffffff0000000001000000ffffffffffffffffff')
        for v in values:
            self.assertEqual(s.read_compact_size(), v)
        # Reading past the end of the stream raises.
        with self.assertRaises(transaction.SerializationError):
            s.read_compact_size()
    def test_string(self):
        s = transaction.BCDataStream()
        # Reading from an empty stream raises.
        with self.assertRaises(transaction.SerializationError):
            s.read_string()
        # Length-prefixed strings round-trip in FIFO order, including
        # whitespace-only and empty strings.
        msgs = ['Hello', ' ', 'World', '', '!']
        for msg in msgs:
            s.write_string(msg)
        for msg in msgs:
            self.assertEqual(s.read_string(), msg)
        # Stream exhausted again: further reads raise.
        with self.assertRaises(transaction.SerializationError):
            s.read_string()
    def test_bytes(self):
        # Raw byte reads consume the buffer and short-read at the end
        # (requesting more than remains returns what is left, then b'').
        s = transaction.BCDataStream()
        s.write(b'foobar')
        self.assertEqual(s.read_bytes(3), b'foo')
        self.assertEqual(s.read_bytes(2), b'ba')
        self.assertEqual(s.read_bytes(4), b'r')
        self.assertEqual(s.read_bytes(1), b'')
class TestTransaction(unittest.TestCase):
def test_tx_unsigned(self):
expected = {
'inputs': [{'address': Address.from_string('13Vp8Y3hD5Cb6sERfpxePz5vGJizXbWciN'),
'num_sig': 1,
'prevout_hash': 'ed6a4d07e546b677abf6ba1257c2546128c694f23f4b9ebbd822fdfe435ef349',
'prevout_n': 1,
'pubkeys': ['03b5bbebceeb33c1b61f649596b9c3611c6b2853a1f6b48bce05dd54f667fa2166'],
'sequence': 4294967294,
'signatures': [None],
'type': 'p2pkh',
'value': 20112600,
'x_pubkeys': ['ff0488b21e0000000000000000004f130d773e678a58366711837ec2e33ea601858262f8eaef246a7ebd19909c9a03c3b30e38ca7d797fee1223df1c9827b2a9f3379768f520910260220e0560014600002300']}],
'lockTime': 507231,
'outputs': [{'address': Address.from_string('1MYXdf4moacvaEKZ57ozerpJ3t9xSeN6LK'),
'prevout_n': 0,
'scriptPubKey': '76a914e158fb15c888037fdc40fb9133b4c1c3c688706488ac',
'type': 0,
'value': 20112408}],
'version': 1}
tx = transaction.Transaction(unsigned_blob)
calc = tx.deserialize()
self.assertEqual(calc, expected)
self.assertEqual(tx.deserialize(), None)
self.assertEqual(tx.as_dict(), {'hex': unsigned_blob, 'complete': False, 'final': True})
self.assertEqual(tx.get_outputs(), [(Address.from_string('1MYXdf4moacvaEKZ57ozerpJ3t9xSeN6LK'), 20112408)])
self.assertEqual(tx.get_output_addresses(), [Address.from_string('1MYXdf4moacvaEKZ57ozerpJ3t9xSeN6LK')])
self.assertTrue(tx.has_address(Address.from_string('1MYXdf4moacvaEKZ57ozerpJ3t9xSeN6LK')))
self.assertTrue(tx.has_address(Address.from_string('13Vp8Y3hD5Cb6sERfpxePz5vGJizXbWciN')))
self.assertFalse(tx.has_address(Address.from_string('1CQj15y1N7LDHp7wTt28eoD1QhHgFgxECH')))
self.assertEqual(tx.serialize(), unsigned_blob)
tx.update_signatures(['3044022025bdc804c6fe30966f6822dc25086bc6bb0366016e68e880cf6efd2468921f3202200e665db0404f6d6d9f86f73838306ac55bb0d0f6040ac6047d4e820f24f46885'])
self.assertEqual(tx.raw, signed_blob)
tx.update(unsigned_blob)
tx.raw = None
blob = str(tx)
self.assertEqual(transaction.deserialize(blob), expected)
def test_tx_signed(self):
expected = {
'inputs': [{'address': Address.from_string('13Vp8Y3hD5Cb6sERfpxePz5vGJizXbWciN'),
'num_sig': 1,
'prevout_hash': 'ed6a4d07e546b677abf6ba1257c2546128c694f23f4b9ebbd822fdfe435ef349',
'prevout_n': 1,
'pubkeys': ['03b5bbebceeb33c1b61f649596b9c3611c6b2853a1f6b48bce05dd54f667fa2166'],
'scriptSig': '473044022025bdc804c6fe30966f6822dc25086bc6bb0366016e68e880cf6efd2468921f3202200e665db0404f6d6d9f86f73838306ac55bb0d0f6040ac6047d4e820f24f46885412103b5bbebceeb33c1b61f649596b9c3611c6b2853a1f6b48bce05dd54f667fa2166',
'sequence': 4294967294,
'signatures': ['3044022025bdc804c6fe30966f6822dc25086bc6bb0366016e68e880cf6efd2468921f3202200e665db0404f6d6d9f86f73838306ac55bb0d0f6040ac6047d4e820f24f4688541'],
'type': 'p2pkh',
'x_pubkeys': ['03b5bbebceeb33c1b61f649596b9c3611c6b2853a1f6b48bce05dd54f667fa2166']}],
'lockTime': 507231,
'outputs': [{'address': Address.from_string('1MYXdf4moacvaEKZ57ozerpJ3t9xSeN6LK'),
'prevout_n': 0,
'scriptPubKey': '76a914e158fb15c888037fdc40fb9133b4c1c3c688706488ac',
'type': 0,
'value': 20112408}],
'version': 1
}
tx = transaction.Transaction(signed_blob)
self.assertEqual(tx.deserialize(), expected)
self.assertEqual(tx.deserialize(), None)
self.assertEqual(tx.as_dict(), {'hex': signed_blob, 'complete': True, 'final': True})
self.assertEqual(tx.serialize(), signed_blob)
tx.update_signatures([expected['inputs'][0]['signatures'][0][:-2]])
self.assertEqual(tx.estimated_size(), 191)
def test_tx_nonminimal_scriptSig(self):
expected = {
'inputs': [{'address': Address.from_pubkey('034a29987f30ad5d23d79ed5215e034c51f6825bdb2aa595c2bdeb37902960b3d1'),
'num_sig': 1,
'prevout_hash': 'a3c30d58e88edb283c702a5ae488acd2f3b72219253aaf39318183bd6083b842',
'prevout_n': 0,
'pubkeys': ['034a29987f30ad5d23d79ed5215e034c51f6825bdb2aa595c2bdeb37902960b3d1'],
'scriptSig': '4c41151dc44bece88c5933d737176499209a0b1688d5eb51eb6f1fd9fcf2fb32d138c94b96a4311673b75a31c054210b2058735ce6c12e529ddea4a6b91e4a3786d94121034a29987f30ad5d23d79ed5215e034c51f6825bdb2aa595c2bdeb37902960b3d1',
'sequence': 4294967294,
'signatures': ['151dc44bece88c5933d737176499209a0b1688d5eb51eb6f1fd9fcf2fb32d138c94b96a4311673b75a31c054210b2058735ce6c12e529ddea4a6b91e4a3786d941'],
'type': 'p2pkh',
'x_pubkeys': ['034a29987f30ad5d23d79ed5215e034c51f6825bdb2aa595c2bdeb37902960b3d1']}],
'lockTime': 1331787,
'outputs': [{'address': Address.from_pubkey('034a29987f30ad5d23d79ed5215e034c51f6825bdb2aa595c2bdeb37902960b3d1'),
'prevout_n': 0,
'scriptPubKey': '76a914480d1be8ab76f8cdd85ce4077f51d35b0baaa25a88ac',
'type': 0,
'value': 814}],
'version': 1
}
tx = transaction.Transaction(nonmin_blob)
self.assertEqual(tx.deserialize(), expected)
self.assertEqual(tx.deserialize(), None)
self.assertEqual(tx.as_dict(), {'hex': nonmin_blob, 'complete': True, 'final': True})
self.assertEqual(tx.serialize(), nonmin_blob)
self.assertEqual(tx.txid(), '66020177ae3273d874728667b6a24e0a1c0200079119f3d0c294da40f0e85d34')
del tx.inputs()[0]['scriptSig']
self.assertEqual(tx.txid(), 'e64808c1eb86e8cab68fcbd8b7f3b01f8cc8f39bd05722f1cf2d7cd9b35fb4e3')
def test_errors(self):
with self.assertRaises(TypeError):
transaction.Transaction.pay_script(output_type=None, addr='')
with self.assertRaises(BaseException):
xpubkey_to_address('')
def test_parse_xpub(self):
res = xpubkey_to_address('fe4e13b0f311a55b8a5db9a32e959da9f011b131019d4cebe6141b9e2c93edcbfc0954c358b062a9f94111548e50bde5847a3096b8b7872dcffadb0e9579b9017b01000200')
self.assertEqual(res, ('04ee98d63800824486a1cf5b4376f2f574d86e0a3009a6448105703453f3368e8e1d8d090aaecdd626a45cc49876709a3bbb6dc96a4311b3cac03e225df5f63dfc', Address.from_string('19h943e4diLc68GXW7G75QNe2KWuMu7BaJ')))
def test_version_field(self):
tx = transaction.Transaction(v2_blob)
self.assertEqual(tx.txid(), "b97f9180173ab141b61b9f944d841e60feec691d6daab4d4d932b24dd36606fe")
def test_txid_coinbase_to_p2pk(self):
tx = transaction.Transaction('01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4103400d0302ef02062f503253482f522cfabe6d6dd90d39663d10f8fd25ec88338295d4c6ce1c90d4aeb368d8bdbadcc1da3b635801000000000000000474073e03ffffffff013c25cf2d01000000434104b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e6537a576782eba668a7ef8bd3b3cfb1edb7117ab65129b8a2e681f3c1e0908ef7bac00000000')
self.assertEqual('dbaf14e1c476e76ea05a8b71921a46d6b06f0a950f17c5f9f1a03b8fae467f10', tx.txid())
def test_txid_coinbase_to_p2pkh(self):
tx = transaction.Transaction('01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff25033ca0030400001256124d696e656420627920425443204775696c640800000d41000007daffffffff01c00d1298000000001976a91427a1f12771de5cc3b73941664b2537c15316be4388ac00000000')
self.assertEqual('4328f9311c6defd9ae1bd7f4516b62acf64b361eb39dfcf09d9925c5fd5c61e8', tx.txid())
def test_txid_p2pk_to_p2pkh(self):
tx = transaction.Transaction('010000000118231a31d2df84f884ced6af11dc24306319577d4d7c340124a7e2dd9c314077000000004847304402200b6c45891aed48937241907bc3e3868ee4c792819821fcde33311e5a3da4789a02205021b59692b652a01f5f009bd481acac2f647a7d9c076d71d85869763337882e01fdffffff016c95052a010000001976a9149c4891e7791da9e622532c97f43863768264faaf88ac00000000')
self.assertEqual('90ba90a5b115106d26663fce6c6215b8699c5d4b2672dd30756115f3337dddf9', tx.txid())
def test_txid_p2pk_to_p2sh(self):
tx = transaction.Transaction('0100000001e4643183d6497823576d17ac2439fb97eba24be8137f312e10fcc16483bb2d070000000048473044022032bbf0394dfe3b004075e3cbb3ea7071b9184547e27f8f73f967c4b3f6a21fa4022073edd5ae8b7b638f25872a7a308bb53a848baa9b9cc70af45fcf3c683d36a55301fdffffff011821814a0000000017a9143c640bc28a346749c09615b50211cb051faff00f8700000000')
self.assertEqual('172bdf5a690b874385b98d7ab6f6af807356f03a26033c6a65ab79b4ac2085b5', tx.txid())
def test_txid_p2pkh_to_p2pkh(self):
tx = transaction.Transaction('0100000001f9dd7d33f315617530dd72264b5d9c69b815626cce3f66266d1015b1a590ba90000000006a4730440220699bfee3d280a499daf4af5593e8750b54fef0557f3c9f717bfa909493a84f60022057718eec7985b7796bb8630bf6ea2e9bf2892ac21bd6ab8f741a008537139ffe012103b4289890b40590447b57f773b5843bf0400e9cead08be225fac587b3c2a8e973fdffffff01ec24052a010000001976a914ce9ff3d15ed5f3a3d94b583b12796d063879b11588ac00000000')
self.assertEqual('24737c68f53d4b519939119ed83b2a8d44d716d7f3ca98bcecc0fbb92c2085ce', tx.txid())
def test_txid_p2pkh_to_p2sh(self):
tx = transaction.Transaction('010000000195232c30f6611b9f2f82ec63f5b443b132219c425e1824584411f3d16a7a54bc000000006b4830450221009f39ac457dc8ff316e5cc03161c9eff6212d8694ccb88d801dbb32e85d8ed100022074230bb05e99b85a6a50d2b71e7bf04d80be3f1d014ea038f93943abd79421d101210317be0f7e5478e087453b9b5111bdad586038720f16ac9658fd16217ffd7e5785fdffffff0200e40b540200000017a914d81df3751b9e7dca920678cc19cac8d7ec9010b08718dfd63c2c0000001976a914303c42b63569ff5b390a2016ff44651cd84c7c8988acc7010000')
self.assertEqual('155e4740fa59f374abb4e133b87247dccc3afc233cb97c2bf2b46bba3094aedc', tx.txid())
def test_txid_p2sh_to_p2pkh(self):
tx = transaction.Transaction('0100000001b98d550fa331da21038952d6931ffd3607c440ab2985b75477181b577de118b10b000000fdfd0000483045022100a26ea637a6d39aa27ea7a0065e9691d477e23ad5970b5937a9b06754140cf27102201b00ed050b5c468ee66f9ef1ff41dfb3bd64451469efaab1d4b56fbf92f9df48014730440220080421482a37cc9a98a8dc3bf9d6b828092ad1a1357e3be34d9c5bbdca59bb5f02206fa88a389c4bf31fa062977606801f3ea87e86636da2625776c8c228bcd59f8a014c69522102420e820f71d17989ed73c0ff2ec1c1926cf989ad6909610614ee90cf7db3ef8721036eae8acbae031fdcaf74a824f3894bf54881b42911bd3ad056ea59a33ffb3d312103752669b75eb4dc0cca209af77a59d2c761cbb47acc4cf4b316ded35080d92e8253aeffffffff0101ac3a00000000001976a914a6b6bcc85975bf6a01a0eabb2ac97d5a418223ad88ac00000000')
self.assertEqual('0ea982e8e601863e604ef6d9acf9317ae59d3eac9cafee6dd946abadafd35af8', tx.txid())
def test_txid_p2sh_to_p2sh(self):
tx = transaction.Transaction('01000000018695eef2250b3a3b6ef45fe065e601610e69dd7a56de742092d40e6276e6c9ec00000000fdfd000047304402203199bf8e49f7203e8bcbfd754aa356c6ba61643a3490f8aef3888e0aaa7c048c02201e7180bfd670f4404e513359b4020fbc85d6625e3e265e0c357e8611f11b83e401483045022100e60f897db114679f9a310a032a22e9a7c2b8080affe2036c480ff87bf6f45ada02202dbd27af38dd97d418e24d89c3bb7a97e359dd927c1094d8c9e5cac57df704fb014c69522103adc563b9f5e506f485978f4e913c10da208eac6d96d49df4beae469e81a4dd982102c52bc9643a021464a31a3bfa99cfa46afaa4b3acda31e025da204b4ee44cc07a2103a1c8edcc3310b3d7937e9e4179e7bd9cdf31c276f985f4eb356f21b874225eb153aeffffffff02b8ce05000000000017a9145c9c158430b7b79c3ad7ef9bdf981601eda2412d87b82400000000000017a9146bf3ff89019ecc5971a39cdd4f1cabd3b647ad5d8700000000')
self.assertEqual('2caab5a11fa1ec0f5bb014b8858d00fecf2c001e15d22ad04379ad7b36fef305', tx.txid())
def test_parse_output_p2pkh(self):
tx = transaction.Transaction('010000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000001976a914aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa88ac00000000')
self.assertEqual(tx.outputs(), [(TYPE_ADDRESS, Address.from_P2PKH_hash(b'\xaa'*20), 0)])
self.assertEqual('7a0e3fcbdaa9ecc6ccce1ad325b6b661e774a57f2e8519c679964e2dd32e200f', tx.txid())
def test_parse_output_p2pkh_nonmin(self):
tx = transaction.Transaction('010000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000001a76a94c14aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa88ac00000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(bytes.fromhex('76a94c14aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa88ac')), 0)])
self.assertEqual('69706667959fd2e6aa3385acdcd2c478e875344422e1f4c94eb06065268540d1', tx.txid())
def test_parse_output_p2sh(self):
tx = transaction.Transaction('0100000001000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000017a914aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa8700000000')
self.assertEqual(tx.outputs(), [(TYPE_ADDRESS, Address.from_P2SH_hash(b'\xaa'*20), 0)])
self.assertEqual('d33750908965d24a411d94371fdc64ebb06f13bf4d19e73372347e6b4eeca49f', tx.txid())
def test_parse_output_p2sh_nonmin(self):
tx = transaction.Transaction('0100000001000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000018a94c14aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa8700000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(bytes.fromhex('a94c14aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa87')), 0)])
self.assertEqual('dd4b174d7094c63c9f530703702a8d76c7b3fe5fc278ba2837dbd75bc5b0b296', tx.txid())
def test_parse_output_p2pk(self):
tx = transaction.Transaction('010000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000002321030000000000000000000000000000000000000000000000000000000000000000ac00000000')
self.assertEqual(tx.outputs(), [(TYPE_PUBKEY, PublicKey.from_pubkey(b'\x03' + b'\x00'*32), 0)])
self.assertEqual('78afa0576a4ee6e7db663a58202f11bab8e860dd4a2226f856a2490187046b3d', tx.txid())
def test_parse_output_p2pk_badpubkey(self):
tx = transaction.Transaction('010000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000002321040000000000000000000000000000000000000000000000000000000000000000ac00000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(bytes.fromhex('21040000000000000000000000000000000000000000000000000000000000000000ac')), 0)])
self.assertEqual('8e57f026081b6589570dc5e6e339b706d2ac75e6cbd1896275dee176b8d35ba6', tx.txid())
def test_parse_output_p2pk_nonmin(self):
tx = transaction.Transaction('01000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000244c21030000000000000000000000000000000000000000000000000000000000000000ac00000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(bytes.fromhex('4c21030000000000000000000000000000000000000000000000000000000000000000ac')), 0)])
self.assertEqual('730d77384d7bfc965caa338b501e7b071092474320af6ea19052859c93bfaf98', tx.txid())
def test_parse_output_p2pk_uncomp(self):
tx = transaction.Transaction('0100000001000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000043410400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ac00000000')
self.assertEqual(tx.outputs(), [(TYPE_PUBKEY, PublicKey.from_pubkey(b'\x04' + b'\x00'*64), 0)])
self.assertEqual('053626542393dd957a14bb2bcbfdcf3564a5f438e923799e1b9714c4a8e70a7c', tx.txid())
def test_parse_output_p2pk_uncomp_badpubkey(self):
tx = transaction.Transaction('0100000001000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000043410300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ac00000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(b'\x41\x03' + b'\x00'*64 + b'\xac'), 0)])
self.assertEqual('a15a9f86f5a47ef7efc28ae701f5b2a353aff76a21cb22ff08b77759533fb59b', tx.txid())
def test_parse_output_p2pk_uncomp_nonmin(self):
tx = transaction.Transaction('01000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000444c410400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ac00000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(b'\x4c\x41\x04' + b'\x00'*64 + b'\xac'), 0)])
self.assertEqual('bd8e0827c8bacd6bac10dd28d5fc6ad52f3fef3f91200c7c1d8698531c9325e9', tx.txid())
def test_parse_output_baremultisig(self):
tx = transaction.Transaction('0100000001000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000025512103000000000000000000000000000000000000000000000000000000000000000051ae00000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(b'\x51\x21\x03' + b'\x00'*32 + b'\x51\xae'), 0)])
self.assertEqual('b1f66fde0aa3d5af03be3c69f599069aad217e939f36cacc2372ea4fece7d57b', tx.txid())
def test_parse_output_baremultisig_nonmin(self):
tx = transaction.Transaction('0100000001000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000026514c2103000000000000000000000000000000000000000000000000000000000000000051ae00000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(b'\x51\x4c\x21\x03' + b'\x00'*32 + b'\x51\xae'), 0)])
self.assertEqual('eb0b69c86a05499cabc42b12d4706b18eab97ed6155fc966e488a433edf05932', tx.txid())
def test_parse_output_truncated1(self):
tx = transaction.Transaction('01000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000024d0100000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(b'\x4d\x01'), 0)])
self.assertIn("Invalid script", tx.outputs()[0][1].to_ui_string())
self.assertEqual('72d8af8edcc603c6c64390ac5eb913b97a80efe0f5ae7c00ad5397eb5786cd33', tx.txid())
def test_parse_output_truncated1(self):
# truncated in middle of PUSHDATA2's second argument
tx = transaction.Transaction('01000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000044d0200ff00000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(b'\x4d\x02\x00\xff'), 0)])
self.assertIn("Invalid script", tx.outputs()[0][1].to_ui_string())
self.assertEqual('976667816c4955189973cc56ac839844da4ed32a8bd22a8c6217c2c04e69e9d7', tx.txid())
def test_parse_output_empty(self):
tx = transaction.Transaction('010000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000')
self.assertEqual(tx.outputs(), [(TYPE_SCRIPT, ScriptOutput(b''), 0)])
self.assertEqual("", tx.outputs()[0][1].to_ui_string())
self.assertEqual('50fa7bd4e5e2d3220fd2e84effec495b9845aba379d853408779d59a4b0b4f59', tx.txid())
class NetworkMock(object):
    """Test double for the network client: every synchronous_get call
    returns the preset ``unspent`` payload."""
    def __init__(self, unspent):
        # Canned response used for all requests.
        self.unspent = unspent
    def synchronous_get(self, arg):
        # The request argument is ignored; the fixture is static.
        return self.unspent
| true | true |
f7270752dcf18a0603f052723ab81ca799050193 | 2,977 | py | Python | parsers/archived/US_BPA.py | electricitymap/electricitymap-contrib | 6572b12d1cef72c734b80273598e156ebe3c22ea | [
"MIT"
] | 143 | 2022-01-01T10:56:58.000Z | 2022-03-31T11:25:47.000Z | parsers/archived/US_BPA.py | electricitymap/electricitymap-contrib | 6572b12d1cef72c734b80273598e156ebe3c22ea | [
"MIT"
] | 276 | 2021-12-30T15:57:15.000Z | 2022-03-31T14:57:16.000Z | parsers/archived/US_BPA.py | electricitymap/electricitymap-contrib | 6572b12d1cef72c734b80273598e156ebe3c22ea | [
"MIT"
] | 44 | 2021-12-30T19:48:42.000Z | 2022-03-29T22:46:16.000Z | #!/usr/bin/env python3
# Archive reason: No longer in use.
"""Parser for the Bonneville Power Administration area of the USA."""
import logging
from io import StringIO
import arrow
import pandas as pd
import requests
GENERATION_URL = "https://transmission.bpa.gov/business/operations/Wind/baltwg.txt"
GENERATION_MAPPING = {
"Wind": "wind",
"Hydro": "hydro",
"Fossil/Biomass": "unknown",
"Nuclear": "nuclear",
}
def get_data(url, session=None):
    """Download the BPA balancing-authority report and parse it.

    The report is tab-separated text with an 11-line preamble, which is
    skipped before parsing.

    :param url: report URL (see GENERATION_URL).
    :param session: optional requests.Session to reuse connections.
    :return: pandas dataframe with the raw report rows.
    :raises requests.HTTPError: if the server answers with an error status.
    """
    s = session or requests.Session()
    req = s.get(url)
    # Fail loudly on HTTP errors rather than silently parsing an error page.
    req.raise_for_status()
    df = pd.read_table(StringIO(req.text), skiprows=11)

    return df
def timestamp_converter(timestamp):
    """Parse a 'MM/DD/YYYY HH:mm' string into a timezone-aware datetime
    localized to America/Los_Angeles."""
    parsed = arrow.get(timestamp, "MM/DD/YYYY HH:mm")
    return parsed.replace(tzinfo="America/Los_Angeles").datetime
def data_processor(df, logger) -> list:
    """Convert the raw BPA generation table into parser-ready tuples.

    Drops all generation rows that are mostly empty, keeps only the last
    24 hours of 5-minute data, converts timestamps, warns about unexpected
    generation columns and maps the remaining ones to electricitymap
    fuel types.

    :param df: dataframe as returned by get_data; expected columns are
        'Date/Time', 'Load' and one column per generation type.
    :param logger: logger used to report unknown generation columns.
    :return: list of tuples in the form of (datetime, production).
    """
    df = df.dropna(thresh=2)
    df.columns = df.columns.str.strip()
    # 5min data for the last 24 hours. Copy so the column assignment below
    # operates on an independent frame (avoids SettingWithCopyWarning on
    # the slice returned by tail()).
    df = df.tail(288).copy()
    df["Date/Time"] = df["Date/Time"].map(timestamp_converter)

    known_keys = GENERATION_MAPPING.keys() | {"Date/Time", "Load"}
    column_headers = set(df.columns)
    unknown_keys = column_headers - known_keys

    for k in unknown_keys:
        logger.warning(
            "New data {} seen in US-BPA data source".format(k), extra={"key": "US-BPA"}
        )

    # Unknown columns and the total Load are not production values.
    keys_to_remove = unknown_keys | {"Load"}

    processed_data = []
    for _, row in df.iterrows():
        production = row.to_dict()

        dt = production.pop("Date/Time")
        dt = dt.to_pydatetime()
        mapped_production = {
            GENERATION_MAPPING[k]: v
            for k, v in production.items()
            if k not in keys_to_remove
        }

        processed_data.append((dt, mapped_production))
    return processed_data
def fetch_production(
    zone_key="US-BPA",
    session=None,
    target_datetime=None,
    logger=logging.getLogger(__name__),
) -> list:
    """Requests the last known production mix (in MW) of a given zone."""
    if target_datetime:
        raise NotImplementedError("This parser is not yet able to parse past dates")

    raw_data = get_data(GENERATION_URL, session=session)
    processed_data = data_processor(raw_data, logger)

    # One datapoint per processed (datetime, production) tuple.
    return [
        {
            "zoneKey": zone_key,
            "datetime": dt,
            "production": production,
            "storage": {},
            "source": "bpa.gov",
        }
        for dt, production in processed_data
    ]
if __name__ == "__main__":
print("fetch_production() ->")
print(fetch_production())
| 25.228814 | 90 | 0.64125 |
import logging
from io import StringIO
import arrow
import pandas as pd
import requests
GENERATION_URL = "https://transmission.bpa.gov/business/operations/Wind/baltwg.txt"
GENERATION_MAPPING = {
"Wind": "wind",
"Hydro": "hydro",
"Fossil/Biomass": "unknown",
"Nuclear": "nuclear",
}
def get_data(url, session=None):
    """Download the BPA report at *url* and parse it into a dataframe.

    The report is tab-separated text with an 11-line preamble that is
    skipped. A requests session may be supplied to reuse connections.
    """
    s = session or requests.Session()
    req = s.get(url)
    df = pd.read_table(StringIO(req.text), skiprows=11)
    return df
def timestamp_converter(timestamp):
    """Turn a 'MM/DD/YYYY HH:mm' string into a timezone-aware datetime
    localized to America/Los_Angeles."""
    arr_dt_naive = arrow.get(timestamp, "MM/DD/YYYY HH:mm")
    dt_aware = arr_dt_naive.replace(tzinfo="America/Los_Angeles").datetime
    return dt_aware
def data_processor(df, logger) -> list:
    """Convert the raw generation table into (datetime, production) tuples.

    Drops rows with fewer than two non-empty values, keeps the last 288
    rows (24 hours of 5-minute data), converts timestamps, warns about
    unexpected generation columns, and maps the known columns through
    GENERATION_MAPPING (dropping 'Load' and unknown columns).
    """
    df = df.dropna(thresh=2)
    df.columns = df.columns.str.strip()
    # 288 five-minute rows == the most recent 24 hours.
    df = df.tail(288)
    df["Date/Time"] = df["Date/Time"].map(timestamp_converter)
    known_keys = GENERATION_MAPPING.keys() | {"Date/Time", "Load"}
    column_headers = set(df.columns)
    unknown_keys = column_headers - known_keys
    for k in unknown_keys:
        # Surface any new column so the mapping can be extended.
        logger.warning(
            "New data {} seen in US-BPA data source".format(k), extra={"key": "US-BPA"}
        )
    # Unknown columns and the total Load are not production values.
    keys_to_remove = unknown_keys | {"Load"}
    processed_data = []
    for index, row in df.iterrows():
        production = row.to_dict()
        dt = production.pop("Date/Time")
        dt = dt.to_pydatetime()
        mapped_production = {
            GENERATION_MAPPING[k]: v
            for k, v in production.items()
            if k not in keys_to_remove
        }
        processed_data.append((dt, mapped_production))
    return processed_data
def fetch_production(
    zone_key="US-BPA",
    session=None,
    target_datetime=None,
    logger=logging.getLogger(__name__),
) -> list:
    """Request the last known production mix (in MW) of the given zone.

    Only live data is supported: a target_datetime raises
    NotImplementedError.
    """
    if target_datetime:
        raise NotImplementedError("This parser is not yet able to parse past dates")
    raw_data = get_data(GENERATION_URL, session=session)
    processed_data = data_processor(raw_data, logger)
    data = []
    for item in processed_data:
        # item is a (datetime, production-dict) tuple from data_processor.
        datapoint = {
            "zoneKey": zone_key,
            "datetime": item[0],
            "production": item[1],
            "storage": {},
            "source": "bpa.gov",
        }
        data.append(datapoint)
    return data
if __name__ == "__main__":
print("fetch_production() ->")
print(fetch_production())
| true | true |
f727078e22cc661d90d89e25a90adb97e4f7dee0 | 2,049 | py | Python | pre_commit_hooks/detect_aws_credentials.py | pk026/pre-commit-hooks | 3fa02652357ff0dbb42b5bc78c673b7bc105fcf3 | [
"MIT"
] | null | null | null | pre_commit_hooks/detect_aws_credentials.py | pk026/pre-commit-hooks | 3fa02652357ff0dbb42b5bc78c673b7bc105fcf3 | [
"MIT"
] | null | null | null | pre_commit_hooks/detect_aws_credentials.py | pk026/pre-commit-hooks | 3fa02652357ff0dbb42b5bc78c673b7bc105fcf3 | [
"MIT"
] | 1 | 2016-05-06T15:27:07.000Z | 2016-05-06T15:27:07.000Z | from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
from six.moves import configparser # pylint: disable=import-error
def get_your_keys(credentials_file):
    """Read the AWS secret keys from *credentials_file* so they can be
    searched for in the submitted code.

    :param credentials_file: path to an AWS credentials ini file
        (``~`` is expanded).
    :return: set of secret keys found, or None when the file is absent.
    """
    aws_credentials_file_path = os.path.expanduser(credentials_file)
    if not os.path.exists(aws_credentials_file_path):
        return None

    parser = configparser.ConfigParser()
    parser.read(aws_credentials_file_path)

    keys = set()
    for section in parser.sections():
        # Some profiles (e.g. role-based ones) have no secret key; skip
        # them instead of raising NoOptionError.
        if parser.has_option(section, 'aws_secret_access_key'):
            keys.add(parser.get(section, 'aws_secret_access_key'))
    return keys
def check_file_for_aws_keys(filenames, keys):
    """Return the filenames whose contents contain any of the given keys."""
    def contains_secret(path):
        with open(path, 'r') as handle:
            body = handle.read()
        # Naive whole-file substring match; accidental collisions are unlikely.
        return any(secret in body for secret in keys)

    return [path for path in filenames if contains_secret(path)]
def main(argv=None):
    """CLI entry point: scan the given files for configured AWS secret keys.

    Exit codes: 0 clean, 1 a secret key was found, 2 no keys configured.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('filenames', nargs='*', help='Filenames to run')
    arg_parser.add_argument(
        '--credentials-file',
        default='~/.aws/credentials',
        help=(
            'location of aws credentials file from which to get the secret '
            "keys we're looking for"
        ),
    )
    args = arg_parser.parse_args(argv)

    keys = get_your_keys(args.credentials_file)
    if not keys:
        print(
            'No aws keys were configured at {0}\n'
            'Configure them with --credentials-file'.format(
                args.credentials_file,
            ),
        )
        return 2

    bad_filenames = check_file_for_aws_keys(args.filenames, keys)
    if not bad_filenames:
        return 0
    for bad_file in bad_filenames:
        print('AWS secret key found: {0}'.format(bad_file))
    return 1
if __name__ == '__main__':
    # `exit()` is injected by the `site` module and may be absent (python -S);
    # raising SystemExit with main()'s return code is the reliable equivalent.
    raise SystemExit(main())
| 28.458333 | 82 | 0.646657 | from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
from six.moves import configparser
def get_your_keys(credentials_file):
    """Read the AWS secret access keys configured in *credentials_file*.

    Returns None when the file does not exist, otherwise the (possibly
    empty) set of secret keys found.
    """
    aws_credentials_file_path = os.path.expanduser(credentials_file)
    if not os.path.exists(aws_credentials_file_path):
        return None
    parser = configparser.ConfigParser()
    parser.read(aws_credentials_file_path)
    keys = set()
    for section in parser.sections():
        # Sections without a secret key (e.g. role-based profiles) would
        # make parser.get() raise NoOptionError; skip them.
        if parser.has_option(section, 'aws_secret_access_key'):
            keys.add(parser.get(section, 'aws_secret_access_key'))
    return keys
def check_file_for_aws_keys(filenames, keys):
    """Return the filenames whose contents contain any of the given keys."""
    def contains_secret(path):
        with open(path, 'r') as handle:
            body = handle.read()
        # Naive whole-file substring match; collisions are unlikely.
        return any(secret in body for secret in keys)

    return [path for path in filenames if contains_secret(path)]
def main(argv=None):
    """CLI entry point: scan the given files for configured AWS secret keys.

    Exit codes: 0 clean, 1 a secret key was found, 2 no keys configured.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('filenames', nargs='*', help='Filenames to run')
    arg_parser.add_argument(
        '--credentials-file',
        default='~/.aws/credentials',
        help=(
            'location of aws credentials file from which to get the secret '
            "keys we're looking for"
        ),
    )
    args = arg_parser.parse_args(argv)

    keys = get_your_keys(args.credentials_file)
    if not keys:
        print(
            'No aws keys were configured at {0}\n'
            'Configure them with --credentials-file'.format(
                args.credentials_file,
            ),
        )
        return 2

    bad_filenames = check_file_for_aws_keys(args.filenames, keys)
    if not bad_filenames:
        return 0
    for bad_file in bad_filenames:
        print('AWS secret key found: {0}'.format(bad_file))
    return 1
if __name__ == '__main__':
    # `exit()` is injected by the `site` module and may be absent (python -S);
    # raising SystemExit with main()'s return code is the reliable equivalent.
    raise SystemExit(main())
| true | true |
f72707b300b185159ce19245e032dddc604d32ab | 17,706 | py | Python | pytorch_src/ResnetV2.py | ccj5351/hmr_rgbd | d1dcf81d72c11e1f502f2c494cd86425f384d9cc | [
"MIT"
] | null | null | null | pytorch_src/ResnetV2.py | ccj5351/hmr_rgbd | d1dcf81d72c11e1f502f2c494cd86425f384d9cc | [
"MIT"
] | 1 | 2020-12-09T07:29:00.000Z | 2020-12-09T07:29:00.000Z | pytorch_src/ResnetV2.py | ccj5351/hmr_rgbd | d1dcf81d72c11e1f502f2c494cd86425f384d9cc | [
"MIT"
] | null | null | null | # !/usr/bin/env python3
# -*-coding:utf-8-*-
# @file:
# @brief:
# @author: Changjiang Cai, ccai1@stevens.edu, caicj5351@gmail.com
# @version: 0.0.1
# @creation date: 23-10-2019
# @last modified: Wed 30 Oct 2019 03:17:36 PM EDT
"""
file: ResnetV2.py
author: Changjiang Cai
mark: adopted from:
1) pytorch source code, and
2) and https://github.com/MandyMo/pytorch_HMR.git
3) and https://github.com/lucasb-eyer/lbtoolbox/blob/master/lbtoolbox/pytorch.py#L61;
"""
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.nn.parameter import Parameter
import torch.optim as optim
import numpy as np
import math
import torchvision
import sys
#from collections import OrderedDict
"""Contains definitions for the preactivation form of Residual Networks.
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer.
"""
########################################
# Kaiming's blocks
########################################
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
    """3x3 convolution, padding 1 (spatial size preserved at stride 1)."""
    return nn.Conv2d(
        cin,
        cout,
        kernel_size=3,
        stride=stride,
        padding=1,
        groups=groups,
        bias=bias,
    )
def conv1x1(cin, cout, stride=1, bias=False):
    """1x1 convolution (channel projection), no padding."""
    return nn.Conv2d(
        cin,
        cout,
        kernel_size=1,
        stride=stride,
        padding=0,
        bias=bias,
    )
# bottleneck_v2
# x-->BN --> ReLU-->(conv1, BN, ReLU)-->(conv2, BN, ReLU) --> conv3
# | |
# | |
# | |
# |--------------------------------------------> Addition --> x_new
class Bottleneck_V2(nn.Module):
    """Pre-activation ("v2") ResNet bottleneck unit.

    Layout:
        x -> BN -> ReLU -> 1x1 conv -> BN/ReLU -> 3x3 conv -> BN/ReLU -> 1x1 conv
        plus a shortcut (projected when channel counts differ) added to the
        residual branch output.  No BN/ReLU after the sum -- the next unit
        (or the network's postnorm) pre-activates it.
    """

    expansion = 4  # cout == expansion * bottleneck width

    def __init__(self, cin, cout, stride):
        super(Bottleneck_V2, self).__init__()
        cmid = cout // self.expansion
        self.relu = nn.ReLU(inplace=True)
        # Shared pre-activation, feeding both the residual and shortcut paths.
        self.bn0 = nn.BatchNorm2d(cin)
        # Residual branch: 1x1 reduce -> 3x3 (carries the stride) -> 1x1 expand.
        self.conv1 = conv1x1(cin, cmid, bias=False)
        self.bn1 = nn.BatchNorm2d(cmid)
        self.conv2 = conv3x3(cmid, cmid, stride, bias=False)
        self.bn2 = nn.BatchNorm2d(cmid)
        self.conv3 = conv1x1(cmid, cout, bias=True)
        self.stride = stride
        # 1x1 max-pool acts as a strided identity for the shortcut.
        self.maxpool2d = nn.MaxPool2d(kernel_size=1, stride=stride)
        self.shortcut = None
        if cin != cout:
            # Channel change: project the pre-activated input with a 1x1 conv.
            self.shortcut = conv1x1(cin, cout, stride, bias=True)

    def forward(self, x):
        preact = self.relu(self.bn0(x))
        # Shortcut uses the pre-activation when projecting, the raw input
        # otherwise (identity/subsampling), per the v2 paper.
        if self.shortcut is not None:
            shortcut = self.shortcut(preact)
        else:
            shortcut = self.maxpool2d(x)
        residual = self.relu(self.bn1(self.conv1(preact)))
        residual = self.relu(self.bn2(self.conv2(residual)))
        return shortcut + self.conv3(residual)
class ResNet_V2(nn.Module):
    """Pre-activation ResNet v2 backbone (He et al., arXiv:1603.05027).

    forward() returns the (optionally globally pooled) feature map; when
    isFetchDictForDebug is set it also returns a dict of intermediate
    activations keyed 'x_conv1', 'x_maxpool', 'x_layer1'..'x_layer4',
    'x_postnorm' and 'x_global_pool'.
    """

    def __init__(self, block, layers, num_classes=None, global_pool=True,
                 isFetchDictForDebug=False):
        self.isFetchDictForDebug = isFetchDictForDebug
        self.inplanes = 64
        self.expansion = 4
        super(ResNet_V2, self).__init__()
        # Root: bare 7x7/2 conv.  No BN/ReLU here -- the first v2 unit
        # pre-activates its own input (appendix of the v2 paper).
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=True)
        # TensorFlow-style 'SAME' pooling: pad right/bottom by one, then pool
        # with padding=0.
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)
        # F.pad order: (left, right, top, bottom), last dim first.
        self.maxpool_pad = (0, 1, 0, 1)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
        # v2 units emit un-normalized sums, so normalize/activate once here.
        self.postnorm = nn.BatchNorm2d(512 * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.global_pool = global_pool
        # HMR-style usage passes num_classes=None (feature extractor only).
        if num_classes is not None:
            self.fc = nn.Linear(512 * block.expansion, num_classes)
        else:
            self.fc = None
        # He initialization for convs; BN/GN start as identity transforms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def _make_layer(self, block, planes, numBlocks, stride):
        """Stack numBlocks units; only the LAST unit applies `stride` (v2 style)."""
        expansion = block.expansion
        units = []
        for unit_idx in range(numBlocks):
            in_channels = self.inplanes if unit_idx == 0 else planes * expansion
            unit_stride = stride if unit_idx == numBlocks - 1 else 1
            units.append(block(in_channels, planes * expansion, unit_stride))
        # Record the output width for the next stage's first unit.
        self.inplanes = planes * expansion
        return nn.Sequential(*units)

    def forward(self, x):
        fetch_dict = {}
        x = self.conv1(x)
        fetch_dict['x_conv1'] = x
        # Manual right/bottom padding emulates TF 'SAME' max pooling.
        x = F.pad(x, pad=self.maxpool_pad, mode='constant', value=0)
        x = self.maxpool(x)
        fetch_dict['x_maxpool'] = x
        x = self.layer1(x)
        fetch_dict['x_layer1'] = x
        x = self.layer2(x)
        fetch_dict['x_layer2'] = x
        x = self.layer3(x)
        fetch_dict['x_layer3'] = x
        x = self.layer4(x)
        fetch_dict['x_layer4'] = x
        x = self.relu(self.postnorm(x))
        fetch_dict['x_postnorm'] = x
        if self.global_pool:
            x = torch.mean(x, dim=[2, 3], keepdim=True)
            fetch_dict['x_global_pool'] = x
        if self.fc is not None:
            x = self.fc(torch.flatten(x, 1))
        if self.isFetchDictForDebug:
            return x, fetch_dict
        return x
def resnet_v2_50(num_classes=None, global_pool=True, isFetchDictForDebug=False):
    """Factory for the 50-layer pre-activation ResNet (3-4-6-3 units)."""
    return ResNet_V2(Bottleneck_V2, [3, 4, 6, 3], num_classes, global_pool,
                     isFetchDictForDebug)
def get_tf2pt_key_map_dict():
    """Build the TF-slim resnet_v2_50 -> PyTorch state_dict key translation table.

    The table maps path *fragments* (block/unit prefixes and per-bottleneck
    sub-module suffixes), not whole variable names; see
    map_tf_dictKeys_2PyTorch_dictKeys for how fragments are combined.
    """
    # Root conv and final post-normalization of the network.
    map_dict = {
        '': '',
        'conv1/weights': 'conv1.weight',
        'conv1/biases': 'conv1.bias',
        'postnorm/beta': 'postnorm.bias',
        'postnorm/gamma': 'postnorm.weight',
        'postnorm/moving_mean': 'postnorm.running_mean',
        'postnorm/moving_variance': 'postnorm.running_var',
    }

    # blockN/unit_M (1-based in TF) -> layerN.(M-1) (0-based in PyTorch).
    # Unit counts per block for resnet_v2_50: 3, 4, 6, 3.
    for block_idx, num_units in ((1, 3), (2, 4), (3, 6), (4, 3)):
        for unit in range(1, num_units + 1):
            tf_fragment = 'block{}/unit_{}'.format(block_idx, unit)
            map_dict[tf_fragment] = 'layer{}.{}'.format(block_idx, unit - 1)

    # Per-bottleneck sub-modules.  TF BatchNorm: gamma=weight, beta=bias.
    # (TF checkpoints also contain '.../Adam' slots; those are filtered out
    # by the caller, not mapped here.)
    map_dict.update({
        'preact/gamma': 'bn0.weight',
        'preact/beta': 'bn0.bias',
        'preact/moving_mean': 'bn0.running_mean',
        'preact/moving_variance': 'bn0.running_var',
        'conv1/weights': 'conv1.weight',
        'conv1/BatchNorm/gamma': 'bn1.weight',
        'conv1/BatchNorm/beta': 'bn1.bias',
        'conv1/BatchNorm/moving_mean': 'bn1.running_mean',
        'conv1/BatchNorm/moving_variance': 'bn1.running_var',
        'conv2/weights': 'conv2.weight',
        'conv2/BatchNorm/gamma': 'bn2.weight',
        'conv2/BatchNorm/beta': 'bn2.bias',
        'conv2/BatchNorm/moving_mean': 'bn2.running_mean',
        'conv2/BatchNorm/moving_variance': 'bn2.running_var',
        'conv3/weights': 'conv3.weight',
        'conv3/biases': 'conv3.bias',
        'shortcut/weights': 'shortcut.weight',
        'shortcut/biases': 'shortcut.bias',
    })
    return map_dict
def map_tf_dictKeys_2PyTorch_dictKeys(
        map_dict,
        tf_key='resnet_v2_50/block1/unit_1/bottleneck_v2/conv1/BatchNorm/beta'):
    """Translate one TF variable name into the matching PyTorch state_dict key.

    TF names look like 'resnet_v2_50/<block/unit>/bottleneck_v2/<submodule>'
    or 'resnet_v2_50/<submodule>' for root/postnorm variables.  The two
    fragments are looked up in map_dict and joined with a dot.
    """
    # Drop the leading 'resnet_v2_50/' scope.
    relative = tf_key[len('resnet_v2_50') + 1:]

    marker = 'bottleneck_v2'
    pos = relative.find(marker)
    if pos > 0:
        # '<block/unit>/bottleneck_v2/<submodule>': split around the marker,
        # also dropping the '/' on each side of it.
        prefix = relative[0:pos - 1]
        suffix = relative[pos + 1 + len(marker):]
    else:
        # Root-level variable (conv1, postnorm, ...): no block prefix.
        prefix, suffix = '', relative

    mapped_prefix = map_dict[prefix]
    mapped_suffix = map_dict[suffix]
    if mapped_prefix == '':
        return mapped_suffix
    return mapped_prefix + '.' + mapped_suffix
#>see https://stackoverflow.com/questions/51628607/pytorch-passing-numpy-array-for-weight-initialization
def set_resnet_parameter_data(layer, parameter_name, new_torch_data):
    """Overwrite the .data of the named parameter on *layer* in place."""
    getattr(layer, parameter_name).data = new_torch_data
def pass_np_model_state_to_resnet(src_np_model_state_dict, dst_resnet_model):
    """Copy TF-slim resnet_v2_50 weights (a numpy dict) into the PyTorch model.

    src_np_model_state_dict maps TF variable names -> np.ndarray and carries
    a 'resnet_v2_50_names' entry listing all variable names.  4-D conv
    kernels are transposed from TF's HWIO layout to PyTorch's OIHW before
    copying.  Progress/result statistics are printed at the end.
    """
    map_dict = get_tf2pt_key_map_dict()
    dst_state_dict = dst_resnet_model.state_dict()
    n_valid = 0
    n_adam = 0
    # Names still unaccounted for; successfully handled names are removed.
    tf_var_names = list(src_np_model_state_dict['resnet_v2_50_names'])
    N = len(tf_var_names)
    for tf_key in sorted(src_np_model_state_dict.keys()):
        param = src_np_model_state_dict[tf_key]
        if 'Adam' in tf_key:
            # '.../Adam' entries are optimizer slots, not model weights.
            n_adam += 1
            tf_var_names.remove(tf_key)
            continue
        elif 'resnet_v2_50_names' == tf_key:
            continue
        pt_key = map_tf_dictKeys_2PyTorch_dictKeys(map_dict, tf_key)
        if pt_key not in dst_state_dict:
            print('unexpected ', pt_key, ' !')
            continue
        if not isinstance(param, np.ndarray):
            raise ValueError('Expected a np.ndarray')
        # TF conv weights are [kh, kw, cin, cout]; PyTorch wants
        # [cout, cin, kh, kw] (groups assumed to be 1).
        if param.ndim == 4:
            param = np.transpose(param, [3, 2, 0, 1])
        param = torch.from_numpy(param).contiguous()
        # BUG FIX: was a bare `except:` (swallowed KeyboardInterrupt etc.)
        # that also decremented n_valid (corrupting the success count) and
        # appended tf_key into tf_var_names, duplicating a name that was
        # never removed.  A shape-mismatched copy_ raises RuntimeError.
        try:
            dst_state_dict[pt_key].copy_(param)
        except (RuntimeError, TypeError):
            print(pt_key, ' is inconsistent!')
            print('src np.ndarray in shape {}, dst tensor in shape {}'.format(
                param.shape, dst_state_dict[pt_key].shape))
            continue
        n_valid += 1
        tf_var_names.remove(tf_key)
    print('%d out of %d variables processed! Wherein:' % (n_valid + n_adam, N))
    print(' [***] Copyed state dict for %d variables and finished!' % n_valid)
    print(' [***] Skip %d adam variables, which are related to Adam optimaization state' % (n_adam))
    print(' [***] {} variables are left unprocessed!'.format(len(tf_var_names)))
    if n_valid + n_adam == N:
        print(" [***] Resnet_V2_50 loading Numpy weights Succeed!!!")
    else:
        print(" [***] Resnet_V2_50 loading Numpy weights Failed !!!")
def load_Res50ModelFromNpyFile(
        npy_file='/home/ccj/hmr-rgbd/results/saved_weights/hmr_pre_trained_resnet_v2_50.npy'):
    """Build a resnet_v2_50 and populate it from a numpy weight dump.

    The .npy dict was pickled under Python 2 (TensorFlow side), hence
    allow_pickle with latin1 decoding when loading under Python 3.
    """
    dst_resnet_model = resnet_v2_50()
    assert (npy_file is not None)
    src_np_model_state_dict = np.load(
        npy_file, allow_pickle=True, encoding='latin1').item()
    pass_np_model_state_to_resnet(src_np_model_state_dict, dst_resnet_model)
    return dst_resnet_model
if __name__ == '__main__':
    if 0:
        # Dump parameter names/shapes of a freshly initialized model.
        print('resnet_v2_50 state_dict():')
        n = 0
        for k, v in resnet_v2_50().state_dict().items():
            print(k, v.shape)
            n += 1
        print(n)
    if 0:
        # Inspect the raw numpy weight dump (pickled by Python 2 -> latin1).
        npy_file = '/home/ccj/hmr-rgbd/results/saved_weights/hmr_pre_trained_resnet_v2_50.npy'
        resnet_dict2 = np.load(npy_file, allow_pickle=True, encoding='latin1').item()
        print('loaded var_names : ', resnet_dict2['resnet_v2_50_names'])
        tmp_name = 'resnet_v2_50/block4/unit_3/bottleneck_v2/conv2/weights'
        # Check the variable dimensionality; should print [3, 3, 512, 512].
        print(resnet_dict2[tmp_name].shape)
    if 1:
        # Convert the numpy weights to a PyTorch state_dict and save it.
        npy_file = '/home/ccj/hmr-rgbd/results/saved_weights/hmr_pre_trained_resnet_v2_50.npy'
        dst_resnet_model = load_Res50ModelFromNpyFile(npy_file)
        dst_state_dict = dst_resnet_model.state_dict()
        model_path = '/home/ccj/hmr-rgbd/results/saved_weights/hmr_pre_trained_resnet_v2_50.pt'
        torch.save(dst_state_dict, model_path)
        print('saved %s' % model_path)
    if 1:
        # Reload the saved weights into a fresh model as a sanity check.
        # BUG FIX: the original did `resnet_v2_50 = resnet_v2_50()`, rebinding
        # the factory function's name to a model instance and shadowing it.
        model = resnet_v2_50()
        model_path = '/home/ccj/hmr-rgbd/results/saved_weights/hmr_pre_trained_resnet_v2_50.pt'
        model.load_state_dict(torch.load(model_path))
        print('Loading %s' % model_path)
| 40.797235 | 127 | 0.605162 |
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.nn.parameter import Parameter
import torch.optim as optim
import numpy as np
import math
import torchvision
import sys
cout, stride):
super(Bottleneck_V2, self).__init__()
cmid = cout// self.expansion
self.relu = nn.ReLU(inplace=True)
self.bn0 = nn.BatchNorm2d(cin)
self.conv1 = conv1x1(cin, cmid, bias=False) #conv1
self.bn1 = nn.BatchNorm2d(cmid) #conv1/BatchNorm
self.conv2 = conv3x3(cmid, cmid, stride, bias=False) #conv2
self.bn2 = nn.BatchNorm2d(cmid) #conv2/BatchNorm
self.conv3 = conv1x1(cmid, cout, bias=True) # conv3
self.stride = stride
self.maxpool2d= nn.MaxPool2d(kernel_size=1, stride = stride)
self.shortcut = None
if cin != cout:
# conv, 1 x 1
self.shortcut = conv1x1(cin, cout, stride, bias = True)
def forward(self, x):
preact = self.relu(self.bn0(x))
if self.shortcut is not None:
shortcut = self.shortcut(preact) # e.g., stride = 2
else:
shortcut = self.maxpool2d(x)
residual = self.relu(self.bn1(self.conv1(preact)))
residual = self.relu(self.bn2(self.conv2(residual)))
residual = self.conv3(residual)
output = shortcut + residual
return output
class ResNet_V2(nn.Module):
def __init__(self, block, layers, num_classes=None, global_pool = True,
isFetchDictForDebug = False):
self.isFetchDictForDebug = isFetchDictForDebug
self.inplanes = 64
self.expansion = 4
super(ResNet_V2, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=True)
# We do not include batch normalization or activation functions in
# conv1 because the first ResNet unit will perform these. Cf.
# Appendix of [2].
#self.bn1 = nn.BatchNorm2d(64)
#self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)
#Updated to implement 'same' padding in tensorflow; do manually padding to bottom and right,
# then apply the follwoing maxpool with padding = 0 as its argument;
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)
# padding size: starting from the last dimension and moving forward;
self.maxpool_pad = (0,1,0,1)# i.e, (padding_left, padding_right, padding_top, padding_bottom)
self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
# This is needed because the pre-activation variant does not have batch
# normalization or activation functions in the residual unit output. See
# Appendix of [2].
self.postnorm = nn.BatchNorm2d(512*self.expansion)
self.relu = nn.ReLU(inplace=True)
#self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) # output is of size 1 x 1 here;
self.global_pool = global_pool
#Note: in HMR project, we set `num_classes=None`;
if num_classes is not None:
self.fc = nn.Linear(512 * block.expansion, num_classes)
else:
self.fc = None
#leave it here FYI:
#for m in self.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
# the new version is shown below:
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
#def __init__(self, cin, cout, stride=1):
def _make_layer(self, block, planes, numBlocks, stride):
expansion = block.expansion
layers = []
for i in range(0, numBlocks):
cur_inplanes = planes * expansion if i > 0 else self.inplanes
tmp_stride = 1 if i < (numBlocks - 1) else stride
layers.append(block(cur_inplanes, planes*expansion, tmp_stride))
#update self.inplanes = output planes, for next incoming Residual block, with new palnes #;
self.inplanes = planes * expansion
return nn.Sequential(*layers)
def forward(self, x):
fetch_dict = {}
x = self.conv1(x)
fetch_dict['x_conv1'] = x
#Updated to implement 'same' padding in tensorflow; do manually padding to bottom and right,
# then apply the follwoing maxpool with padding = 0 as its argument;
x = F.pad(x, pad = self.maxpool_pad, mode = 'constant', value = 0)
x = self.maxpool(x)
fetch_dict['x_maxpool'] = x
x = self.layer1(x)
fetch_dict['x_layer1'] = x
x = self.layer2(x)
fetch_dict['x_layer2'] = x
x = self.layer3(x)
fetch_dict['x_layer3'] = x
x = self.layer4(x)
fetch_dict['x_layer4'] = x
x = self.postnorm(x)
#Updated on 2019/10/30: missing the relu added!!!
x = self.relu(x)
fetch_dict['x_postnorm'] = x
if self.global_pool:
x = torch.mean(x, dim=[2,3], keepdim = True)
fetch_dict['x_global_pool'] = x
if self.fc is not None:
x = self.fc(torch.flatten(x,1))
if self.isFetchDictForDebug:
return x, fetch_dict
else:
return x
def resnet_v2_50(num_classes=None, global_pool = True, isFetchDictForDebug = False):
model = ResNet_V2(Bottleneck_V2, [3,4,6,3],num_classes, global_pool, isFetchDictForDebug)
return model
def get_tf2pt_key_map_dict():
map_dict = {
'' : '',
# for root block: conv1 --> pool1
# that is: input x --> (conv1 --> pool1 )--> (residual-block1,2,3,4) --> postnorm --> global avg-pool --> output
'conv1/weights' : 'conv1.weight',
'conv1/biases' : 'conv1.bias',
# for post norm:
'postnorm/beta': 'postnorm.bias',
'postnorm/gamma': 'postnorm.weight',
'postnorm/moving_mean': 'postnorm.running_mean',
'postnorm/moving_variance': 'postnorm.running_var',
}
# processing tf_key_1
blks = [(1,3), (2,4), (3,6), (4,3)]
for t in blks:
b_idx = t[0]
for u_idx in range(t[1]):
key = 'block{}/unit_{}'.format(b_idx, u_idx + 1)
vaule = 'layer{}.{}'.format(b_idx, u_idx )
map_dict[key] = vaule
# processing tf_key_2
#Example: (tf_key, pt_key)
bottleneck_tf_pt_tuples = [
# Note: 'resnet_v2_50/block1/unit_1/bottleneck_v2/preact/beta/Adam':
# 'Adam' is related to Adam Optimization, so here we do not use it!!!
# Pre-Act: bn0"""
# BN: out = gamma * X_norm + beta, so beta is bias, gamma is weight;
['preact/gamma','bn0.weight'],
['preact/beta', 'bn0.bias'],
['preact/moving_mean', 'bn0.running_mean'],
['preact/moving_variance', 'bn0.running_var'],
#conv1 + bn1 + relu1
['conv1/weights', 'conv1.weight'],
['conv1/BatchNorm/gamma', 'bn1.weight'],
['conv1/BatchNorm/beta', 'bn1.bias'],
['conv1/BatchNorm/moving_mean', 'bn1.running_mean'],
['conv1/BatchNorm/moving_variance', 'bn1.running_var'],
#conv2 + bn2 + relu2
['conv2/weights', 'conv2.weight'],
['conv2/BatchNorm/gamma', 'bn2.weight'],
['conv2/BatchNorm/beta', 'bn2.bias'],
['conv2/BatchNorm/moving_mean', 'bn2.running_mean'],
['conv2/BatchNorm/moving_variance', 'bn2.running_var'],
#conv3
['conv3/weights', 'conv3.weight'],
['conv3/biases', 'conv3.bias'],
#shortcut
['shortcut/weights', 'shortcut.weight'],
['shortcut/biases', 'shortcut.bias'],
]
for cur_tuple in bottleneck_tf_pt_tuples:
map_dict[cur_tuple[0]] = cur_tuple[1]
#print (map_dict)
return map_dict
def map_tf_dictKeys_2PyTorch_dictKeys( map_dict,
tf_key = 'resnet_v2_50/block1/unit_1/bottleneck_v2/conv1/BatchNorm/beta'):
# E.g.:
# tf_key = 'resnet_v2_50/block1/unit_1/bottleneck_v2/conv1/BatchNorm/beta'
# or tf_key = 'resnet_v2_50/conv1/biases'
# 1) skip the first part : 'resnet_v2_50'
tf_key = tf_key[len('resnet_v2_50')+1:]
# 2) find 'bottleneck_v2' if exists, and pick the part before and after 'bottleneck_v2'
pos = tf_key.find('bottleneck_v2')
if pos > 0: # if found 'bottleneck_v2'
tf_key_1, tf_key_2 = tf_key[0:pos-1], tf_key[pos+1+len('bottleneck_v2'):]
else: # no found 'bottleneck_v2'
tf_key_1, tf_key_2 = '', tf_key
# processing tf_key_1
#print (tf_key_1)
pt_key_1 = map_dict[tf_key_1]
#print (pt_key_1)
#print (tf_key_2)
pt_key_2 = map_dict[tf_key_2]
#print (pt_key_2)
if pt_key_1 == '':
pt_key = pt_key_2
else:
pt_key = pt_key_1 + '.' + pt_key_2
#print ("[***] {} --> {}".format(tf_key, pt_key))
return pt_key
#>see https://stackoverflow.com/questions/51628607/pytorch-passing-numpy-array-for-weight-initialization
def set_resnet_parameter_data(layer, parameter_name, new_torch_data):
param = getattr(layer, parameter_name)
param.data = new_torch_data
def pass_np_model_state_to_resnet(src_np_model_state_dict, dst_resnet_model):
map_dict = get_tf2pt_key_map_dict()
dst_state_dict = dst_resnet_model.state_dict()
n_valid = 0
n_adam = 0
tf_var_names = list(src_np_model_state_dict['resnet_v2_50_names'])
N = len(tf_var_names)
for tf_key in sorted(src_np_model_state_dict.keys()):
# Note: 'resnet_v2_50/block1/unit_1/bottleneck_v2/preact/beta/Adam':
# 'Adam' is related to Adam Optimization, so here we do not use it!!!
param = src_np_model_state_dict[tf_key]
if 'Adam' in tf_key:
#print('Adam! {} is only for Adam Optimization, not uesed here!!'.format(tf_key))
n_adam += 1
tf_var_names.remove(tf_key)
continue
elif 'resnet_v2_50_names' == tf_key:
continue
pt_key = map_tf_dictKeys_2PyTorch_dictKeys(map_dict, tf_key)
if pt_key not in dst_state_dict:
print('unexpected ', pt_key, ' !')
continue
if not isinstance(param, np.ndarray):
raise ValueError('Expected a np.ndarray')
else:
# !!! Note: added by CCJ on 2019/10/24;
# tensorflow conv2d weight in size of [kernel_size[0], kernel_size[1], in_channels, out_channels],
# e.g., weight in size [7,7,3,64] means applying 7x7-kernel-size convolution to input image with 3 channel
# and output channel is 64;
# While, PyTorch will have its weight in shape [out_channels, in_channels/groups, kernel_size[0], kernel_size[1]],
# here we assume gropus = 1;
if param.ndim == 4:
param = np.transpose(param, [3,2,0,1])
param = torch.from_numpy(param).contiguous()
try:
dst_state_dict[pt_key].copy_(param)
n_valid += 1
tf_var_names.remove(tf_key)
except:
print(pt_key, ' is inconsistent!')
print ('src np.ndarray in shape {}, dst tensor in shape {}'.format(param.shape,
dst_state_dict[pt_key].shape))
n_valid -= 1
tf_var_names.append(tf_key)
continue
print('%d out of %d variables processed! Wherein:'%(n_valid + n_adam, N))
print(' [***] Copyed state dict for %d variables and finished!' %n_valid)
print(' [***] Skip %d adam variables, which are related to Adam optimaization state' %(n_adam))
print(' [***] {} variables are left unprocessed!'.format(len(tf_var_names)))
if n_valid + n_adam == N:
print (" [***] Resnet_V2_50 loading Numpy weights Succeed!!!")
else:
print (" [***] Resnet_V2_50 loading Numpy weights Failed !!!")
#print('[***] Including: ', tf_var_names)
def load_Res50ModelFromNpyFile(npy_file = '/home/ccj/hmr-rgbd/results/saved_weights/hmr_pre_trained_resnet_v2_50.npy'):
dst_resnet_model = resnet_v2_50()
assert (npy_file is not None)
# this npy file is generated by Python2, due to Tensorflow is installed in Python2;
# load this npy file (generated by Python2) to Python3, due to PyTorch is installed in Python3;
src_np_model_state_dict = np.load(npy_file, allow_pickle= True, encoding = 'latin1').item()
#tmp_name = 'resnet_v2_50/block4/unit_3/bottleneck_v2/conv2/weights'
# check the variable dimensionality
# print should be : [3, 3, 512, 512];
#print(src_np_model_state_dict[tmp_name].shape)
pass_np_model_state_to_resnet(src_np_model_state_dict, dst_resnet_model)
return dst_resnet_model
if __name__ == '__main__':
    # Manual conversion/verification script; toggle sections via their if 0/1 guards.

    # Debug: dump every (name, shape) pair of a fresh resnet_v2_50 state dict.
    if 0:
        print('resnet_v2_50 state_dict():')
        n = 0
        for k, v in resnet_v2_50().state_dict().items():
            print(k, v.shape)
            n += 1
        print(n)
    # Debug: inspect the raw numpy weight dump exported from TensorFlow.
    if 0:
        npy_file = '/home/ccj/hmr-rgbd/results/saved_weights/hmr_pre_trained_resnet_v2_50.npy'
        resnet_dict2 = np.load(npy_file, allow_pickle=True, encoding='latin1').item()
        print('loaded var_names : ', resnet_dict2['resnet_v2_50_names'])
        tmp_name = 'resnet_v2_50/block4/unit_3/bottleneck_v2/conv2/weights'
        # expected shape per the original notes: [3, 3, 512, 512]
        print(resnet_dict2[tmp_name].shape)
    # Convert: numpy weight dump (written by Python 2 / TensorFlow) -> PyTorch .pt checkpoint.
    if 1:
        npy_file = '/home/ccj/hmr-rgbd/results/saved_weights/hmr_pre_trained_resnet_v2_50.npy'
        dst_resnet_model = load_Res50ModelFromNpyFile(npy_file)
        dst_state_dict = dst_resnet_model.state_dict()
        model_path = '/home/ccj/hmr-rgbd/results/saved_weights/hmr_pre_trained_resnet_v2_50.pt'
        torch.save(dst_state_dict, model_path)
        print('saved %s' % model_path)
    # Sanity check: a brand-new model can load the checkpoint that was just saved.
    if 1:
        # Bug fix: the original rebound the name `resnet_v2_50` to a model
        # instance here, shadowing the factory function for any later code.
        model = resnet_v2_50()
        model_path = '/home/ccj/hmr-rgbd/results/saved_weights/hmr_pre_trained_resnet_v2_50.pt'
        model.load_state_dict(torch.load(model_path))
        print('Loading %s' % model_path)
| true | true |
f727081df263bc130ba55eb6cf42a0583ef84e06 | 543 | py | Python | problems/chapter05/Ysi/dp_c.py | tokuma09/algorithm_problems | 58534620df73b230afbeb12de126174362625a78 | [
"CC0-1.0"
] | 1 | 2021-07-07T15:46:58.000Z | 2021-07-07T15:46:58.000Z | problems/chapter05/Ysi/dp_c.py | tokuma09/algorithm_problems | 58534620df73b230afbeb12de126174362625a78 | [
"CC0-1.0"
] | 5 | 2021-06-05T14:16:41.000Z | 2021-07-10T07:08:28.000Z | problems/chapter05/Ysi/dp_c.py | tokuma09/algorithm_problems | 58534620df73b230afbeb12de126174362625a78 | [
"CC0-1.0"
] | null | null | null | def main():
    # Number of days in the schedule.
    n = int(input())
    welfare = []
    # welfare[i][j] = happiness gained by doing activity j (0/1/2) on day i.
    for i in range(n):
        a, b, c = map(int, input().split())
        welfare.append([a, b, c])
    # dp[i][j]: best total happiness through day i when day i's activity is j.
    dp = [[0, 0, 0] for _ in range(n+1)]
    for i in range(1, n+1):
        # The same activity may not be repeated on consecutive days, so each
        # state extends only the other two activities of day i-1.
        dp[i][0] = max(dp[i-1][1] + welfare[i-1][0], dp[i-1][2] + welfare[i-1][0])
        dp[i][1] = max(dp[i-1][0] + welfare[i-1][1], dp[i-1][2] + welfare[i-1][1])
        dp[i][2] = max(dp[i-1][0] + welfare[i-1][2], dp[i-1][1] + welfare[i-1][2])
    ans = max(dp[n])
    print(ans)
if __name__=='__main__':
    main() | 30.166667 | 82 | 0.464088 | def main():
    n = int(input())
    welfare = []
    for i in range(n):
        a, b, c = map(int, input().split())
        welfare.append([a, b, c])
    dp = [[0, 0, 0] for _ in range(n+1)]
    for i in range(1, n+1):
        dp[i][0] = max(dp[i-1][1] + welfare[i-1][0], dp[i-1][2] + welfare[i-1][0])
        dp[i][1] = max(dp[i-1][0] + welfare[i-1][1], dp[i-1][2] + welfare[i-1][1])
        dp[i][2] = max(dp[i-1][0] + welfare[i-1][2], dp[i-1][1] + welfare[i-1][2])
    ans = max(dp[n])
    print(ans)
if __name__=='__main__':
    main() | true | true
f7270905c7aba4a402b7cd24c6eb95248f25ce9c | 1,368 | py | Python | setup.py | RevengeComing/DemonHunter | 8ab5fc0e8e4f33c3e299cba78555f33b96cc28d8 | [
"MIT"
] | 52 | 2017-02-06T10:43:42.000Z | 2022-03-06T02:21:57.000Z | setup.py | RevengeComing/DemonHunter | 8ab5fc0e8e4f33c3e299cba78555f33b96cc28d8 | [
"MIT"
] | 4 | 2017-05-03T23:28:43.000Z | 2018-05-16T18:40:28.000Z | setup.py | RevengeComing/DemonHunter | 8ab5fc0e8e4f33c3e299cba78555f33b96cc28d8 | [
"MIT"
] | 10 | 2017-05-03T23:18:45.000Z | 2022-03-31T13:51:06.000Z | from setuptools import setup, find_packages
long_description = """
DemonHunter is a framework to create a Honeypot network very simple and easy.
"""
# Pinned runtime dependencies installed together with the package.
requirements = [
    "httptools==0.0.11",
    "aiohttp==2.3.10",
    "bcrypt==3.1.4",
    "flask==0.12.2",
    "flask-login==0.4.1",
    "flask-sqlalchemy==2.3.2",
    "flask-sockets==0.2.1",
    "meinheld==0.6.1",
    "click==6.7",
]
setup(
    name='demonhunter',
    version='2.0.3',
    description='A Distributed Honeypot',
    long_description=long_description,
    url='https://github.com/RevengeComing/DemonHunter',
    author='Sepehr Hamzelooy',
    author_email='s.hamzelooy@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.5',
    ],
    install_requires=requirements,
    packages=find_packages(),
    keywords='honeypot honeynet agent',
    # command-line launcher shipped as a plain script
    scripts = [
        'bin/dh_run'
    ],
    # non-Python assets (templates, static files) bundled into the distribution
    package_data = {
        '': ['*.html', '*.js', '*.css'],
        'demonhunter': [
            'nodes/honeypots/http/nginx/*.html',
            'nodes/honeypots/http/apache/*.html',
            'nodes/master/templates/*',
            'nodes/master/static/css/*',
            'nodes/master/static/js/*'
        ],
    }
) | 23.186441 | 77 | 0.574561 | from setuptools import setup, find_packages
long_description = """
DemonHunter is a framework to create a Honeypot network very simple and easy.
"""
# Pinned runtime dependencies.
requirements = [
    "httptools==0.0.11",
    "aiohttp==2.3.10",
    "bcrypt==3.1.4",
    "flask==0.12.2",
    "flask-login==0.4.1",
    "flask-sqlalchemy==2.3.2",
    "flask-sockets==0.2.1",
    "meinheld==0.6.1",
    "click==6.7",
]
setup(
    name='demonhunter',
    version='2.0.3',
    description='A Distributed Honeypot',
    long_description=long_description,
    url='https://github.com/RevengeComing/DemonHunter',
    author='Sepehr Hamzelooy',
    author_email='s.hamzelooy@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.5',
    ],
    install_requires=requirements,
    packages=find_packages(),
    keywords='honeypot honeynet agent',
    scripts = [
        'bin/dh_run'
    ],
    # non-Python assets bundled into the distribution
    package_data = {
        '': ['*.html', '*.js', '*.css'],
        'demonhunter': [
            'nodes/honeypots/http/nginx/*.html',
            'nodes/honeypots/http/apache/*.html',
            'nodes/master/templates/*',
            'nodes/master/static/css/*',
            'nodes/master/static/js/*'
        ],
    }
) | true | true
f727096ddb4e3b582b8a50d866549fed8ea616db | 2,026 | py | Python | exp/python_c3_class_mro/python_c3_mro_anler.py | nicolasessisbreton/fython | 988f5a94cee8b16b0000501a22239195c73424a1 | [
"Apache-2.0"
] | 41 | 2016-01-21T05:14:45.000Z | 2021-11-24T20:37:21.000Z | exp/python_c3_class_mro/python_c3_mro_anler.py | nicolasessisbreton/fython | 988f5a94cee8b16b0000501a22239195c73424a1 | [
"Apache-2.0"
] | 5 | 2016-01-21T05:36:37.000Z | 2016-08-22T19:26:51.000Z | exp/python_c3_class_mro/python_c3_mro_anler.py | nicolasessisbreton/fython | 988f5a94cee8b16b0000501a22239195c73424a1 | [
"Apache-2.0"
] | 3 | 2016-01-23T04:03:44.000Z | 2016-08-21T15:58:38.000Z | # taken from https://gist.github.com/anler/1144867
def C3(cls, *mro_lists):
    """Implementation of the Python's C3 Algorithm.

    Arguments:
        cls -- the class being linearized; it becomes the first MRO entry
        *mro_lists -- existing linearizations (and the direct-base list) to merge

    Returns:
        tuple -- the linearized MRO, starting with ``cls``

    Raises:
        TypeError -- when no consistent ordering of the inputs exists

    Notes:
    * The order of items in an MRO should be preserved in all of
    its future subclasses
    """
    import itertools
    # Make a copy so we don't change existing content
    mro_lists = [list(mro_list[:]) for mro_list in mro_lists]
    # Set up the new MRO with the class itself
    mro = [cls]
    # The real algorithm goes here
    while True:
        # Reset for the next round of tests
        candidate_found = False
        for mro_list in mro_lists:
            if not len(mro_list):
                # Any empty lists are of no use to the algorithm
                continue
            # Get the first item as a potential candidate for the MRO
            candidate = mro_list[0]
            if candidate_found:
                # Candidates promoted to the MRO are no longer of use
                if candidate in mro:
                    mro_list.pop(0)
                # Don't bother checking any more candidates if one was found
                continue
            # See if it's in any position other than first in any of the other lists
            if candidate in itertools.chain(*(x[1:] for x in mro_lists)):
                # Isn't a valid candidate yet and we need to move on to the first class
                # in the next list
                continue
            else:
                # The candidate is valid and should be promoted to the MRO
                mro.append(candidate)
                mro_list.pop(0)
                candidate_found = True
        if not sum(len(mro_list) for mro_list in mro_lists):
            # There are no MROs to cycle through, so we're all done
            break
        if not candidate_found:
            # No valid candidate was available, so we have to bail out
            raise TypeError("Inconsistent MRO")
    return tuple(mro) | 36.836364 | 87 | 0.55923 |
def C3(cls, *mro_lists):
    """Linearize *cls* over *mro_lists* using Python's C3 algorithm.

    Each argument after ``cls`` is an existing linearization (a sequence of
    classes); the result is a tuple that starts with ``cls`` and preserves
    the relative order of every input list.

    Raises:
        TypeError -- when no consistent ordering exists.
    """
    import itertools
    # Work on private copies so the caller's lists are never mutated.
    pending = [list(seq) for seq in mro_lists]
    result = [cls]
    while True:
        promoted = False
        for seq in pending:
            if not seq:
                continue
            head = seq[0]
            if promoted:
                # After a promotion this round, only discard heads that
                # have already been placed into the result.
                if head in result:
                    del seq[0]
                continue
            # A head is a valid candidate iff it appears in no list's tail.
            in_some_tail = head in itertools.chain(*(other[1:] for other in pending))
            if not in_some_tail:
                result.append(head)
                del seq[0]
                promoted = True
        if sum(len(seq) for seq in pending) == 0:
            # Every input list has been consumed: linearization complete.
            return tuple(result)
        if not promoted:
            raise TypeError("Inconsistent MRO")
f72709c4742158734a8a8151b8c373a41c265cb7 | 13,728 | py | Python | jc/parsers/netstat.py | shaikustin/jc | b59e38cfd2c8a7f5868e05d5562557b1c27e5e56 | [
"MIT"
] | 3,215 | 2019-10-24T15:25:56.000Z | 2022-03-31T15:43:01.000Z | jc/parsers/netstat.py | shaikustin/jc | b59e38cfd2c8a7f5868e05d5562557b1c27e5e56 | [
"MIT"
] | 109 | 2019-11-02T16:22:29.000Z | 2022-03-30T17:32:17.000Z | jc/parsers/netstat.py | shaikustin/jc | b59e38cfd2c8a7f5868e05d5562557b1c27e5e56 | [
"MIT"
] | 75 | 2020-02-07T00:16:32.000Z | 2022-03-29T09:29:53.000Z | """jc - JSON CLI output utility `netstat` command output parser
Caveats:
- Use of multiple `l` options is not supported on OSX (e.g. `netstat -rlll`)
- Use of the `A` option is not supported on OSX when using the `r` option (e.g. `netstat -rA`)
Usage (cli):
$ netstat | jc --netstat
or
$ jc netstat
Usage (module):
import jc.parsers.netstat
result = jc.parsers.netstat.parse(netstat_command_output)
Schema:
[
{
"proto": string,
"recv_q": integer,
"send_q": integer,
"transport_protocol" string,
"network_protocol": string,
"local_address": string,
"local_port": string,
"local_port_num": integer,
"foreign_address": string,
"foreign_port": string,
"foreign_port_num": integer,
"state": string,
"program_name": string,
"pid": integer,
"user": string,
"security_context": string,
"refcnt": integer,
"flags": string,
"type": string,
"inode": integer,
"path": string,
"kind": string,
"address": string,
"unix_inode": string,
"conn": string,
"refs": string,
"nextref": string,
"name": string,
"unit": integer,
"vendor": integer,
"class": integer,
"subcla": integer,
"unix_flags": integer,
"pcbcount": integer,
"rcvbuf": integer,
"sndbuf": integer,
"rxbytes": integer,
"txbytes": integer,
"destination": string,
"gateway": string,
"route_flags": string,
"route_flags_pretty": [
string,
]
"route_refs": integer,
"use": integer,
"mtu": integer,
"expire": string,
"genmask": string,
"mss": integer,
"window": integer,
"irtt": integer,
"iface": string,
"metric": integer,
"network": string,
"address": string,
"ipkts": integer, # - = null
"ierrs": integer, # - = null
"idrop": integer, # - = null
"opkts": integer, # - = null
"oerrs": integer, # - = null
"coll": integer, # - = null
"rx_ok": integer,
"rx_err": integer,
"rx_drp": integer,
"rx_ovr": integer,
"tx_ok": integer,
"tx_err": integer,
"tx_drp": integer,
"tx_ovr": integer,
"flg": string,
"ibytes": integer,
"obytes": integer,
"r_mbuf": integer,
"s_mbuf": integer,
"r_clus": integer,
"s_clus": integer,
"r_hiwa": integer,
"s_hiwa": integer,
"r_lowa": integer,
"s_lowa": integer,
"r_bcnt": integer,
"s_bcnt": integer,
"r_bmax": integer,
"s_bmax": integer,
"rexmit": integer,
"ooorcv": integer,
"0_win": integer,
"rexmt": float,
"persist": float,
"keep": float,
"2msl": float,
"delack": float,
"rcvtime": float,
}
]
Examples:
# netstat -apee | jc --netstat -p
[
{
"proto": "tcp",
"recv_q": 0,
"send_q": 0,
"local_address": "localhost",
"foreign_address": "0.0.0.0",
"state": "LISTEN",
"user": "systemd-resolve",
"inode": 26958,
"program_name": "systemd-resolve",
"kind": "network",
"pid": 887,
"local_port": "domain",
"foreign_port": "*",
"transport_protocol": "tcp",
"network_protocol": "ipv4"
},
{
"proto": "tcp",
"recv_q": 0,
"send_q": 0,
"local_address": "0.0.0.0",
"foreign_address": "0.0.0.0",
"state": "LISTEN",
"user": "root",
"inode": 30499,
"program_name": "sshd",
"kind": "network",
"pid": 1186,
"local_port": "ssh",
"foreign_port": "*",
"transport_protocol": "tcp",
"network_protocol": "ipv4"
},
{
"proto": "tcp",
"recv_q": 0,
"send_q": 0,
"local_address": "localhost",
"foreign_address": "localhost",
"state": "ESTABLISHED",
"user": "root",
"inode": 46829,
"program_name": "sshd: root",
"kind": "network",
"pid": 2242,
"local_port": "ssh",
"foreign_port": "52186",
"transport_protocol": "tcp",
"network_protocol": "ipv4",
"foreign_port_num": 52186
},
{
"proto": "tcp",
"recv_q": 0,
"send_q": 0,
"local_address": "localhost",
"foreign_address": "localhost",
"state": "ESTABLISHED",
"user": "root",
"inode": 46828,
"program_name": "ssh",
"kind": "network",
"pid": 2241,
"local_port": "52186",
"foreign_port": "ssh",
"transport_protocol": "tcp",
"network_protocol": "ipv4",
"local_port_num": 52186
},
{
"proto": "tcp6",
"recv_q": 0,
"send_q": 0,
"local_address": "[::]",
"foreign_address": "[::]",
"state": "LISTEN",
"user": "root",
"inode": 30510,
"program_name": "sshd",
"kind": "network",
"pid": 1186,
"local_port": "ssh",
"foreign_port": "*",
"transport_protocol": "tcp",
"network_protocol": "ipv6"
},
{
"proto": "udp",
"recv_q": 0,
"send_q": 0,
"local_address": "localhost",
"foreign_address": "0.0.0.0",
"state": null,
"user": "systemd-resolve",
"inode": 26957,
"program_name": "systemd-resolve",
"kind": "network",
"pid": 887,
"local_port": "domain",
"foreign_port": "*",
"transport_protocol": "udp",
"network_protocol": "ipv4"
},
{
"proto": "raw6",
"recv_q": 0,
"send_q": 0,
"local_address": "[::]",
"foreign_address": "[::]",
"state": "7",
"user": "systemd-network",
"inode": 27001,
"program_name": "systemd-network",
"kind": "network",
"pid": 867,
"local_port": "ipv6-icmp",
"foreign_port": "*",
"transport_protocol": null,
"network_protocol": "ipv6"
},
{
"proto": "unix",
"refcnt": 2,
"flags": null,
"type": "DGRAM",
"state": null,
"inode": 33322,
"program_name": "systemd",
"path": "/run/user/1000/systemd/notify",
"kind": "socket",
"pid": 1607
},
{
"proto": "unix",
"refcnt": 2,
"flags": "ACC",
"type": "SEQPACKET",
"state": "LISTENING",
"inode": 20835,
"program_name": "init",
"path": "/run/udev/control",
"kind": "socket",
"pid": 1
},
...
]
$ netstat -r | jc --netstat -p
[
{
"destination": "default",
"gateway": "gateway",
"genmask": "0.0.0.0",
"route_flags": "UG",
"mss": 0,
"window": 0,
"irtt": 0,
"iface": "ens33",
"kind": "route",
"route_flags_pretty": [
"UP",
"GATEWAY"
]
},
{
"destination": "172.17.0.0",
"gateway": "0.0.0.0",
"genmask": "255.255.0.0",
"route_flags": "U",
"mss": 0,
"window": 0,
"irtt": 0,
"iface": "docker0",
"kind": "route",
"route_flags_pretty": [
"UP"
]
},
{
"destination": "192.168.71.0",
"gateway": "0.0.0.0",
"genmask": "255.255.255.0",
"route_flags": "U",
"mss": 0,
"window": 0,
"irtt": 0,
"iface": "ens33",
"kind": "route",
"route_flags_pretty": [
"UP"
]
}
]
$ netstat -i | jc --netstat -p
[
{
"iface": "ens33",
"mtu": 1500,
"rx_ok": 476,
"rx_err": 0,
"rx_drp": 0,
"rx_ovr": 0,
"tx_ok": 312,
"tx_err": 0,
"tx_drp": 0,
"tx_ovr": 0,
"flg": "BMRU",
"kind": "interface"
},
{
"iface": "lo",
"mtu": 65536,
"rx_ok": 0,
"rx_err": 0,
"rx_drp": 0,
"rx_ovr": 0,
"tx_ok": 0,
"tx_err": 0,
"tx_drp": 0,
"tx_ovr": 0,
"flg": "LRU",
"kind": "interface"
}
]
"""
import jc.utils
class info():
    """Provides parser metadata (version, author, etc.)"""
    version = '1.10'
    description = '`netstat` command parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'
    # compatible options: linux, darwin, cygwin, win32, aix, freebsd
    compatible = ['linux', 'darwin', 'freebsd']
    # command names that select this parser when invoked as `jc netstat`
    # (see the usage examples in the module docstring)
    magic_commands = ['netstat']
# re-export the parser version at module level
__version__ = info.version
def _process(proc_data):
    """
    Final processing to conform to the schema.
    Parameters:
        proc_data: (List of Dictionaries) raw structured data to process
    Returns:
        List of Dictionaries. Structured data to conform to the schema.
    """
    # Keys whose values must be coerced to int / float per the schema.
    int_keys = {'recv_q', 'send_q', 'pid', 'refcnt', 'inode', 'unit', 'vendor', 'class',
                'osx_flags', 'subcla', 'pcbcount', 'rcvbuf', 'sndbuf', 'rxbytes', 'txbytes',
                'route_refs', 'use', 'mtu', 'mss', 'window', 'irtt', 'metric', 'ipkts',
                'ierrs', 'opkts', 'oerrs', 'coll', 'rx_ok', 'rx_err', 'rx_drp', 'rx_ovr',
                'tx_ok', 'tx_err', 'tx_drp', 'tx_ovr', 'idrop', 'ibytes', 'obytes', 'r_mbuf',
                's_mbuf', 'r_clus', 's_clus', 'r_hiwa', 's_hiwa', 'r_lowa', 's_lowa', 'r_bcnt',
                's_bcnt', 'r_bmax', 's_bmax', 'rexmit', 'ooorcv', '0_win'}
    float_keys = {'rexmt', 'persist', 'keep', '2msl', 'delack', 'rcvtime'}

    for entry in proc_data:
        for key in entry:
            if key in int_keys:
                entry[key] = jc.utils.convert_to_int(entry[key])
            if key in float_keys:
                entry[key] = jc.utils.convert_to_float(entry[key])

        # Add numeric companions for port fields when the port parses as a number.
        for src, dest in (('local_port', 'local_port_num'),
                          ('foreign_port', 'foreign_port_num')):
            if src in entry:
                port_num = jc.utils.convert_to_int(entry[src])
                if port_num:
                    entry[dest] = port_num

    return proc_data
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function

    Parameters:
        data:        (string)  text data to parse
        raw:         (boolean) output preprocessed JSON if True
        quiet:       (boolean) suppress warning messages if True
    Returns:
        List of Dictionaries. Raw or processed structured data.
    """
    # Fix: dropped the redundant function-local `import jc.utils`; the module
    # already imports it at the top of the file.
    if not quiet:
        jc.utils.compatibility(__name__, info.compatible)

    cleandata = list(filter(None, data.splitlines()))
    raw_output = []

    # First-line headers that identify FreeBSD/OSX-style netstat output.
    bsd_osx_headers = {
        'Active Internet connections',
        'Active Internet connections (including servers)',
        'Active Multipath Internet connections',
        'Active LOCAL (UNIX) domain sockets',
        'Registered kernel control modules',
        'Active kernel event sockets',
        'Active kernel control sockets',
        'Routing tables',
    }

    if jc.utils.has_data(data):
        # Sub-parsers are imported lazily so only the needed one is loaded.
        if cleandata[0] in bsd_osx_headers or cleandata[0].startswith('Name '):
            import jc.parsers.netstat_freebsd_osx
            raw_output = jc.parsers.netstat_freebsd_osx.parse(cleandata)
        else:
            import jc.parsers.netstat_linux
            raw_output = jc.parsers.netstat_linux.parse(cleandata)

    return raw_output if raw else _process(raw_output)
| 29.908497 | 99 | 0.436844 | import jc.utils
class info():
    """Provides parser metadata (version, author, etc.)"""
    version = '1.10'
    description = '`netstat` command parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'
    # platforms this parser supports
    compatible = ['linux', 'darwin', 'freebsd']
    # command names that select this parser when invoked as `jc netstat`
    magic_commands = ['netstat']
__version__ = info.version
def _process(proc_data):
    """
    Final processing to conform to the schema.
    Parameters:
        proc_data: (List of Dictionaries) raw structured data to process
    Returns:
        List of Dictionaries. Structured data to conform to the schema.
    """
    for entry in proc_data:
        # keys whose values are coerced to int / float per the schema
        int_list = ['recv_q', 'send_q', 'pid', 'refcnt', 'inode', 'unit', 'vendor', 'class',
                    'osx_flags', 'subcla', 'pcbcount', 'rcvbuf', 'sndbuf', 'rxbytes', 'txbytes',
                    'route_refs', 'use', 'mtu', 'mss', 'window', 'irtt', 'metric', 'ipkts',
                    'ierrs', 'opkts', 'oerrs', 'coll', 'rx_ok', 'rx_err', 'rx_drp', 'rx_ovr',
                    'tx_ok', 'tx_err', 'tx_drp', 'tx_ovr', 'idrop', 'ibytes', 'obytes', 'r_mbuf',
                    's_mbuf', 'r_clus', 's_clus', 'r_hiwa', 's_hiwa', 'r_lowa', 's_lowa', 'r_bcnt',
                    's_bcnt', 'r_bmax', 's_bmax', 'rexmit', 'ooorcv', '0_win']
        float_list = ['rexmt', 'persist', 'keep', '2msl', 'delack', 'rcvtime']
        for key in entry:
            if key in int_list:
                entry[key] = jc.utils.convert_to_int(entry[key])
            if key in float_list:
                entry[key] = jc.utils.convert_to_float(entry[key])
        # add numeric companions for port fields when the port parses as a number
        if 'local_port' in entry:
            local_num = jc.utils.convert_to_int(entry['local_port'])
            if local_num:
                entry['local_port_num'] = local_num
        if 'foreign_port' in entry:
            foreign_num = jc.utils.convert_to_int(entry['foreign_port'])
            if foreign_num:
                entry['foreign_port_num'] = foreign_num
    return proc_data
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function
    Parameters:
        data:        (string)  text data to parse
        raw:         (boolean) output preprocessed JSON if True
        quiet:       (boolean) suppress warning messages if True
    Returns:
        List of Dictionaries. Raw or processed structured data.
    """
    import jc.utils  # NOTE(review): redundant — jc.utils is already imported at module level
    if not quiet:
        jc.utils.compatibility(__name__, info.compatible)
    cleandata = list(filter(None, data.splitlines()))
    raw_output = []
    if jc.utils.has_data(data):
        # first-line headers identify FreeBSD/OSX-style output; anything else
        # falls through to the Linux sub-parser
        if cleandata[0] == 'Active Internet connections' \
                or cleandata[0] == 'Active Internet connections (including servers)' \
                or cleandata[0] == 'Active Multipath Internet connections' \
                or cleandata[0] == 'Active LOCAL (UNIX) domain sockets' \
                or cleandata[0] == 'Registered kernel control modules' \
                or cleandata[0] == 'Active kernel event sockets' \
                or cleandata[0] == 'Active kernel control sockets' \
                or cleandata[0] == 'Routing tables' \
                or cleandata[0].startswith('Name '):
            import jc.parsers.netstat_freebsd_osx
            raw_output = jc.parsers.netstat_freebsd_osx.parse(cleandata)
        else:
            import jc.parsers.netstat_linux
            raw_output = jc.parsers.netstat_linux.parse(cleandata)
    if raw:
        return raw_output
    else:
        return _process(raw_output)
| true | true |
f7270a9ece60d01e3332a67758dc9efe26f5976e | 3,799 | py | Python | tests/test_02_app/test_custom_app.py | hairychris/uvicorn-gunicorn-docker | 5c1f3538b14a52676e0723497e1f65947382888b | [
"MIT"
] | null | null | null | tests/test_02_app/test_custom_app.py | hairychris/uvicorn-gunicorn-docker | 5c1f3538b14a52676e0723497e1f65947382888b | [
"MIT"
] | null | null | null | tests/test_02_app/test_custom_app.py | hairychris/uvicorn-gunicorn-docker | 5c1f3538b14a52676e0723497e1f65947382888b | [
"MIT"
] | null | null | null | import time
from pathlib import Path, PurePath
import docker
import pytest
import requests
from ..utils import (
CONTAINER_NAME,
IMAGE_NAME,
get_config,
get_logs,
remove_previous_container,
)
client = docker.from_env()


def verify_container(container, response_text):
    """Assert the expected gunicorn config, prestart-script logs, and HTTP body."""
    cfg = get_config(container)
    for key, expected in [
        ("workers_per_core", 1),
        ("host", "0.0.0.0"),
        ("port", "80"),
        ("loglevel", "info"),
    ]:
        assert cfg[key] == expected
    assert cfg["workers"] >= 2
    assert cfg["bind"] == "0.0.0.0:80"
    logs = get_logs(container)
    for snippet in (
        "Checking for script in /app/prestart.sh",
        "Running script /app/prestart.sh",
        "Running inside /app/prestart.sh, you could add migrations to this file",
    ):
        assert snippet in logs
    assert requests.get("http://127.0.0.1:8000").text == response_text
# Every dockerfile is exercised with both ways of pointing the image at the
# custom app: MODULE_NAME/VARIABLE_NAME and the combined APP_MODULE form.
_DOCKERFILES = [
    ("python3.6.dockerfile", "3.6"),
    ("python3.7.dockerfile", "3.7"),
    ("latest.dockerfile", "3.7"),
    ("python3.6-alpine3.9.dockerfile", "3.6"),
    ("python3.7-alpine3.9.dockerfile", "3.7"),
]
_ENV_STYLES = [
    {"MODULE_NAME": "custom_app.custom_main", "VARIABLE_NAME": "custom_var"},
    {"APP_MODULE": "custom_app.custom_main:custom_var"},
]
_CASES = [
    (
        dockerfile,
        dict(env),
        f"Test app. From Uvicorn with Gunicorn. Using Python {py_version}",
    )
    for env in _ENV_STYLES
    for dockerfile, py_version in _DOCKERFILES
]


@pytest.mark.parametrize("dockerfile,environment,response_text", _CASES)
def test_custom_app(dockerfile, environment, response_text):
    """Build and run the image, verify it, then verify again after a restart."""
    remove_previous_container(client)
    build_context = Path(__file__).parent / "custom_app"
    client.images.build(path=str(build_context), dockerfile=dockerfile, tag=IMAGE_NAME)
    container = client.containers.run(
        IMAGE_NAME,
        name=CONTAINER_NAME,
        environment=environment,
        ports={"80": "8000"},
        detach=True,
    )
    time.sleep(1)
    verify_container(container, response_text)
    container.stop()
    # The container must also come back healthy after being restarted.
    container.start()
    time.sleep(1)
    verify_container(container, response_text)
    container.stop()
    container.remove()
| 33.619469 | 88 | 0.609371 | import time
from pathlib import Path, PurePath
import docker
import pytest
import requests
from ..utils import (
CONTAINER_NAME,
IMAGE_NAME,
get_config,
get_logs,
remove_previous_container,
)
# Module-level Docker client shared by all tests in this file.
client = docker.from_env()
def verify_container(container, response_text):
    """Assert the expected gunicorn config, prestart-script logs, and HTTP body."""
    config_data = get_config(container)
    assert config_data["workers_per_core"] == 1
    assert config_data["host"] == "0.0.0.0"
    assert config_data["port"] == "80"
    assert config_data["loglevel"] == "info"
    assert config_data["workers"] >= 2
    assert config_data["bind"] == "0.0.0.0:80"
    logs = get_logs(container)
    # the prestart script must have been detected and executed
    assert "Checking for script in /app/prestart.sh" in logs
    assert "Running script /app/prestart.sh" in logs
    assert (
        "Running inside /app/prestart.sh, you could add migrations to this file" in logs
    )
    # the app must answer on the published host port with the expected body
    response = requests.get("http://127.0.0.1:8000")
    assert response.text == response_text
# Each case: (dockerfile, environment variables, expected HTTP response body).
# Every dockerfile is tried with both MODULE_NAME/VARIABLE_NAME and APP_MODULE.
@pytest.mark.parametrize(
    "dockerfile,environment,response_text",
    [
        (
            "python3.6.dockerfile",
            {"MODULE_NAME": "custom_app.custom_main", "VARIABLE_NAME": "custom_var"},
            "Test app. From Uvicorn with Gunicorn. Using Python 3.6",
        ),
        (
            "python3.7.dockerfile",
            {"MODULE_NAME": "custom_app.custom_main", "VARIABLE_NAME": "custom_var"},
            "Test app. From Uvicorn with Gunicorn. Using Python 3.7",
        ),
        (
            "latest.dockerfile",
            {"MODULE_NAME": "custom_app.custom_main", "VARIABLE_NAME": "custom_var"},
            "Test app. From Uvicorn with Gunicorn. Using Python 3.7",
        ),
        (
            "python3.6-alpine3.9.dockerfile",
            {"MODULE_NAME": "custom_app.custom_main", "VARIABLE_NAME": "custom_var"},
            "Test app. From Uvicorn with Gunicorn. Using Python 3.6",
        ),
        (
            "python3.7-alpine3.9.dockerfile",
            {"MODULE_NAME": "custom_app.custom_main", "VARIABLE_NAME": "custom_var"},
            "Test app. From Uvicorn with Gunicorn. Using Python 3.7",
        ),
        (
            "python3.6.dockerfile",
            {"APP_MODULE": "custom_app.custom_main:custom_var"},
            "Test app. From Uvicorn with Gunicorn. Using Python 3.6",
        ),
        (
            "python3.7.dockerfile",
            {"APP_MODULE": "custom_app.custom_main:custom_var"},
            "Test app. From Uvicorn with Gunicorn. Using Python 3.7",
        ),
        (
            "latest.dockerfile",
            {"APP_MODULE": "custom_app.custom_main:custom_var"},
            "Test app. From Uvicorn with Gunicorn. Using Python 3.7",
        ),
        (
            "python3.6-alpine3.9.dockerfile",
            {"APP_MODULE": "custom_app.custom_main:custom_var"},
            "Test app. From Uvicorn with Gunicorn. Using Python 3.6",
        ),
        (
            "python3.7-alpine3.9.dockerfile",
            {"APP_MODULE": "custom_app.custom_main:custom_var"},
            "Test app. From Uvicorn with Gunicorn. Using Python 3.7",
        ),
    ],
)
def test_custom_app(dockerfile, environment, response_text):
    """Build and run the image, verify it, then verify again after a restart."""
    remove_previous_container(client)
    test_path: PurePath = Path(__file__)
    path = test_path.parent / "custom_app"
    client.images.build(path=str(path), dockerfile=dockerfile, tag=IMAGE_NAME)
    container = client.containers.run(
        IMAGE_NAME,
        name=CONTAINER_NAME,
        environment=environment,
        ports={"80": "8000"},
        detach=True,
    )
    time.sleep(1)
    verify_container(container, response_text)
    container.stop()
    # the container must also come back healthy after being restarted
    container.start()
    time.sleep(1)
    verify_container(container, response_text)
    container.stop()
    container.remove()
| true | true |
f7270af0eb3dba69ec7cddb1fdb8c33f7344108d | 1,231 | py | Python | src/modules/agents/noisy_agents.py | mariuslindegaard/6.867_MARL_project | 572b88b4d491db8a1673535868f4bf9aff58f73d | [
"Apache-2.0"
] | 401 | 2021-02-23T02:42:42.000Z | 2022-03-21T08:22:37.000Z | src/modules/agents/noisy_agents.py | mariuslindegaard/6.867_MARL_project | 572b88b4d491db8a1673535868f4bf9aff58f73d | [
"Apache-2.0"
] | 21 | 2021-04-10T10:05:07.000Z | 2022-03-29T10:09:03.000Z | src/modules/agents/noisy_agents.py | mariuslindegaard/6.867_MARL_project | 572b88b4d491db8a1673535868f4bf9aff58f73d | [
"Apache-2.0"
] | 90 | 2021-02-15T08:37:04.000Z | 2022-03-21T06:37:15.000Z | import torch.nn as nn
import torch.nn.functional as F
from utils.noisy_liner import NoisyLinear
from torch.nn import LayerNorm
class NoisyRNNAgent(nn.Module):
    """GRU-based agent network whose output layer is a NoisyLinear."""
    def __init__(self, input_shape, args):
        super(NoisyRNNAgent, self).__init__()
        self.args = args
        self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
        self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
        # noisy output head producing one value per action
        self.fc2 = NoisyLinear(args.rnn_hidden_dim, args.n_actions, True, args.device)
        # optional LayerNorm applied to the GRU output before the head
        if getattr(args, "use_layer_norm", False):
            self.layer_norm = LayerNorm(args.rnn_hidden_dim)
    def init_hidden(self):
        # make hidden states on same device as model
        return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()
    def forward(self, inputs, hidden_state):
        # b, a, e — presumably (batch, n_agents, input_shape); confirm with caller
        b, a, e = inputs.size()
        # fold batch and agent dims together so fc1/rnn see 2-D input
        inputs = inputs.view(-1, e)
        x = F.relu(self.fc1(inputs), inplace=True)
        h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
        hh = self.rnn(x, h_in)
        if getattr(self.args, "use_layer_norm", False):
            q = self.fc2(self.layer_norm(hh))
        else:
            q = self.fc2(hh)
        # returns (q-values, next hidden state), both reshaped back to (b, a, -1)
        return q.view(b, a, -1), hh.view(b, a, -1) | 35.171429 | 86 | 0.640942 | import torch.nn as nn
import torch.nn.functional as F
from utils.noisy_liner import NoisyLinear
from torch.nn import LayerNorm
class NoisyRNNAgent(nn.Module):
    """GRU-based agent network whose output layer is a NoisyLinear."""

    def __init__(self, input_shape, args):
        super().__init__()
        self.args = args
        self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
        self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
        self.fc2 = NoisyLinear(args.rnn_hidden_dim, args.n_actions, True, args.device)
        if getattr(args, "use_layer_norm", False):
            self.layer_norm = LayerNorm(args.rnn_hidden_dim)

    def init_hidden(self):
        # Allocate a zero state via fc1's weight so it shares device/dtype.
        return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()

    def forward(self, inputs, hidden_state):
        batch, agents, feats = inputs.size()
        # Fold batch and agent dimensions together for the 2-D layers.
        flat = inputs.view(-1, feats)
        hidden_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
        next_hidden = self.rnn(F.relu(self.fc1(flat), inplace=True), hidden_in)
        if getattr(self.args, "use_layer_norm", False):
            pre_head = self.layer_norm(next_hidden)
        else:
            pre_head = next_hidden
        q = self.fc2(pre_head)
        return q.view(batch, agents, -1), next_hidden.view(batch, agents, -1)
f7270bef7a963e5cdee0174d9826895442fbf65b | 11,468 | py | Python | modules/flow.py | aasensio/bayesDI | 4ddad57d89c3512b4c4ee5684ddc5608060ebdec | [
"MIT"
] | 2 | 2021-08-20T07:59:05.000Z | 2021-12-02T20:19:48.000Z | modules/flow.py | aasensio/bayesDI | 4ddad57d89c3512b4c4ee5684ddc5608060ebdec | [
"MIT"
] | null | null | null | modules/flow.py | aasensio/bayesDI | 4ddad57d89c3512b4c4ee5684ddc5608060ebdec | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torch.nn.functional as F
from nflows import transforms, distributions, flows, utils
import nflows.nn.nets as nn_
import matplotlib.pyplot as pl
from modules import resnet
# https://github.com/stephengreen/lfi-gw/blob/master/lfigw/nde_flows.py
def create_linear_transform(input_dim):
    """Create the composite linear transform PLU.

    Arguments:
        input_dim {int} -- dimension of the space

    Returns:
        Transform -- nde.Transform object
    """
    return transforms.CompositeTransform([
        transforms.RandomPermutation(features=input_dim),
        transforms.LULinear(input_dim, identity_init=True),
    ])
def create_base_transform(i,
                          input_dim,
                          context_dim,
                          hidden_dim=512,
                          num_transform_blocks=2,
                          activation='relu',
                          dropout_probability=0.0,
                          batch_norm=False,
                          num_bins=8,
                          tail_bound=1.,
                          apply_unconditional_transform=False,
                          base_transform_type='rq-coupling',
                          transform_net='conv'):
    """Build a base NSF transform of x, conditioned on y.

    Uses PiecewiseRationalQuadraticCoupling or the masked autoregressive
    variant from the Neural Spline Flow paper
    (https://arxiv.org/abs/1906.04032); adapted from the uci.py example in
    https://github.com/bayesiains/nsf.

    Arguments:
        i {int} -- index of transform in sequence (controls the coupling mask parity)
        input_dim {int} -- dimensionality of x
        context_dim {int} -- dimensionality of the conditioning data y
    Keyword Arguments:
        hidden_dim {int} -- hidden units per layer of the transform net
        num_transform_blocks {int} -- residual blocks in the transform net
        activation {str} -- 'elu', 'relu' or 'leaky_relu' (falls back to relu)
        dropout_probability {float} -- dropout inside the transform net
        batch_norm {bool} -- use batch normalization in the transform net
        num_bins {int} -- number of spline bins
        tail_bound {float} -- spline tail bound
        apply_unconditional_transform {bool} -- also transform the fixed half
        base_transform_type {str} -- 'rq-coupling' or 'rq-autoregressive'
        transform_net {str} -- 'fc' (ResidualNet) or 'conv' (ConvResidualNet1d)
    Returns:
        Transform -- the NSF transform
    Raises:
        ValueError -- on an unknown base_transform_type or transform_net
    """
    activations = {'elu': F.elu, 'relu': F.relu, 'leaky_relu': F.leaky_relu}
    if activation in activations:
        activation_fn = activations[activation]
    else:
        activation_fn = F.relu  # Default
        print('Invalid activation function specified. Using ReLU.')
    if base_transform_type == 'rq-coupling':
        # Alternate which half of the components is held fixed at each step.
        mask = utils.create_alternating_binary_mask(input_dim, even=(i % 2 == 0))
        # Build the factory for the conditioner network.  The original code
        # reassigned the `transform_net` parameter itself and silently passed
        # an unknown string straight into nflows; dispatch explicitly instead.
        if transform_net == 'fc':
            def transform_net_fn(in_features, out_features):
                return nn_.ResidualNet(
                    in_features=in_features,
                    out_features=out_features,
                    hidden_features=hidden_dim,
                    context_features=context_dim,
                    num_blocks=num_transform_blocks,
                    activation=activation_fn,
                    dropout_probability=dropout_probability,
                    use_batch_norm=batch_norm)
        elif transform_net == 'conv':
            def transform_net_fn(in_features, out_features):
                return resnet.ConvResidualNet1d(
                    in_channels=1,
                    out_channels=out_features // in_features,
                    hidden_channels=hidden_dim,
                    context_channels=context_dim,
                    num_blocks=num_transform_blocks,
                    activation=activation_fn,
                    dropout_probability=dropout_probability,
                    use_batch_norm=batch_norm)
        else:
            raise ValueError("Unknown transform_net '{}'; expected 'fc' or 'conv'"
                             .format(transform_net))
        transform = transforms.PiecewiseRationalQuadraticCouplingTransform(
            mask=mask,
            transform_net_create_fn=transform_net_fn,
            num_bins=num_bins,
            tails='linear',
            tail_bound=tail_bound,
            apply_unconditional_transform=apply_unconditional_transform
        )
    elif base_transform_type == 'rq-autoregressive':
        transform = transforms.MaskedPiecewiseRationalQuadraticAutoregressiveTransform(
            features=input_dim,
            hidden_features=hidden_dim,
            context_features=context_dim,
            num_bins=num_bins,
            tails='linear',
            tail_bound=tail_bound,
            num_blocks=num_transform_blocks,
            use_residual_blocks=True,
            random_mask=False,
            activation=activation_fn,
            dropout_probability=dropout_probability,
            use_batch_norm=batch_norm
        )
    else:
        raise ValueError("Unknown base_transform_type '{}'".format(base_transform_type))
    return transform
def create_transform(input_dim, context_dim, num_flow_steps, base_transform_kwargs):
    """Build the full sequence of NSF transforms mapping parameters x to the
    base (noise) distribution, conditioned on data y.

    Each step is a linear transform (which permutes components) followed by
    an NSF transform conditioned on y; one final linear transform closes the
    chain.  Adapted from the uci.py example in
    https://github.com/bayesiains/nsf.

    Arguments:
        input_dim {int} -- dimensionality of x
        context_dim {int} -- dimensionality of y
        num_flow_steps {int} -- number of (linear + NSF) steps
        base_transform_kwargs {dict} -- hyperparameters for each NSF step
    Returns:
        Transform -- the constructed composite transform
    """
    steps = []
    for i in range(num_flow_steps):
        steps.append(transforms.CompositeTransform([
            create_linear_transform(input_dim),
            create_base_transform(i, input_dim, context_dim=context_dim,
                                  **base_transform_kwargs),
        ]))
    # Final linear transform after the last flow step.
    steps.append(create_linear_transform(input_dim))
    return transforms.CompositeTransform(steps)
def fun(input_dim):
    # NOTE(review): returns the enclosing function object itself and ignores
    # input_dim; looks like leftover scaffolding -- confirm it is unused.
    return fun
def create_nsf_model(input_dim, context_dim, num_flow_steps, base_transform_kwargs, learn_normal=False):
    """Build an NSF (neural spline flow) model for the posterior p(x|y),
    using the nsf module available at https://github.com/bayesiains/nsf.

    The model is a base distribution of dim(x) plus a sequence of transforms,
    each conditioned on y.

    Arguments:
        input_dim {int} -- dimensionality of x
        context_dim {int} -- dimensionality of y
        num_flow_steps {int} -- number of sequential transforms
        base_transform_kwargs {dict} -- hyperparameters for transform steps
    Keyword Arguments:
        learn_normal {bool} -- use a learnable DiagonalNormal base instead of
                               a fixed StandardNormal (default: {False})
    Returns:
        Flow -- the model
    """
    # Base distribution: learnable diagonal normal, or fixed standard normal.
    if learn_normal:
        base_distribution = distributions.DiagonalNormal(shape=(input_dim,))
    else:
        base_distribution = distributions.StandardNormal(shape=(input_dim,))
    # Conditional neural spline transform stack.
    transform = create_transform(input_dim, context_dim, num_flow_steps, base_transform_kwargs)
    # Create the flow
    flow = flows.Flow(transform=transform, distribution=base_distribution)
    # Keep the hyperparameters so the model can be reconstructed after loading.
    flow.model_hyperparams = {
        'input_dim': input_dim,
        'num_flow_steps': num_flow_steps,
        'context_dim': context_dim,
        'base_transform_kwargs': base_transform_kwargs
    }
    return flow
def obtain_samples(flow, y, nsamples, device=None, batch_size=512):
    """Draw samples from the posterior p(x|y).

    Arguments:
        flow {Flow} -- NSF model
        y {array} -- strain data (conditioning context)
        nsamples {int} -- number of samples desired
    Keyword Arguments:
        device {torch.device} -- model device (CPU or GPU) (default: {None})
        batch_size {int} -- batch size for sampling (default: {512})
    Returns:
        Tensor -- samples
    """
    with torch.no_grad():
        flow.eval()
        context = torch.from_numpy(y).unsqueeze(0).to(device)
        # Sample in batches of batch_size plus one trailing partial batch.
        # Batching is done manually because the nsf package's own batching
        # concatenates on the wrong axis.
        counts = [batch_size] * (nsamples // batch_size)
        leftover = nsamples % batch_size
        if leftover:
            counts.append(leftover)
        batches = [flow.sample(count, context) for count in counts]
    return torch.cat(batches, dim=1)[0]
if (__name__ == '__main__'):
    # Smoke test: build a small NSF model (20-dim parameters, 1-dim context,
    # 3 flow steps) with lightweight hyperparameters.
    base_transform_kwargs = {
        'hidden_dim': 50,
        'num_transform_blocks': 2,
        'activation': 'relu',
        'dropout_probability': 0.0,
        'batch_norm': False,
        'num_bins': 10,
        'tail_bound': 3.0,
        'apply_unconditional_transform': False
    }
    model = create_nsf_model(20, 1, 3, base_transform_kwargs)
    # context = np.array([[2.]])
    # context = torch.tensor(context.astype('float32'))
    # samples = model.sample(5000, context).detach().cpu().numpy()
    # pl.plot(samples[0,:,0], samples[0,:,1], '.')
# pl.show() | 42.791045 | 155 | 0.593129 | import numpy as np
import torch
import torch.nn.functional as F
from nflows import transforms, distributions, flows, utils
import nflows.nn.nets as nn_
import matplotlib.pyplot as pl
from modules import resnet
def create_linear_transform(input_dim):
    """Compose a random permutation with an LU-parametrized linear layer."""
    permutation = transforms.RandomPermutation(features = input_dim)
    linear = transforms.LULinear(input_dim, identity_init=True)
    return transforms.CompositeTransform([permutation, linear])
def create_base_transform(i,
                          input_dim,
                          context_dim,
                          hidden_dim=512,
                          num_transform_blocks=2,
                          activation='relu',
                          dropout_probability=0.0,
                          batch_norm=False,
                          num_bins=8,
                          tail_bound=1.,
                          apply_unconditional_transform=False,
                          base_transform_type='rq-coupling',
                          transform_net='conv'):
    """Build one neural-spline-flow base transform of x conditioned on y.

    base_transform_type selects 'rq-coupling' or 'rq-autoregressive';
    transform_net selects the conditioner network ('fc' or 'conv').
    """
    # Map the activation name to a function; unknown names fall back to ReLU.
    if activation == 'elu':
        activation_fn = F.elu
    elif activation == 'relu':
        activation_fn = F.relu
    elif activation == 'leaky_relu':
        activation_fn = F.leaky_relu
    else:
        activation_fn = F.relu
        print('Invalid activation function specified. Using ReLU.')
    if base_transform_type == 'rq-coupling':
        # Alternate which half of the components is held fixed at each step.
        mask = utils.create_alternating_binary_mask(input_dim, even=(i % 2 == 0))
        # NOTE(review): the parameter `transform_net` is reassigned to a
        # factory below; a value other than 'fc'/'conv' is passed through
        # unchecked and fails deep inside nflows.
        if (transform_net == 'fc'):
            transform_net = lambda in_features, out_features: nn_.ResidualNet(
                in_features = in_features,
                out_features = out_features,
                hidden_features = hidden_dim,
                context_features = context_dim,
                num_blocks = num_transform_blocks,
                activation = activation_fn,
                dropout_probability = dropout_probability,
                use_batch_norm = batch_norm)
        if (transform_net == 'conv'):
            transform_net = lambda in_features, out_features: resnet.ConvResidualNet1d(
                in_channels = 1,
                out_channels = out_features // in_features,
                hidden_channels = hidden_dim,
                context_channels = context_dim,
                num_blocks = num_transform_blocks,
                activation = activation_fn,
                dropout_probability = dropout_probability,
                use_batch_norm = batch_norm)
        transform = transforms.PiecewiseRationalQuadraticCouplingTransform(
            mask = mask,
            transform_net_create_fn = transform_net,
            num_bins = num_bins,
            tails = 'linear',
            tail_bound = tail_bound,
            apply_unconditional_transform = apply_unconditional_transform
        )
    elif base_transform_type == 'rq-autoregressive':
        transform = transforms.MaskedPiecewiseRationalQuadraticAutoregressiveTransform(
            features=input_dim,
            hidden_features=hidden_dim,
            context_features=context_dim,
            num_bins=num_bins,
            tails='linear',
            tail_bound=tail_bound,
            num_blocks=num_transform_blocks,
            use_residual_blocks=True,
            random_mask=False,
            activation=activation_fn,
            dropout_probability=dropout_probability,
            use_batch_norm=batch_norm
        )
    else:
        raise ValueError
    return transform
def create_transform(input_dim, context_dim, num_flow_steps, base_transform_kwargs):
    """Chain num_flow_steps (linear + NSF) steps plus a final linear transform."""
    transform = transforms.CompositeTransform([
        transforms.CompositeTransform([
            create_linear_transform(input_dim),
            create_base_transform(i, input_dim, context_dim=context_dim, **base_transform_kwargs)
        ]) for i in range(num_flow_steps)] + [create_linear_transform(input_dim)])
    return transform
def fun(input_dim):
    # NOTE(review): returns the function object itself; appears to be dead code.
    return fun
def create_nsf_model(input_dim, context_dim, num_flow_steps, base_transform_kwargs, learn_normal=False):
    """Build an NSF model for p(x|y): a base distribution of dim(x) plus a
    stack of transforms conditioned on y."""
    # Base distribution: learnable diagonal normal, or fixed standard normal.
    if (learn_normal):
        base_distribution = distributions.DiagonalNormal(shape=(input_dim,))
    else:
        base_distribution = distributions.StandardNormal(shape=(input_dim,))
    transform = create_transform(input_dim, context_dim, num_flow_steps, base_transform_kwargs)
    flow = flows.Flow(transform=transform, distribution=base_distribution)
    # Keep hyperparameters so the model can be reconstructed after loading.
    flow.model_hyperparams = {
        'input_dim': input_dim,
        'num_flow_steps': num_flow_steps,
        'context_dim': context_dim,
        'base_transform_kwargs': base_transform_kwargs
    }
    return flow
def obtain_samples(flow, y, nsamples, device=None, batch_size=512):
    """Draw nsamples posterior samples from flow conditioned on data y.

    Sampling is batched manually (batch_size at a time plus one partial
    batch) and the batches concatenated on dim 1.
    """
    with torch.no_grad():
        flow.eval()
        y = torch.from_numpy(y).unsqueeze(0).to(device)
        num_batches = nsamples // batch_size
        num_leftover = nsamples % batch_size
        samples = [flow.sample(batch_size, y) for _ in range(num_batches)]
        if num_leftover > 0:
            samples.append(flow.sample(num_leftover, y))
        return torch.cat(samples, dim=1)[0]
if (__name__ == '__main__'):
    # Smoke test: build a small demo model (20-dim parameters, 1-dim context,
    # 3 flow steps).
    base_transform_kwargs = {
        'hidden_dim': 50,
        'num_transform_blocks': 2,
        'activation': 'relu',
        'dropout_probability': 0.0,
        'batch_norm': False,
        'num_bins': 10,
        'tail_bound': 3.0,
        'apply_unconditional_transform': False
    }
    model = create_nsf_model(20, 1, 3, base_transform_kwargs)
| true | true |
f7270cc5a74622d850496e16ffaf8362ce017691 | 3,489 | py | Python | WebServer/microservices/dispatcher/auth_token.py | AnneEjsing/TrafficDataAnonymisation | 6ee5b4a46d53a656299d6a53896175b78008228a | [
"MIT"
] | 1 | 2020-03-12T13:27:58.000Z | 2020-03-12T13:27:58.000Z | WebServer/microservices/dispatcher/auth_token.py | AnneEjsing/TrafficDataAnonymisation | 6ee5b4a46d53a656299d6a53896175b78008228a | [
"MIT"
] | 7 | 2020-04-02T12:47:45.000Z | 2022-03-02T07:35:49.000Z | WebServer/microservices/dispatcher/auth_token.py | AnneEjsing/Traffic-Data-Anonymisation-Web | 6ee5b4a46d53a656299d6a53896175b78008228a | [
"MIT"
] | null | null | null | import base64
import requests
import json
import hashlib
import hmac
from enum import IntEnum
from datetime import datetime, timedelta
import os
secretKey = os.getenv("SECRET_KEY")
def valid_token(token):
    """Return True when *token* has the three dot-separated JWT segments."""
    return token.count('.') == 2
def verify_credentials(email, pwd):
    """Check email/password against the profile service.

    Returns (True, user_id, role) on HTTP 200, otherwise (False, "", "").
    NOTE(review): sends a JSON body with a GET request; some HTTP stacks and
    proxies drop GET bodies -- confirm the profile service accepts this.
    """
    data = {"email": email, "password": pwd}
    resp = requests.request(method='get', url='http://profileservice:1338/login', headers={'content-type': 'text/json'}, json=data)
    if (resp.status_code == 200):
        json_data = resp.json()
        return (True, json_data['user_id'], json_data['role'])
    else:
        return (False, "", "")
def is_not_expired(token):
    """Return True when *token* is well-formed and its 'exp' claim is in the future."""
    if not valid_token(token):
        return False
    _, payload, _ = token.split('.')
    *_, expiration = get_payload_info(payload)
    return verify_date(expiration)
def authenticate(token):
    """Verify *token*'s HMAC signature, then its expiry.

    Returns True only when the recomputed signature matches the token's
    third segment and the token has not expired.
    """
    if not valid_token(token):
        return False
    header, payload, signature = token.split('.')
    new_signature = encode(create_signature(header, payload))
    # Use a constant-time comparison so the check does not leak how many
    # leading characters of the signature are correct (timing side channel).
    if hmac.compare_digest(new_signature, signature):
        return is_not_expired(token)
    else:
        return False
def verify_token(token, desired_rights):
    """Authenticate then authorize *token*.

    Returns (True, 200) on success, (False, 401) when authentication fails,
    and (False, 403) when the role does not match *desired_rights*.
    """
    if not authenticate(token):
        return False, 401
    if not is_authorized(token, desired_rights):
        return False, 403
    return True, 200
def is_authorized(token, desired_rights):
    """Return True when the token's 'rights' claim equals *desired_rights*."""
    if not valid_token(token):
        return False
    _, payload, _ = token.split('.')
    _, _, role, _ = get_payload_info(payload)
    return role == desired_rights
def get_user_id(token):
    """Extract the 'sub' (user id) claim, or None for a malformed token."""
    if not valid_token(token):
        return None
    _, payload, _ = token.split('.')
    _, subject, _, _ = get_payload_info(payload)
    return subject
def get_rights(token):
    """Extract the 'rights' (role) claim, or None for a malformed token."""
    if not valid_token(token):
        return None
    _, payload, _ = token.split('.')
    _, _, role, _ = get_payload_info(payload)
    return role
def verify_date(date):
    """Return True when *date* ('%Y-%m-%dT%H:%M:%S.%f') lies in the future (UTC)."""
    expiry = datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%f')
    return datetime.utcnow() < expiry
def get_payload_info(payload):
    """Decode a base64url payload segment and return (jid, sub, rights, exp).

    The '=' padding stripped by encode() is restored before decoding.
    'jid' is optional: create_payload() in this module never writes it, so
    tokens minted here would otherwise raise KeyError -- use .get() and
    return None when it is absent.
    """
    text = base64.urlsafe_b64decode(payload + '=' * (4 - len(payload) % 4))
    json_obj = json.loads(text)
    return json_obj.get('jid'), json_obj['sub'], json_obj['rights'], json_obj['exp']
def create_token(user_id, rights):
    """Mint a signed JWT-style token: base64url(header).base64url(payload).base64url(signature)."""
    header = encode(json.dumps({"alg": "HS512", "type": "JWT"}))
    payload = encode(create_payload(user_id, rights))
    # Signature covers the already-encoded header and payload segments.
    signature = encode(create_signature(header, payload))
    return '.'.join((header, payload, signature))
def encode(encoding_input):
    """Base64url-encode *encoding_input* (str or bytes) without '=' padding."""
    raw = encoding_input.encode() if isinstance(encoding_input, str) else encoding_input
    encoded = base64.urlsafe_b64encode(raw).decode('utf-8')
    return encoded.replace('=', '')
def create_payload(user_id, rights):
    """Serialize the token claims: subject, rights and a 3-hour expiry."""
    claims = {'sub': user_id, 'rights': rights, 'exp': generate_token_exp_time()}
    return json.dumps(claims)
def create_signature(header, payload):
    """HMAC-SHA512 digest of 'header.payload' keyed with SECRET_KEY.

    NOTE(review): secretKey comes from os.getenv and is None when the env
    var is unset, which makes str.encode raise -- confirm deployments always
    set SECRET_KEY.
    """
    return hmac.new(str.encode(secretKey), str.encode(header + '.' + payload), hashlib.sha512).digest()
def generate_token_exp_time():
    """ISO-formatted UTC timestamp three hours from now."""
    expiry = datetime.utcnow() + timedelta(hours=3)
    return expiry.isoformat()
| 28.137097 | 131 | 0.665807 | import base64
import requests
import json
import hashlib
import hmac
from enum import IntEnum
from datetime import datetime, timedelta
import os
secretKey = os.getenv("SECRET_KEY")
def valid_token(token):
    """True when *token* has the three dot-separated JWT segments."""
    return ('.' in token) and len(token.split('.')) == 3
def verify_credentials(email, pwd):
    """Check credentials against the profile service; (ok, user_id, role)."""
    data = {"email": email, "password": pwd}
    resp = requests.request(method='get', url='http://profileservice:1338/login', headers={'content-type': 'text/json'}, json=data)
    if (resp.status_code == 200):
        json_data = resp.json()
        return (True, json_data['user_id'], json_data['role'])
    else:
        return (False, "", "")
def is_not_expired(token):
    """True when *token* is well-formed and its 'exp' claim is in the future."""
    if not valid_token(token):
        return False
    header, payload, signature = token.split('.')
    id, subject, role, expiration = get_payload_info(payload)
    is_valid = verify_date(expiration)
    if not is_valid:
        return False
    else:
        return True
def authenticate(token):
    """Recompute and compare the HMAC signature, then check expiry.

    NOTE(review): plain == comparison of signatures is not constant-time
    and can leak a timing side channel; hmac.compare_digest is safer.
    """
    if not valid_token(token):
        return False
    header, payload, signature = token.split('.')
    new_signature = encode(create_signature(header, payload))
    if (new_signature == signature):
        return is_not_expired(token)
    else:
        return False
def verify_token(token, desired_rights):
    """Authenticate then authorize; returns (ok, http_status): 200/401/403."""
    is_success = authenticate(token)
    if (is_success):
        is_auth = is_authorized(token, desired_rights)
        if (is_auth):
            return True, 200
        else:
            return False, 403
    else:
        return False, 401
def is_authorized(token, desired_rights):
    """True when the token's 'rights' claim equals *desired_rights*."""
    if not valid_token(token):
        return False
    header, payload, signature = token.split('.')
    id, subject, role, expiration = get_payload_info(payload)
    return role == desired_rights
def get_user_id(token):
    """Extract the 'sub' (user id) claim, or None for a malformed token."""
    if not valid_token(token):
        return None
    header, payload, signature = token.split('.')
    id, subject, role, expiration = get_payload_info(payload)
    return subject
def get_rights(token):
    """Extract the 'rights' (role) claim, or None for a malformed token."""
    if not valid_token(token):
        return None
    header, payload, signature = token.split('.')
    id, subject, role, expiration = get_payload_info(payload)
    return role
def verify_date(date):
    """True when *date* ('%Y-%m-%dT%H:%M:%S.%f') lies in the future (UTC)."""
    return (datetime.utcnow() < datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%f'))
def get_payload_info(payload):
    """Decode a base64url payload segment into (jid, sub, rights, exp).

    NOTE(review): reads 'jid', but create_payload() below never writes it,
    so tokens minted by this module would raise KeyError here -- confirm.
    """
    text = base64.urlsafe_b64decode(payload + '=' * (4 - len(payload) % 4))
    json_obj = json.loads(text)
    return json_obj['jid'], json_obj['sub'], json_obj['rights'], json_obj['exp']
def create_token(user_id, rights):
    """Mint a signed JWT-style token: header.payload.signature (base64url)."""
    header = encode(json.dumps({"alg": "HS512", "type": "JWT"}))
    payload = encode(create_payload(user_id, rights))
    signature = encode(create_signature(header, payload))
    return '.'.join([header, payload, signature])
def encode(encoding_input):
    """Base64url-encode a str or bytes value and strip '=' padding."""
    if (isinstance(encoding_input, str)):
        byte = str.encode(encoding_input)
    else:
        byte = encoding_input
    b64 = base64.urlsafe_b64encode(byte)
    res = b64.decode('utf-8')
    return res.replace('=', '')
def create_payload(user_id, rights):
    """Serialize the token claims: subject, rights and a 3-hour expiry."""
    return json.dumps({'sub': user_id, 'rights': rights, 'exp': generate_token_exp_time()})
def create_signature(header, payload):
    """HMAC-SHA512 of 'header.payload' keyed with SECRET_KEY (may be None if unset)."""
    return hmac.new(str.encode(secretKey), str.encode(header + '.' + payload), hashlib.sha512).digest()
def generate_token_exp_time():
    """ISO-formatted UTC timestamp three hours from now."""
    return (datetime.utcnow() + timedelta(hours=3)).isoformat()
| true | true |
f7270eb6f31c026c910661b3d770b077b26405bb | 990 | py | Python | scenedetect/main.py | zhaipro/MySceneDetect | fbbe085b05e916d52253ffddd91848c3e85b2fe9 | [
"MIT"
] | null | null | null | scenedetect/main.py | zhaipro/MySceneDetect | fbbe085b05e916d52253ffddd91848c3e85b2fe9 | [
"MIT"
] | null | null | null | scenedetect/main.py | zhaipro/MySceneDetect | fbbe085b05e916d52253ffddd91848c3e85b2fe9 | [
"MIT"
] | 2 | 2019-11-27T04:44:11.000Z | 2020-01-15T05:32:59.000Z | import sys
import time
import cv2
import numpy as np
def scenedetect(cap, threshold=30, min_scene_len=15):
    """Yield (first_frame, last_frame, delta_hsv) scene cuts from *cap*.

    A cut is declared when the mean absolute HSV difference between
    consecutive downscaled frames reaches *threshold* and the current scene
    spans at least *min_scene_len* frames.  A final tuple for the trailing
    scene is always yielded with delta 0.
    """
    w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    # Clamp to >= 1: for frames narrower than 200px, int(w / 200) is 0 and
    # a slice step of 0 raises ValueError.
    downscale_factor = max(1, int(w / 200))
    last_hsv = None
    first = 0
    curr = 0
    while True:
        ret, im = cap.read()
        if not ret:
            break
        curr_hsv = im[::downscale_factor, ::downscale_factor]
        curr_hsv = cv2.cvtColor(curr_hsv, cv2.COLOR_BGR2HSV)
        # Widen to int32 so the subtraction below cannot wrap around uint8.
        curr_hsv = curr_hsv.astype('int32')
        if last_hsv is not None:
            delta_hsv = np.mean(np.abs(curr_hsv - last_hsv))
            if delta_hsv >= threshold and curr - first >= min_scene_len:
                yield first, curr, delta_hsv
                first = curr
        last_hsv = curr_hsv
        curr += 1
    yield first, curr, 0
fn = 'video.rmvb'
cap = cv2.VideoCapture(fn)
start = time.time()
# Print each detected scene and time the whole detection pass.
for first, last, delta_hsv in scenedetect(cap):
    print(first, last, delta_hsv)
print(time.time() - start)
cap.release()
| 24.146341 | 72 | 0.614141 | import sys
import time
import cv2
import numpy as np
def scenedetect(cap, threshold=30, min_scene_len=15):
    """Yield (first_frame, last_frame, delta_hsv) scene cuts from *cap*.

    NOTE(review): int(w / 200) is 0 for frames narrower than 200px, which
    makes the slice step below raise ValueError.
    """
    w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    downscale_factor = int(w / 200)
    last_hsv = None
    first = 0
    curr = 0
    while True:
        ret, im = cap.read()
        if not ret:
            break
        curr_hsv = im[::downscale_factor, ::downscale_factor]
        curr_hsv = cv2.cvtColor(curr_hsv, cv2.COLOR_BGR2HSV)
        # int32 avoids uint8 wrap-around in the subtraction below.
        curr_hsv = curr_hsv.astype('int32')
        if last_hsv is not None:
            delta_hsv = np.mean(np.abs(curr_hsv - last_hsv))
            if delta_hsv >= threshold and curr - first >= min_scene_len:
                yield first, curr, delta_hsv
                first = curr
        last_hsv = curr_hsv
        curr += 1
    yield first, curr, 0
fn = 'video.rmvb'
cap = cv2.VideoCapture(fn)
start = time.time()
# Print each detected scene and time the whole detection pass.
for first, last, delta_hsv in scenedetect(cap):
    print(first, last, delta_hsv)
print(time.time() - start)
cap.release()
| true | true |
f727103bf36fa5841b9d61da58cc4ea81dc4118e | 4,404 | py | Python | desertbot/modules/admin/Ignore.py | Helle-Daryd/DesertBot | 0b497db135a4c08dfbdb59108f830ba12fdc6465 | [
"MIT",
"BSD-3-Clause"
] | 7 | 2018-03-20T17:10:10.000Z | 2021-11-17T18:58:04.000Z | desertbot/modules/admin/Ignore.py | Helle-Daryd/DesertBot | 0b497db135a4c08dfbdb59108f830ba12fdc6465 | [
"MIT",
"BSD-3-Clause"
] | 109 | 2015-08-20T13:16:35.000Z | 2022-01-21T19:40:35.000Z | desertbot/modules/admin/Ignore.py | Helle-Daryd/DesertBot | 0b497db135a4c08dfbdb59108f830ba12fdc6465 | [
"MIT",
"BSD-3-Clause"
] | 7 | 2018-03-29T05:55:01.000Z | 2021-02-05T19:19:39.000Z | """
Created on Feb 09, 2018
@author: StarlitGhost
"""
import re
from collections import OrderedDict
from twisted.plugin import IPlugin
from zope.interface import implementer
from desertbot.moduleinterface import IModule
from desertbot.modules.commandinterface import BotCommand, admin
from desertbot.response import IRCResponse
@implementer(IPlugin, IModule)
class Ignore(BotCommand):
    """Admin command that manages the bot's ignored-users list.

    Subcommand docstrings double as user-facing help text: help() reads
    them back through __doc__ at runtime.
    """
    def triggers(self):
        # Command word(s) that invoke this module.
        return ['ignore']
    @admin("Only my admins may add new ignores!")
    def _add(self, message):
        """add <nick/full hostmask> - adds the specified user to the ignored list.
        You can list multiple users to add them all at once.
        Nick alone will be converted to a glob hostmask, eg: *!user@host"""
        if len(message.parameterList) < 2:
            return IRCResponse("You didn't give me a user to ignore!", message.replyTo)
        for ignore in message.parameterList[1:]:
            # In a channel, expand a known nick to a *!user@host glob.
            # NOTE(review): in a private message (replyTo not a channel) the
            # bare nick is stored unexpanded -- confirm that is intended.
            if message.replyTo in self.bot.channels:
                if ignore in self.bot.channels[message.replyTo].users:
                    user = self.bot.channels[message.replyTo].users[ignore]
                    ignore = '*!{}@{}'.format(user.nick, user.host)
            # Config is re-read and re-written once per entry.
            ignores = self.bot.config.getWithDefault('ignored', [])
            ignores.append(ignore)
            self.bot.config['ignored'] = ignores
            self.bot.config.writeConfig()
        return IRCResponse("Now ignoring specified users!", message.replyTo)
    @admin("Only my admins may remove ignores!")
    def _del(self, message):
        """del <full hostmask> - removes the specified user from the ignored list.
        You can list multiple users to remove them all at once."""
        if len(message.parameterList) < 2:
            return IRCResponse("You didn't give me a user to unignore!", message.replyTo)
        deleted = []
        skipped = []
        ignores = self.bot.config.getWithDefault('ignored', [])
        for unignore in message.parameterList[1:]:
            # Expand known nicks to hostmask globs, mirroring _add.
            if message.replyTo in self.bot.channels:
                if unignore in self.bot.channels[message.replyTo].users:
                    user = self.bot.channels[message.replyTo].users[unignore]
                    unignore = '*!{}@{}'.format(user.nick, user.host)
            if unignore not in ignores:
                skipped.append(unignore)
                continue
            ignores.remove(unignore)
            deleted.append(unignore)
        self.bot.config['ignored'] = ignores
        self.bot.config.writeConfig()
        return IRCResponse("Removed '{}' from ignored list, {} skipped"
                           .format(', '.join(deleted), len(skipped)), message.replyTo)
    def _list(self, message):
        """list - lists all ignored users"""
        ignores = self.bot.config.getWithDefault('ignored', [])
        return IRCResponse("Ignored Users: {}".format(', '.join(ignores)), message.replyTo)
    # Dispatch table mapping subcommand names to their handlers.
    subCommands = OrderedDict([
        ('add', _add),
        ('del', _del),
        ('list', _list)])
    def help(self, query) -> str:
        # Per-subcommand help comes straight from the handler's docstring,
        # with whitespace collapsed.
        if len(query) > 1:
            subCommand = query[1].lower()
            if subCommand in self.subCommands:
                return ('{1}ignore {0}'
                        .format(re.sub(r"\s+", " ", self.subCommands[subCommand].__doc__),
                                self.bot.commandChar))
            else:
                return self._unrecognizedSubcommand(subCommand)
        else:
            return self._helpText()
    def _unrecognizedSubcommand(self, subCommand):
        # Error text listing the valid subcommands.
        return ("unrecognized subcommand '{}', "
                "available subcommands for ignore are: {}"
                .format(subCommand, ', '.join(self.subCommands)))
    def _helpText(self):
        # Top-level help for the ignore command.
        return ("{1}ignore ({0})"
                " - manages ignored users."
                " Use '{1}help ignore <subcommand> for subcommand help."
                .format('/'.join(self.subCommands), self.bot.commandChar))
    def execute(self, message):
        # Route to the requested subcommand, or show help when absent/unknown.
        if len(message.parameterList) > 0:
            subCommand = message.parameterList[0].lower()
            if subCommand not in self.subCommands:
                return IRCResponse(self._unrecognizedSubcommand(subCommand), message.replyTo)
            return self.subCommands[subCommand](self, message)
        else:
            return IRCResponse(self._helpText(), message.replyTo)
# Module instance picked up by the twisted plugin loader.
ignore = Ignore()
| 37.965517 | 93 | 0.603542 | import re
from collections import OrderedDict
from twisted.plugin import IPlugin
from zope.interface import implementer
from desertbot.moduleinterface import IModule
from desertbot.modules.commandinterface import BotCommand, admin
from desertbot.response import IRCResponse
@implementer(IPlugin, IModule)
class Ignore(BotCommand):
    """Admin command that manages the bot's ignored-users list."""
    def triggers(self):
        # Command word(s) that invoke this module.
        return ['ignore']
    @admin("Only my admins may add new ignores!")
    def _add(self, message):
        """add <nick/full hostmask> - adds the specified user to the ignored list.
        You can list multiple users to add them all at once.
        Nick alone will be converted to a glob hostmask, eg: *!user@host"""
        if len(message.parameterList) < 2:
            return IRCResponse("You didn't give me a user to ignore!", message.replyTo)
        for ignore in message.parameterList[1:]:
            # In a channel, expand a known nick to a *!user@host glob.
            if message.replyTo in self.bot.channels:
                if ignore in self.bot.channels[message.replyTo].users:
                    user = self.bot.channels[message.replyTo].users[ignore]
                    ignore = '*!{}@{}'.format(user.nick, user.host)
            ignores = self.bot.config.getWithDefault('ignored', [])
            ignores.append(ignore)
            self.bot.config['ignored'] = ignores
            self.bot.config.writeConfig()
        return IRCResponse("Now ignoring specified users!", message.replyTo)
    @admin("Only my admins may remove ignores!")
    def _del(self, message):
        """del <full hostmask> - removes the specified user from the ignored list.
        You can list multiple users to remove them all at once."""
        if len(message.parameterList) < 2:
            return IRCResponse("You didn't give me a user to unignore!", message.replyTo)
        deleted = []
        skipped = []
        ignores = self.bot.config.getWithDefault('ignored', [])
        for unignore in message.parameterList[1:]:
            if message.replyTo in self.bot.channels:
                if unignore in self.bot.channels[message.replyTo].users:
                    user = self.bot.channels[message.replyTo].users[unignore]
                    unignore = '*!{}@{}'.format(user.nick, user.host)
            if unignore not in ignores:
                skipped.append(unignore)
                continue
            ignores.remove(unignore)
            deleted.append(unignore)
        self.bot.config['ignored'] = ignores
        self.bot.config.writeConfig()
        return IRCResponse("Removed '{}' from ignored list, {} skipped"
                           .format(', '.join(deleted), len(skipped)), message.replyTo)
    def _list(self, message):
        """list - lists all ignored users"""
        ignores = self.bot.config.getWithDefault('ignored', [])
        return IRCResponse("Ignored Users: {}".format(', '.join(ignores)), message.replyTo)
    # Dispatch table mapping subcommand names to their handlers.  NOTE:
    # help() below renders each handler's __doc__, so the docstrings above
    # are load-bearing (without them re.sub would receive None).
    subCommands = OrderedDict([
        ('add', _add),
        ('del', _del),
        ('list', _list)])
    def help(self, query) -> str:
        """Render help text for the command or one of its subcommands."""
        if len(query) > 1:
            subCommand = query[1].lower()
            if subCommand in self.subCommands:
                return ('{1}ignore {0}'
                        .format(re.sub(r"\s+", " ", self.subCommands[subCommand].__doc__),
                                self.bot.commandChar))
            else:
                return self._unrecognizedSubcommand(subCommand)
        else:
            return self._helpText()
    def _unrecognizedSubcommand(self, subCommand):
        """Error text listing the valid subcommands."""
        return ("unrecognized subcommand '{}', "
                "available subcommands for ignore are: {}"
                .format(subCommand, ', '.join(self.subCommands)))
    def _helpText(self):
        """Top-level help for the ignore command."""
        return ("{1}ignore ({0})"
                " - manages ignored users."
                " Use '{1}help ignore <subcommand> for subcommand help."
                .format('/'.join(self.subCommands), self.bot.commandChar))
    def execute(self, message):
        """Route to the requested subcommand, or show help when absent/unknown."""
        if len(message.parameterList) > 0:
            subCommand = message.parameterList[0].lower()
            if subCommand not in self.subCommands:
                return IRCResponse(self._unrecognizedSubcommand(subCommand), message.replyTo)
            return self.subCommands[subCommand](self, message)
        else:
            return IRCResponse(self._helpText(), message.replyTo)
# Module instance picked up by the twisted plugin loader.
ignore = Ignore()
| true | true |
f727105123cecc3f0975d6ac12017569a168ee54 | 3,444 | py | Python | tests/ut/python/parallel/test_dropout_do_mask.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | [
"Apache-2.0"
] | 55 | 2020-12-17T10:26:06.000Z | 2022-03-28T07:18:26.000Z | tests/ut/python/parallel/test_dropout_do_mask.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | null | null | null | tests/ut/python/parallel/test_dropout_do_mask.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 14 | 2021-01-29T02:39:47.000Z | 2022-03-23T05:00:26.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.common.api import _executor
from mindspore.nn import Cell, TrainOneStepCell, Momentum
from mindspore.ops import operations as P
class Net(Cell):
    """Mul -> DropoutGenMask/DropoutDoMask -> Mul network used to exercise
    DropoutDoMask sharding strategies in the parallel tests below."""
    def __init__(self, mul_weight, strategy1=None, strategy2=None):
        super().__init__()
        # strategy1 shards both Mul ops; strategy2 shards DropoutDoMask.
        self.mul = P.Mul().shard(strategy1)
        self.mul2 = P.Mul().shard(strategy1)
        self.dropout_do_mask = P.DropoutDoMask().shard(strategy2)
        self.dropout_gen_mask = P.DropoutGenMask()
        self.get_shape = P.Shape()
        self.cast = P.Cast()
        self.mul_weight = Parameter(mul_weight, "w1")
        self.mul_weight2 = Parameter(mul_weight, "w2")
        self.keep_prob = Tensor(0.9)
    def construct(self, x, b):
        out = self.mul(x, self.mul_weight)
        shape = self.get_shape(out)
        dtype = P.DType()(out)
        # keep_prob is cast to the activation dtype before mask generation.
        keep_prob = self.cast(self.keep_prob, dtype)
        mask = self.dropout_gen_mask(shape, keep_prob)
        out = self.dropout_do_mask(out, mask, keep_prob)
        out = self.mul2(out, self.mul_weight2)
        return out
# Shared test fixtures: input, weight and label tensors of matching shape.
_x = Tensor(np.ones([128, 64]), dtype=ms.float32)
_w1 = Tensor(np.ones([128, 64]), dtype=ms.float32)
_b = Tensor(np.ones([128, 64]), dtype=ms.float32)
def compile_net(net):
    """Wrap *net* in a training cell, compile it, then reset the parallel context."""
    opt = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
    wrapped = TrainOneStepCell(net, opt)
    wrapped.set_auto_parallel()
    wrapped.set_train()
    _executor.compile(wrapped, _x, _b)
    context.reset_auto_parallel_context()
def test_dropout_do_mask_data_parallel():
    """Data-parallel sharding: split the batch dimension across 16 devices."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    mul_strategy = ((16, 1), (16, 1))
    mask_strategy = ((16, 1),)
    compile_net(Net(_w1, mul_strategy, mask_strategy))
def test_dropout_do_mask_model_parallel():
    """Model-parallel sharding: split the feature dimension across 16 devices."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    mul_strategy = ((1, 16), (1, 16))
    mask_strategy = ((1, 16),)
    compile_net(Net(_w1, mul_strategy, mask_strategy))
def test_dropout_do_mask_hybrid_parallel():
    """Hybrid sharding: 4-way split on both dimensions."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    mul_strategy = ((4, 4), (4, 4))
    mask_strategy = ((4, 4),)
    compile_net(Net(_w1, mul_strategy, mask_strategy))
def test_dropout_do_mask_auto_parallel():
    """No explicit strategies: let auto_parallel search the sharding."""
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16, global_rank=0)
    net = Net(_w1)
    compile_net(net)
def test_dropout_do_mask_repeat_calc():
    """DropoutDoMask uses only 8 of 16 devices, forcing repeated calculation."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    strategy1 = ((4, 4), (4, 4))
    strategy2 = ((2, 4),)
    net = Net(_w1, strategy1, strategy2)
    compile_net(net)
| 35.142857 | 103 | 0.707027 |
import numpy as np
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.common.api import _executor
from mindspore.nn import Cell, TrainOneStepCell, Momentum
from mindspore.ops import operations as P
class Net(Cell):
def __init__(self, mul_weight, strategy1=None, strategy2=None):
super().__init__()
self.mul = P.Mul().shard(strategy1)
self.mul2 = P.Mul().shard(strategy1)
self.dropout_do_mask = P.DropoutDoMask().shard(strategy2)
self.dropout_gen_mask = P.DropoutGenMask()
self.get_shape = P.Shape()
self.cast = P.Cast()
self.mul_weight = Parameter(mul_weight, "w1")
self.mul_weight2 = Parameter(mul_weight, "w2")
self.keep_prob = Tensor(0.9)
def construct(self, x, b):
out = self.mul(x, self.mul_weight)
shape = self.get_shape(out)
dtype = P.DType()(out)
keep_prob = self.cast(self.keep_prob, dtype)
mask = self.dropout_gen_mask(shape, keep_prob)
out = self.dropout_do_mask(out, mask, keep_prob)
out = self.mul2(out, self.mul_weight2)
return out
_x = Tensor(np.ones([128, 64]), dtype=ms.float32)
_w1 = Tensor(np.ones([128, 64]), dtype=ms.float32)
_b = Tensor(np.ones([128, 64]), dtype=ms.float32)
def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()
def test_dropout_do_mask_data_parallel():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
strategy1 = ((16, 1), (16, 1))
strategy2 = ((16, 1),)
net = Net(_w1, strategy1, strategy2)
compile_net(net)
def test_dropout_do_mask_model_parallel():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
strategy1 = ((1, 16), (1, 16))
strategy2 = ((1, 16),)
net = Net(_w1, strategy1, strategy2)
compile_net(net)
def test_dropout_do_mask_hybrid_parallel():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
strategy1 = ((4, 4), (4, 4))
strategy2 = ((4, 4),)
net = Net(_w1, strategy1, strategy2)
compile_net(net)
def test_dropout_do_mask_auto_parallel():
context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16, global_rank=0)
net = Net(_w1)
compile_net(net)
def test_dropout_do_mask_repeat_calc():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
strategy1 = ((4, 4), (4, 4))
strategy2 = ((2, 4),)
net = Net(_w1, strategy1, strategy2)
compile_net(net)
| true | true |
f72710d9f65e5ca4beff6e82aed8a822c535c132 | 5,172 | py | Python | tests/unit/test_charm.py | gabrielcocenza/prometheus-bind-exporter-operator | 8998f049f68e72a71b7d97949d9a0e1dc57d8113 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_charm.py | gabrielcocenza/prometheus-bind-exporter-operator | 8998f049f68e72a71b7d97949d9a0e1dc57d8113 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_charm.py | gabrielcocenza/prometheus-bind-exporter-operator | 8998f049f68e72a71b7d97949d9a0e1dc57d8113 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Unicorn
# See LICENSE file for licensing details.
#
# Learn more about testing at: https://juju.is/docs/sdk/testing
import unittest
from unittest import mock
import charm
from ops.model import Unit
from ops.testing import Harness
class TestCharm(unittest.TestCase):
    """Base test case providing the shared charm-status assertion."""
    def assert_active_unit(self, unit: Unit):
        """Fail unless *unit* is in ActiveStatus with the ready message."""
        self.assertEqual(
            (unit.status.name, unit.status.message),
            ("active", "Unit is ready"),
        )
class TestInitCharm(TestCharm):
    def test_init(self):
        """Test initialization of charm.

        A freshly begun harness should come up active with no config set.
        """
        # NOTE(review): unlike TestCharmHooks.setUp, no harness.cleanup is
        # registered here -- presumably harmless for a lone begin(); confirm.
        harness = Harness(charm.PrometheusBindExporterOperatorCharm)
        harness.begin()
        self.assert_active_unit(harness.charm.unit)
class TestCharmHooks(TestCharm):
    """Hook-level tests; setUp mocks subprocess, binding and resource fetch."""
    def patch(self, obj, method):
        """Mock the method and auto-stop the patch at test teardown."""
        _patch = mock.patch.object(obj, method)
        mock_method = _patch.start()
        self.addCleanup(_patch.stop)
        return mock_method
    def setUp(self):
        """Build a harness with subprocess/network/resource access mocked out."""
        self.harness = Harness(charm.PrometheusBindExporterOperatorCharm)
        self.addCleanup(self.harness.cleanup)
        self.harness.begin()
        # mock subprocess
        self.mock_subprocess = self.patch(charm, "subprocess")
        # mock getting private address
        mock_get_binding = self.patch(self.harness.model, "get_binding")
        mock_get_binding.return_value = self.mock_binding = mock.MagicMock()
        self.mock_binding.network.bind_address = "127.0.0.1"
        # mock fetch resource
        self.mock_fetch = self.patch(self.harness.model.resources, "fetch")
        self.mock_fetch.return_value = "prometheus-bind-exporter.snap"
    def _add_bind_exporter_relation(self):
        """Help function to add bind-exporter relation."""
        relation_id = self.harness.add_relation("bind-exporter", "prometheus2")
        self.harness.add_relation_unit(relation_id, "prometheus2/0")
        return relation_id
    def test_manage_prometheus_bind_exporter_service(self):
        """Test manage the prometheus-bind-exporter snap."""
        self.harness.charm._manage_prometheus_bind_exporter_service()
        # Defaults: port 9119 and the full stats-groups set.
        self.mock_subprocess.check_call.assert_called_once_with(
            ["snap", "set", "prometheus-bind-exporter",
             "web.listen-address=127.0.0.1:9119",
             "web.stats-groups=server,view,tasks"])
    def test_private_address(self):
        """Test help function to get private address."""
        address = self.harness.charm.private_address
        self.assertEqual("127.0.0.1", address)
    def test_on_install(self):
        """Test install hook."""
        exp_call = mock.call(["snap", "install", "--dangerous",
                              "prometheus-bind-exporter.snap"])
        self.harness.charm.on.install.emit()
        self.mock_fetch.assert_called_once_with("prometheus-bind-exporter")
        self.assertIn(exp_call, self.mock_subprocess.check_call.mock_calls)
        self.assert_active_unit(self.harness.charm.unit)
    def test_on_config_changed(self):
        """Test config-changed hook."""
        # this will trigger self.harness.charm.on.config_changed.emit()
        self.harness.update_config({"exporter-listen-port": "9120",
                                    "exporter-stats-groups": "server"})
        self.assertEqual(self.harness.charm._stored.listen_port, "9120")
        self.assertEqual(self.harness.charm._stored.stats_groups, "server")
        self.mock_subprocess.check_call.assert_called_once_with(
            ["snap", "set", "prometheus-bind-exporter",
             "web.listen-address=127.0.0.1:9120",
             "web.stats-groups=server"])
        self.assert_active_unit(self.harness.charm.unit)
    def test_on_config_changed_with_bind_exporter_relation(self):
        """Test config-changed hook with existing bind-exporter relation."""
        relation_id = self._add_bind_exporter_relation()
        self.harness.update_config({"exporter-listen-port": "9120"})
        # The new port must be pushed into the relation data for Prometheus.
        relation_data = self.harness.get_relation_data(relation_id, self.harness.charm.unit.name)
        self.assertDictEqual(relation_data, {"hostname": "127.0.0.1", "port": "9120"})
        self.assert_active_unit(self.harness.charm.unit)
    def test_on_bind_exporter_relation_changed(self):
        """Test Prometheus relation changed hook."""
        relation_id = self._add_bind_exporter_relation()
        # update relation -> trigger bind_exporter_relation_changed hook
        self.harness.update_relation_data(relation_id, "prometheus2/0", {})
        relation_data = self.harness.get_relation_data(relation_id, self.harness.charm.unit.name)
        self.assertDictEqual(relation_data, {"hostname": "127.0.0.1", "port": "9119"})
        self.assert_active_unit(self.harness.charm.unit)
    def test_on_prometheus_relation_departed(self):
        """Test Prometheus relation departed hook."""
        relation_id = self._add_bind_exporter_relation()
        # remove relation -> trigger bind_exporter_departed hook
        self.harness.remove_relation(relation_id)
        self.assertEqual(0, len(self.harness.model.relations.get("bind-exporter")))
        self.assert_active_unit(self.harness.charm.unit)
| 42.04878 | 97 | 0.687355 |
import unittest
from unittest import mock
import charm
from ops.model import Unit
from ops.testing import Harness
class TestCharm(unittest.TestCase):
def assert_active_unit(self, unit: Unit):
self.assertEqual(unit.status.name, "active")
self.assertEqual(unit.status.message, "Unit is ready")
class TestInitCharm(TestCharm):
def test_init(self):
harness = Harness(charm.PrometheusBindExporterOperatorCharm)
harness.begin()
self.assert_active_unit(harness.charm.unit)
class TestCharmHooks(TestCharm):
def patch(self, obj, method):
_patch = mock.patch.object(obj, method)
mock_method = _patch.start()
self.addCleanup(_patch.stop)
return mock_method
def setUp(self):
self.harness = Harness(charm.PrometheusBindExporterOperatorCharm)
self.addCleanup(self.harness.cleanup)
self.harness.begin()
self.mock_subprocess = self.patch(charm, "subprocess")
mock_get_binding = self.patch(self.harness.model, "get_binding")
mock_get_binding.return_value = self.mock_binding = mock.MagicMock()
self.mock_binding.network.bind_address = "127.0.0.1"
self.mock_fetch = self.patch(self.harness.model.resources, "fetch")
self.mock_fetch.return_value = "prometheus-bind-exporter.snap"
def _add_bind_exporter_relation(self):
relation_id = self.harness.add_relation("bind-exporter", "prometheus2")
self.harness.add_relation_unit(relation_id, "prometheus2/0")
return relation_id
def test_manage_prometheus_bind_exporter_service(self):
self.harness.charm._manage_prometheus_bind_exporter_service()
self.mock_subprocess.check_call.assert_called_once_with(
["snap", "set", "prometheus-bind-exporter",
"web.listen-address=127.0.0.1:9119",
"web.stats-groups=server,view,tasks"])
def test_private_address(self):
address = self.harness.charm.private_address
self.assertEqual("127.0.0.1", address)
def test_on_install(self):
exp_call = mock.call(["snap", "install", "--dangerous",
"prometheus-bind-exporter.snap"])
self.harness.charm.on.install.emit()
self.mock_fetch.assert_called_once_with("prometheus-bind-exporter")
self.assertIn(exp_call, self.mock_subprocess.check_call.mock_calls)
self.assert_active_unit(self.harness.charm.unit)
def test_on_config_changed(self):
self.harness.update_config({"exporter-listen-port": "9120",
"exporter-stats-groups": "server"})
self.assertEqual(self.harness.charm._stored.listen_port, "9120")
self.assertEqual(self.harness.charm._stored.stats_groups, "server")
self.mock_subprocess.check_call.assert_called_once_with(
["snap", "set", "prometheus-bind-exporter",
"web.listen-address=127.0.0.1:9120",
"web.stats-groups=server"])
self.assert_active_unit(self.harness.charm.unit)
def test_on_config_changed_with_bind_exporter_relation(self):
relation_id = self._add_bind_exporter_relation()
self.harness.update_config({"exporter-listen-port": "9120"})
relation_data = self.harness.get_relation_data(relation_id, self.harness.charm.unit.name)
self.assertDictEqual(relation_data, {"hostname": "127.0.0.1", "port": "9120"})
self.assert_active_unit(self.harness.charm.unit)
def test_on_bind_exporter_relation_changed(self):
relation_id = self._add_bind_exporter_relation()
self.harness.update_relation_data(relation_id, "prometheus2/0", {})
relation_data = self.harness.get_relation_data(relation_id, self.harness.charm.unit.name)
self.assertDictEqual(relation_data, {"hostname": "127.0.0.1", "port": "9119"})
self.assert_active_unit(self.harness.charm.unit)
def test_on_prometheus_relation_departed(self):
relation_id = self._add_bind_exporter_relation()
self.harness.remove_relation(relation_id)
self.assertEqual(0, len(self.harness.model.relations.get("bind-exporter")))
self.assert_active_unit(self.harness.charm.unit)
| true | true |
f727121629beee502e1de4f5eae42d70c7b1b0db | 12,344 | py | Python | tensorflow/python/keras/layers/preprocessing/categorical.py | lightyang/tensorflow | 1a455a77d80fa788fd7963530dd130ad7d902226 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/layers/preprocessing/categorical.py | lightyang/tensorflow | 1a455a77d80fa788fd7963530dd130ad7d902226 | [
"Apache-2.0"
] | 2 | 2021-08-25T16:13:06.000Z | 2022-02-10T02:19:43.000Z | tensorflow/python/keras/layers/preprocessing/categorical.py | Hyperclaw79/tensorflow | 14c58e1d380b2001ffdf7ef782d44ad1a21f763c | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras categorical preprocessing layers."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
class CategoryLookup(Layer):
  """Category lookup layer.
  This layer looks up tokens (int or string) in a vocabulary table,
  and return their indices (int). It converts a sequence of int or string to a
  sequence of int.
  Attributes:
    max_tokens: The maximum size of the vocabulary for this layer. If None,
      there is no cap on the size of the vocabulary. This is used when `adapt`
      is called.
    num_oov_tokens: Non-negative integer. The number of out-of-vocab tokens. All
      out-of-vocab inputs will be assigned IDs in the range of [0,
      num_oov_tokens) based on a hash.
    vocabulary: The vocabulary to lookup the input. If it is a file, it
      represents the source vocab file; If it is a list/tuple, it represents the
      source vocab list. If it is None, the vocabulary can later be set.
    name: Name to give to the layer.
    **kwargs: Keyword arguments to construct a layer.
  Input shape: A string or int tensor of shape `[batch_size, d1, ..., dm]`
  Output shape: An int tensor of shape `[batch_size, d1, .., dm]`
  Example: Consider a batch of a single input sample, `[["a", "c", "d", "a",
    "x"]]`. Let's say the vocabulary is `["a", "b", "c", "d"]` and a single OOV
    token is used (`num_oov_tokens=1`). Then the corresponding output is `[[1,
    3, 4, 1, 0]]`. 0 stands for an OOV token.
  """
  def __init__(self,
               max_tokens=None,
               num_oov_tokens=1,
               vocabulary=None,
               name=None,
               **kwargs):
    # `adapt`/capped vocabularies are not implemented yet, so a concrete
    # vocabulary (list/tuple/array or file path) is currently mandatory.
    if max_tokens is not None:
      raise ValueError('`max_tokens` and `adapt` is not supported yet.')
    if vocabulary is None:
      raise ValueError('for now, you must pass a `vocabulary` argument')
    self.max_tokens = max_tokens
    self.num_oov_tokens = num_oov_tokens
    self.vocabulary = vocabulary
    super(CategoryLookup, self).__init__(name=name, **kwargs)
  def __call__(self, inputs, *args, **kwargs):
    """Record the input dtype before Layer.__call__ triggers build().

    build() needs `self._input_dtype` to construct the lookup table with
    matching key dtype.
    """
    if isinstance(inputs, (np.ndarray, float, int)):
      inputs = ops.convert_to_tensor(inputs)
    self._input_dtype = inputs.dtype
    return super(CategoryLookup, self).__call__(inputs, *args, **kwargs)
  def build(self, input_shape):
    """Create the static vocabulary table (list- or file-backed)."""
    # categorical with vocabulary list.
    if isinstance(self.vocabulary, (tuple, list, np.ndarray)):
      self.table = lookup_ops.index_table_from_tensor(
          vocabulary_list=self.vocabulary,
          num_oov_buckets=self.num_oov_tokens,
          dtype=self._input_dtype)
    # categorical with vocabulary file.
    elif self.vocabulary:
      self.table = lookup_ops.index_table_from_file(
          vocabulary_file=self.vocabulary,
          num_oov_buckets=self.num_oov_tokens,
          key_dtype=self._input_dtype)
  def call(self, inputs):
    # Elementwise token -> index lookup; OOV tokens hash into the OOV range.
    return self.table.lookup(inputs)
  def compute_output_shape(self, input_shape):
    # Lookup is elementwise, so the shape is unchanged.
    return input_shape
  def compute_output_signature(self, input_spec):
    """Map the input spec to an int64 spec, preserving sparseness."""
    output_shape = self.compute_output_shape(input_spec.shape.as_list())
    output_dtype = dtypes.int64
    if isinstance(input_spec, sparse_tensor.SparseTensorSpec):
      return sparse_tensor.SparseTensorSpec(
          shape=output_shape, dtype=output_dtype)
    else:
      return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)
  def get_config(self):
    """Return the constructor arguments merged over the base layer config."""
    config = {
        'max_tokens': self.max_tokens,
        'num_oov_tokens': self.num_oov_tokens,
        'vocabulary': self.vocabulary
    }
    base_config = super(CategoryLookup, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class CategoryCrossing(Layer):
  """Category crossing layer.
  This layer transforms multiple categorical inputs to categorical outputs
  by Cartesian product, and hash the output if necessary. Without hashing
  (`num_bins=None`) the output dtype is string, with hashing the output dtype
  is int64.
  Arguments:
    depth: depth of input crossing. By default None, all inputs are crossed into
      one output. It can also be an int or tuple/list of ints. Passing an
      integer will create combinations of crossed outputs with depth up to that
      integer, i.e., [1, 2, ..., `depth`), and passing a tuple of integers will
      create crossed outputs with depth for the specified values in the tuple,
      i.e., `depth`=(N1, N2) will create all possible crossed outputs with depth
      equal to N1 or N2. Passing `None` means a single crossed output with all
      inputs. For example, with inputs `a`, `b` and `c`, `depth=2` means the
      output will be [a;b;c;cross(a, b);cross(bc);cross(ca)].
    num_bins: Number of hash bins. By default None, no hashing is performed.
    name: Name to give to the layer.
    **kwargs: Keyword arguments to construct a layer.
  Input shape: a list of string or int tensors or sparse tensors of shape
    `[batch_size, d1, ..., dm]`
  Output shape: a single string or int tensor or sparse tensor of shape
    `[batch_size, d1, ..., dm]`
  Example: (`depth`=None)
    If the layer receives three inputs:
    `a=[[1], [4]]`, `b=[[2], [5]]`, `c=[[3], [6]]`
    the output will be a string tensor if not hashed:
    `[[b'1_X_2_X_3'], [b'4_X_5_X_6']]`
    the output will be an int64 tensor if hashed:
    `[[hash(b'1_X_2_X_3')], [hash(b'4_X_5_X_6')]]`
  Example: (`depth` is an integer)
    With the same input above, and if `depth`=2,
    the output will be a list of 6 string tensors if not hashed:
    `[[b'1'], [b'4']]`
    `[[b'2'], [b'5']]`
    `[[b'3'], [b'6']]`
    `[[b'1_X_2'], [b'4_X_5']]`,
    `[[b'2_X_3'], [b'5_X_6']]`,
    `[[b'3_X_1'], [b'6_X_4']]`
    the output will be a list of 6 int64 tensors if hashed:
    `[[hash(b'1')], [hash(b'4')]]`
    `[[hash(b'2')], [hash(b'5')]]`
    `[[hash(b'3')], [hash(b'6')]]`
    `[[hash(b'1_X_2')], [hash(b'4_X_5')]]`,
    `[[hash(b'2_X_3')], [hash(b'5_X_6')]]`,
    `[[hash(b'3_X_1')], [hash(b'6_X_4')]]`
  Example: (`depth` is a tuple/list of integers)
    With the same input above, and if `depth`=(2, 3)
    the output will be a list of 4 string tensors if not hashed:
    `[[b'1_X_2'], [b'4_X_5']]`,
    `[[b'2_X_3'], [b'5_X_6']]`,
    `[[b'3_X_1'], [b'6_X_4']]`,
    `[[b'1_X_2_X_3'], [b'4_X_5_X_6']]`
    the output will be a list of 4 int64 tensors if hashed:
    `[[hash(b'1_X_2')], [hash(b'4_X_5')]]`,
    `[[hash(b'2_X_3')], [hash(b'5_X_6')]]`,
    `[[hash(b'3_X_1')], [hash(b'6_X_4')]]`,
    `[[hash(b'1_X_2_X_3')], [hash(b'4_X_5_X_6')]]`
  """
  def __init__(self, depth=None, num_bins=None, name=None, **kwargs):
    # TODO(tanzheny): Add support for depth.
    # TODO(tanzheny): Consider making seperator configurable.
    if depth is not None:
      raise NotImplementedError('`depth` is not supported yet.')
    self.num_bins = num_bins
    self.depth = depth
    super(CategoryCrossing, self).__init__(name=name, **kwargs)
  def call(self, inputs):
    """Cross the inputs; return dense output only if all inputs are dense."""
    # Generator (not a list) so the check short-circuits on the first sparse
    # input instead of materializing every isinstance result.
    sparse_output = any(
        isinstance(inp, sparse_tensor.SparseTensor) for inp in inputs)
    if self.num_bins is not None:
      output = sparse_ops.sparse_cross_hashed(
          inputs, num_buckets=self.num_bins)
    else:
      output = sparse_ops.sparse_cross(inputs)
    if not sparse_output:
      # sparse_cross always produces a SparseTensor; densify when every
      # input was dense so the output kind matches the inputs.
      output = sparse_ops.sparse_tensor_to_dense(output)
    return output
  def compute_output_shape(self, input_shape):
    """Validate that every input is rank 2 and return [batch_size, None]."""
    if not isinstance(input_shape, (tuple, list)):
      raise ValueError('A `CategoryCrossing` layer should be called '
                       'on a list of inputs.')
    input_shapes = input_shape
    batch_size = None
    for inp_shape in input_shapes:
      inp_tensor_shape = tensor_shape.TensorShape(inp_shape).as_list()
      if len(inp_tensor_shape) != 2:
        raise ValueError('Inputs must be rank 2, get {}'.format(input_shapes))
      if batch_size is None:
        batch_size = inp_tensor_shape[0]
    # The second dimension is dynamic based on inputs.
    output_shape = [batch_size, None]
    return tensor_shape.TensorShape(output_shape)
  def compute_output_signature(self, input_spec):
    """Crossed output is always sparse; int64 when hashed, string otherwise."""
    input_shapes = [x.shape for x in input_spec]
    output_shape = self.compute_output_shape(input_shapes)
    output_dtype = dtypes.int64 if self.num_bins else dtypes.string
    return sparse_tensor.SparseTensorSpec(
        shape=output_shape, dtype=output_dtype)
  def get_config(self):
    """Return the constructor arguments merged over the base layer config."""
    config = {'depth': self.depth, 'num_bins': self.num_bins}
    base_config = super(CategoryCrossing, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Hashing(Layer):
  """Hashes categorical inputs into a fixed number of bins ("hashing trick").

  Maps a string tensor (dense or sparse) of shape `[batch_size, d1, ..., dm]`
  to an int64 tensor of the same shape whose values are bucket indices in
  `[0, num_bins)`. The hash is TensorFlow's stable fingerprint, so results
  are deterministic and consistent across platforms.

  Usage:
  ```python
  layer = Hashing(num_bins=3)
  inp = np.asarray([['A', 'B'], ['C', 'A']])
  layer(inputs)
  [[0, 0], [1, 0]]
  ```

  Arguments:
    num_bins: Number of hash bins.
    name: Name to give to the layer.
    **kwargs: Keyword arguments to construct a layer.
  """
  def __init__(self, num_bins, name=None, **kwargs):
    # TODO(tanzheny): consider adding strong hash variant.
    super(Hashing, self).__init__(name=name, **kwargs)
    self._num_bins = num_bins
  def call(self, inputs):
    # TODO(tanzheny): Add ragged support.
    # TODO(tanzheny): Add int support.
    if not isinstance(inputs, sparse_tensor.SparseTensor):
      # Dense path. string_to_hash_bucket_fast uses FarmHash as hash function.
      return string_ops.string_to_hash_bucket_fast(
          inputs, self._num_bins, name='lookup')
    # Sparse path: hash only the values; indices and shape stay untouched.
    hashed_values = string_ops.string_to_hash_bucket_fast(
        inputs.values, self._num_bins, name='lookup')
    return sparse_tensor.SparseTensor(
        indices=inputs.indices,
        values=hashed_values,
        dense_shape=inputs.dense_shape)
  def compute_output_shape(self, input_shape):
    # Hashing is elementwise, so the shape is unchanged.
    return input_shape
  def compute_output_signature(self, input_spec):
    """Map the input spec to an int64 spec, preserving sparseness."""
    out_shape = self.compute_output_shape(input_spec.shape.as_list())
    spec_cls = (sparse_tensor.SparseTensorSpec
                if isinstance(input_spec, sparse_tensor.SparseTensorSpec)
                else tensor_spec.TensorSpec)
    return spec_cls(shape=out_shape, dtype=dtypes.int64)
  def get_config(self):
    """Return the constructor arguments merged over the base layer config."""
    config = super(Hashing, self).get_config()
    config.update({'num_bins': self._num_bins})
    return config
| 40.208469 | 80 | 0.679439 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
class CategoryLookup(Layer):
def __init__(self,
max_tokens=None,
num_oov_tokens=1,
vocabulary=None,
name=None,
**kwargs):
if max_tokens is not None:
raise ValueError('`max_tokens` and `adapt` is not supported yet.')
if vocabulary is None:
raise ValueError('for now, you must pass a `vocabulary` argument')
self.max_tokens = max_tokens
self.num_oov_tokens = num_oov_tokens
self.vocabulary = vocabulary
super(CategoryLookup, self).__init__(name=name, **kwargs)
def __call__(self, inputs, *args, **kwargs):
if isinstance(inputs, (np.ndarray, float, int)):
inputs = ops.convert_to_tensor(inputs)
self._input_dtype = inputs.dtype
return super(CategoryLookup, self).__call__(inputs, *args, **kwargs)
def build(self, input_shape):
if isinstance(self.vocabulary, (tuple, list, np.ndarray)):
self.table = lookup_ops.index_table_from_tensor(
vocabulary_list=self.vocabulary,
num_oov_buckets=self.num_oov_tokens,
dtype=self._input_dtype)
elif self.vocabulary:
self.table = lookup_ops.index_table_from_file(
vocabulary_file=self.vocabulary,
num_oov_buckets=self.num_oov_tokens,
key_dtype=self._input_dtype)
def call(self, inputs):
return self.table.lookup(inputs)
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = dtypes.int64
if isinstance(input_spec, sparse_tensor.SparseTensorSpec):
return sparse_tensor.SparseTensorSpec(
shape=output_shape, dtype=output_dtype)
else:
return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)
def get_config(self):
config = {
'max_tokens': self.max_tokens,
'num_oov_tokens': self.num_oov_tokens,
'vocabulary': self.vocabulary
}
base_config = super(CategoryLookup, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class CategoryCrossing(Layer):
def __init__(self, depth=None, num_bins=None, name=None, **kwargs):
if depth is not None:
raise NotImplementedError('`depth` is not supported yet.')
self.num_bins = num_bins
self.depth = depth
super(CategoryCrossing, self).__init__(name=name, **kwargs)
def call(self, inputs):
sparse_output = False
if any([isinstance(inp, sparse_tensor.SparseTensor) for inp in inputs]):
sparse_output = True
if self.num_bins is not None:
output = sparse_ops.sparse_cross_hashed(
inputs, num_buckets=self.num_bins)
else:
output = sparse_ops.sparse_cross(inputs)
if not sparse_output:
output = sparse_ops.sparse_tensor_to_dense(output)
return output
def compute_output_shape(self, input_shape):
if not isinstance(input_shape, (tuple, list)):
raise ValueError('A `CategoryCrossing` layer should be called '
'on a list of inputs.')
input_shapes = input_shape
batch_size = None
for inp_shape in input_shapes:
inp_tensor_shape = tensor_shape.TensorShape(inp_shape).as_list()
if len(inp_tensor_shape) != 2:
raise ValueError('Inputs must be rank 2, get {}'.format(input_shapes))
if batch_size is None:
batch_size = inp_tensor_shape[0]
output_shape = [batch_size, None]
return tensor_shape.TensorShape(output_shape)
def compute_output_signature(self, input_spec):
input_shapes = [x.shape for x in input_spec]
output_shape = self.compute_output_shape(input_shapes)
output_dtype = dtypes.int64 if self.num_bins else dtypes.string
return sparse_tensor.SparseTensorSpec(
shape=output_shape, dtype=output_dtype)
def get_config(self):
config = {'depth': self.depth, 'num_bins': self.num_bins}
base_config = super(CategoryCrossing, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Hashing(Layer):
def __init__(self, num_bins, name=None, **kwargs):
self._num_bins = num_bins
super(Hashing, self).__init__(name=name, **kwargs)
def call(self, inputs):
if isinstance(inputs, sparse_tensor.SparseTensor):
sparse_values = inputs.values
sparse_hashed_values = string_ops.string_to_hash_bucket_fast(
sparse_values, self._num_bins, name='lookup')
return sparse_tensor.SparseTensor(
indices=inputs.indices,
values=sparse_hashed_values,
dense_shape=inputs.dense_shape)
return string_ops.string_to_hash_bucket_fast(
inputs, self._num_bins, name='lookup')
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = dtypes.int64
if isinstance(input_spec, sparse_tensor.SparseTensorSpec):
return sparse_tensor.SparseTensorSpec(
shape=output_shape, dtype=output_dtype)
else:
return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)
def get_config(self):
config = {'num_bins': self._num_bins}
base_config = super(Hashing, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| true | true |
f727139b39073fe544e6ea44332b015dc4cb68d8 | 7,589 | py | Python | applications/views.py | AndyUGA/ugahacks5 | 6a7787b50d9e8ea9685c3e36c38da6bc699bca77 | [
"MIT"
] | null | null | null | applications/views.py | AndyUGA/ugahacks5 | 6a7787b50d9e8ea9685c3e36c38da6bc699bca77 | [
"MIT"
] | null | null | null | applications/views.py | AndyUGA/ugahacks5 | 6a7787b50d9e8ea9685c3e36c38da6bc699bca77 | [
"MIT"
] | null | null | null | # Create your views here.
from __future__ import print_function
import logging
from datetime import timedelta
from django import http
from django.contrib import messages
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import ValidationError
from django.http import Http404, HttpResponseRedirect, JsonResponse, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from django.views import View
from app import slack
from app.slack import SlackInvitationException
from app.utils import reverse, hacker_tabs
from app.views import TabsView
from applications import models, emails, forms
from user.mixins import IsHackerMixin, is_hacker
def check_application_exists(user, uuid):
    """Raise Http404 unless *user* owns an application whose uuid matches *uuid*.

    Used as an ownership guard so a hacker can only act on their own
    application; *uuid* comes from the URL kwargs.
    """
    try:
        application = models.Application.objects.get(user=user)
    except models.Application.DoesNotExist:
        raise Http404
    # QuerySet.get() never returns None (it raises instead), so the old
    # `not application` check was dead code -- only the uuid must match.
    if uuid != application.uuid_str:
        raise Http404
class ConfirmApplication(IsHackerMixin, UserPassesTestMixin, View):
    """Confirm the logged-in hacker's invitation (GET with uuid in the URL).

    On success it sends the confirmation e-mail and, best effort, a Slack
    invite, then redirects to the dashboard.
    """
    def test_func(self):
        # Only the owner of the application identified by the url uuid passes.
        check_application_exists(self.request.user, self.kwargs.get('id', None))
        return True
    def get(self, request, *args, **kwargs):
        application = models.Application.objects.get(user=request.user)
        msg = None
        # Build the e-mail first, but only send it after confirm() succeeds.
        if application.can_confirm():
            msg = emails.create_confirmation_email(application, self.request)
        try:
            application.confirm()
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt. Any failure to confirm surfaces as a 404.
            raise Http404
        if msg:
            msg.send()
        try:
            slack.send_slack_invite(request.user.email)
        except SlackInvitationException as e:
            # Ignore if we can't send, it's only optional
            logging.error(e)
        return http.HttpResponseRedirect(reverse('dashboard'))
class CancelApplication(IsHackerMixin, UserPassesTestMixin, TabsView):
    """Let a hacker decline (cancel) their own invitation."""
    template_name = 'cancel.html'
    def test_func(self):
        # Only the owner of the application identified by the url uuid passes.
        check_application_exists(self.request.user, self.kwargs.get('id', None))
        return True
    def get_back_url(self):
        return reverse('dashboard')
    def get_context_data(self, **kwargs):
        """Expose the application and a human-readable error for states in
        which cancelling is not possible (already cancelled, expired, ...)."""
        context = super(CancelApplication, self).get_context_data(**kwargs)
        application = models.Application.objects.get(user=self.request.user)
        context.update({'application': application, })
        if application.status == models.APP_CANCELLED:
            context.update({'error': "Thank you for responding. We're sorry you won't be able to make it."
                                     " Hope to see you next edition!"
                            })
        elif application.status == models.APP_EXPIRED:
            context.update({'error': "Unfortunately your invite has expired."})
        elif not application.can_be_cancelled():
            # Hide the application so the template cannot render the cancel form.
            context.update({
                'error': "You found a glitch! You can't cancel this invitation. Is this the question for 42?",
                'application': None
            })
        return context
    def post(self, request, *args, **kwargs):
        """Attempt the cancellation; an invalid state transition is ignored."""
        application = models.Application.objects.get(user=self.request.user)
        try:
            application.cancel()
        except ValidationError:
            pass
        return http.HttpResponseRedirect(reverse('dashboard'))
def get_deadline(application):
last_updated = application.status_update_date
if application.status == models.APP_INVITED:
deadline = last_updated + timedelta(days=5)
else:
deadline = last_updated + timedelta(days=1)
return deadline
class HackerDashboard(IsHackerMixin, TabsView):
template_name = 'dashboard.html'
def get_current_tabs(self):
return hacker_tabs(self.request.user)
def get_context_data(self, **kwargs):
context = super(HackerDashboard, self).get_context_data(**kwargs)
try:
draft = models.DraftApplication.objects.get(user=self.request.user)
form = forms.ApplicationForm(instance=models.Application(**draft.get_dict()))
except:
form = forms.ApplicationForm()
context.update({'form': form})
try:
application = models.Application.objects.get(user=self.request.user)
deadline = get_deadline(application)
context.update({'invite_timeleft': deadline - timezone.now()})
except:
# We ignore this as we are okay if the user has not created an application yet
pass
return context
def post(self, request, *args, **kwargs):
new_application = True
try:
form = forms.ApplicationForm(request.POST, request.FILES, instance=request.user.application)
new_application = False
except:
form = forms.ApplicationForm(request.POST, request.FILES)
if form.is_valid():
application = form.save(commit=False)
application.user = request.user
application.save()
if new_application:
messages.success(request,
'We have now received your application. '
'Processing your application will take some time, so please be patient.')
else:
messages.success(request, 'Application changes saved successfully!')
return HttpResponseRedirect(reverse('root'))
else:
c = self.get_context_data()
c.update({'form': form})
return render(request, self.template_name, c)
class HackerApplication(IsHackerMixin, TabsView):
template_name = 'application.html'
def get_current_tabs(self):
return hacker_tabs(self.request.user)
def get_context_data(self, **kwargs):
context = super(HackerApplication, self).get_context_data(**kwargs)
application = get_object_or_404(models.Application, user=self.request.user)
deadline = get_deadline(application)
context.update(
{'invite_timeleft': deadline - timezone.now(), 'form': forms.ApplicationForm(instance=application)})
return context
def post(self, request, *args, **kwargs):
try:
form = forms.ApplicationForm(request.POST, request.FILES, instance=request.user.application)
except:
form = forms.ApplicationForm(request.POST, request.FILES)
if form.is_valid():
application = form.save(commit=False)
application.user = request.user
application.save()
messages.success(request, 'Application changes saved successfully!')
return HttpResponseRedirect(reverse('dashboard'))
else:
c = self.get_context_data()
c.update({'form': form})
return render(request, self.template_name, c)
@is_hacker
def save_draft(request):
d = models.DraftApplication()
d.user = request.user
form_keys = set(dict(forms.ApplicationForm().fields).keys())
valid_keys = set([field.name for field in models.Application()._meta.get_fields()])
d.save_dict(dict((k, v) for k, v in request.POST.items() if k in valid_keys.intersection(form_keys) and v))
d.save()
return JsonResponse({'saved': True})
def export_resume(request):
try:
response = HttpResponse(open("./files/resumes/resume_export.tar.gz", 'rb').read())
response['Content-Type'] = 'text/plain'
response['Content-Disposition'] = 'attachment; filename=resume_export.tar.gz'
return response
except:
raise Http404
| 36.311005 | 112 | 0.65305 |
from __future__ import print_function
import logging
from datetime import timedelta
from django import http
from django.contrib import messages
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import ValidationError
from django.http import Http404, HttpResponseRedirect, JsonResponse, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from django.views import View
from app import slack
from app.slack import SlackInvitationException
from app.utils import reverse, hacker_tabs
from app.views import TabsView
from applications import models, emails, forms
from user.mixins import IsHackerMixin, is_hacker
def check_application_exists(user, uuid):
try:
application = models.Application.objects.get(user=user)
except models.Application.DoesNotExist:
raise Http404
if not application or uuid != application.uuid_str:
raise Http404
class ConfirmApplication(IsHackerMixin, UserPassesTestMixin, View):
def test_func(self):
check_application_exists(self.request.user, self.kwargs.get('id', None))
return True
def get(self, request, *args, **kwargs):
application = models.Application.objects.get(user=request.user)
msg = None
if application.can_confirm():
msg = emails.create_confirmation_email(application, self.request)
try:
application.confirm()
except:
raise Http404
if msg:
msg.send()
try:
slack.send_slack_invite(request.user.email)
except SlackInvitationException as e:
logging.error(e)
return http.HttpResponseRedirect(reverse('dashboard'))
class CancelApplication(IsHackerMixin, UserPassesTestMixin, TabsView):
template_name = 'cancel.html'
def test_func(self):
check_application_exists(self.request.user, self.kwargs.get('id', None))
return True
def get_back_url(self):
return reverse('dashboard')
def get_context_data(self, **kwargs):
context = super(CancelApplication, self).get_context_data(**kwargs)
application = models.Application.objects.get(user=self.request.user)
context.update({'application': application, })
if application.status == models.APP_CANCELLED:
context.update({'error': "Thank you for responding. We're sorry you won't be able to make it."
" Hope to see you next edition!"
})
elif application.status == models.APP_EXPIRED:
context.update({'error': "Unfortunately your invite has expired."})
elif not application.can_be_cancelled():
context.update({
'error': "You found a glitch! You can't cancel this invitation. Is this the question for 42?",
'application': None
})
return context
def post(self, request, *args, **kwargs):
application = models.Application.objects.get(user=self.request.user)
try:
application.cancel()
except ValidationError:
pass
return http.HttpResponseRedirect(reverse('dashboard'))
def get_deadline(application):
last_updated = application.status_update_date
if application.status == models.APP_INVITED:
deadline = last_updated + timedelta(days=5)
else:
deadline = last_updated + timedelta(days=1)
return deadline
class HackerDashboard(IsHackerMixin, TabsView):
template_name = 'dashboard.html'
def get_current_tabs(self):
return hacker_tabs(self.request.user)
def get_context_data(self, **kwargs):
context = super(HackerDashboard, self).get_context_data(**kwargs)
try:
draft = models.DraftApplication.objects.get(user=self.request.user)
form = forms.ApplicationForm(instance=models.Application(**draft.get_dict()))
except:
form = forms.ApplicationForm()
context.update({'form': form})
try:
application = models.Application.objects.get(user=self.request.user)
deadline = get_deadline(application)
context.update({'invite_timeleft': deadline - timezone.now()})
except:
# We ignore this as we are okay if the user has not created an application yet
pass
return context
def post(self, request, *args, **kwargs):
new_application = True
try:
form = forms.ApplicationForm(request.POST, request.FILES, instance=request.user.application)
new_application = False
except:
form = forms.ApplicationForm(request.POST, request.FILES)
if form.is_valid():
application = form.save(commit=False)
application.user = request.user
application.save()
if new_application:
messages.success(request,
'We have now received your application. '
'Processing your application will take some time, so please be patient.')
else:
messages.success(request, 'Application changes saved successfully!')
return HttpResponseRedirect(reverse('root'))
else:
c = self.get_context_data()
c.update({'form': form})
return render(request, self.template_name, c)
class HackerApplication(IsHackerMixin, TabsView):
template_name = 'application.html'
def get_current_tabs(self):
return hacker_tabs(self.request.user)
def get_context_data(self, **kwargs):
context = super(HackerApplication, self).get_context_data(**kwargs)
application = get_object_or_404(models.Application, user=self.request.user)
deadline = get_deadline(application)
context.update(
{'invite_timeleft': deadline - timezone.now(), 'form': forms.ApplicationForm(instance=application)})
return context
def post(self, request, *args, **kwargs):
try:
form = forms.ApplicationForm(request.POST, request.FILES, instance=request.user.application)
except:
form = forms.ApplicationForm(request.POST, request.FILES)
if form.is_valid():
application = form.save(commit=False)
application.user = request.user
application.save()
messages.success(request, 'Application changes saved successfully!')
return HttpResponseRedirect(reverse('dashboard'))
else:
c = self.get_context_data()
c.update({'form': form})
return render(request, self.template_name, c)
@is_hacker
def save_draft(request):
d = models.DraftApplication()
d.user = request.user
form_keys = set(dict(forms.ApplicationForm().fields).keys())
valid_keys = set([field.name for field in models.Application()._meta.get_fields()])
d.save_dict(dict((k, v) for k, v in request.POST.items() if k in valid_keys.intersection(form_keys) and v))
d.save()
return JsonResponse({'saved': True})
def export_resume(request):
try:
response = HttpResponse(open("./files/resumes/resume_export.tar.gz", 'rb').read())
response['Content-Type'] = 'text/plain'
response['Content-Disposition'] = 'attachment; filename=resume_export.tar.gz'
return response
except:
raise Http404
| true | true |
f72715df1abfb3b959b3006e717ef5f1bb7888f0 | 90 | py | Python | app/reserve/__init__.py | YaJunCui/bhbmjsfwzx | 1241b433663d5bcd170d61ab3e31423304f8a257 | [
"Apache-2.0"
] | null | null | null | app/reserve/__init__.py | YaJunCui/bhbmjsfwzx | 1241b433663d5bcd170d61ab3e31423304f8a257 | [
"Apache-2.0"
] | null | null | null | app/reserve/__init__.py | YaJunCui/bhbmjsfwzx | 1241b433663d5bcd170d61ab3e31423304f8a257 | [
"Apache-2.0"
] | null | null | null | from flask import Blueprint
reserve = Blueprint('reserve', __name__)
from . import views | 18 | 40 | 0.777778 | from flask import Blueprint
reserve = Blueprint('reserve', __name__)
from . import views | true | true |
f727170830757a9927a76f877b9aa62a8ac16456 | 4,123 | py | Python | scripts/appleseedMaya/menu.py | mororo250/appleseed-maya | 267d747d56b10fea716d014a6952e2a3de91b69c | [
"MIT"
] | 85 | 2016-03-02T13:52:08.000Z | 2022-01-07T22:45:30.000Z | scripts/appleseedMaya/menu.py | markreidvfx/appleseed-maya | d8dbf4b4134b34edc6c30b3f5e51f042de6abbf0 | [
"MIT"
] | 167 | 2016-01-29T17:45:44.000Z | 2021-09-17T04:47:17.000Z | scripts/appleseedMaya/menu.py | markreidvfx/appleseed-maya | d8dbf4b4134b34edc6c30b3f5e51f042de6abbf0 | [
"MIT"
] | 24 | 2016-01-29T17:37:06.000Z | 2022-01-07T15:55:24.000Z |
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2016-2019 Esteban Tovagliari, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Standard imports.
import os
# Maya imports.
import maya.cmds as mc
import maya.mel as mel
# appleseedMaya imports.
from logger import logger
from util import createLocator
def showAbout():
if mc.window('appleseedAboutDialog', query=True, exists=True):
mc.deleteUI('appleseedAboutDialog')
window = mc.window('appleseedAboutDialog', title='About appleseed-maya')
mc.columnLayout(rs=20, columnOffset=['both', 22], width=200)
# Add some empty space. Is there a better way to do this?
mc.text(label='')
mc.image(image='appleseed-logo-256.png')
mc.text(
label='Plugin version: ' + mc.pluginInfo("appleseedMaya", q=True, v=True),
font='boldLabelFont',
align='center')
mc.text(
label='Copyright (c) 2019 The appleseedhq Organization',
font='boldLabelFont',
align='center')
mc.text(
label='This software is released under the MIT license.',
font='boldLabelFont',
align='center')
# Add some empty space. Is there a better way to do this?
mc.text(label='')
mc.setParent('..')
mc.showWindow(window)
__g_appleseedMenu = None
def createSkyDomeLight():
(xform, shape) = createLocator('appleseedSkyDomeLight')
# Add the locator to the light set.
mc.connectAttr(
xform + '.instObjGroups',
'defaultLightSet.dagSetMembers',
nextAvailable=True)
def createPhysicalLight():
(xform, shape) = createLocator('appleseedPhysicalSkyLight')
# Add the locator to the light set.
mc.connectAttr(
xform + '.instObjGroups',
'defaultLightSet.dagSetMembers',
nextAvailable=True)
def createMenu():
logger.debug("creating appleseed menu.")
global __g_appleseedMenu
deleteMenu()
gMainWindow = mel.eval('$temp1=$gMainWindow')
__g_appleseedMenu = mc.menu(
'appleseedMenu', parent=gMainWindow, label='appleseed', tearOff=True)
mc.menuItem(
'appleseedLightMenu',
subMenu=True,
label='Lights',
to=True,
parent='appleseedMenu')
mc.menuItem(
label='Create Dome Light',
parent='appleseedLightMenu',
command='import appleseedMaya.menu\nappleseedMaya.menu.createSkyDomeLight()')
mc.menuItem(
label='Create Physical Sky',
parent='appleseedLightMenu',
command='import appleseedMaya.menu\nappleseedMaya.menu.createPhysicalLight()')
mc.menuItem(divider=True, parent='appleseedMenu')
mc.menuItem(
label='About',
parent='appleseedMenu',
command='import appleseedMaya.menu\nappleseedMaya.menu.showAbout()')
def deleteMenu():
global __g_appleseedMenu
try:
mc.deleteUI(__g_appleseedMenu)
logger.debug("deleted appleseed menu.")
except:
pass
| 29.876812 | 86 | 0.696338 |
import os
import maya.cmds as mc
import maya.mel as mel
from logger import logger
from util import createLocator
def showAbout():
if mc.window('appleseedAboutDialog', query=True, exists=True):
mc.deleteUI('appleseedAboutDialog')
window = mc.window('appleseedAboutDialog', title='About appleseed-maya')
mc.columnLayout(rs=20, columnOffset=['both', 22], width=200)
mc.text(label='')
mc.image(image='appleseed-logo-256.png')
mc.text(
label='Plugin version: ' + mc.pluginInfo("appleseedMaya", q=True, v=True),
font='boldLabelFont',
align='center')
mc.text(
label='Copyright (c) 2019 The appleseedhq Organization',
font='boldLabelFont',
align='center')
mc.text(
label='This software is released under the MIT license.',
font='boldLabelFont',
align='center')
mc.text(label='')
mc.setParent('..')
mc.showWindow(window)
__g_appleseedMenu = None
def createSkyDomeLight():
(xform, shape) = createLocator('appleseedSkyDomeLight')
mc.connectAttr(
xform + '.instObjGroups',
'defaultLightSet.dagSetMembers',
nextAvailable=True)
def createPhysicalLight():
(xform, shape) = createLocator('appleseedPhysicalSkyLight')
mc.connectAttr(
xform + '.instObjGroups',
'defaultLightSet.dagSetMembers',
nextAvailable=True)
def createMenu():
logger.debug("creating appleseed menu.")
global __g_appleseedMenu
deleteMenu()
gMainWindow = mel.eval('$temp1=$gMainWindow')
__g_appleseedMenu = mc.menu(
'appleseedMenu', parent=gMainWindow, label='appleseed', tearOff=True)
mc.menuItem(
'appleseedLightMenu',
subMenu=True,
label='Lights',
to=True,
parent='appleseedMenu')
mc.menuItem(
label='Create Dome Light',
parent='appleseedLightMenu',
command='import appleseedMaya.menu\nappleseedMaya.menu.createSkyDomeLight()')
mc.menuItem(
label='Create Physical Sky',
parent='appleseedLightMenu',
command='import appleseedMaya.menu\nappleseedMaya.menu.createPhysicalLight()')
mc.menuItem(divider=True, parent='appleseedMenu')
mc.menuItem(
label='About',
parent='appleseedMenu',
command='import appleseedMaya.menu\nappleseedMaya.menu.showAbout()')
def deleteMenu():
global __g_appleseedMenu
try:
mc.deleteUI(__g_appleseedMenu)
logger.debug("deleted appleseed menu.")
except:
pass
| true | true |
f727178eb81e72d2d877679f084f64e3e80cf022 | 2,302 | py | Python | test/functional/wallet_coinbase_category.py | picacoin/picacoin | a6b6c1053d796fac077d1c4ce63e09014002b364 | [
"MIT"
] | 1 | 2021-06-17T01:38:26.000Z | 2021-06-17T01:38:26.000Z | test/functional/wallet_coinbase_category.py | picacoin/picacoin | a6b6c1053d796fac077d1c4ce63e09014002b364 | [
"MIT"
] | null | null | null | test/functional/wallet_coinbase_category.py | picacoin/picacoin | a6b6c1053d796fac077d1c4ce63e09014002b364 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Picacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test coinbase transactions return the correct categories.
Tests listtransactions, listsinceblock, and gettransaction.
"""
from test_framework.test_framework import PicacoinTestFramework
from test_framework.util import (
assert_array_result
)
class CoinbaseCategoryTest(PicacoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def assert_category(self, category, address, txid, skip):
assert_array_result(self.nodes[0].listtransactions(skip=skip),
{"address": address},
{"category": category})
assert_array_result(self.nodes[0].listsinceblock()["transactions"],
{"address": address},
{"category": category})
assert_array_result(self.nodes[0].gettransaction(txid)["details"],
{"address": address},
{"category": category})
def run_test(self):
# Generate one block to an address
address = self.nodes[0].getnewaddress()
self.nodes[0].generatetoaddress(1, address)
hash = self.nodes[0].getbestblockhash()
txid = self.nodes[0].getblock(hash)["tx"][0]
# Coinbase transaction is immature after 1 confirmation
self.assert_category("immature", address, txid, 0)
# Mine another 99 blocks on top
self.nodes[0].generate(99)
# Coinbase transaction is still immature after 100 confirmations
self.assert_category("immature", address, txid, 99)
# Mine one more block
self.nodes[0].generate(1)
# Coinbase transaction is now matured, so category is "generate"
self.assert_category("generate", address, txid, 100)
# Orphan block that paid to address
self.nodes[0].invalidateblock(hash)
# Coinbase transaction is now orphaned
self.assert_category("orphan", address, txid, 100)
if __name__ == '__main__':
CoinbaseCategoryTest().main()
| 38.366667 | 75 | 0.650738 |
from test_framework.test_framework import PicacoinTestFramework
from test_framework.util import (
assert_array_result
)
class CoinbaseCategoryTest(PicacoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def assert_category(self, category, address, txid, skip):
assert_array_result(self.nodes[0].listtransactions(skip=skip),
{"address": address},
{"category": category})
assert_array_result(self.nodes[0].listsinceblock()["transactions"],
{"address": address},
{"category": category})
assert_array_result(self.nodes[0].gettransaction(txid)["details"],
{"address": address},
{"category": category})
def run_test(self):
address = self.nodes[0].getnewaddress()
self.nodes[0].generatetoaddress(1, address)
hash = self.nodes[0].getbestblockhash()
txid = self.nodes[0].getblock(hash)["tx"][0]
self.assert_category("immature", address, txid, 0)
self.nodes[0].generate(99)
self.assert_category("immature", address, txid, 99)
self.nodes[0].generate(1)
self.assert_category("generate", address, txid, 100)
self.nodes[0].invalidateblock(hash)
self.assert_category("orphan", address, txid, 100)
if __name__ == '__main__':
CoinbaseCategoryTest().main()
| true | true |
f727180f817153cce34f871f9fe22f9853129f9e | 717 | py | Python | WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/re/re_negative_look_behind.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/re/re_negative_look_behind.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/re/re_negative_look_behind.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | # Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Negative look behind assertion.
"""
# end_pymotw_header
import re
address = re.compile(
"""
^
# An address: username@domain.tld
[\w\d.+-]+ # username
# Ignore noreply addresses
(?<!noreply)
@
([\w\d.]+\.)+ # domain name prefix
(com|org|edu) # limit the allowed top-level domains
$
""",
re.VERBOSE,
)
candidates = [u"first.last@example.com", u"noreply@example.com"]
for candidate in candidates:
print("Candidate:", candidate)
match = address.search(candidate)
if match:
print(" Match:", candidate[match.start() : match.end()])
else:
print(" No match")
| 18.868421 | 65 | 0.591353 |
import re
address = re.compile(
"""
^
# An address: username@domain.tld
[\w\d.+-]+ # username
# Ignore noreply addresses
(?<!noreply)
@
([\w\d.]+\.)+ # domain name prefix
(com|org|edu) # limit the allowed top-level domains
$
""",
re.VERBOSE,
)
candidates = [u"first.last@example.com", u"noreply@example.com"]
for candidate in candidates:
print("Candidate:", candidate)
match = address.search(candidate)
if match:
print(" Match:", candidate[match.start() : match.end()])
else:
print(" No match")
| true | true |
f727184e862d83fc9178a8cd67a0568c1ac7bed2 | 1,740 | py | Python | pandas/tests/categorical/test_algos.py | stillmatic/pandas | da067b2fe4cdc43eac5349e0648cfbbe4b96dbbd | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-01-13T09:40:44.000Z | 2021-01-13T09:40:52.000Z | pandas/tests/categorical/test_algos.py | stillmatic/pandas | da067b2fe4cdc43eac5349e0648cfbbe4b96dbbd | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/categorical/test_algos.py | stillmatic/pandas | da067b2fe4cdc43eac5349e0648cfbbe4b96dbbd | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
@pytest.mark.parametrize('ordered', [True, False])
@pytest.mark.parametrize('categories', [
['b', 'a', 'c'],
['a', 'b', 'c', 'd'],
])
def test_factorize(categories, ordered):
cat = pd.Categorical(['b', 'b', 'a', 'c', None],
categories=categories,
ordered=ordered)
labels, uniques = pd.factorize(cat)
expected_labels = np.array([0, 0, 1, 2, -1], dtype=np.intp)
expected_uniques = pd.Categorical(['b', 'a', 'c'],
categories=categories,
ordered=ordered)
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort():
cat = pd.Categorical(['b', 'b', None, 'a'])
labels, uniques = pd.factorize(cat, sort=True)
expected_labels = np.array([1, 1, -1, 0], dtype=np.intp)
expected_uniques = pd.Categorical(['a', 'b'])
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort_ordered():
cat = pd.Categorical(['b', 'b', None, 'a'],
categories=['c', 'b', 'a'],
ordered=True)
labels, uniques = pd.factorize(cat, sort=True)
expected_labels = np.array([0, 0, -1, 1], dtype=np.intp)
expected_uniques = pd.Categorical(['b', 'a'],
categories=['c', 'b', 'a'],
ordered=True)
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
| 34.8 | 65 | 0.58046 | import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
@pytest.mark.parametrize('ordered', [True, False])
@pytest.mark.parametrize('categories', [
['b', 'a', 'c'],
['a', 'b', 'c', 'd'],
])
def test_factorize(categories, ordered):
cat = pd.Categorical(['b', 'b', 'a', 'c', None],
categories=categories,
ordered=ordered)
labels, uniques = pd.factorize(cat)
expected_labels = np.array([0, 0, 1, 2, -1], dtype=np.intp)
expected_uniques = pd.Categorical(['b', 'a', 'c'],
categories=categories,
ordered=ordered)
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort():
cat = pd.Categorical(['b', 'b', None, 'a'])
labels, uniques = pd.factorize(cat, sort=True)
expected_labels = np.array([1, 1, -1, 0], dtype=np.intp)
expected_uniques = pd.Categorical(['a', 'b'])
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort_ordered():
cat = pd.Categorical(['b', 'b', None, 'a'],
categories=['c', 'b', 'a'],
ordered=True)
labels, uniques = pd.factorize(cat, sort=True)
expected_labels = np.array([0, 0, -1, 1], dtype=np.intp)
expected_uniques = pd.Categorical(['b', 'a'],
categories=['c', 'b', 'a'],
ordered=True)
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
| true | true |
f727187cf5688be60c2c2db7a635f08927f1a6e9 | 3,431 | py | Python | utils/unshrtn.py | rongpenl/twarc | 1294fc717d16787b631236cd43e9f2b3155d3d96 | [
"MIT"
] | null | null | null | utils/unshrtn.py | rongpenl/twarc | 1294fc717d16787b631236cd43e9f2b3155d3d96 | [
"MIT"
] | null | null | null | utils/unshrtn.py | rongpenl/twarc | 1294fc717d16787b631236cd43e9f2b3155d3d96 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Unfortunately the "expanded_url" as supplied by Twitter aren't fully
expanded one hop past t.co.
unshrtn.py will attempt to completely unshorten URLs and add them as the
"unshortened_url" key to each url, and emit the tweet as JSON again on stdout.
This script starts 10 seaprate processes which talk to an instance of unshrtn
that is running:
http://github.com/edsu/unshrtn
"""
import re
import json
import time
import urllib.request, urllib.parse, urllib.error
import logging
import argparse
import fileinput
import multiprocessing
# number of urls to look up in parallel
POOL_SIZE = 10
unshrtn_url = "http://localhost:3000"
retries = 2
wait = 15
logging.basicConfig(filename="unshorten.log", level=logging.INFO)
def unshorten_url(url):
if url is None:
return None
# TODO: Worth providing some way for the user to specify specific hostnames they want to expand,
# instead of assuming that all hostnames need expanding?
if re.match(r"^https?://twitter.com/", url):
return url
u = "{}/?{}".format(
unshrtn_url, urllib.parse.urlencode({"url": url.encode("utf8")})
)
resp = None
for retry in range(0, retries):
try:
resp = json.loads(urllib.request.urlopen(u).read().decode("utf-8"))
break
except Exception as e:
logging.error(
"http error: %s when looking up %s. Try %s of %s",
e,
url,
retry,
retries,
)
time.sleep(wait)
for key in ["canonical", "long"]:
if key in resp:
return resp[key]
return None
def rewrite_line(line):
try:
tweet = json.loads(line)
except Exception as e:
# garbage in, garbage out
logging.error(e)
return line
for url_dict in tweet["entities"]["urls"]:
if "expanded_url" in url_dict:
url = url_dict["expanded_url"]
else:
url = url_dict["url"]
url_dict["unshortened_url"] = unshorten_url(url)
tweet["user"]["unshortened_url"] = unshorten_url(tweet["user"]["url"])
return json.dumps(tweet)
def main():
global unshrtn_url, retries, wait
parser = argparse.ArgumentParser()
parser.add_argument(
"--pool-size",
help="number of urls to look up in parallel",
default=POOL_SIZE,
type=int,
)
parser.add_argument(
"--unshrtn", help="url of the unshrtn service", default=unshrtn_url
)
parser.add_argument(
"--retries",
help="number of time to retry if error from unshrtn service",
default=retries,
type=int,
)
parser.add_argument(
"--wait",
help="number of seconds to wait between retries if error from unshrtn service",
default=wait,
type=int,
)
parser.add_argument(
"files",
metavar="FILE",
nargs="*",
help="files to read, if empty, stdin is used",
)
args = parser.parse_args()
unshrtn_url = args.unshrtn
retries = args.retries
wait = args.wait
pool = multiprocessing.Pool(args.pool_size)
for line in pool.imap_unordered(
rewrite_line,
fileinput.input(files=args.files if len(args.files) > 0 else ("-",)),
):
if line != "\n":
print(line)
if __name__ == "__main__":
main()
| 25.043796 | 100 | 0.609443 |
import re
import json
import time
import urllib.request, urllib.parse, urllib.error
import logging
import argparse
import fileinput
import multiprocessing
POOL_SIZE = 10
unshrtn_url = "http://localhost:3000"
retries = 2
wait = 15
logging.basicConfig(filename="unshorten.log", level=logging.INFO)
def unshorten_url(url):
if url is None:
return None
if re.match(r"^https?://twitter.com/", url):
return url
u = "{}/?{}".format(
unshrtn_url, urllib.parse.urlencode({"url": url.encode("utf8")})
)
resp = None
for retry in range(0, retries):
try:
resp = json.loads(urllib.request.urlopen(u).read().decode("utf-8"))
break
except Exception as e:
logging.error(
"http error: %s when looking up %s. Try %s of %s",
e,
url,
retry,
retries,
)
time.sleep(wait)
for key in ["canonical", "long"]:
if key in resp:
return resp[key]
return None
def rewrite_line(line):
try:
tweet = json.loads(line)
except Exception as e:
logging.error(e)
return line
for url_dict in tweet["entities"]["urls"]:
if "expanded_url" in url_dict:
url = url_dict["expanded_url"]
else:
url = url_dict["url"]
url_dict["unshortened_url"] = unshorten_url(url)
tweet["user"]["unshortened_url"] = unshorten_url(tweet["user"]["url"])
return json.dumps(tweet)
def main():
global unshrtn_url, retries, wait
parser = argparse.ArgumentParser()
parser.add_argument(
"--pool-size",
help="number of urls to look up in parallel",
default=POOL_SIZE,
type=int,
)
parser.add_argument(
"--unshrtn", help="url of the unshrtn service", default=unshrtn_url
)
parser.add_argument(
"--retries",
help="number of time to retry if error from unshrtn service",
default=retries,
type=int,
)
parser.add_argument(
"--wait",
help="number of seconds to wait between retries if error from unshrtn service",
default=wait,
type=int,
)
parser.add_argument(
"files",
metavar="FILE",
nargs="*",
help="files to read, if empty, stdin is used",
)
args = parser.parse_args()
unshrtn_url = args.unshrtn
retries = args.retries
wait = args.wait
pool = multiprocessing.Pool(args.pool_size)
for line in pool.imap_unordered(
rewrite_line,
fileinput.input(files=args.files if len(args.files) > 0 else ("-",)),
):
if line != "\n":
print(line)
if __name__ == "__main__":
main()
| true | true |
f727189e07ca7e93ec6cf131f33eb666eb02749e | 7,355 | py | Python | python/dl.py | mkuznets/ytbackup | 834cf65432860bc3fbd92d7d79f2449464ee3ed0 | [
"MIT"
] | null | null | null | python/dl.py | mkuznets/ytbackup | 834cf65432860bc3fbd92d7d79f2449464ee3ed0 | [
"MIT"
] | null | null | null | python/dl.py | mkuznets/ytbackup | 834cf65432860bc3fbd92d7d79f2449464ee3ed0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
import contextlib
import copy
import glob
import hashlib
import http.client
import json
import logging
import os
import shutil
import stat
import sys
import typing
import urllib.error
from unittest import mock
SYSTEM_EXCS = (urllib.error.URLError, http.client.HTTPException, OSError)
STDERR = sys.stderr
YDL_OPTIONS = {
"buffersize": 16 * 1024,
"retries": 5,
"fragment_retries": 5,
"quiet": True,
"noprogress": True,
"youtube_include_dash_manifest": True,
"no_color": True,
"call_home": False,
"ignoreerrors": False,
"geo_bypass": True,
"verbose": False,
"prefer_ffmpeg": True,
"noplaylist": True,
"write_all_thumbnails": True,
"allsubtitles": True,
"writesubtitles": True,
"writeinfojson": True,
"format": "bestvideo+bestaudio/best",
"merge_output_format": "mkv",
}
# ------------------------------------------------------------------------------
class Error(Exception):
    """Download failure carrying a machine-readable ``reason`` string."""

    def __init__(self, *args, reason=None, **kwargs):
        # Fall back to "unknown" when no (or a falsy) reason is supplied.
        if not reason:
            reason = "unknown"
        self.reason = reason
        # noinspection PyArgumentList
        super().__init__(*args, **kwargs)
def json_dump(data, f: typing.TextIO):
    """Write *data* to *f* as indented JSON followed by a newline.

    Unserializable keys are skipped, unserializable values become
    ``null``, and non-ASCII characters are written verbatim.
    """
    text = json.dumps(
        data,
        indent=2,
        skipkeys=True,
        ensure_ascii=False,
        default=lambda _unused: None,
    )
    f.write(text)
    f.write("\n")
@contextlib.contextmanager
def suppress_output():
    """Temporarily route both stdout and stderr to the null device."""
    devnull = open(os.devnull, "w")
    try:
        with contextlib.redirect_stdout(devnull):
            with contextlib.redirect_stderr(devnull):
                yield
    finally:
        devnull.close()
def get_logger(filename: typing.Optional[str] = None) -> logging.Logger:
    """Return the shared "log" logger, attaching a handler on first use.

    When *filename* is given the handler appends to that file; otherwise
    it writes to the stderr stream captured at import time.  Subsequent
    calls reuse the already-configured logger.
    """
    logger = logging.getLogger("log")
    logger.setLevel(logging.DEBUG)
    if logger.handlers:
        # Already configured by an earlier call.
        return logger
    out = open(filename, "a") if filename else STDERR
    handler = logging.StreamHandler(out)
    handler.setFormatter(
        logging.Formatter("%(asctime)s\t%(levelname)s\t%(message)s"))
    handler.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    return logger
def create_progress_hook(logger):
    """Return a youtube-dl progress hook that logs JSON reports via *logger*.

    Each report carries a ``finished`` flag and, when both byte counts
    are available, the downloaded/total sizes plus a percentage string
    (otherwise ``done`` stays ``"unk"``).
    """

    def log_hook(data):
        report = {
            "finished": data.get("status") == "finished",
            "done": "unk",
        }
        downloaded = data.get("downloaded_bytes")
        total = data.get("total_bytes")
        if not (downloaded is None or total is None):
            pct = downloaded * 100 / total
            report.update(downloaded=downloaded, total=total,
                          done="%.2f%%" % pct)
        logger.info("__progress__ %s", json.dumps(report))

    return log_hook
# noinspection PyUnresolvedReferences
def sha256sum(filename: str, logger: logging.Logger) -> str:
    """Return the SHA-256 hex digest of *filename*, read in 128 KiB chunks.

    The running total of bytes hashed is logged every 160 chunks
    (roughly every 20 MiB) so long hashes show progress.
    """
    digest = hashlib.sha256()
    buf = bytearray(128 * 1024)
    view = memoryview(buf)
    bytes_read = 0
    chunk_index = 0
    with open(filename, "rb", buffering=0) as fh:
        while True:
            n = fh.readinto(view)
            if n == 0:
                break
            bytes_read += n
            if chunk_index % 160 == 0:
                logger.info("sha256: %d", bytes_read)
            digest.update(view[:n])
            chunk_index += 1
    return digest.hexdigest()
# ------------------------------------------------------------------------------
class Download:
    """Wrapper around youtube-dl that downloads a URL and reports artifacts.

    Videos are downloaded into ``<root>/.tmp/<video id>``, the finished
    result is moved to the destination directory, and per-file metadata
    (root-relative path, SHA-256 hash, size) is returned.
    """

    def __init__(self, args: argparse.Namespace):
        """Prepare directories and youtube-dl options from CLI *args*.

        *args* must carry ``url``, ``log``, ``dst``, ``root`` and
        ``cache`` attributes (see the argument parser in ``main``).
        """
        self.url = args.url
        self.logger = get_logger(args.log)

        # ----------------------------------------------------------------------
        # Final destination for the downloaded files.
        self.dest_dir = os.path.abspath(os.path.expanduser(args.dst))
        os.makedirs(os.path.dirname(self.dest_dir), exist_ok=True)

        self.root = os.path.abspath(os.path.expanduser(args.root))
        # Scratch space youtube-dl writes into before we move the result.
        self.output_dir = tmp_dir = os.path.join(self.root, ".tmp")
        os.makedirs(self.output_dir, exist_ok=True)

        # Cache for youtube-dl
        cache_dir = args.cache or os.path.join(tmp_dir, "ydl_cache")
        os.makedirs(cache_dir, exist_ok=True)

        # ----------------------------------------------------------------------
        # Optional option overrides supplied via the environment.
        custom_opts = json.loads(os.environ.get("YDL_OPTS", "{}"))
        assert isinstance(custom_opts, dict)

        opts = copy.copy(YDL_OPTIONS)
        opts.update(
            logger=self.logger,
            outtmpl=os.path.join(self.output_dir, "%(id)s/%(id)s.%(ext)s"),
            progress_hooks=[create_progress_hook(self.logger)],
            cachedir=cache_dir,
        )
        if args.log:
            # Mirror ffmpeg's own progress reporting into a sibling log file.
            ffmpeg_log = str(args.log).replace(".log", "-ffmpeg.log")
            opts["postprocessor_args"] = ["-progress", "file:{}".format(ffmpeg_log)]

        if custom_opts:
            self.logger.info("Custom youtube-dl options: %s", custom_opts)
            opts.update(custom_opts)

        self.opts = opts

    def execute(self) -> typing.Any:
        """Download ``self.url`` and return per-video file metadata.

        Returns:
            A list of ``{"id": ..., "files": [...]}`` dicts; each file
            entry carries a root-relative path, SHA-256 hash and size.

        Raises:
            Error: with ``reason="system"`` for network/OS failures, or a
                generic ``Error`` when youtube-dl produced no output.
        """
        import youtube_dl

        ydl = youtube_dl.YoutubeDL(self.opts)

        process_info = ydl.process_info
        infos = {}

        def process_hook(data):
            # Record every video youtube-dl is about to process so its
            # output directory can be located afterwards.
            if not data.get("id"):
                return
            infos[data["id"]] = data
            return process_info(data)

        try:
            with mock.patch.object(ydl, "process_info", process_hook):
                ydl.download([self.url])
        except youtube_dl.DownloadError as exc:
            if exc.exc_info[0] in SYSTEM_EXCS:
                raise Error(str(exc), reason="system") from exc
            raise

        if not infos:
            raise Error("result is empty")

        result = []
        for info in infos.values():
            result_dir = os.path.join(self.output_dir, info["id"])
            if not os.path.exists(result_dir):
                # Bug fix: the message previously used a %s placeholder
                # with str.format, so the video id was never substituted.
                raise Error(
                    "result directory is not found: {}".format(info["id"]))

            shutil.rmtree(self.dest_dir, ignore_errors=True)
            shutil.move(result_dir, self.dest_dir)

            files = []
            for path in glob.glob(os.path.join(self.dest_dir, "**"), recursive=True):
                self.logger.info("output file: %s", path)
                try:
                    fi = os.stat(path)
                except OSError as exc:
                    raise Error("could not stat output file") from exc
                # Only regular files are hashed/reported (directories and
                # other entries produced by glob are skipped).
                if stat.S_ISREG(fi.st_mode):
                    files.append(
                        {
                            "path": os.path.relpath(path, self.root),
                            "hash": sha256sum(path, self.logger),
                            "size": fi.st_size,
                        }
                    )
            result.append({"id": info["id"], "files": files})

        return result
def main():
    """CLI entry point: download one URL and print JSON results.

    On success the per-file metadata goes to stdout; on failure an
    ``{"error": ..., "reason": ...}`` object goes to stderr and the
    process exits with status 0xE7.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--log")
    parser.add_argument("--root", required=True)
    parser.add_argument("--dst", required=True)
    parser.add_argument("--cache")
    parser.add_argument("url")
    args = parser.parse_args()

    logger = get_logger(args.log)

    try:
        # Keep youtube-dl's own console noise out of our JSON stdout.
        with suppress_output():
            downloaded = Download(args).execute()
        json_dump(downloaded, sys.stdout)
    except Exception as exc:
        if isinstance(exc, Error):
            msg, reason = str(exc), exc.reason
        else:
            logger.exception("unknown error")
            msg = "{}: {}".format(type(exc).__name__, str(exc))
            reason = "unknown"
        json_dump({"error": msg, "reason": reason}, sys.stderr)
        sys.exit(0xE7)
if __name__ == "__main__":
    # Run the CLI when invoked as a script.
    main()
| 28.397683 | 85 | 0.556628 |
import argparse
import contextlib
import copy
import glob
import hashlib
import http.client
import json
import logging
import os
import shutil
import stat
import sys
import typing
import urllib.error
from unittest import mock
SYSTEM_EXCS = (urllib.error.URLError, http.client.HTTPException, OSError)
STDERR = sys.stderr
YDL_OPTIONS = {
"buffersize": 16 * 1024,
"retries": 5,
"fragment_retries": 5,
"quiet": True,
"noprogress": True,
"youtube_include_dash_manifest": True,
"no_color": True,
"call_home": False,
"ignoreerrors": False,
"geo_bypass": True,
"verbose": False,
"prefer_ffmpeg": True,
"noplaylist": True,
"write_all_thumbnails": True,
"allsubtitles": True,
"writesubtitles": True,
"writeinfojson": True,
"format": "bestvideo+bestaudio/best",
"merge_output_format": "mkv",
}
class Error(Exception):
def __init__(self, *args, reason=None, **kwargs):
self.reason = reason or "unknown"
super().__init__(*args, **kwargs)
def json_dump(data, f: typing.TextIO):
json.dump(
data, f, indent=2, skipkeys=True, ensure_ascii=False, default=lambda x: None,
)
f.write("\n")
@contextlib.contextmanager
def suppress_output():
with open(os.devnull, "w") as f:
with contextlib.redirect_stdout(f), contextlib.redirect_stderr(f):
yield
def get_logger(filename: typing.Optional[str] = None) -> logging.Logger:
logger = logging.getLogger("log")
logger.setLevel(logging.DEBUG)
if not logger.handlers:
stream = STDERR
if filename:
stream = open(filename, "a")
handler = logging.StreamHandler(stream)
fmt = logging.Formatter("%(asctime)s\t%(levelname)s\t%(message)s")
handler.setFormatter(fmt)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
return logger
def create_progress_hook(logger):
def log_hook(data):
size_done = data.get("downloaded_bytes", None)
size_total = data.get("total_bytes", None)
report = {
"finished": data.get("status") == "finished",
"done": "unk",
}
if size_done is not None and size_total is not None:
report["downloaded"] = size_done
report["total"] = size_total
report["done"] = "%.2f%%" % (size_done * 100 / size_total)
logger.info("__progress__ %s", json.dumps(report))
return log_hook
def sha256sum(filename: str, logger: logging.Logger) -> str:
h = hashlib.sha256()
b = bytearray(128 * 1024)
mv = memoryview(b)
total = 0
with open(filename, "rb", buffering=0) as f:
for i, n in enumerate(iter(lambda: f.readinto(mv), 0)):
total += n
if not (i % 160):
logger.info("sha256: %d", total)
h.update(mv[:n])
return h.hexdigest()
class Download:
def __init__(self, args: argparse.Namespace):
self.url = args.url
self.logger = get_logger(args.log)
self.dest_dir = os.path.abspath(os.path.expanduser(args.dst))
os.makedirs(os.path.dirname(self.dest_dir), exist_ok=True)
self.root = os.path.abspath(os.path.expanduser(args.root))
self.output_dir = tmp_dir = os.path.join(self.root, ".tmp")
os.makedirs(self.output_dir, exist_ok=True)
cache_dir = args.cache or os.path.join(tmp_dir, "ydl_cache")
os.makedirs(cache_dir, exist_ok=True)
custom_opts = json.loads(os.environ.get("YDL_OPTS", "{}"))
assert isinstance(custom_opts, dict)
opts = copy.copy(YDL_OPTIONS)
opts.update(
logger=self.logger,
outtmpl=os.path.join(self.output_dir, "%(id)s/%(id)s.%(ext)s"),
progress_hooks=[create_progress_hook(self.logger)],
cachedir=cache_dir,
)
if args.log:
ffmpeg_log = str(args.log).replace(".log", "-ffmpeg.log")
opts["postprocessor_args"] = ["-progress", "file:{}".format(ffmpeg_log)]
if custom_opts:
self.logger.info("Custom youtube-dl options: %s", custom_opts)
opts.update(custom_opts)
self.opts = opts
def execute(self) -> typing.Any:
import youtube_dl
ydl = youtube_dl.YoutubeDL(self.opts)
process_info = ydl.process_info
infos = {}
def process_hook(data):
if not data.get("id"):
return
infos[data["id"]] = data
return process_info(data)
try:
with mock.patch.object(ydl, "process_info", process_hook):
ydl.download([self.url])
except youtube_dl.DownloadError as exc:
if exc.exc_info[0] in SYSTEM_EXCS:
raise Error(str(exc), reason="system") from exc
raise
if not infos:
raise Error("result is empty")
result = []
for info in infos.values():
result_dir = os.path.join(self.output_dir, info["id"])
if not os.path.exists(result_dir):
raise Error("result directory is not found: %s".format(info["id"]))
shutil.rmtree(self.dest_dir, ignore_errors=True)
shutil.move(result_dir, self.dest_dir)
files = []
for path in glob.glob(os.path.join(self.dest_dir, "**"), recursive=True):
self.logger.info("output file: %s", path)
try:
fi = os.stat(path)
except OSError as exc:
raise Error("could not stat output file") from exc
if stat.S_ISREG(fi.st_mode):
files.append(
{
"path": os.path.relpath(path, self.root),
"hash": sha256sum(path, self.logger),
"size": fi.st_size,
}
)
result.append({"id": info["id"], "files": files})
return result
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--log")
parser.add_argument("--root", required=True)
parser.add_argument("--dst", required=True)
parser.add_argument("--cache")
parser.add_argument("url")
args = parser.parse_args()
logger = get_logger(args.log)
try:
with suppress_output():
result = Download(args).execute()
json_dump(result, sys.stdout)
except Exception as exc:
if isinstance(exc, Error):
msg = str(exc)
reason = exc.reason
else:
logger.exception("unknown error")
msg = "{}: {}".format(exc.__class__.__name__, str(exc))
reason = "unknown"
json_dump({"error": msg, "reason": reason}, sys.stderr)
sys.exit(0xE7)
if __name__ == "__main__":
main()
| true | true |
f727190f87503bd55a12dd3ee0e9882c00f0b9d2 | 4,189 | py | Python | Prototype.py | supersamdam/ConversationalAI | bb6013c33f6332aee57abbae310577c056c6fdc1 | [
"MIT"
] | 1 | 2021-02-17T16:38:56.000Z | 2021-02-17T16:38:56.000Z | Prototype.py | samaydumasia/ConversationalAI | bb6013c33f6332aee57abbae310577c056c6fdc1 | [
"MIT"
] | null | null | null | Prototype.py | samaydumasia/ConversationalAI | bb6013c33f6332aee57abbae310577c056c6fdc1 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix, accuracy_score
import pickle
import joblib
# Class starts from here
class CONVAI:
    """Bag-of-words Naive-Bayes chat model trained on rows of data.csv.

    ``init`` cleans/stems the text, trains the classifier and persists
    the artifacts with joblib; ``Test`` applies the same preprocessing
    to a query and returns the predicted response label.
    """

    # Shared vocabulary; max_features bounds how many unique words are kept.
    cv = CountVectorizer(max_features = 20000)
    # Gaussian Naive Bayes: probabilistic classifier.
    classifier = GaussianNB()
    # Number of dataset rows (lines) fed into the model.
    no = 1000

    def init(self):
        """Train on the first ``no`` rows of data.csv and save the model."""
        dataset = pd.read_csv('data.csv')
        no=self.no
        corpus = []  # cleaned documents
        for i in range(0, no):
            # Keep letters only, lower-case, stem, and drop English
            # stopwords -- except "not", which flips meaning.
            review = re.sub('[^a-zA-Z]', ' ', dataset['0'][i])
            review = review.lower()
            review = review.split()
            ps = PorterStemmer()
            all_stopwords = stopwords.words('english')
            all_stopwords.remove('not')
            review = [ps.stem(word) for word in review if not word in set(all_stopwords)]
            review = ' '.join(review)
            corpus.append(review)
        print(corpus)
        # X: bag-of-words features ("questions"); y: target labels ("answers").
        X = self.cv.fit_transform(corpus).toarray()
        y = dataset.iloc[0:no, 2].values
        # Hold out 20% of the rows for evaluation.
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
        sav = self.classifier.fit(X_train, y_train)
        y_pred = self.classifier.predict(X_test)
        # Show predicted vs. actual labels side by side.
        print(np.concatenate((y_pred.reshape(len(y_pred),1,), y_test.reshape(len(y_test),1)),1),)
        cm = confusion_matrix(y_test, y_pred)
        print(cm)
        a = accuracy_score(y_test, y_pred)
        print(a)
        # NOTE(review): artifacts are saved as *1.pkl but Test() loads
        # "vectorizer.pkl"/"classifier.pkl" -- the filenames look
        # inconsistent; confirm which names are intended.
        joblib.dump(self.cv, "vectorizer1.pkl")
        joblib.dump(self.classifier, "classifier1.pkl")

    def Test(self,query):
        """Predict a response label for *query* using the trained model."""
        # NOTE(review): these loaded artifacts are immediately shadowed by
        # the in-memory class attributes below, so the loads are
        # effectively dead code -- confirm which source of truth is
        # intended before removing either path.
        vectorizer = joblib.load("vectorizer.pkl")
        classifier = joblib.load("classifier.pkl")
        # Preprocess the query exactly like the training corpus.
        cv = self.cv
        classifier = self.classifier
        new_review = query
        new_review = re.sub('[^a-zA-Z]', ' ', new_review)
        new_review = new_review.lower()
        new_review = new_review.split()
        ps = PorterStemmer()
        all_stopwords = stopwords.words('english')
        all_stopwords.remove('not')
        new_review = [ps.stem(word) for word in new_review if not word in set(all_stopwords)]
        new_review = ' '.join(new_review)
        new_corpus = [new_review]
        new_X_test = cv.transform(new_corpus).toarray()
        new_y_pred = classifier.predict(new_X_test)
        print(new_y_pred)  # echo the prediction
        return new_y_pred
if __name__ == "__main__":
    # Script entry point: train the model, then run a sample query.
    a=CONVAI()
    a.init()  # trains on data.csv and saves the model artifacts
    a.Test("hello")  # replace the input to probe different predictions
| 39.149533 | 139 | 0.644307 | import numpy as np
import pandas as pd
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix, accuracy_score
import pickle
import joblib
class CONVAI:
cv = CountVectorizer(max_features = 20000)
classifier = GaussianNB()
no = 1000
def init(self):
dataset = pd.read_csv('data.csv')
no=self.no
corpus = []
for i in range(0, no):
review = re.sub('[^a-zA-Z]', ' ', dataset['0'][i])
review = review.lower()
review = review.split()
ps = PorterStemmer()
all_stopwords = stopwords.words('english')
all_stopwords.remove('not')
review = [ps.stem(word) for word in review if not word in set(all_stopwords)]
review = ' '.join(review)
corpus.append(review)
print(corpus)
X = self.cv.fit_transform(corpus).toarray()
y = dataset.iloc[0:no, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
sav = self.classifier.fit(X_train, y_train)
y_pred = self.classifier.predict(X_test)
print(np.concatenate((y_pred.reshape(len(y_pred),1,), y_test.reshape(len(y_test),1)),1),)
cm = confusion_matrix(y_test, y_pred)
print(cm)
a = accuracy_score(y_test, y_pred)
print(a)
joblib.dump(self.cv, "vectorizer1.pkl")
joblib.dump(self.classifier, "classifier1.pkl")
def Test(self,query):
vectorizer = joblib.load("vectorizer.pkl")
classifier = joblib.load("classifier.pkl")
cv = self.cv
classifier = self.classifier
new_review = query
new_review = re.sub('[^a-zA-Z]', ' ', new_review)
new_review = new_review.lower()
new_review = new_review.split()
ps = PorterStemmer()
all_stopwords = stopwords.words('english')
all_stopwords.remove('not')
new_review = [ps.stem(word) for word in new_review if not word in set(all_stopwords)]
new_review = ' '.join(new_review)
new_corpus = [new_review]
new_X_test = cv.transform(new_corpus).toarray()
new_y_pred = classifier.predict(new_X_test)
print(new_y_pred)
return new_y_pred
if __name__ == "__main__":
a=CONVAI()
a.init()
a.Test("hello")
| true | true |
f7271956500274e8b25a12c00f4764c1033c5146 | 4,346 | py | Python | modisco/value_provider.py | XiaotingChen/tfmodisco | 17cbafe806942304a02e8134fe10224bdff38b0c | [
"MIT"
] | null | null | null | modisco/value_provider.py | XiaotingChen/tfmodisco | 17cbafe806942304a02e8134fe10224bdff38b0c | [
"MIT"
] | null | null | null | modisco/value_provider.py | XiaotingChen/tfmodisco | 17cbafe806942304a02e8134fe10224bdff38b0c | [
"MIT"
] | null | null | null | from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.stats
class AbstractValueProvider(object):
    """Interface for objects that map a seqlet to a scalar value."""

    def __call__(self, seqlet):
        # Concrete subclasses implement the actual value extraction.
        raise NotImplementedError()

    @classmethod
    def from_hdf5(cls, grp):
        """Re-instantiate the concrete provider named in the HDF5 group."""
        subclass = eval(grp.attrs["class"])
        return subclass.from_hdf5(grp)


class CoorScoreValueProvider(AbstractValueProvider):
    """Provider that reports the seqlet coordinate's score unchanged."""

    def __call__(self, seqlet):
        return seqlet.coor.score

    def save_hdf5(self, grp):
        # Stateless: only the class name is needed for round-tripping.
        grp.attrs["class"] = type(self).__name__

    @classmethod
    def from_hdf5(cls, grp):
        return cls()


class TransformCentralWindowValueProvider(AbstractValueProvider):
    """Sum a track over the seqlet's central window, then transform it."""

    def __init__(self, track_name, central_window, val_transformer):
        # Track names read back from HDF5 may arrive as bytes.
        if isinstance(track_name, str):
            self.track_name = track_name
        else:
            self.track_name = track_name.decode('utf-8')
        self.central_window = central_window
        self.val_transformer = val_transformer

    def __call__(self, seqlet):
        return self.val_transformer(val=self.get_val(seqlet=seqlet))

    def get_val(self, seqlet):
        """Sum the forward-strand track values inside the central window."""
        flank = int(0.5 * (len(seqlet) - self.central_window))
        window = seqlet[self.track_name].fwd[flank:(len(seqlet) - flank)]
        return np.sum(window)

    def save_hdf5(self, grp):
        grp.attrs["class"] = type(self).__name__
        grp.attrs["track_name"] = self.track_name
        grp.attrs["central_window"] = self.central_window
        self.val_transformer.save_hdf5(grp.create_group("val_transformer"))

    @classmethod
    def from_hdf5(cls, grp):
        track_name = grp.attrs["track_name"]
        if not isinstance(track_name, str):
            track_name = track_name.decode('utf-8')
        val_transformer = AbstractValTransformer.from_hdf5(
            grp["val_transformer"])
        return cls(track_name=track_name,
                   central_window=grp.attrs["central_window"],
                   val_transformer=val_transformer)
class AbstractValTransformer(object):
    """Interface for objects mapping a raw value onto a normalized scale."""

    def __call__(self, val):
        raise NotImplementedError()

    @classmethod
    def from_hdf5(cls, grp):
        """Re-instantiate the concrete transformer named in the HDF5 group."""
        subclass = eval(grp.attrs["class"])
        return subclass.from_hdf5(grp)


class AbsPercentileValTransformer(AbstractValTransformer):
    """Signed percentile of ``abs(val)`` within the stored distribution."""

    def __init__(self, distribution):
        # Sorted absolute values allow a binary-search percentile lookup.
        self.distribution = np.array(sorted(np.abs(distribution)))

    @classmethod
    def from_hdf5(cls, grp):
        return cls(distribution=np.array(grp["distribution"][:]))

    def save_hdf5(self, grp):
        grp.attrs["class"] = type(self).__name__
        grp.create_dataset("distribution", data=self.distribution)

    def __call__(self, val):
        rank = np.searchsorted(a=self.distribution, v=abs(val))
        return np.sign(val) * rank / float(len(self.distribution))


class SignedPercentileValTransformer(AbstractValTransformer):
    """Percentile of val within the same-signed half of a distribution."""

    def __init__(self, distribution):
        self.distribution = np.array(distribution)
        # Positives are ranked against positives; negatives (by absolute
        # value) against negatives.  Zeros belong to neither half.
        self.pos_dist = np.array(sorted(x for x in self.distribution if x > 0))
        self.abs_neg_dist = np.array(
            sorted(abs(x) for x in self.distribution if x < 0))

    @classmethod
    def from_hdf5(cls, grp):
        return cls(distribution=np.array(grp["distribution"][:]))

    def save_hdf5(self, grp):
        grp.attrs["class"] = type(self).__name__
        grp.create_dataset("distribution", data=self.distribution)

    def __call__(self, val):
        if val == 0:
            return 0
        # The 1E-7 epsilon keeps the ranking robust to exact ties between
        # val and entries of the distribution (numerical stability).
        if val > 0:
            rank = np.searchsorted(a=self.pos_dist, v=val + 1E-7)
            return rank / float(len(self.pos_dist))
        rank = np.searchsorted(a=self.abs_neg_dist, v=abs(val) + 1E-7)
        return rank / float(len(self.abs_neg_dist))
| 32.676692 | 77 | 0.637598 | from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.stats
class AbstractValueProvider(object):
def __call__(self, seqlet):
raise NotImplementedError()
@classmethod
def from_hdf5(cls, grp):
the_class = eval(grp.attrs["class"])
return the_class.from_hdf5(grp)
class CoorScoreValueProvider(AbstractValueProvider):
def __call__(self, seqlet):
return seqlet.coor.score
def save_hdf5(self, grp):
grp.attrs["class"] = type(self).__name__
@classmethod
def from_hdf5(cls, grp):
return cls()
class TransformCentralWindowValueProvider(AbstractValueProvider):
def __init__(self, track_name, central_window, val_transformer):
if isinstance(track_name, str):
self.track_name = track_name
else:
self.track_name = track_name.decode('utf-8')
self.central_window = central_window
self.val_transformer = val_transformer
def __call__(self, seqlet):
val = self.get_val(seqlet=seqlet)
return self.val_transformer(val=val)
def get_val(self, seqlet):
flank_to_ignore = int(0.5*(len(seqlet)-self.central_window))
track_values = seqlet[self.track_name]\
.fwd[flank_to_ignore:(len(seqlet)-flank_to_ignore)]
return np.sum(track_values)
def save_hdf5(self, grp):
grp.attrs["class"] = type(self).__name__
grp.attrs["track_name"] = self.track_name
grp.attrs["central_window"] = self.central_window
self.val_transformer.save_hdf5(grp.create_group("val_transformer"))
@classmethod
def from_hdf5(cls, grp):
if isinstance(grp.attrs["track_name"], str):
track_name = grp.attrs["track_name"]
else:
track_name = grp.attrs["track_name"].decode('utf-8')
central_window = grp.attrs["central_window"]
val_transformer = AbstractValTransformer.from_hdf5(
grp["val_transformer"])
return cls(track_name=track_name,
central_window=central_window,
val_transformer=val_transformer)
class AbstractValTransformer(object):
def __call__(self, val):
raise NotImplementedError()
@classmethod
def from_hdf5(cls, grp):
the_class = eval(grp.attrs["class"])
return the_class.from_hdf5(grp)
class AbsPercentileValTransformer(AbstractValTransformer):
def __init__(self, distribution):
self.distribution = np.array(sorted(np.abs(distribution)))
@classmethod
def from_hdf5(cls, grp):
distribution = np.array(grp["distribution"][:])
return cls(distribution=distribution)
def save_hdf5(self, grp):
grp.attrs["class"] = type(self).__name__
grp.create_dataset("distribution", data=self.distribution)
def __call__(self, val):
return np.sign(val)*np.searchsorted(
a=self.distribution,
v=abs(val))/float(len(self.distribution))
class SignedPercentileValTransformer(AbstractValTransformer):
def __init__(self, distribution):
self.distribution = np.array(distribution)
self.pos_dist = np.array(sorted(
[x for x in self.distribution if x > 0]))
self.abs_neg_dist = np.array(sorted(
[abs(x) for x in self.distribution if x < 0]))
@classmethod
def from_hdf5(cls, grp):
distribution = np.array(grp["distribution"][:])
return cls(distribution=distribution)
def save_hdf5(self, grp):
grp.attrs["class"] = type(self).__name__
grp.create_dataset("distribution", data=self.distribution)
def __call__(self, val):
if (val == 0):
return 0
elif (val > 0):
return np.searchsorted(
a=self.pos_dist, v=(val+1E-7))/float(len(self.pos_dist))
else:
return np.searchsorted(
a=self.abs_neg_dist, v=(abs(val)+1E-7))/float(
len(self.abs_neg_dist))
| true | true |
f727196220df796339c62b0c3941e771d9d06e76 | 119 | py | Python | model_helpers/flatten.py | FlorianKlemt/pytorch-latent-i2a | 36809bf3adda1fcffaccd27e352b7ad2338060a7 | [
"MIT"
] | 3 | 2019-02-24T07:37:36.000Z | 2020-03-17T16:00:38.000Z | model_helpers/flatten.py | FlorianKlemt/pytorch-latent-i2a | 36809bf3adda1fcffaccd27e352b7ad2338060a7 | [
"MIT"
] | null | null | null | model_helpers/flatten.py | FlorianKlemt/pytorch-latent-i2a | 36809bf3adda1fcffaccd27e352b7ad2338060a7 | [
"MIT"
] | null | null | null | import torch
class Flatten(torch.nn.Module):
def forward(self,input):
return input.view(input.size(0), -1) | 23.8 | 44 | 0.680672 | import torch
class Flatten(torch.nn.Module):
def forward(self,input):
return input.view(input.size(0), -1) | true | true |
f7271973927dc75098169d414d35dcc361007bc5 | 2,078 | py | Python | scripts/data_dir_to_fasta.py | yuzhiguo07/openfold | 5fb0f074066387b9969578b8bf68f7e046c778af | [
"Apache-2.0"
] | 789 | 2021-11-12T16:12:21.000Z | 2022-03-28T05:45:19.000Z | scripts/data_dir_to_fasta.py | yuzhiguo07/openfold | 5fb0f074066387b9969578b8bf68f7e046c778af | [
"Apache-2.0"
] | 84 | 2021-11-12T22:23:50.000Z | 2022-03-29T01:06:06.000Z | scripts/data_dir_to_fasta.py | yuzhiguo07/openfold | 5fb0f074066387b9969578b8bf68f7e046c778af | [
"Apache-2.0"
] | 114 | 2021-11-12T16:00:57.000Z | 2022-03-27T21:32:31.000Z | import argparse
import logging
import os
from openfold.data import mmcif_parsing
from openfold.np import protein, residue_constants
def main(args):
    """Convert mmCIF/.core files in ``args.data_dir`` into one FASTA file.

    For ``.cif`` files every chain becomes a ``>BASENAME_CHAIN`` record;
    for ``.core`` (ProteinNet) files the whole sequence becomes one
    ``>BASENAME`` record.  The combined FASTA is written to
    ``args.output_path``.
    """
    fasta = []
    for fname in os.listdir(args.data_dir):
        basename, ext = os.path.splitext(fname)
        # PDB IDs are conventionally upper-case.
        basename = basename.upper()
        fpath = os.path.join(args.data_dir, fname)
        if(ext == ".cif"):
            with open(fpath, 'r') as fp:
                mmcif_str = fp.read()
            mmcif = mmcif_parsing.parse(
                file_id=basename, mmcif_string=mmcif_str
            )
            if(mmcif.mmcif_object is None):
                logging.warning(f'Failed to parse {fname}...')
                if(args.raise_errors):
                    # Surface the first recorded parsing error.
                    raise list(mmcif.errors.values())[0]
                else:
                    continue
            mmcif = mmcif.mmcif_object
            for chain, seq in mmcif.chain_to_seqres.items():
                chain_id = '_'.join([basename, chain])
                fasta.append(f">{chain_id}")
                fasta.append(seq)
        elif(ext == ".core"):
            with open(fpath, 'r') as fp:
                core_str = fp.read()
            core_protein = protein.from_proteinnet_string(core_str)
            aatype = core_protein.aatype
            # Map residue type indices back to one-letter codes
            # ("X" denotes an unknown residue).
            seq = ''.join([
                residue_constants.restypes_with_x[aatype[i]]
                for i in range(len(aatype))
            ])
            fasta.append(f">{basename}")
            fasta.append(seq)
    with open(args.output_path, "w") as fp:
        fp.write('\n'.join(fasta))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "data_dir", type=str,
        help="Path to a directory containing mmCIF or .core files"
    )
    parser.add_argument(
        "output_path", type=str,
        help="Path to output FASTA file"
    )
    # NOTE(review): argparse's type=bool treats any non-empty string as
    # True (e.g. "--raise_errors False" still enables it); consider
    # action="store_true" -- left unchanged to preserve the CLI.
    parser.add_argument(
        "--raise_errors", type=bool, default=False,
        help="Whether to crash on parsing errors"
    )
    args = parser.parse_args()
    main(args)
| 29.685714 | 67 | 0.547161 | import argparse
import logging
import os
from openfold.data import mmcif_parsing
from openfold.np import protein, residue_constants
def main(args):
fasta = []
for fname in os.listdir(args.data_dir):
basename, ext = os.path.splitext(fname)
basename = basename.upper()
fpath = os.path.join(args.data_dir, fname)
if(ext == ".cif"):
with open(fpath, 'r') as fp:
mmcif_str = fp.read()
mmcif = mmcif_parsing.parse(
file_id=basename, mmcif_string=mmcif_str
)
if(mmcif.mmcif_object is None):
logging.warning(f'Failed to parse {fname}...')
if(args.raise_errors):
raise list(mmcif.errors.values())[0]
else:
continue
mmcif = mmcif.mmcif_object
for chain, seq in mmcif.chain_to_seqres.items():
chain_id = '_'.join([basename, chain])
fasta.append(f">{chain_id}")
fasta.append(seq)
elif(ext == ".core"):
with open(fpath, 'r') as fp:
core_str = fp.read()
core_protein = protein.from_proteinnet_string(core_str)
aatype = core_protein.aatype
seq = ''.join([
residue_constants.restypes_with_x[aatype[i]]
for i in range(len(aatype))
])
fasta.append(f">{basename}")
fasta.append(seq)
with open(args.output_path, "w") as fp:
fp.write('\n'.join(fasta))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"data_dir", type=str,
help="Path to a directory containing mmCIF or .core files"
)
parser.add_argument(
"output_path", type=str,
help="Path to output FASTA file"
)
parser.add_argument(
"--raise_errors", type=bool, default=False,
help="Whether to crash on parsing errors"
)
args = parser.parse_args()
main(args)
| true | true |
f727197bfaf0ad1a02e1a5e39ce0bac083ab567e | 6,741 | py | Python | configs/_base_/models/cascade_mask_rcnn_swin_fpn.py | AminRezaei0x443/Swin-Transformer-Object-Detection | 5376785b9e7b172a1d08cbb87362d5631b47eca9 | [
"Apache-2.0"
] | null | null | null | configs/_base_/models/cascade_mask_rcnn_swin_fpn.py | AminRezaei0x443/Swin-Transformer-Object-Detection | 5376785b9e7b172a1d08cbb87362d5631b47eca9 | [
"Apache-2.0"
] | null | null | null | configs/_base_/models/cascade_mask_rcnn_swin_fpn.py | AminRezaei0x443/Swin-Transformer-Object-Detection | 5376785b9e7b172a1d08cbb87362d5631b47eca9 | [
"Apache-2.0"
] | null | null | null | # model settings
# Cascade R-CNN detector with a Swin Transformer backbone, expressed as an
# MMDetection config dict. NOTE(review): despite the file name
# ("cascade_mask_rcnn"), the mask branch is disabled below
# (mask_roi_extractor/mask_head are None), so this runs as a pure detector.
model = dict(
    type='CascadeRCNN',
    pretrained=None,
    # Swin-T backbone: embed_dim=96, depths 2-2-6-2, all four stages exported
    # (out_indices) so the FPN can consume every resolution level.
    backbone=dict(
        type='SwinTransformer',
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.2,
        ape=False,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        use_checkpoint=False),
    # FPN neck: fuses the four backbone stages (channel widths 96..768) into
    # five 256-channel pyramid levels.
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 384, 768],
        out_channels=256,
        num_outs=5),
    # Region proposal network head.
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    # Three-stage cascade RoI head (2 foreground classes). The box-delta
    # target stds shrink stage by stage (0.1 -> 0.05 -> 0.033), i.e. each
    # stage performs a progressively finer box refinement.
    roi_head=dict(
        type='CascadeRoIHead',
        num_stages=3,
        stage_loss_weights=[1, 0.5, 0.25],
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=[
            # Cascade stage 1.
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=2,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            # Cascade stage 2.
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=2,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            # Cascade stage 3.
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=2,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
        ],
        # Mask branch explicitly disabled (detection only).
        mask_roi_extractor=None,
        mask_head=None),
    # model training and testing settings
    train_cfg = dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_across_levels=False,
            nms_pre=2000,
            nms_post=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        # Per-stage R-CNN training configs. The positive-IoU threshold rises
        # 0.5 -> 0.6 -> 0.7, matching the cascade's increasingly strict
        # sample assignment. NOTE(review): mask heads are disabled above, so
        # mask_size is presumably inert here -- confirm before relying on it.
        rcnn=[
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.5,
                    min_pos_iou=0.5,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.6,
                    neg_iou_thr=0.6,
                    min_pos_iou=0.6,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.7,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False)
        ]),
    # Inference-time settings: lighter proposal budget than training.
    test_cfg = dict(
        rpn=dict(
            nms_across_levels=False,
            nms_pre=1000,
            nms_post=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))
| true | true |
f7271afaec979ef8020208ff603d1aed3f64fd7f | 7,132 | py | Python | backend/src/services/asr/iflytek_asr.py | didi/MeetDot | a57009d30c1347a9b85950c2e02b77685ce63952 | [
"Apache-2.0"
] | 6 | 2021-09-23T14:53:58.000Z | 2022-02-18T10:14:17.000Z | backend/src/services/asr/iflytek_asr.py | didi/MeetDot | a57009d30c1347a9b85950c2e02b77685ce63952 | [
"Apache-2.0"
] | null | null | null | backend/src/services/asr/iflytek_asr.py | didi/MeetDot | a57009d30c1347a9b85950c2e02b77685ce63952 | [
"Apache-2.0"
] | 1 | 2021-09-24T02:48:50.000Z | 2021-09-24T02:48:50.000Z | """
iflytek stream ASR service class (using WebSocket)
"""
import gevent
import os
from .interface import (
SpeechRecognitionConfig,
SpeechRecognitionRequest,
SpeechRecognitionResponse,
)
from .stream_asr import StreamAsr
from ..tokenizer import Tokenizer
import sys
import hashlib
from hashlib import sha1
import hmac
import base64
import json
import time
from websocket import create_connection
import websocket
from urllib.parse import quote
import logging
import queue
import re
"""
If you want to use iFlytek ASR service, copy your credentials into .env file under repo dir.
IFLYTEK_URL="XXX"
IFLYTEK_API_ID="XXX"
IFLYTEK_API_KEY="XXX"
"""
# iFlytek ASR use different language codes, mapping languages in our systems to iflytek's.
LANGUAGE_CODE_MAPPING = {"zh": "cn", "en-US": "en"}
class IFlyTekAsr(StreamAsr):
    """Streaming speech recognizer backed by the iFlytek WebSocket ASR API.

    Audio chunks are pushed in via ``__call__``; :meth:`run` reads results
    off the socket and forwards each partial/final transcript to
    ``callback_fn`` as a ``SpeechRecognitionResponse``.
    """
    SUPPORTED_LANGUAGES = ("en-US", "zh")
    POLLING_INTERVAL = 0.1  # seconds between reads of the result socket
    def __init__(self, config: SpeechRecognitionConfig, logger, callback_fn):
        """Read credentials from the environment, sign, and connect.

        Requires IFLYTEK_URL, IFLYTEK_API_ID and IFLYTEK_API_KEY to be set
        (see the module docstring).
        """
        super(IFlyTekAsr, self).__init__(config, logger, callback_fn)
        self.start_time = time.time()
        self.base_url: str = os.getenv("IFLYTEK_URL", "")
        self.api_id = os.getenv("IFLYTEK_API_ID", "")
        self.api_key = os.getenv("IFLYTEK_API_KEY", "")
        # Timestamp participates in the signature, so it is fixed at init.
        self.init_timestamp = str(int(time.time()))
        self.pd = "edu" # ASR domain
        # Sentinel payload that tells the server the audio stream has ended.
        self.end_tag = '{"end": true}'
        self.got_final = False
        self.signa = self._get_signature()
        self.lang_code = LANGUAGE_CODE_MAPPING[self.user_language]
        # TODO: self.tokenizer does not support on-the-fly language switch.
        self.tokenizer = Tokenizer(lang=self.user_language)
        self.semaphore = gevent.lock.Semaphore()
        self.connect()
    def connect(self):
        """Open the signed WebSocket connection and send an empty priming
        message. Raises ConnectionRefusedError with a friendlier message if
        the endpoint is unreachable."""
        try:
            self.ws = create_connection(
                self.base_url
                + "?appid="
                + self.api_id
                + "&ts="
                + self.init_timestamp
                + "&signa="
                + quote(self.signa)
                + "&lang="
                + quote(self.lang_code)
            )
        except ConnectionRefusedError:
            raise ConnectionRefusedError(
                f"Could not connect to iflytek ASR server at {self.base_url} - is it running?"
            )
        with self.semaphore:
            self.ws.send("")
    def _get_signature(self):
        """Build the `signa` auth parameter: base64(HMAC-SHA1(api_key,
        MD5(api_id + timestamp)))."""
        tt = (self.api_id + self.init_timestamp).encode("utf-8")
        md5 = hashlib.md5()
        md5.update(tt)
        baseString = md5.hexdigest()
        baseString = bytes(baseString, encoding="utf-8")
        apiKey = self.api_key.encode("utf-8")
        signa = hmac.new(apiKey, baseString, hashlib.sha1).digest()
        signa = base64.b64encode(signa)
        signa = str(signa, "utf-8")
        return signa
    def run(self):
        """Result-reading loop: parse each server message and emit a
        SpeechRecognitionResponse through callback_fn. Runs until the socket
        closes or the server sends an empty (end-of-stream) frame."""
        if not self.ws.connected:
            self.connect()
        while self.ws.connected:
            try:
                api_response = str(self.ws.recv())
            except websocket.WebSocketConnectionClosedException:
                print("receive result end")
                break
            # An empty frame marks end of the result stream.
            if len(api_response) == 0:
                self.got_final = True
                break
            api_response = json.loads(api_response)
            response_code = int(api_response["code"])
            if response_code != 0:
                self.logger.error(f"ASR Response Error code: {response_code}")
                continue
            data = api_response["data"]
            if api_response["action"] == "result":
                data = json.loads(data)
                # Take the first candidate word of each recognized token.
                pure_words_list = [
                    i["cw"][0]["w"] for i in data["cn"]["st"]["rt"][0]["ws"]
                ]
                # 0-final result; 1-intermediate result
                utterance_is_final = int(data["cn"]["st"]["type"]) == 0
                if utterance_is_final:
                    self.got_final = True
                response = SpeechRecognitionResponse(
                    transcript=self._build_transcript(tokens=pure_words_list),
                    relative_time_offset=time.time() - self.start_time,
                    is_final=utterance_is_final,
                    language=LANGUAGE_CODE_MAPPING[self.detected_language],
                )
                self.callback_fn(self.last_request, response)
            gevent.sleep(IFlyTekAsr.POLLING_INTERVAL)
    def __call__(self, request: SpeechRecognitionRequest) -> None:
        """Push one audio chunk to the server; remembered as last_request so
        run() can pair it with the next response."""
        self.last_request = request
        data = request.chunk
        self._send_chunk(data)
    def end_utterance(self):
        # Send special end of stream message
        self._send_chunk(bytes(self.end_tag.encode("utf-8")))
    def terminate(self, wait_for_final=True):
        """Signal end of audio, optionally wait for the final transcript,
        then close the socket."""
        self.end_utterance()
        if wait_for_final:
            self.wait_for_final()
        self.ws.close()
    def wait_for_final(self, timeout_seconds=2.0):
        """
        After closing, wait until the final response is sent, up to a timeout
        """
        q = queue.Queue()
        original_callback = self.callback_fn
        # Temporarily wrap the callback so the final response is also queued
        # here, without disturbing the original consumer.
        def wrapped_callback(request, response):
            if response.is_final:
                q.put(response)
            original_callback(request, response)
        self.callback_fn = wrapped_callback
        try:
            final_response = q.get(timeout=timeout_seconds)
        except queue.Empty:
            # Timed out: synthesize an empty final response.
            final_response = SpeechRecognitionResponse(
                transcript="",
                relative_time_offset=0,
                is_final=True,
                language=self.detected_language,
            )
        self.callback_fn = original_callback
        # Yield to the run() greenlet until it has observed the final frame.
        while not self.got_final:
            gevent.sleep(0.01)
        return final_response
    def _build_transcript(self, tokens: list):
        """Detokenize the recognized tokens and clean the text up."""
        raw_transcript = self.tokenizer.detokenize(tokens)
        transcript = self.postprocess(raw_transcript)
        return transcript
    def postprocess(self, text):
        """Language-aware cleanup of a raw transcript (filler words,
        bracketed annotations, capitalization)."""
        # Remove filler words
        word_delimiter = "" if self.config.language == "zh" else " "
        filler_words = ("mhm", "uh", "um")
        text = word_delimiter.join(
            w for w in text.strip().split() if w not in filler_words
        )
        # Remove content in parenthesis: {}, <>, [], and ()
        text = re.sub(r"[<{\(\[].*?[\)\]>}]", "", text.strip())
        # Fix acronyms
        text = text.replace("._", ".")
        # Remove leading and trailing whitespace
        text = text.strip()
        if self.config.language == "zh":
            # Remove spaces, speaker ID in chinese
            text = text.replace("[SPK]", "")
            text = text.replace(" ", "")
        else:
            # Capitalize the sentence and the standalone pronoun "i".
            if text:
                text = text[0].capitalize() + text[1:]
            text = re.sub(r"\bi\b", "I", text)
        return text
    def _send_chunk(self, data):
        """Send raw bytes to the server, tolerating an already-closed
        socket (logged as a warning)."""
        try:
            self.ws.send(data)
        except websocket.WebSocketConnectionClosedException:
            self.logger.warning(
                "WebSocketConnectionClosedException: socket is already closed."
            )
| 31.982063 | 94 | 0.580342 | import gevent
import os
from .interface import (
SpeechRecognitionConfig,
SpeechRecognitionRequest,
SpeechRecognitionResponse,
)
from .stream_asr import StreamAsr
from ..tokenizer import Tokenizer
import sys
import hashlib
from hashlib import sha1
import hmac
import base64
import json
import time
from websocket import create_connection
import websocket
from urllib.parse import quote
import logging
import queue
import re
LANGUAGE_CODE_MAPPING = {"zh": "cn", "en-US": "en"}
class IFlyTekAsr(StreamAsr):
SUPPORTED_LANGUAGES = ("en-US", "zh")
POLLING_INTERVAL = 0.1 # seconds
def __init__(self, config: SpeechRecognitionConfig, logger, callback_fn):
super(IFlyTekAsr, self).__init__(config, logger, callback_fn)
self.start_time = time.time()
self.base_url: str = os.getenv("IFLYTEK_URL", "")
self.api_id = os.getenv("IFLYTEK_API_ID", "")
self.api_key = os.getenv("IFLYTEK_API_KEY", "")
self.init_timestamp = str(int(time.time()))
self.pd = "edu" # ASR domain
self.end_tag = '{"end": true}'
self.got_final = False
self.signa = self._get_signature()
self.lang_code = LANGUAGE_CODE_MAPPING[self.user_language]
# TODO: self.tokenizer does not support on-the-fly language switch.
self.tokenizer = Tokenizer(lang=self.user_language)
self.semaphore = gevent.lock.Semaphore()
self.connect()
def connect(self):
try:
self.ws = create_connection(
self.base_url
+ "?appid="
+ self.api_id
+ "&ts="
+ self.init_timestamp
+ "&signa="
+ quote(self.signa)
+ "&lang="
+ quote(self.lang_code)
)
except ConnectionRefusedError:
raise ConnectionRefusedError(
f"Could not connect to iflytek ASR server at {self.base_url} - is it running?"
)
with self.semaphore:
self.ws.send("")
def _get_signature(self):
tt = (self.api_id + self.init_timestamp).encode("utf-8")
md5 = hashlib.md5()
md5.update(tt)
baseString = md5.hexdigest()
baseString = bytes(baseString, encoding="utf-8")
apiKey = self.api_key.encode("utf-8")
signa = hmac.new(apiKey, baseString, hashlib.sha1).digest()
signa = base64.b64encode(signa)
signa = str(signa, "utf-8")
return signa
def run(self):
if not self.ws.connected:
self.connect()
while self.ws.connected:
try:
api_response = str(self.ws.recv())
except websocket.WebSocketConnectionClosedException:
print("receive result end")
break
if len(api_response) == 0:
self.got_final = True
break
api_response = json.loads(api_response)
response_code = int(api_response["code"])
if response_code != 0:
self.logger.error(f"ASR Response Error code: {response_code}")
continue
data = api_response["data"]
if api_response["action"] == "result":
data = json.loads(data)
pure_words_list = [
i["cw"][0]["w"] for i in data["cn"]["st"]["rt"][0]["ws"]
]
# 0-final result; 1-intermediate result
utterance_is_final = int(data["cn"]["st"]["type"]) == 0
if utterance_is_final:
self.got_final = True
response = SpeechRecognitionResponse(
transcript=self._build_transcript(tokens=pure_words_list),
relative_time_offset=time.time() - self.start_time,
is_final=utterance_is_final,
language=LANGUAGE_CODE_MAPPING[self.detected_language],
)
self.callback_fn(self.last_request, response)
gevent.sleep(IFlyTekAsr.POLLING_INTERVAL)
def __call__(self, request: SpeechRecognitionRequest) -> None:
self.last_request = request
data = request.chunk
self._send_chunk(data)
def end_utterance(self):
# Send special end of stream message
self._send_chunk(bytes(self.end_tag.encode("utf-8")))
def terminate(self, wait_for_final=True):
self.end_utterance()
if wait_for_final:
self.wait_for_final()
self.ws.close()
def wait_for_final(self, timeout_seconds=2.0):
q = queue.Queue()
original_callback = self.callback_fn
def wrapped_callback(request, response):
if response.is_final:
q.put(response)
original_callback(request, response)
self.callback_fn = wrapped_callback
try:
final_response = q.get(timeout=timeout_seconds)
except queue.Empty:
final_response = SpeechRecognitionResponse(
transcript="",
relative_time_offset=0,
is_final=True,
language=self.detected_language,
)
self.callback_fn = original_callback
while not self.got_final:
gevent.sleep(0.01)
return final_response
def _build_transcript(self, tokens: list):
raw_transcript = self.tokenizer.detokenize(tokens)
transcript = self.postprocess(raw_transcript)
return transcript
def postprocess(self, text):
# Remove filler words
word_delimiter = "" if self.config.language == "zh" else " "
filler_words = ("mhm", "uh", "um")
text = word_delimiter.join(
w for w in text.strip().split() if w not in filler_words
)
# Remove content in parenthesis: {}, <>, [], and ()
text = re.sub(r"[<{\(\[].*?[\)\]>}]", "", text.strip())
# Fix acronyms
text = text.replace("._", ".")
# Remove leading and trailing whitespace
text = text.strip()
if self.config.language == "zh":
# Remove spaces, speaker ID in chinese
text = text.replace("[SPK]", "")
text = text.replace(" ", "")
else:
if text:
text = text[0].capitalize() + text[1:]
text = re.sub(r"\bi\b", "I", text)
return text
def _send_chunk(self, data):
try:
self.ws.send(data)
except websocket.WebSocketConnectionClosedException:
self.logger.warning(
"WebSocketConnectionClosedException: socket is already closed."
)
| true | true |
f7271b097f49a4ac7e244128ea6b6cecfc86fd93 | 1,008 | py | Python | api/celery/worker/config.py | keitaroinc/spodeli-novosti | f74d4658f2df02536c0cc05e60ade4c2fd7efeac | [
"BSD-2-Clause"
] | 1 | 2018-06-07T09:21:28.000Z | 2018-06-07T09:21:28.000Z | api/celery/worker/config.py | keitaroinc/spodeli-novosti | f74d4658f2df02536c0cc05e60ade4c2fd7efeac | [
"BSD-2-Clause"
] | null | null | null | api/celery/worker/config.py | keitaroinc/spodeli-novosti | f74d4658f2df02536c0cc05e60ade4c2fd7efeac | [
"BSD-2-Clause"
] | 1 | 2018-06-07T09:21:31.000Z | 2018-06-07T09:21:31.000Z | # -*-coding:utf-8-*-
import os
class BaseConfig(object):
    """Base Celery worker configuration, populated from environment variables.

    Every ``os.getenv`` default below is used when the variable is unset.
    """
    DEBUG = True
    BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672/')
    # None disables the limit; when the env var is set the value is passed
    # through as a string (Celery accepts either for this setting).
    BROKER_POOL_LIMIT = os.getenv('BROKER_POOL_LIMIT', None)
    CELERY_ENABLE_UTC = True
    CELERY_TIMEZONE = os.getenv('CELERY_TIMEZONE', 'UTC')
    # BUG FIX: os.getenv returns a *string* when the variable is set, while
    # the fallback here is an int -- Celery's concurrency setting expects an
    # integer, so coerce explicitly (fails fast on a non-numeric value).
    CELERYD_CONCURRENCY = int(os.getenv('CELERYD_CONCURRENCY', 20))
    SMTP_SERVER = os.getenv('SMTP_SERVER', None)
    SMTP_SERVER_USER = os.getenv('SMTP_SERVER_USER', None)
    SMTP_SERVER_PASSWORD = os.getenv('SMTP_SERVER_PASSWORD', None)
    # Left as str-or-None: a blanket int() would break the None default.
    SMTP_SERVER_PORT = os.getenv('SMTP_SERVER_PORT', None)
    FROM_EMAIL = os.getenv('FROM_EMAIL', 'info@keitaro.com')
    FROM_NAME = os.getenv('FROM_NAME', 'root')


class DevelopmentConfig(BaseConfig):
    """Development configuration."""
    DEBUG = True


class TestingConfig(BaseConfig):
    """Testing configuration."""
    DEBUG = False


class ProductionConfig(BaseConfig):
    """Production configuration."""
    DEBUG = False
| 30.545455 | 78 | 0.703373 |
import os
class BaseConfig(object):
DEBUG = True
BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672/')
BROKER_POOL_LIMIT = os.getenv('BROKER_POOL_LIMIT', None)
CELERY_ENABLE_UTC = True
CELERY_TIMEZONE = os.getenv('CELERY_TIMEZONE', 'UTC')
CELERYD_CONCURRENCY = os.getenv('CELERYD_CONCURRENCY', 20)
SMTP_SERVER = os.getenv('SMTP_SERVER', None)
SMTP_SERVER_USER = os.getenv('SMTP_SERVER_USER', None)
SMTP_SERVER_PASSWORD = os.getenv('SMTP_SERVER_PASSWORD', None)
SMTP_SERVER_PORT = os.getenv('SMTP_SERVER_PORT', None)
FROM_EMAIL = os.getenv('FROM_EMAIL', 'info@keitaro.com')
FROM_NAME = os.getenv('FROM_NAME', 'root')
class DevelopmentConfig(BaseConfig):
DEBUG = True
class TestingConfig(BaseConfig):
DEBUG = False
class ProductionConfig(BaseConfig):
DEBUG = False
| true | true |
f7271c70df3a4e2a5327a3da3f3419e8bf553154 | 71 | py | Python | ModelHelper/__init__.py | yasarc4/Auto_time_series | 5a9aa5c535fbe09a4cc59e44124a5de435ac5059 | [
"MIT"
] | 7 | 2018-06-18T20:14:30.000Z | 2019-05-24T08:21:52.000Z | ModelHelper/__init__.py | yasarc4/Auto_time_series | 5a9aa5c535fbe09a4cc59e44124a5de435ac5059 | [
"MIT"
] | null | null | null | ModelHelper/__init__.py | yasarc4/Auto_time_series | 5a9aa5c535fbe09a4cc59e44124a5de435ac5059 | [
"MIT"
] | 1 | 2019-06-08T18:20:57.000Z | 2019-06-08T18:20:57.000Z | from .prophet_helper import ProphetHelper
__all__ = ['ProphetHelper']
| 17.75 | 41 | 0.802817 | from .prophet_helper import ProphetHelper
__all__ = ['ProphetHelper']
| true | true |
f7271cc4b96db4c6911f4848e194d1acf37f4ccd | 9,988 | py | Python | spyder/widgets/ipythonconsole/shell.py | computeVision/spyder | 0a71273e0a1bad8fb9812ee8054c0a2711a6178e | [
"MIT"
] | null | null | null | spyder/widgets/ipythonconsole/shell.py | computeVision/spyder | 0a71273e0a1bad8fb9812ee8054c0a2711a6178e | [
"MIT"
] | null | null | null | spyder/widgets/ipythonconsole/shell.py | computeVision/spyder | 0a71273e0a1bad8fb9812ee8054c0a2711a6178e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Shell Widget for the IPython Console
"""
import ast
import uuid
from qtpy.QtCore import Signal
from qtpy.QtWidgets import QMessageBox
from spyder.config.base import _
from spyder.config.gui import config_shortcut, fixed_shortcut
from spyder.py3compat import to_text_string
from spyder.utils import programs
from spyder.widgets.arraybuilder import SHORTCUT_INLINE, SHORTCUT_TABLE
from spyder.widgets.ipythonconsole import (ControlWidget, DebuggingWidget,
HelpWidget, NamepaceBrowserWidget,
PageControlWidget)
class ShellWidget(NamepaceBrowserWidget, HelpWidget, DebuggingWidget):
    """
    Shell widget for the IPython Console
    This is the widget in charge of executing code
    """
    # NOTE: Signals can't be assigned separately to each widget
    # That's why we define all needed signals here.
    # For NamepaceBrowserWidget
    sig_namespace_view = Signal(object)
    sig_var_properties = Signal(object)
    # For DebuggingWidget
    sig_input_reply = Signal()
    sig_pdb_step = Signal(str, int)
    sig_prompt_ready = Signal()
    # For ShellWidget
    focus_changed = Signal()
    new_client = Signal()
    sig_got_reply = Signal()
    def __init__(self, additional_options, interpreter_versions,
                 external_kernel, *args, **kw):
        """additional_options: dict of console options ('pylab', 'sympy',
        'show_banner', 'light_color', ...); interpreter_versions: dict with
        'python_version' and 'ipython_version'; external_kernel: True when
        attached to a kernel Spyder did not start."""
        # To override the Qt widget used by RichJupyterWidget
        self.custom_control = ControlWidget
        self.custom_page_control = PageControlWidget
        super(ShellWidget, self).__init__(*args, **kw)
        self.additional_options = additional_options
        self.interpreter_versions = interpreter_versions
        self.set_background_color()
        # Additional variables
        self.ipyclient = None
        self.external_kernel = external_kernel
        # Keyboard shortcuts
        self.shortcuts = self.create_shortcuts()
        # To save kernel replies in silent execution
        self._kernel_reply = None
    #---- Public API ----------------------------------------------------------
    def set_ipyclient(self, ipyclient):
        """Bind this shell widget to an IPython client one"""
        self.ipyclient = ipyclient
        self.exit_requested.connect(ipyclient.exit_callback)
    def is_running(self):
        """Return True if a kernel client exists and its channels are up."""
        if self.kernel_client is not None and \
           self.kernel_client.channels_running:
            return True
        else:
            return False
    # --- To handle the banner
    def long_banner(self):
        """Banner for IPython widgets with pylab message"""
        # Default banner
        from IPython.core.usage import quick_guide
        banner_parts = [
            'Python %s\n' % self.interpreter_versions['python_version'],
            'Type "copyright", "credits" or "license" for more information.\n\n',
            'IPython %s -- An enhanced Interactive Python.\n' % \
            self.interpreter_versions['ipython_version'],
            quick_guide
        ]
        banner = ''.join(banner_parts)
        # Pylab additions
        pylab_o = self.additional_options['pylab']
        autoload_pylab_o = self.additional_options['autoload_pylab']
        mpl_installed = programs.is_module_installed('matplotlib')
        if mpl_installed and (pylab_o and autoload_pylab_o):
            pylab_message = ("\nPopulating the interactive namespace from "
                             "numpy and matplotlib")
            banner = banner + pylab_message
        # Sympy additions
        sympy_o = self.additional_options['sympy']
        if sympy_o:
            lines = """
These commands were executed:
>>> from __future__ import division
>>> from sympy import *
>>> x, y, z, t = symbols('x y z t')
>>> k, m, n = symbols('k m n', integer=True)
>>> f, g, h = symbols('f g h', cls=Function)
"""
            banner = banner + lines
        return banner
    def short_banner(self):
        """Short banner with Python and QtConsole versions"""
        banner = 'Python %s -- IPython %s' % (
            self.interpreter_versions['python_version'],
            self.interpreter_versions['ipython_version'])
        return banner
    # --- To define additional shortcuts
    def clear_console(self):
        """Clear the console via IPython's %clear magic."""
        self.execute("%clear")
    def reset_namespace(self):
        """Resets the namespace by removing all names defined by the user"""
        reply = QMessageBox.question(
            self,
            _("Reset IPython namespace"),
            _("All user-defined variables will be removed."
              "<br>Are you sure you want to reset the namespace?"),
            QMessageBox.Yes | QMessageBox.No,
            )
        if reply == QMessageBox.Yes:
            self.execute("%reset -f")
    def set_background_color(self):
        """Switch to the dark ('linux') style unless a light color scheme
        was requested in the console options."""
        light_color_o = self.additional_options['light_color']
        if not light_color_o:
            self.set_default_style(colors='linux')
    def create_shortcuts(self):
        """Register configurable and fixed keyboard shortcuts; returns the
        configurable ones so callers can manage them."""
        inspect = config_shortcut(self._control.inspect_current_object,
                                  context='Console', name='Inspect current object',
                                  parent=self)
        clear_console = config_shortcut(self.clear_console, context='Console',
                                        name='Clear shell', parent=self)
        # Fixed shortcuts
        fixed_shortcut("Ctrl+T", self, lambda: self.new_client.emit())
        fixed_shortcut("Ctrl+Alt+R", self, lambda: self.reset_namespace())
        fixed_shortcut(SHORTCUT_INLINE, self,
                       lambda: self._control.enter_array_inline())
        fixed_shortcut(SHORTCUT_TABLE, self,
                       lambda: self._control.enter_array_table())
        return [inspect, clear_console]
    # --- To communicate with the kernel
    def silent_execute(self, code):
        """Execute code in the kernel without increasing the prompt"""
        self.kernel_client.execute(to_text_string(code), silent=True)
    def silent_exec_method(self, code):
        """Silently execute a kernel method and save its reply
        The methods passed here **don't** involve getting the value
        of a variable but instead replies that can be handled by
        ast.literal_eval.
        To get a value see `get_value`
        Parameters
        ----------
        code : string
            Code that contains the kernel method as part of its
            string
        See Also
        --------
        handle_exec_method : Method that deals with the reply
        Note
        ----
        This is based on the _silent_exec_callback method of
        RichJupyterWidget. Therefore this is licensed BSD
        """
        # Generate uuid, which would be used as an indication of whether or
        # not the unique request originated from here
        local_uuid = to_text_string(uuid.uuid1())
        code = to_text_string(code)
        msg_id = self.kernel_client.execute('', silent=True,
                                            user_expressions={ local_uuid:code })
        self._kernel_methods[local_uuid] = code
        self._request_info['execute'][msg_id] = self._ExecutionRequest(msg_id,
                                                        'silent_exec_method')
    def handle_exec_method(self, msg):
        """
        Handle data returned by silent executions of kernel methods
        This is based on the _handle_exec_callback of RichJupyterWidget.
        Therefore this is licensed BSD.
        """
        user_exp = msg['content'].get('user_expressions')
        if not user_exp:
            return
        for expression in user_exp:
            if expression in self._kernel_methods:
                # Process kernel reply
                method = self._kernel_methods[expression]
                reply = user_exp[expression]
                data = reply.get('data')
                if 'get_namespace_view' in method:
                    view = ast.literal_eval(data['text/plain'])
                    self.sig_namespace_view.emit(view)
                elif 'get_var_properties' in method:
                    properties = ast.literal_eval(data['text/plain'])
                    self.sig_var_properties.emit(properties)
                else:
                    # Generic reply: stash it for whoever awaits sig_got_reply.
                    if data is not None:
                        self._kernel_reply = ast.literal_eval(data['text/plain'])
                    else:
                        self._kernel_reply = None
                    self.sig_got_reply.emit()
                # Remove method after being processed
                self._kernel_methods.pop(expression)
    #---- Private methods (overrode by us) ---------------------------------
    def _context_menu_make(self, pos):
        """Reimplement the IPython context menu"""
        menu = super(ShellWidget, self)._context_menu_make(pos)
        return self.ipyclient.add_actions_to_context_menu(menu)
    def _banner_default(self):
        """
        Reimplement banner creation to let the user decide if he wants a
        banner or not
        """
        # Don't change banner for external kernels
        if self.external_kernel:
            return ''
        show_banner_o = self.additional_options['show_banner']
        if show_banner_o:
            return self.long_banner()
        else:
            return self.short_banner()
    #---- Qt methods ----------------------------------------------------------
    def focusInEvent(self, event):
        """Reimplement Qt method to send focus change notification"""
        self.focus_changed.emit()
        return super(ShellWidget, self).focusInEvent(event)
    def focusOutEvent(self, event):
        """Reimplement Qt method to send focus change notification"""
        self.focus_changed.emit()
        return super(ShellWidget, self).focusOutEvent(event)
| 37.130112 | 83 | 0.605527 |
import ast
import uuid
from qtpy.QtCore import Signal
from qtpy.QtWidgets import QMessageBox
from spyder.config.base import _
from spyder.config.gui import config_shortcut, fixed_shortcut
from spyder.py3compat import to_text_string
from spyder.utils import programs
from spyder.widgets.arraybuilder import SHORTCUT_INLINE, SHORTCUT_TABLE
from spyder.widgets.ipythonconsole import (ControlWidget, DebuggingWidget,
HelpWidget, NamepaceBrowserWidget,
PageControlWidget)
class ShellWidget(NamepaceBrowserWidget, HelpWidget, DebuggingWidget):
# That's why we define all needed signals here.
sig_namespace_view = Signal(object)
sig_var_properties = Signal(object)
sig_input_reply = Signal()
sig_pdb_step = Signal(str, int)
sig_prompt_ready = Signal()
focus_changed = Signal()
new_client = Signal()
sig_got_reply = Signal()
def __init__(self, additional_options, interpreter_versions,
external_kernel, *args, **kw):
self.custom_control = ControlWidget
self.custom_page_control = PageControlWidget
super(ShellWidget, self).__init__(*args, **kw)
self.additional_options = additional_options
self.interpreter_versions = interpreter_versions
self.set_background_color()
self.ipyclient = None
self.external_kernel = external_kernel
self.shortcuts = self.create_shortcuts()
self._kernel_reply = None
def set_ipyclient(self, ipyclient):
self.ipyclient = ipyclient
self.exit_requested.connect(ipyclient.exit_callback)
def is_running(self):
if self.kernel_client is not None and \
self.kernel_client.channels_running:
return True
else:
return False
def long_banner(self):
from IPython.core.usage import quick_guide
banner_parts = [
'Python %s\n' % self.interpreter_versions['python_version'],
'Type "copyright", "credits" or "license" for more information.\n\n',
'IPython %s -- An enhanced Interactive Python.\n' % \
self.interpreter_versions['ipython_version'],
quick_guide
]
banner = ''.join(banner_parts)
pylab_o = self.additional_options['pylab']
autoload_pylab_o = self.additional_options['autoload_pylab']
mpl_installed = programs.is_module_installed('matplotlib')
if mpl_installed and (pylab_o and autoload_pylab_o):
pylab_message = ("\nPopulating the interactive namespace from "
"numpy and matplotlib")
banner = banner + pylab_message
sympy_o = self.additional_options['sympy']
if sympy_o:
lines = """
These commands were executed:
>>> from __future__ import division
>>> from sympy import *
>>> x, y, z, t = symbols('x y z t')
>>> k, m, n = symbols('k m n', integer=True)
>>> f, g, h = symbols('f g h', cls=Function)
"""
banner = banner + lines
return banner
def short_banner(self):
banner = 'Python %s -- IPython %s' % (
self.interpreter_versions['python_version'],
self.interpreter_versions['ipython_version'])
return banner
def clear_console(self):
self.execute("%clear")
def reset_namespace(self):
reply = QMessageBox.question(
self,
_("Reset IPython namespace"),
_("All user-defined variables will be removed."
"<br>Are you sure you want to reset the namespace?"),
QMessageBox.Yes | QMessageBox.No,
)
if reply == QMessageBox.Yes:
self.execute("%reset -f")
def set_background_color(self):
light_color_o = self.additional_options['light_color']
if not light_color_o:
self.set_default_style(colors='linux')
def create_shortcuts(self):
inspect = config_shortcut(self._control.inspect_current_object,
context='Console', name='Inspect current object',
parent=self)
clear_console = config_shortcut(self.clear_console, context='Console',
name='Clear shell', parent=self)
fixed_shortcut("Ctrl+T", self, lambda: self.new_client.emit())
fixed_shortcut("Ctrl+Alt+R", self, lambda: self.reset_namespace())
fixed_shortcut(SHORTCUT_INLINE, self,
lambda: self._control.enter_array_inline())
fixed_shortcut(SHORTCUT_TABLE, self,
lambda: self._control.enter_array_table())
return [inspect, clear_console]
def silent_execute(self, code):
self.kernel_client.execute(to_text_string(code), silent=True)
def silent_exec_method(self, code):
local_uuid = to_text_string(uuid.uuid1())
code = to_text_string(code)
msg_id = self.kernel_client.execute('', silent=True,
user_expressions={ local_uuid:code })
self._kernel_methods[local_uuid] = code
self._request_info['execute'][msg_id] = self._ExecutionRequest(msg_id,
'silent_exec_method')
def handle_exec_method(self, msg):
user_exp = msg['content'].get('user_expressions')
if not user_exp:
return
for expression in user_exp:
if expression in self._kernel_methods:
method = self._kernel_methods[expression]
reply = user_exp[expression]
data = reply.get('data')
if 'get_namespace_view' in method:
view = ast.literal_eval(data['text/plain'])
self.sig_namespace_view.emit(view)
elif 'get_var_properties' in method:
properties = ast.literal_eval(data['text/plain'])
self.sig_var_properties.emit(properties)
else:
if data is not None:
self._kernel_reply = ast.literal_eval(data['text/plain'])
else:
self._kernel_reply = None
self.sig_got_reply.emit()
self._kernel_methods.pop(expression)
def _context_menu_make(self, pos):
menu = super(ShellWidget, self)._context_menu_make(pos)
return self.ipyclient.add_actions_to_context_menu(menu)
def _banner_default(self):
if self.external_kernel:
return ''
show_banner_o = self.additional_options['show_banner']
if show_banner_o:
return self.long_banner()
else:
return self.short_banner()
#---- Qt methods ----------------------------------------------------------
def focusInEvent(self, event):
self.focus_changed.emit()
return super(ShellWidget, self).focusInEvent(event)
def focusOutEvent(self, event):
self.focus_changed.emit()
return super(ShellWidget, self).focusOutEvent(event)
| true | true |
f7271cf5c26369051323e3140d6893796b0e8cba | 1,752 | py | Python | macarico/actors/bow.py | bgalbraith/macarico | 448e3e7f088dde0f4eb016fbdee857221b9523fb | [
"MIT"
] | 121 | 2019-04-09T15:44:26.000Z | 2022-03-29T19:56:19.000Z | macarico/actors/bow.py | bgalbraith/macarico | 448e3e7f088dde0f4eb016fbdee857221b9523fb | [
"MIT"
] | 1 | 2019-04-10T16:07:04.000Z | 2019-05-09T00:41:19.000Z | macarico/actors/bow.py | bgalbraith/macarico | 448e3e7f088dde0f4eb016fbdee857221b9523fb | [
"MIT"
] | 11 | 2019-04-09T16:13:34.000Z | 2019-09-30T23:31:14.000Z | from __future__ import division, generators, print_function
import torch
import torch.nn as nn
import macarico
import macarico.util as util
from macarico.util import Var, Varng
class BOWActor(macarico.Actor):
def __init__(self, attention, n_actions, act_history_length=1, obs_history_length=0):
self.att_dim = sum((att.dim for att in attention))
super().__init__(n_actions,
self.att_dim +
act_history_length * n_actions + \
obs_history_length * self.att_dim,
attention)
self.act_history_length = act_history_length
self.obs_history_length = obs_history_length
self._reset()
def _forward(self, state, x):
feats = x[:]
if self.act_history_length > 0:
f = util.zeros(self, 1, self.act_history_length * self.n_actions)
for i in range(min(self.act_history_length, len(state._trajectory))):
a = state._trajectory[-i]
f[0, i * self.n_actions + a] = 1
feats.append(Varng(f))
if self.obs_history_length > 0:
for i in range(self.obs_history_length):
feats.append(Varng(self.obs_history[(self.obs_history_pos+i) % self.obs_history_length]))
# update history
self.obs_history[self.obs_history_pos] = torch.cat(x, dim=1).data
self.obs_history_pos = (self.obs_history_pos + 1) % self.obs_history_length
return torch.cat(feats, dim=1)
def _reset(self):
self.obs_history = []
for _ in range(self.obs_history_length):
self.obs_history.append(util.zeros(self, 1, self.att_dim))
self.obs_history_pos = 0
| 39.818182 | 105 | 0.619863 | from __future__ import division, generators, print_function
import torch
import torch.nn as nn
import macarico
import macarico.util as util
from macarico.util import Var, Varng
class BOWActor(macarico.Actor):
def __init__(self, attention, n_actions, act_history_length=1, obs_history_length=0):
self.att_dim = sum((att.dim for att in attention))
super().__init__(n_actions,
self.att_dim +
act_history_length * n_actions + \
obs_history_length * self.att_dim,
attention)
self.act_history_length = act_history_length
self.obs_history_length = obs_history_length
self._reset()
def _forward(self, state, x):
feats = x[:]
if self.act_history_length > 0:
f = util.zeros(self, 1, self.act_history_length * self.n_actions)
for i in range(min(self.act_history_length, len(state._trajectory))):
a = state._trajectory[-i]
f[0, i * self.n_actions + a] = 1
feats.append(Varng(f))
if self.obs_history_length > 0:
for i in range(self.obs_history_length):
feats.append(Varng(self.obs_history[(self.obs_history_pos+i) % self.obs_history_length]))
self.obs_history[self.obs_history_pos] = torch.cat(x, dim=1).data
self.obs_history_pos = (self.obs_history_pos + 1) % self.obs_history_length
return torch.cat(feats, dim=1)
def _reset(self):
self.obs_history = []
for _ in range(self.obs_history_length):
self.obs_history.append(util.zeros(self, 1, self.att_dim))
self.obs_history_pos = 0
| true | true |
f7271d2609206d32d2b77afed5f598fa29a5e6b0 | 1,463 | py | Python | api/v2/serializers/details/project_volume.py | simpsonw/atmosphere | 3a5203ef0b563de3a0e8c8c8715df88186532d7a | [
"BSD-3-Clause"
] | 197 | 2016-12-08T02:33:32.000Z | 2022-03-23T14:27:47.000Z | api/v2/serializers/details/project_volume.py | simpsonw/atmosphere | 3a5203ef0b563de3a0e8c8c8715df88186532d7a | [
"BSD-3-Clause"
] | 385 | 2017-01-03T22:51:46.000Z | 2020-12-16T16:20:42.000Z | api/v2/serializers/details/project_volume.py | benlazarine/atmosphere | 38fad8e4002e510e8b4294f2bb5bc75e8e1817fa | [
"BSD-3-Clause"
] | 50 | 2016-12-08T08:32:25.000Z | 2021-12-10T00:21:39.000Z | from core.models import Project, Volume
from rest_framework import serializers
from api.v2.serializers.summaries import ProjectSummarySerializer
from .volume import VolumeSerializer
class ProjectRelatedField(serializers.PrimaryKeyRelatedField):
def get_queryset(self):
return Project.objects.all()
def to_representation(self, value):
project = Project.objects.get(pk=value.pk)
serializer = ProjectSummarySerializer(project, context=self.context)
return serializer.data
class VolumeRelatedField(serializers.PrimaryKeyRelatedField):
def get_queryset(self):
return Volume.objects.all()
def to_representation(self, value):
instance = Volume.objects.get(pk=value.pk)
serializer = VolumeSerializer(instance, context=self.context)
return serializer.data
class ProjectVolumeSerializer(serializers.HyperlinkedModelSerializer):
project = ProjectRelatedField(queryset=Project.objects.none())
volume = VolumeRelatedField(source="pk", queryset=Volume.objects.none())
# Could not fix 'ImproperlyConfiguredError'
# url = serializers.HyperlinkedIdentityField(
# view_name='api:v2:projectvolume-detail',
# )
class Meta:
model = Volume
fields = ('id', 'project', 'volume')
def create(self, validated_data):
validated_data['pk'].project = validated_data['project']
validated_data['pk'].save()
return validated_data
| 33.25 | 76 | 0.726589 | from core.models import Project, Volume
from rest_framework import serializers
from api.v2.serializers.summaries import ProjectSummarySerializer
from .volume import VolumeSerializer
class ProjectRelatedField(serializers.PrimaryKeyRelatedField):
def get_queryset(self):
return Project.objects.all()
def to_representation(self, value):
project = Project.objects.get(pk=value.pk)
serializer = ProjectSummarySerializer(project, context=self.context)
return serializer.data
class VolumeRelatedField(serializers.PrimaryKeyRelatedField):
def get_queryset(self):
return Volume.objects.all()
def to_representation(self, value):
instance = Volume.objects.get(pk=value.pk)
serializer = VolumeSerializer(instance, context=self.context)
return serializer.data
class ProjectVolumeSerializer(serializers.HyperlinkedModelSerializer):
project = ProjectRelatedField(queryset=Project.objects.none())
volume = VolumeRelatedField(source="pk", queryset=Volume.objects.none())
class Meta:
model = Volume
fields = ('id', 'project', 'volume')
def create(self, validated_data):
validated_data['pk'].project = validated_data['project']
validated_data['pk'].save()
return validated_data
| true | true |
f7271d3ae4367499cf666b0eda40d2fc6daee534 | 20,768 | py | Python | jax/_src/errors.py | machineko/jax | 5a9048a0058d027000afc5707413d24209aa6f9f | [
"Apache-2.0"
] | 1 | 2021-09-14T07:12:46.000Z | 2021-09-14T07:12:46.000Z | jax/_src/errors.py | josephrocca/jax | ab544cb26dfea3147c336754d3e3eb457a405e38 | [
"Apache-2.0"
] | 6 | 2022-01-03T22:13:42.000Z | 2022-02-14T22:07:51.000Z | jax/_src/errors.py | kbnarayanavit/jax | 1e3c4833c97302caf6046ff99656b8ff21430b8d | [
"Apache-2.0"
] | 1 | 2021-08-11T20:57:59.000Z | 2021-08-11T20:57:59.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from jax import core
class _JAXErrorMixin:
"""Mixin for JAX-specific errors"""
_error_page = 'https://jax.readthedocs.io/en/latest/errors.html'
_module_name = "jax.errors"
def __init__(self, message: str):
error_page = self._error_page
module_name = self._module_name
class_name = self.__class__.__name__
error_msg = f'{message}\nSee {error_page}#{module_name}.{class_name}'
# https://github.com/python/mypy/issues/5887
super().__init__(error_msg) # type: ignore
class JAXTypeError(_JAXErrorMixin, TypeError):
pass
class JAXIndexError(_JAXErrorMixin, IndexError):
pass
class ConcretizationTypeError(JAXTypeError):
"""
This error occurs when a JAX Tracer object is used in a context where a
concrete value is required. In some situations, it can be easily fixed by
marking problematic values as static; in others, it may indicate that your
program is doing operations that are not directly supported by JAX's JIT
compilation model.
Traced value where static value is expected
One common cause of this error is using a traced value where a static value
is required. For example:
>>> from jax import jit, partial
>>> import jax.numpy as jnp
>>> @jit
... def func(x, axis):
... return x.min(axis)
>>> func(jnp.arange(4), 0) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ConcretizationTypeError: Abstract tracer value encountered where concrete
value is expected: axis argument to jnp.min().
This can often be fixed by marking the problematic argument as static::
>>> @partial(jit, static_argnums=1)
... def func(x, axis):
... return x.min(axis)
>>> func(jnp.arange(4), 0)
DeviceArray(0, dtype=int32)
Traced value used in control flow
Another case where this often arises is when a traced value is used in
Python control flow. For example::
>>> @jit
... def func(x, y):
... return x if x.sum() < y.sum() else y
>>> func(jnp.ones(4), jnp.zeros(4)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ConcretizationTypeError: Abstract tracer value encountered where concrete
value is expected: [...]
We could mark both inputs ``x`` and ``y`` as static, but that would defeat
the purpose of using :func:`jax.jit` here. Another option is to re-express
the if statement in terms of :func:`jax.numpy.where`::
>>> @jit
... def func(x, y):
... return jnp.where(x.sum() < y.sum(), x, y)
>>> func(jnp.ones(4), jnp.zeros(4))
DeviceArray([0., 0., 0., 0.], dtype=float32)
For more complicated control flow including loops, see
:ref:`lax-control-flow`.
Shape depends on Traced Value
Such an error may also arise when a shape in your JIT-compiled computation
depends on the values within a traced quantity. For example::
>>> @jit
... def func(x):
... return jnp.where(x < 0)
>>> func(jnp.arange(4)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ConcretizationTypeError: Abstract tracer value encountered where concrete value is expected:
The error arose in jnp.nonzero.
This is an example of an operation that is incompatible with JAX's JIT
compilation model, which requires array sizes to be known at compile-time.
Here the size of the returned array depends on the contents of `x`, and such
code cannot be JIT compiled.
In many cases it is possible to work around this by modifying the logic used
in the function; for example here is code with a similar issue::
>>> @jit
... def func(x):
... indices = jnp.where(x > 1)
... return x[indices].sum()
>>> func(jnp.arange(4)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ConcretizationTypeError: Abstract tracer value encountered where concrete
value is expected: The error arose in jnp.nonzero.
And here is how you might express the same operation in a way that avoids
creation of a dynamically-sized index array::
>>> @jit
... def func(x):
... return jnp.where(x > 1, x, 0).sum()
>>> func(jnp.arange(4))
DeviceArray(5, dtype=int32)
To understand more subtleties having to do with tracers vs. regular values,
and concrete vs. abstract values, you may want to read
:ref:`faq-different-kinds-of-jax-values`.
"""
def __init__(self, tracer: "core.Tracer", context: str = ""):
super().__init__(
"Abstract tracer value encountered where concrete value is expected: "
f"{tracer}\n{context}{tracer._origin_msg()}\n")
class NonConcreteBooleanIndexError(JAXIndexError):
"""
This error occurs when a program attempts to use non-concrete boolean indices
in a traced indexing operation. Under JIT compilation, JAX arrays must have
static shapes (i.e. shapes that are known at compile-time) and so boolean
masks must be used carefully. Some logic implemented via boolean masking is
simply not possible in a :func:`jax.jit` function; in other cases, the logic
can be re-expressed in a JIT-compatible way, often using the three-argument
version of :func:`~jax.numpy.where`.
Following are a few examples of when this error might arise.
Constructing arrays via boolean masking
This most commonly arises when attempting to create an array via a boolean
mask within a JIT context. For example::
>>> import jax
>>> import jax.numpy as jnp
>>> @jax.jit
... def positive_values(x):
... return x[x > 0]
>>> positive_values(jnp.arange(-5, 5)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
NonConcreteBooleanIndexError: Array boolean indices must be concrete: ShapedArray(bool[10])
This function is attempting to return only the positive values in the input
array; the size of this returned array cannot be determined at compile-time
unless `x` is marked as static, and so operations like this cannot be
performed under JIT compilation.
Reexpressible Boolean Logic
Although creating dynamically sized arrays is not supported directly, in
many cases it is possible to re-express the logic of the computation in
terms of a JIT-compatible operation. For example, here is another function
that fails under JIT for the same reason::
>>> @jax.jit
... def sum_of_positive(x):
... return x[x > 0].sum()
>>> sum_of_positive(jnp.arange(-5, 5)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
NonConcreteBooleanIndexError: Array boolean indices must be concrete: ShapedArray(bool[10])
In this case, however, the problematic array is only an intermediate value,
and we can instead express the same logic in terms of the JIT-compatible
three-argument version of :func:`jax.numpy.where`::
>>> @jax.jit
... def sum_of_positive(x):
... return jnp.where(x > 0, x, 0).sum()
>>> sum_of_positive(jnp.arange(-5, 5))
DeviceArray(10, dtype=int32)
This pattern of replacing boolean masking with three-argument
:func:`~jax.numpy.where` is a common solution to this sort of problem.
Boolean indices in :mod:`jax.ops`
The other situation where this error often arises is when using boolean
indices within functions in :mod:`jax.ops`, such as
:func:`jax.ops.index_update`. Here is a simple example::
>>> @jax.jit
... def manual_clip(x):
... return jax.ops.index_update(x, x < 0, 0)
>>> manual_clip(jnp.arange(-2, 2)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
NonConcreteBooleanIndexError: Array boolean indices must be concrete: ShapedArray(bool[4])
This function is attempting to set values smaller than zero to a scalar fill
value. As above, this can be addressed by re-expressing the logic in terms
of :func:`~jax.numpy.where`::
>>> @jax.jit
... def manual_clip(x):
... return jnp.where(x < 0, 0, x)
>>> manual_clip(jnp.arange(-2, 2))
DeviceArray([0, 0, 0, 1], dtype=int32)
These operations also commonly are written in terms of the
:ref:`syntactic-sugar-for-ops`; for example, this is syntactic sugar for
:func:`~jax.ops.index_mul`, and fails under JIT::
>>> @jax.jit
... def manual_abs(x):
... return x.at[x < 0].mul(-1)
>>> manual_abs(jnp.arange(-2, 2)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
NonConcreteBooleanIndexError: Array boolean indices must be concrete: ShapedArray(bool[4])
As above, the solution is to re-express this in terms of
:func:`~jax.numpy.where`::
>>> @jax.jit
... def manual_abs(x):
... return jnp.where(x < 0, x * -1, x)
>>> manual_abs(jnp.arange(-2, 2))
DeviceArray([2, 1, 0, 1], dtype=int32)
"""
def __init__(self, tracer: "core.Tracer"):
super().__init__(
f"Array boolean indices must be concrete; got {tracer}\n")
class TracerArrayConversionError(JAXTypeError):
"""
This error occurs when a program attempts to convert a JAX Tracer object into
a standard NumPy array. It typically occurs in one of a few situations.
Using `numpy` rather than `jax.numpy` functions
This error can occur when a JAX Tracer object is passed to a raw numpy
function, or a method on a numpy.ndarray object. For example::
>>> from jax import jit, partial
>>> import numpy as np
>>> import jax.numpy as jnp
>>> @jit
... def func(x):
... return np.sin(x)
>>> func(jnp.arange(4)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TracerArrayConversionError: The numpy.ndarray conversion method
__array__() was called on the JAX Tracer object
In this case, check that you are using `jax.numpy` methods rather than
`numpy` methods::
>>> @jit
... def func(x):
... return jnp.sin(x)
>>> func(jnp.arange(4))
DeviceArray([0. , 0.84147096, 0.9092974 , 0.14112 ], dtype=float32)
Indexing a numpy array with a tracer
If this error arises on a line that involves array indexing, it may be that
the array being indexed `x` is a raw numpy.ndarray while the indices `idx`
are traced. For example::
>>> x = np.arange(10)
>>> @jit
... def func(i):
... return x[i]
>>> func(0) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TracerArrayConversionError: The numpy.ndarray conversion method
__array__() was called on the JAX Tracer object
Depending on the context, you may fix this by converting the numpy array
into a JAX array::
>>> @jit
... def func(i):
... return jnp.asarray(x)[i]
>>> func(0)
DeviceArray(0, dtype=int32)
or by declaring the index as a static argument::
>>> @partial(jit, static_argnums=(0,))
... def func(i):
... return x[i]
>>> func(0)
DeviceArray(0, dtype=int32)
To understand more subtleties having to do with tracers vs. regular values,
and concrete vs. abstract values, you may want to read
:ref:`faq-different-kinds-of-jax-values`.
"""
def __init__(self, tracer: "core.Tracer"):
super().__init__(
"The numpy.ndarray conversion method __array__() was called on "
f"the JAX Tracer object {tracer}{tracer._origin_msg()}")
class TracerIntegerConversionError(JAXTypeError):
"""
This error can occur when a JAX Tracer object is used in a context where a
Python integer is expected. It typically occurs in a few situations.
Passing a tracer in place of an integer
This error can occur if you attempt to pass a tracer to a function that
requires an integer argument; for example::
>>> from jax import jit, partial
>>> import numpy as np
>>> @jit
... def func(x, axis):
... return np.split(x, 2, axis)
>>> func(np.arange(4), 0) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TracerIntegerConversionError: The __index__() method was called on the JAX
Tracer object
When this happens, the solution is often to mark the problematic argument as
static::
>>> @partial(jit, static_argnums=1)
... def func(x, axis):
... return np.split(x, 2, axis)
>>> func(np.arange(10), 0)
[DeviceArray([0, 1, 2, 3, 4], dtype=int32),
DeviceArray([5, 6, 7, 8, 9], dtype=int32)]
An alternative is to apply the transformation to a closure that encapsulates
the arguments to be protected, either manually as below or by using
:func:`functools.partial`::
>>> jit(lambda arr: np.split(arr, 2, 0))(np.arange(4))
[DeviceArray([0, 1], dtype=int32), DeviceArray([2, 3], dtype=int32)]
**Note a new closure is created at every invocation, which defeats the
compilation caching mechanism, which is why static_argnums is preferred.**
Indexing a list with a Tracer
This error can occur if you attempt to index a Python list with a traced
quantity.
For example::
>>> import jax.numpy as jnp
>>> from jax import jit, partial
>>> L = [1, 2, 3]
>>> @jit
... def func(i):
... return L[i]
>>> func(0) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TracerIntegerConversionError: The __index__() method was called on the JAX Tracer object
Depending on the context, you can generally fix this either by converting
the list to a JAX array::
>>> @jit
... def func(i):
... return jnp.array(L)[i]
>>> func(0)
DeviceArray(1, dtype=int32)
or by declaring the index as a static argument::
>>> @partial(jit, static_argnums=0)
... def func(i):
... return L[i]
>>> func(0)
DeviceArray(1, dtype=int32, weak_type=True)
To understand more subtleties having to do with tracers vs. regular values,
and concrete vs. abstract values, you may want to read
:ref:`faq-different-kinds-of-jax-values`.
"""
def __init__(self, tracer: "core.Tracer"):
super().__init__(
f"The __index__() method was called on the JAX Tracer object {tracer}")
class UnexpectedTracerError(JAXTypeError):
"""
This error occurs when you use a JAX value that has leaked out of a function.
What does it mean to leak a value? If you use a JAX transformation on a
function ``f`` that stores, in some scope outside of ``f``, a reference to
an intermediate value, that value is considered to have been leaked.
Leaking values is a side effect. (Read more about avoiding side effects in
`Pure Functions <https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#pure-functions>`_)
JAX detects leaks when you then use the leaked value in another
operation later on, at which point it raises an ``UnexpectedTracerError``.
To fix this, avoid side effects: if a function computes a value needed
in an outer scope, return that value from the transformed function explictly.
Specifically, a ``Tracer`` is JAX's internal representation of a function's
intermediate values during transformations, e.g. within ``jit``, ``pmap``,
``vmap``, etc. Encountering a ``Tracer`` outside of a transformation implies a
leak.
Life-cycle of a leaked value
Consider the following example of a transformed function which leaks a value
to an outer scope::
>>> from jax import jit
>>> import jax.numpy as jnp
>>> outs = []
>>> @jit # 1
... def side_effecting(x):
... y = x + 1 # 3
... outs.append(y) # 4
>>> x = 1
>>> side_effecting(x) # 2
>>> outs[0] + 1 # 5 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnexpectedTracerError: Encountered an unexpected tracer.
In this example we leak a Traced value from an inner transformed scope to an
outer scope. We get an ``UnexpectedTracerError`` when the leaked value is
used, not when the value is leaked.
This example also demonstrates the life-cycle of a leaked value:
1. A function is transformed (in this case, by ``jit``)
2. The transformed function is called (initiating an abstract trace of the
function and turning ``x`` into a ``Tracer``)
3. The intermediate value ``y``, which will later be leaked, is created
(an intermediate value of a traced function is also a ``Tracer``)
4. The value is leaked (appended to a list in an outer scope, escaping
the function through a side-channel)
5. The leaked value is used, and an UnexpectedTracerError is raised.
The UnexpectedTracerError message tries to point to these locations in your
code by including information about each stage. Respectively:
1. The name of the transformed function (``side_effecting``) and which
transform kicked of the trace (``jit``).
2. A reconstructed stack trace of where the leaked Tracer was created,
which includes where the transformed function was called.
(``When the Tracer was created, the final 5 stack frames were...``).
3. From the reconstructed stack trace, the line of code that created
the leaked Tracer.
4. The leak location is not included in the error message because it is
difficult to pin down! JAX can only tell you what the leaked value
looks like (what shape is has and where it was created) and what
boundary it was leaked over (the name of the transformation and the
name of the transformed function).
5. The current error's stack trace points to where the value is used.
The error can be fixed by the returning the value out of the
transformed function::
>>> from jax import jit
>>> import jax.numpy as jnp
>>> outs = []
>>> @jit
... def not_side_effecting(x):
... y = x+1
... return y
>>> x = 1
>>> y = not_side_effecting(x)
>>> outs.append(y)
>>> outs[0] + 1 # all good! no longer a leaked value.
DeviceArray(3, dtype=int32, weak_type=True)
Leak checker
As discussed in point 2 and 3 above, JAX shows a reconstructed stack trace
which points to where the leaked value was created. This is because
JAX only raises an error when the leaked value is used, not when the
value is leaked. This is not the most useful place to raise this error,
because you need to know the location where the Tracer was leaked to fix the
error.
To make this location easier to track down, you can use the leak checker.
When the leak checker is enabled, an error is raised as soon as a ``Tracer``
is leaked. (To be more exact, it will raise an error when the transformed
function from which the ``Tracer`` is leaked returns)
To enable the leak checker you can use the ``JAX_CHECK_TRACER_LEAKS``
environment variable or the ``with jax.checking_leaks()`` context manager.
.. note::
Note that this tool is experimental and may report false positives. It
works by disabling some JAX caches, so it will have a negative effect on
performance and should only be used when debugging.
Example usage::
>>> from jax import jit
>>> import jax.numpy as jnp
>>> outs = []
>>> @jit
... def side_effecting(x):
... y = x+1
... outs.append(y)
>>> x = 1
>>> with jax.checking_leaks():
... y = side_effecting(x) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Exception: Leaked Trace
"""
def __init__(self, msg: str):
super().__init__(msg)
| 36.499121 | 111 | 0.658995 |
from jax import core
class _JAXErrorMixin:
_error_page = 'https://jax.readthedocs.io/en/latest/errors.html'
_module_name = "jax.errors"
def __init__(self, message: str):
error_page = self._error_page
module_name = self._module_name
class_name = self.__class__.__name__
error_msg = f'{message}\nSee {error_page}#{module_name}.{class_name}'
super().__init__(error_msg)
class JAXTypeError(_JAXErrorMixin, TypeError):
pass
class JAXIndexError(_JAXErrorMixin, IndexError):
pass
class ConcretizationTypeError(JAXTypeError):
def __init__(self, tracer: "core.Tracer", context: str = ""):
super().__init__(
"Abstract tracer value encountered where concrete value is expected: "
f"{tracer}\n{context}{tracer._origin_msg()}\n")
class NonConcreteBooleanIndexError(JAXIndexError):
def __init__(self, tracer: "core.Tracer"):
super().__init__(
f"Array boolean indices must be concrete; got {tracer}\n")
class TracerArrayConversionError(JAXTypeError):
def __init__(self, tracer: "core.Tracer"):
super().__init__(
"The numpy.ndarray conversion method __array__() was called on "
f"the JAX Tracer object {tracer}{tracer._origin_msg()}")
class TracerIntegerConversionError(JAXTypeError):
def __init__(self, tracer: "core.Tracer"):
super().__init__(
f"The __index__() method was called on the JAX Tracer object {tracer}")
class UnexpectedTracerError(JAXTypeError):
def __init__(self, msg: str):
super().__init__(msg)
| true | true |
f7271e85642896049a5ab911d13a4ad8df8ec1de | 14,429 | py | Python | PaddleNLP/emotion_detection/run_classifier.py | FrancisLiang/models-1 | e14d5bc1ab36d0dd11977f27cff54605bf99c945 | [
"Apache-2.0"
] | 1 | 2022-02-08T06:00:29.000Z | 2022-02-08T06:00:29.000Z | PaddleNLP/emotion_detection/run_classifier.py | FrancisLiang/models-1 | e14d5bc1ab36d0dd11977f27cff54605bf99c945 | [
"Apache-2.0"
] | null | null | null | PaddleNLP/emotion_detection/run_classifier.py | FrancisLiang/models-1 | e14d5bc1ab36d0dd11977f27cff54605bf99c945 | [
"Apache-2.0"
] | 2 | 2019-05-06T12:10:15.000Z | 2019-09-01T04:28:10.000Z | """
Emotion Detection Task
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import argparse
import multiprocessing
import sys
sys.path.append("../")
import paddle
import paddle.fluid as fluid
import numpy as np
from models.classification import nets
import reader
import config
import utils
parser = argparse.ArgumentParser(__doc__)
model_g = utils.ArgumentGroup(parser, "model", "model configuration and paths.")
model_g.add_arg("config_path", str, None, "Path to the json file for EmoTect model config.")
model_g.add_arg("init_checkpoint", str, None, "Init checkpoint to resume training from.")
model_g.add_arg("output_dir", str, None, "Directory path to save checkpoints")
train_g = utils.ArgumentGroup(parser, "training", "training options.")
train_g.add_arg("epoch", int, 10, "Number of epoches for training.")
train_g.add_arg("save_steps", int, 10000, "The steps interval to save checkpoints.")
train_g.add_arg("validation_steps", int, 1000, "The steps interval to evaluate model performance.")
train_g.add_arg("lr", float, 0.002, "The Learning rate value for training.")
log_g = utils.ArgumentGroup(parser, "logging", "logging related")
log_g.add_arg("skip_steps", int, 10, "The steps interval to print loss.")
log_g.add_arg("verbose", bool, False, "Whether to output verbose log")
data_g = utils.ArgumentGroup(parser, "data", "Data paths, vocab paths and data processing options")
data_g.add_arg("data_dir", str, None, "Directory path to training data.")
data_g.add_arg("vocab_path", str, None, "Vocabulary path.")
data_g.add_arg("batch_size", int, 256, "Total examples' number in batch for training.")
data_g.add_arg("random_seed", int, 0, "Random seed.")
run_type_g = utils.ArgumentGroup(parser, "run_type", "running type options.")
run_type_g.add_arg("use_cuda", bool, False, "If set, use GPU for training.")
run_type_g.add_arg("task_name", str, None, "The name of task to perform sentiment classification.")
run_type_g.add_arg("do_train", bool, False, "Whether to perform training.")
run_type_g.add_arg("do_val", bool, False, "Whether to perform evaluation.")
run_type_g.add_arg("do_infer", bool, False, "Whether to perform inference.")
parser.add_argument('--enable_ce', action='store_true', help='If set, run the task with continuous evaluation logs.')
args = parser.parse_args()
def create_model(args,
pyreader_name,
emotect_config,
num_labels,
is_infer=False):
"""
Create Model for sentiment classification
"""
if is_infer:
pyreader = fluid.layers.py_reader(
capacity=16,
shapes=[[-1, 1]],
dtypes=['int64'],
lod_levels=[1],
name=pyreader_name,
use_double_buffer=False)
else:
pyreader = fluid.layers.py_reader(
capacity=16,
shapes=([-1, 1], [-1, 1]),
dtypes=('int64', 'int64'),
lod_levels=(1, 0),
name=pyreader_name,
use_double_buffer=False)
if emotect_config['model_type'] == "cnn_net":
network = nets.cnn_net
elif emotect_config['model_type'] == "bow_net":
network = nets.bow_net
elif emotect_config['model_type'] == "lstm_net":
network = nets.lstm_net
elif emotect_config['model_type'] == "bilstm_net":
network = nets.bilstm_net
elif emotect_config['model_type'] == "gru_net":
network = nets.gru_net
elif emotect_config['model_type'] == "textcnn_net":
network = nets.textcnn_net
else:
raise ValueError("Unknown network type!")
if is_infer:
data = fluid.layers.read_file(pyreader)
probs = network(data, None, emotect_config["vocab_size"], class_dim=num_labels, is_infer=True)
return pyreader, probs
data, label = fluid.layers.read_file(pyreader)
avg_loss, probs = network(data, label, emotect_config["vocab_size"], class_dim=num_labels)
num_seqs = fluid.layers.create_tensor(dtype='int64')
accuracy = fluid.layers.accuracy(input=probs, label=label, total=num_seqs)
return pyreader, avg_loss, accuracy, num_seqs
def evaluate(exe, test_program, test_pyreader, fetch_list, eval_phase):
"""
Evaluation Function
"""
test_pyreader.start()
total_cost, total_acc, total_num_seqs = [], [], []
time_begin = time.time()
while True:
try:
np_loss, np_acc, np_num_seqs = exe.run(program=test_program,
fetch_list=fetch_list,
return_numpy=False)
np_loss = np.array(np_loss)
np_acc = np.array(np_acc)
np_num_seqs = np.array(np_num_seqs)
total_cost.extend(np_loss * np_num_seqs)
total_acc.extend(np_acc * np_num_seqs)
total_num_seqs.extend(np_num_seqs)
except fluid.core.EOFException:
test_pyreader.reset()
break
time_end = time.time()
print("[%s evaluation] avg loss: %f, avg acc: %f, elapsed time: %f s" %
(eval_phase, np.sum(total_cost) / np.sum(total_num_seqs),
np.sum(total_acc) / np.sum(total_num_seqs), time_end - time_begin))
def infer(exe, infer_program, infer_pyreader, fetch_list, infer_phase):
infer_pyreader.start()
time_begin = time.time()
while True:
try:
batch_probs = exe.run(program=infer_program,
fetch_list=fetch_list,
return_numpy=True)
for probs in batch_probs[0]:
print("%d\t%f\t%f\t%f" % (np.argmax(probs), probs[0], probs[1], probs[2]))
except fluid.core.EOFException as e:
infer_pyreader.reset()
break
time_end = time.time()
print("[%s] elapsed time: %f s" % (infer_phase, time_end - time_begin))
def main(args):
"""
Main Function
"""
emotect_config = config.EmoTectConfig(args.config_path)
if args.use_cuda:
place = fluid.CUDAPlace(int(os.getenv('FLAGS_selected_gpus', '0')))
else:
place = fluid.CPUPlace()
exe = fluid.Executor(place)
task_name = args.task_name.lower()
processor = reader.EmoTectProcessor(data_dir=args.data_dir,
vocab_path=args.vocab_path,
random_seed=args.random_seed)
num_labels = len(processor.get_labels())
if not (args.do_train or args.do_val or args.do_infer):
raise ValueError("For args `do_train`, `do_val` and `do_infer`, at "
"least one of them must be True.")
startup_prog = fluid.Program()
if args.random_seed is not None:
startup_prog.random_seed = args.random_seed
if args.do_train:
train_data_generator = processor.data_generator(
batch_size=args.batch_size,
phase='train',
epoch=args.epoch)
num_train_examples = processor.get_num_examples(phase="train")
max_train_steps = args.epoch * num_train_examples // args.batch_size + 1
print("Num train examples: %d" % num_train_examples)
print("Max train steps: %d" % max_train_steps)
train_program = fluid.Program()
if args.random_seed is not None:
train_program.random_seed = args.random_seed
with fluid.program_guard(train_program, startup_prog):
with fluid.unique_name.guard():
train_pyreader, loss, accuracy, num_seqs = create_model(
args,
pyreader_name='train_reader',
emotect_config=emotect_config,
num_labels=num_labels,
is_infer=False)
sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=args.lr)
sgd_optimizer.minimize(loss)
if args.verbose:
lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
program=train_program, batch_size=args.batch_size)
print("Theoretical memory usage in training: %.3f - %.3f %s" %
(lower_mem, upper_mem, unit))
if args.do_val:
test_prog = fluid.Program()
with fluid.program_guard(test_prog, startup_prog):
with fluid.unique_name.guard():
test_pyreader, loss, accuracy, num_seqs = create_model(
args,
pyreader_name='test_reader',
emotect_config=emotect_config,
num_labels=num_labels,
is_infer=False)
test_prog = test_prog.clone(for_test=True)
if args.do_infer:
test_prog = fluid.Program()
with fluid.program_guard(test_prog, startup_prog):
with fluid.unique_name.guard():
infer_pyreader, probs = create_model(
args,
pyreader_name='infer_reader',
emotect_config=emotect_config,
num_labels=num_labels,
is_infer=True)
test_prog = test_prog.clone(for_test=True)
exe.run(startup_prog)
if args.do_train:
if args.init_checkpoint:
utils.init_checkpoint(
exe,
args.init_checkpoint,
main_program=startup_prog)
elif args.do_val or args.do_infer:
if not args.init_checkpoint:
raise ValueError("args 'init_checkpoint' should be set if"
"only doing validation or infer!")
utils.init_checkpoint(
exe,
args.init_checkpoint,
main_program=test_prog)
if args.do_train:
train_exe = exe
train_pyreader.decorate_paddle_reader(train_data_generator)
else:
train_exe = None
if args.do_val or args.do_infer:
test_exe = exe
if args.do_train:
train_pyreader.start()
steps = 0
total_cost, total_acc, total_num_seqs = [], [], []
time_begin = time.time()
ce_info = []
while True:
try:
steps += 1
if steps % args.skip_steps == 0:
fetch_list = [loss.name, accuracy.name, num_seqs.name]
else:
fetch_list = []
outputs = train_exe.run(program=train_program,
fetch_list=fetch_list,
return_numpy=False)
if steps % args.skip_steps == 0:
np_loss, np_acc, np_num_seqs = outputs
np_loss = np.array(np_loss)
np_acc = np.array(np_acc)
np_num_seqs = np.array(np_num_seqs)
total_cost.extend(np_loss * np_num_seqs)
total_acc.extend(np_acc * np_num_seqs)
total_num_seqs.extend(np_num_seqs)
if args.verbose:
verbose = "train pyreader queue size: %d, " % train_pyreader.queue.size()
print(verbose)
time_end = time.time()
used_time = time_end - time_begin
print("step: %d, avg loss: %f, "
"avg acc: %f, speed: %f steps/s" %
(steps, np.sum(total_cost) / np.sum(total_num_seqs),
np.sum(total_acc) / np.sum(total_num_seqs),
args.skip_steps / used_time))
ce_info.append([np.sum(total_cost) / np.sum(total_num_seqs), np.sum(total_acc) / np.sum(total_num_seqs), used_time])
total_cost, total_acc, total_num_seqs = [], [], []
time_begin = time.time()
if steps % args.save_steps == 0:
save_path = os.path.join(args.output_dir, "step_" + str(steps))
fluid.io.save_persistables(exe, save_path, train_program)
if steps % args.validation_steps == 0:
# evaluate on dev set
if args.do_val:
test_pyreader.decorate_paddle_reader(
processor.data_generator(
batch_size=args.batch_size,
phase='dev',
epoch=1))
evaluate(test_exe, test_prog, test_pyreader,
[loss.name, accuracy.name, num_seqs.name],
"dev")
except fluid.core.EOFException:
save_path = os.path.join(args.output_dir, "step_" + str(steps))
fluid.io.save_persistables(exe, save_path, train_program)
train_pyreader.reset()
break
if args.do_train and args.enable_ce:
card_num = get_cards()
ce_loss = 0
ce_acc = 0
ce_time = 0
try:
ce_loss = ce_info[-2][0]
ce_acc = ce_info[-2][1]
ce_time = ce_info[-2][2]
except:
print("ce info error")
print("kpis\teach_step_duration_%s_card%s\t%s" %
(task_name, card_num, ce_time))
print("kpis\ttrain_loss_%s_card%s\t%f" %
(task_name, card_num, ce_loss))
print("kpis\ttrain_acc_%s_card%s\t%f" %
(task_name, card_num, ce_acc))
# evaluate on test set
if not args.do_train and args.do_val:
test_pyreader.decorate_paddle_reader(
processor.data_generator(
batch_size=args.batch_size,
phase='test',
epoch=1))
print("Final test result:")
evaluate(test_exe, test_prog, test_pyreader,
[loss.name, accuracy.name, num_seqs.name],
"test")
# infer
if args.do_infer:
infer_pyreader.decorate_paddle_reader(
processor.data_generator(
batch_size=args.batch_size,
phase='infer',
epoch=1))
infer(test_exe, test_prog, infer_pyreader,
[probs.name], "infer")
def get_cards():
num = 0
cards = os.environ.get('CUDA_VISIBLE_DEVICES', '')
if cards != '':
num = len(cards.split(","))
return num
if __name__ == "__main__":
utils.print_arguments(args)
main(args)
| 38.171958 | 136 | 0.587012 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import argparse
import multiprocessing
import sys
sys.path.append("../")
import paddle
import paddle.fluid as fluid
import numpy as np
from models.classification import nets
import reader
import config
import utils
parser = argparse.ArgumentParser(__doc__)
model_g = utils.ArgumentGroup(parser, "model", "model configuration and paths.")
model_g.add_arg("config_path", str, None, "Path to the json file for EmoTect model config.")
model_g.add_arg("init_checkpoint", str, None, "Init checkpoint to resume training from.")
model_g.add_arg("output_dir", str, None, "Directory path to save checkpoints")
train_g = utils.ArgumentGroup(parser, "training", "training options.")
train_g.add_arg("epoch", int, 10, "Number of epoches for training.")
train_g.add_arg("save_steps", int, 10000, "The steps interval to save checkpoints.")
train_g.add_arg("validation_steps", int, 1000, "The steps interval to evaluate model performance.")
train_g.add_arg("lr", float, 0.002, "The Learning rate value for training.")
log_g = utils.ArgumentGroup(parser, "logging", "logging related")
log_g.add_arg("skip_steps", int, 10, "The steps interval to print loss.")
log_g.add_arg("verbose", bool, False, "Whether to output verbose log")
data_g = utils.ArgumentGroup(parser, "data", "Data paths, vocab paths and data processing options")
data_g.add_arg("data_dir", str, None, "Directory path to training data.")
data_g.add_arg("vocab_path", str, None, "Vocabulary path.")
data_g.add_arg("batch_size", int, 256, "Total examples' number in batch for training.")
data_g.add_arg("random_seed", int, 0, "Random seed.")
run_type_g = utils.ArgumentGroup(parser, "run_type", "running type options.")
run_type_g.add_arg("use_cuda", bool, False, "If set, use GPU for training.")
run_type_g.add_arg("task_name", str, None, "The name of task to perform sentiment classification.")
run_type_g.add_arg("do_train", bool, False, "Whether to perform training.")
run_type_g.add_arg("do_val", bool, False, "Whether to perform evaluation.")
run_type_g.add_arg("do_infer", bool, False, "Whether to perform inference.")
parser.add_argument('--enable_ce', action='store_true', help='If set, run the task with continuous evaluation logs.')
args = parser.parse_args()
def create_model(args,
pyreader_name,
emotect_config,
num_labels,
is_infer=False):
if is_infer:
pyreader = fluid.layers.py_reader(
capacity=16,
shapes=[[-1, 1]],
dtypes=['int64'],
lod_levels=[1],
name=pyreader_name,
use_double_buffer=False)
else:
pyreader = fluid.layers.py_reader(
capacity=16,
shapes=([-1, 1], [-1, 1]),
dtypes=('int64', 'int64'),
lod_levels=(1, 0),
name=pyreader_name,
use_double_buffer=False)
if emotect_config['model_type'] == "cnn_net":
network = nets.cnn_net
elif emotect_config['model_type'] == "bow_net":
network = nets.bow_net
elif emotect_config['model_type'] == "lstm_net":
network = nets.lstm_net
elif emotect_config['model_type'] == "bilstm_net":
network = nets.bilstm_net
elif emotect_config['model_type'] == "gru_net":
network = nets.gru_net
elif emotect_config['model_type'] == "textcnn_net":
network = nets.textcnn_net
else:
raise ValueError("Unknown network type!")
if is_infer:
data = fluid.layers.read_file(pyreader)
probs = network(data, None, emotect_config["vocab_size"], class_dim=num_labels, is_infer=True)
return pyreader, probs
data, label = fluid.layers.read_file(pyreader)
avg_loss, probs = network(data, label, emotect_config["vocab_size"], class_dim=num_labels)
num_seqs = fluid.layers.create_tensor(dtype='int64')
accuracy = fluid.layers.accuracy(input=probs, label=label, total=num_seqs)
return pyreader, avg_loss, accuracy, num_seqs
def evaluate(exe, test_program, test_pyreader, fetch_list, eval_phase):
test_pyreader.start()
total_cost, total_acc, total_num_seqs = [], [], []
time_begin = time.time()
while True:
try:
np_loss, np_acc, np_num_seqs = exe.run(program=test_program,
fetch_list=fetch_list,
return_numpy=False)
np_loss = np.array(np_loss)
np_acc = np.array(np_acc)
np_num_seqs = np.array(np_num_seqs)
total_cost.extend(np_loss * np_num_seqs)
total_acc.extend(np_acc * np_num_seqs)
total_num_seqs.extend(np_num_seqs)
except fluid.core.EOFException:
test_pyreader.reset()
break
time_end = time.time()
print("[%s evaluation] avg loss: %f, avg acc: %f, elapsed time: %f s" %
(eval_phase, np.sum(total_cost) / np.sum(total_num_seqs),
np.sum(total_acc) / np.sum(total_num_seqs), time_end - time_begin))
def infer(exe, infer_program, infer_pyreader, fetch_list, infer_phase):
infer_pyreader.start()
time_begin = time.time()
while True:
try:
batch_probs = exe.run(program=infer_program,
fetch_list=fetch_list,
return_numpy=True)
for probs in batch_probs[0]:
print("%d\t%f\t%f\t%f" % (np.argmax(probs), probs[0], probs[1], probs[2]))
except fluid.core.EOFException as e:
infer_pyreader.reset()
break
time_end = time.time()
print("[%s] elapsed time: %f s" % (infer_phase, time_end - time_begin))
def main(args):
emotect_config = config.EmoTectConfig(args.config_path)
if args.use_cuda:
place = fluid.CUDAPlace(int(os.getenv('FLAGS_selected_gpus', '0')))
else:
place = fluid.CPUPlace()
exe = fluid.Executor(place)
task_name = args.task_name.lower()
processor = reader.EmoTectProcessor(data_dir=args.data_dir,
vocab_path=args.vocab_path,
random_seed=args.random_seed)
num_labels = len(processor.get_labels())
if not (args.do_train or args.do_val or args.do_infer):
raise ValueError("For args `do_train`, `do_val` and `do_infer`, at "
"least one of them must be True.")
startup_prog = fluid.Program()
if args.random_seed is not None:
startup_prog.random_seed = args.random_seed
if args.do_train:
train_data_generator = processor.data_generator(
batch_size=args.batch_size,
phase='train',
epoch=args.epoch)
num_train_examples = processor.get_num_examples(phase="train")
max_train_steps = args.epoch * num_train_examples // args.batch_size + 1
print("Num train examples: %d" % num_train_examples)
print("Max train steps: %d" % max_train_steps)
train_program = fluid.Program()
if args.random_seed is not None:
train_program.random_seed = args.random_seed
with fluid.program_guard(train_program, startup_prog):
with fluid.unique_name.guard():
train_pyreader, loss, accuracy, num_seqs = create_model(
args,
pyreader_name='train_reader',
emotect_config=emotect_config,
num_labels=num_labels,
is_infer=False)
sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=args.lr)
sgd_optimizer.minimize(loss)
if args.verbose:
lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
program=train_program, batch_size=args.batch_size)
print("Theoretical memory usage in training: %.3f - %.3f %s" %
(lower_mem, upper_mem, unit))
if args.do_val:
test_prog = fluid.Program()
with fluid.program_guard(test_prog, startup_prog):
with fluid.unique_name.guard():
test_pyreader, loss, accuracy, num_seqs = create_model(
args,
pyreader_name='test_reader',
emotect_config=emotect_config,
num_labels=num_labels,
is_infer=False)
test_prog = test_prog.clone(for_test=True)
if args.do_infer:
test_prog = fluid.Program()
with fluid.program_guard(test_prog, startup_prog):
with fluid.unique_name.guard():
infer_pyreader, probs = create_model(
args,
pyreader_name='infer_reader',
emotect_config=emotect_config,
num_labels=num_labels,
is_infer=True)
test_prog = test_prog.clone(for_test=True)
exe.run(startup_prog)
if args.do_train:
if args.init_checkpoint:
utils.init_checkpoint(
exe,
args.init_checkpoint,
main_program=startup_prog)
elif args.do_val or args.do_infer:
if not args.init_checkpoint:
raise ValueError("args 'init_checkpoint' should be set if"
"only doing validation or infer!")
utils.init_checkpoint(
exe,
args.init_checkpoint,
main_program=test_prog)
if args.do_train:
train_exe = exe
train_pyreader.decorate_paddle_reader(train_data_generator)
else:
train_exe = None
if args.do_val or args.do_infer:
test_exe = exe
if args.do_train:
train_pyreader.start()
steps = 0
total_cost, total_acc, total_num_seqs = [], [], []
time_begin = time.time()
ce_info = []
while True:
try:
steps += 1
if steps % args.skip_steps == 0:
fetch_list = [loss.name, accuracy.name, num_seqs.name]
else:
fetch_list = []
outputs = train_exe.run(program=train_program,
fetch_list=fetch_list,
return_numpy=False)
if steps % args.skip_steps == 0:
np_loss, np_acc, np_num_seqs = outputs
np_loss = np.array(np_loss)
np_acc = np.array(np_acc)
np_num_seqs = np.array(np_num_seqs)
total_cost.extend(np_loss * np_num_seqs)
total_acc.extend(np_acc * np_num_seqs)
total_num_seqs.extend(np_num_seqs)
if args.verbose:
verbose = "train pyreader queue size: %d, " % train_pyreader.queue.size()
print(verbose)
time_end = time.time()
used_time = time_end - time_begin
print("step: %d, avg loss: %f, "
"avg acc: %f, speed: %f steps/s" %
(steps, np.sum(total_cost) / np.sum(total_num_seqs),
np.sum(total_acc) / np.sum(total_num_seqs),
args.skip_steps / used_time))
ce_info.append([np.sum(total_cost) / np.sum(total_num_seqs), np.sum(total_acc) / np.sum(total_num_seqs), used_time])
total_cost, total_acc, total_num_seqs = [], [], []
time_begin = time.time()
if steps % args.save_steps == 0:
save_path = os.path.join(args.output_dir, "step_" + str(steps))
fluid.io.save_persistables(exe, save_path, train_program)
if steps % args.validation_steps == 0:
# evaluate on dev set
if args.do_val:
test_pyreader.decorate_paddle_reader(
processor.data_generator(
batch_size=args.batch_size,
phase='dev',
epoch=1))
evaluate(test_exe, test_prog, test_pyreader,
[loss.name, accuracy.name, num_seqs.name],
"dev")
except fluid.core.EOFException:
save_path = os.path.join(args.output_dir, "step_" + str(steps))
fluid.io.save_persistables(exe, save_path, train_program)
train_pyreader.reset()
break
if args.do_train and args.enable_ce:
card_num = get_cards()
ce_loss = 0
ce_acc = 0
ce_time = 0
try:
ce_loss = ce_info[-2][0]
ce_acc = ce_info[-2][1]
ce_time = ce_info[-2][2]
except:
print("ce info error")
print("kpis\teach_step_duration_%s_card%s\t%s" %
(task_name, card_num, ce_time))
print("kpis\ttrain_loss_%s_card%s\t%f" %
(task_name, card_num, ce_loss))
print("kpis\ttrain_acc_%s_card%s\t%f" %
(task_name, card_num, ce_acc))
# evaluate on test set
if not args.do_train and args.do_val:
test_pyreader.decorate_paddle_reader(
processor.data_generator(
batch_size=args.batch_size,
phase='test',
epoch=1))
print("Final test result:")
evaluate(test_exe, test_prog, test_pyreader,
[loss.name, accuracy.name, num_seqs.name],
"test")
# infer
if args.do_infer:
infer_pyreader.decorate_paddle_reader(
processor.data_generator(
batch_size=args.batch_size,
phase='infer',
epoch=1))
infer(test_exe, test_prog, infer_pyreader,
[probs.name], "infer")
def get_cards():
num = 0
cards = os.environ.get('CUDA_VISIBLE_DEVICES', '')
if cards != '':
num = len(cards.split(","))
return num
if __name__ == "__main__":
utils.print_arguments(args)
main(args)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.